mirror of https://github.com/odrling/Aegisub
FFMS2:
Rename lots of things; THIS BREAKS THE AEGISUB BUILD because of changed exported type and function names. Fixed an uninitialized-memory bug that made it crash on unindexed audio tracks in Matroska files. Made ffms.h more C-friendly. The start time of an audio track is now exported in the audio properties. Fewer signedness and type-conversion warnings. Originally committed to SVN as r2940.
This commit is contained in:
parent 6a999030f6
commit 80a209e95a
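Aside from the build break, the renames are mostly mechanical at call sites: FrameIndex becomes FFIndex, VideoBase/AudioBase become FFVideo/FFAudio, and the property and frame structs gain a T prefix. As a rough illustration (not part of the commit itself), client code against the renamed API might look like the sketch below; the function name, track handling and parameter choices are placeholders, and only the FFMS_* names and signatures are taken from this diff.

// Minimal sketch of a caller using the renamed API, modelled on the test
// program updated in this commit; error handling is trimmed.
#include "ffms.h"

int DumpAllFrames(const char *SourceFile, int Track) {
	char ErrorMsg[1024];
	FFMS_Init();

	// Index the file: no audio dumping, ignore decode errors, no progress callback.
	FFIndex *Index = FFMS_MakeIndex(SourceFile, -1, 0, NULL, true, NULL, NULL, ErrorMsg, sizeof(ErrorMsg));
	if (!Index)
		return 1;

	FFVideo *V = FFMS_CreateVideoSource(SourceFile, Track, Index, "", 1, 1, ErrorMsg, sizeof(ErrorMsg));
	FFMS_DestroyFFIndex(Index); // the video source copies the track data it needs
	if (!V)
		return 2;

	const TVideoProperties *VP = FFMS_GetTVideoProperties(V);
	for (int n = 0; n < VP->NumFrames; n++) {
		const TAVFrameLite *Frame = FFMS_GetFrame(V, n, ErrorMsg, sizeof(ErrorMsg));
		if (!Frame)
			break; // Frame->Data[] and Frame->Linesize[] hold the decoded picture
	}

	FFMS_DestroyVideoSource(V);
	return 0;
}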
@@ -25,7 +25,7 @@
#define _snprintf snprintf
#endif

TAudioBlock::TAudioBlock(int64_t Start, int64_t Samples, uint8_t *SrcData, int64_t SrcBytes) {
TAudioBlock::TAudioBlock(int64_t Start, int64_t Samples, uint8_t *SrcData, size_t SrcBytes) {
this->Start = Start;
this->Samples = Samples;
Data = new uint8_t[SrcBytes];

@@ -96,16 +96,16 @@ int64_t TAudioCache::FillRequest(int64_t Start, int64_t Samples, uint8_t *Dst) {
return FFMIN(Ret, Start + Samples);
}

AudioBase::AudioBase() {
FFAudio::FFAudio() {
CurrentSample = 0;
DecodingBuffer = new uint8_t[AVCODEC_MAX_AUDIO_FRAME_SIZE * 10];
};

AudioBase::~AudioBase() {
FFAudio::~FFAudio() {
delete[] DecodingBuffer;
};

size_t AudioBase::FindClosestAudioKeyFrame(int64_t Sample) {
size_t FFAudio::FindClosestAudioKeyFrame(int64_t Sample) {
for (size_t i = 0; i < Frames.size(); i++) {
if (Frames[i].SampleStart == Sample && Frames[i].KeyFrame)
return i;

@@ -121,15 +121,15 @@ void FFAudioSource::Free(bool CloseCodec) {
av_close_input_file(FormatContext);
}

FFAudioSource::FFAudioSource(const char *SourceFile, int Track, FrameIndex *TrackIndices, char *ErrorMsg, unsigned MsgSize) {
FFAudioSource::FFAudioSource(const char *SourceFile, int Track, FFIndex *Index, char *ErrorMsg, unsigned MsgSize) {
FormatContext = NULL;
AVCodec *Codec = NULL;
AudioTrack = Track;
Frames = (*TrackIndices)[AudioTrack];
Frames = (*Index)[AudioTrack];

if (Frames.size() == 0) {
Free(false);
_snprintf(ErrorMsg, MsgSize, "Audio track contains no frames");
_snprintf(ErrorMsg, MsgSize, "Audio track contains no frames, was it indexed properly?");
throw ErrorMsg;
}

@@ -168,12 +168,7 @@ FFAudioSource::FFAudioSource(const char *SourceFile, int Track, FrameIndex *Trac
av_seek_frame(FormatContext, AudioTrack, Frames[0].DTS, AVSEEK_FLAG_BACKWARD);
avcodec_flush_buffers(CodecContext);

AP.BitsPerSample = av_get_bits_per_sample_format(CodecContext->sample_fmt);
AP.Channels = CodecContext->channels;;
AP.Float = AudioFMTIsFloat(CodecContext->sample_fmt);
AP.SampleRate = CodecContext->sample_rate;
AP.NumSamples = (Frames.back()).SampleStart;
FillAP(AP, CodecContext, Frames);

if (AP.SampleRate <= 0 || AP.BitsPerSample <= 0) {
Free(true);

@@ -318,16 +313,16 @@ void MatroskaAudioSource::Free(bool CloseCodec) {
av_free(CodecContext);
}

MatroskaAudioSource::MatroskaAudioSource(const char *SourceFile, int Track, FrameIndex *TrackIndices, char *ErrorMsg, unsigned MsgSize) {
MatroskaAudioSource::MatroskaAudioSource(const char *SourceFile, int Track, FFIndex *Index, char *ErrorMsg, unsigned MsgSize) {
CodecContext = NULL;
AVCodec *Codec = NULL;
TrackInfo *TI = NULL;
CS = NULL;
Frames = (*TrackIndices)[Track];
Frames = (*Index)[Track];

if (Frames.size() == 0) {
Free(false);
_snprintf(ErrorMsg, MsgSize, "Audio track contains no frames");
_snprintf(ErrorMsg, MsgSize, "Audio track contains no frames, was it indexed properly?");
throw ErrorMsg;
}

@@ -383,11 +378,7 @@ MatroskaAudioSource::MatroskaAudioSource(const char *SourceFile, int Track, Fram
}
avcodec_flush_buffers(CodecContext);

AP.BitsPerSample = av_get_bits_per_sample_format(CodecContext->sample_fmt);
AP.Channels = CodecContext->channels;;
AP.Float = AudioFMTIsFloat(CodecContext->sample_fmt);
AP.SampleRate = CodecContext->sample_rate;
AP.NumSamples = (Frames.back()).SampleStart;
FillAP(AP, CodecContext, Frames);

if (AP.SampleRate <= 0 || AP.BitsPerSample <= 0) {
Free(true);
@@ -50,7 +50,7 @@ public:
int64_t Samples;
uint8_t *Data;

TAudioBlock(int64_t Start, int64_t Samples, uint8_t *SrcData, int64_t SrcBytes);
TAudioBlock(int64_t Start, int64_t Samples, uint8_t *SrcData, size_t SrcBytes);
~TAudioBlock();
};

@@ -67,25 +67,25 @@ public:
int64_t FillRequest(int64_t Start, int64_t Samples, uint8_t *Dst);
};

class AudioBase {
struct FFAudio {
protected:
TAudioCache AudioCache;
int64_t CurrentSample;
uint8_t *DecodingBuffer;
FrameInfoVector Frames;
FFTrack Frames;
AVCodecContext *CodecContext;
AudioProperties AP;
TAudioProperties AP;

size_t FindClosestAudioKeyFrame(int64_t Sample);
public:
AudioBase();
~AudioBase();
FFAudio();
~FFAudio();

const AudioProperties& GetAudioProperties() { return AP; }
const TAudioProperties& GetTAudioProperties() { return AP; }
virtual int GetAudio(void *Buf, int64_t Start, int64_t Count, char *ErrorMsg, unsigned MsgSize) = 0;
};

class FFAudioSource : public AudioBase {
class FFAudioSource : public FFAudio {
private:
AVFormatContext *FormatContext;
int AudioTrack;

@@ -93,13 +93,13 @@ private:
int DecodeNextAudioBlock(uint8_t *Buf, int64_t *Count, char *ErrorMsg, unsigned MsgSize);
void Free(bool CloseCodec);
public:
FFAudioSource(const char *SourceFile, int Track, FrameIndex *TrackIndices, char *ErrorMsg, unsigned MsgSize);
FFAudioSource(const char *SourceFile, int Track, FFIndex *Index, char *ErrorMsg, unsigned MsgSize);
~FFAudioSource();

int GetAudio(void *Buf, int64_t Start, int64_t Count, char *ErrorMsg, unsigned MsgSize);
};

class MatroskaAudioSource : public AudioBase {
class MatroskaAudioSource : public FFAudio {
private:
MatroskaFile *MF;
MatroskaReaderContext MC;

@@ -109,7 +109,7 @@ private:
int DecodeNextAudioBlock(uint8_t *Buf, int64_t *Count, uint64_t FilePos, unsigned int FrameSize, char *ErrorMsg, unsigned MsgSize);
void Free(bool CloseCodec);
public:
MatroskaAudioSource(const char *SourceFile, int Track, FrameIndex *TrackIndices, char *ErrorMsg, unsigned MsgSize);
MatroskaAudioSource(const char *SourceFile, int Track, FFIndex *Index, char *ErrorMsg, unsigned MsgSize);
~MatroskaAudioSource();

int GetAudio(void *Buf, int64_t Start, int64_t Count, char *ErrorMsg, unsigned MsgSize);

@@ -117,14 +117,14 @@ public:

#ifdef HAALISOURCE

class HaaliAudioSource : public AudioBase {
class HaaliAudioSource : public FFAudio {
private:
CComPtr<IMMContainer> pMMC;

int DecodeNextAudioBlock(uint8_t *Buf, int64_t *Count, char *ErrorMsg, unsigned MsgSize);
void Free(bool CloseCodec);
public:
HaaliAudioSource(const char *SourceFile, int Track, FrameIndex *TrackIndices, char *ErrorMsg, unsigned MsgSize);
HaaliAudioSource(const char *SourceFile, int Track, FFIndex *Index, char *ErrorMsg, unsigned MsgSize);
~HaaliAudioSource();

int GetAudio(void *Buf, int64_t Start, int64_t Count, char *ErrorMsg, unsigned MsgSize);
@@ -22,18 +22,18 @@
#include "ffavisynth.h"
#include "utils.h"

AvisynthVideoSource::AvisynthVideoSource(const char *SourceFile, int Track, FrameIndex *TrackIndices, int FPSNum, int FPSDen, const char *PP, int Threads, int SeekMode, IScriptEnvironment* Env, char *ErrorMsg, unsigned MsgSize) {
AvisynthVideoSource::AvisynthVideoSource(const char *SourceFile, int Track, FFIndex *Index, int FPSNum, int FPSDen, const char *PP, int Threads, int SeekMode, IScriptEnvironment* Env, char *ErrorMsg, unsigned MsgSize) {
memset(&VI, 0, sizeof(VI));
SWS = NULL;
ConvertToFormat = PIX_FMT_NONE;
this->FPSNum = FPSNum;
this->FPSDen = FPSDen;

VS = FFMS_CreateVideoSource(SourceFile, Track, TrackIndices, PP, Threads, SeekMode, ErrorMsg, MsgSize);
if (!VS)
V = FFMS_CreateVideoSource(SourceFile, Track, Index, PP, Threads, SeekMode, ErrorMsg, MsgSize);
if (!V)
Env->ThrowError(ErrorMsg);

const VideoProperties VP = *FFMS_GetVideoProperties(VS);
const TVideoProperties VP = *FFMS_GetTVideoProperties(V);

VI.image_type = VideoInfo::IT_TFF;
VI.width = VP.Width;

@@ -42,7 +42,7 @@ AvisynthVideoSource::AvisynthVideoSource(const char *SourceFile, int Track, Fram
if (FPSNum > 0 && FPSDen > 0) {
VI.fps_denominator = FPSDen;
VI.fps_numerator = FPSNum;
VI.num_frames = ceil(((VP.LastTime - VP.FirstTime) * FPSNum) / FPSDen);
VI.num_frames = static_cast<int>(ceil(((VP.LastTime - VP.FirstTime) * FPSNum) / FPSDen));
} else {
VI.fps_denominator = VP.FPSDenominator;
VI.fps_numerator = VP.FPSNumerator;

@@ -52,7 +52,7 @@ AvisynthVideoSource::AvisynthVideoSource(const char *SourceFile, int Track, Fram
try {
InitOutputFormat(static_cast<PixelFormat>(VP.VPixelFormat), Env);
} catch (AvisynthError &) {
FFMS_DestroyVideoSource(VS);
FFMS_DestroyVideoSource(V);
throw;
}

@@ -72,7 +72,7 @@ AvisynthVideoSource::AvisynthVideoSource(const char *SourceFile, int Track, Fram
AvisynthVideoSource::~AvisynthVideoSource() {
if (SWS)
sws_freeContext(SWS);
FFMS_DestroyVideoSource(VS);
FFMS_DestroyVideoSource(V);
}

void AvisynthVideoSource::InitOutputFormat(enum PixelFormat CurrentFormat, IScriptEnvironment *Env) {

@@ -104,9 +104,9 @@ void AvisynthVideoSource::InitOutputFormat(enum PixelFormat CurrentFormat, IScri
}
}

PVideoFrame AvisynthVideoSource::OutputFrame(const AVFrameLite *Frame, IScriptEnvironment *Env) {
PVideoFrame AvisynthVideoSource::OutputFrame(const TAVFrameLite *Frame, IScriptEnvironment *Env) {
// Yes, this function is overly complex and could probably be simplified
AVPicture *SrcPicture = reinterpret_cast<AVPicture *>(const_cast<AVFrameLite *>(Frame));
AVPicture *SrcPicture = reinterpret_cast<AVPicture *>(const_cast<TAVFrameLite *>(Frame));
PVideoFrame Dst = Env->NewVideoFrame(VI);

if (ConvertToFormat != PIX_FMT_NONE && VI.pixel_type == VideoInfo::CS_I420) {

@@ -140,12 +140,12 @@ PVideoFrame AvisynthVideoSource::OutputFrame(const AVFrameLite *Frame, IScriptEn
PVideoFrame AvisynthVideoSource::GetFrame(int n, IScriptEnvironment *Env) {
char ErrorMsg[1024];
unsigned MsgSize = sizeof(ErrorMsg);
const AVFrameLite *Frame;
const TAVFrameLite *Frame;

if (FPSNum > 0 && FPSDen > 0)
Frame = FFMS_GetFrameByTime(VS, FFMS_GetVideoProperties(VS)->FirstTime + (double)(n * (int64_t)FPSDen) / FPSNum, ErrorMsg, MsgSize);
Frame = FFMS_GetFrameByTime(V, FFMS_GetTVideoProperties(V)->FirstTime + (double)(n * (int64_t)FPSDen) / FPSNum, ErrorMsg, MsgSize);
else
Frame = FFMS_GetFrame(VS, n, ErrorMsg, MsgSize);
Frame = FFMS_GetFrame(V, n, ErrorMsg, MsgSize);

if (Frame == NULL)
Env->ThrowError("FFVideoSource: %s", ErrorMsg);

@@ -154,14 +154,14 @@ PVideoFrame AvisynthVideoSource::GetFrame(int n, IScriptEnvironment *Env) {
return OutputFrame(Frame, Env);
}

AvisynthAudioSource::AvisynthAudioSource(const char *SourceFile, int Track, FrameIndex *TrackIndices, IScriptEnvironment* Env, char *ErrorMsg, unsigned MsgSize) {
AvisynthAudioSource::AvisynthAudioSource(const char *SourceFile, int Track, FFIndex *Index, IScriptEnvironment* Env, char *ErrorMsg, unsigned MsgSize) {
memset(&VI, 0, sizeof(VI));

AS = FFMS_CreateAudioSource(SourceFile, Track, TrackIndices, ErrorMsg, MsgSize);
if (!AS)
A = FFMS_CreateAudioSource(SourceFile, Track, Index, ErrorMsg, MsgSize);
if (!A)
Env->ThrowError(ErrorMsg);

const AudioProperties AP = *FFMS_GetAudioProperties(AS);
const TAudioProperties AP = *FFMS_GetTAudioProperties(A);

VI.nchannels = AP.Channels;
VI.num_audio_samples = AP.NumSamples;

@@ -181,12 +181,12 @@ AvisynthAudioSource::AvisynthAudioSource(const char *SourceFile, int Track, Fram
}

AvisynthAudioSource::~AvisynthAudioSource() {
FFMS_DestroyAudioSource(AS);
FFMS_DestroyAudioSource(A);
}

void AvisynthAudioSource::GetAudio(void* Buf, __int64 Start, __int64 Count, IScriptEnvironment *Env) {
char ErrorMsg[1024];
unsigned MsgSize = sizeof(ErrorMsg);
if (FFMS_GetAudio(AS, Buf, Start, Count, ErrorMsg, MsgSize))
if (FFMS_GetAudio(A, Buf, Start, Count, ErrorMsg, MsgSize))
Env->ThrowError(ErrorMsg);
}
@@ -35,16 +35,16 @@ extern "C" {
class AvisynthVideoSource : public IClip {
private:
VideoInfo VI;
VideoBase *VS;
FFVideo *V;
SwsContext *SWS;
PixelFormat ConvertToFormat;
int FPSNum;
int FPSDen;

void InitOutputFormat(enum PixelFormat CurrentFormat, IScriptEnvironment *Env);
PVideoFrame OutputFrame(const AVFrameLite *SrcPicture, IScriptEnvironment *Env);
PVideoFrame OutputFrame(const TAVFrameLite *SrcPicture, IScriptEnvironment *Env);
public:
AvisynthVideoSource(const char *SourceFile, int Track, FrameIndex *TrackIndices, int FPSNum, int FPSDen, const char *PP, int Threads, int SeekMode, IScriptEnvironment* Env, char *ErrorMsg, unsigned MsgSize);
AvisynthVideoSource(const char *SourceFile, int Track, FFIndex *Index, int FPSNum, int FPSDen, const char *PP, int Threads, int SeekMode, IScriptEnvironment* Env, char *ErrorMsg, unsigned MsgSize);
~AvisynthVideoSource();
bool __stdcall GetParity(int n) { return false; }
void __stdcall SetCacheHints(int cachehints, int frame_range) { }

@@ -56,9 +56,9 @@ public:
class AvisynthAudioSource : public IClip {
private:
VideoInfo VI;
AudioBase *AS;
FFAudio *A;
public:
AvisynthAudioSource(const char *SourceFile, int Track, FrameIndex *TrackIndices, IScriptEnvironment* Env, char *ErrorMsg, unsigned MsgSize);
AvisynthAudioSource(const char *SourceFile, int Track, FFIndex *Index, IScriptEnvironment* Env, char *ErrorMsg, unsigned MsgSize);
~AvisynthAudioSource();
bool __stdcall GetParity(int n) { return false; }
void __stdcall SetCacheHints(int cachehints, int frame_range) { }
@@ -60,21 +60,21 @@ AVSValue __cdecl CreateFFIndex(AVSValue Args, void* UserData, IScriptEnvironment
// 1: Index generated
// 2: Index forced to be overwritten

FrameIndex *Index = NULL;
FFIndex *Index = NULL;
if (OverWrite || !(Index = FFMS_ReadIndex(CacheFile, ErrorMsg, MsgSize))) {
if (!(Index = FFMS_MakeIndex(Source, IndexMask, DumpMask, AudioFile, true, NULL, NULL, ErrorMsg, MsgSize)))
Env->ThrowError("FFIndex: %s", ErrorMsg);
if (FFMS_WriteIndex(CacheFile, Index, ErrorMsg, MsgSize)) {
FFMS_DestroyFrameIndex(Index);
FFMS_DestroyFFIndex(Index);
Env->ThrowError("FFIndex: %s", ErrorMsg);
}
FFMS_DestroyFrameIndex(Index);
FFMS_DestroyFFIndex(Index);
if (!OverWrite)
return AVSValue(1);
else
return AVSValue(2);
} else {
FFMS_DestroyFrameIndex(Index);
FFMS_DestroyFFIndex(Index);
return AVSValue(0);
}
}

@@ -118,7 +118,7 @@ AVSValue __cdecl CreateFFVideoSource(AVSValue Args, void* UserData, IScriptEnvir
if (!strcmp(CacheFile, ""))
CacheFile = DefaultCache.c_str();

FrameIndex *Index = NULL;
FFIndex *Index = NULL;
if (Cache)
Index = FFMS_ReadIndex(CacheFile, ErrorMsg, MsgSize);
if (!Index) {

@@ -127,7 +127,7 @@ AVSValue __cdecl CreateFFVideoSource(AVSValue Args, void* UserData, IScriptEnvir

if (Cache)
if (FFMS_WriteIndex(CacheFile, Index, ErrorMsg, MsgSize)) {
FFMS_DestroyFrameIndex(Index);
FFMS_DestroyFFIndex(Index);
Env->ThrowError("FFVideoSource: %s", ErrorMsg);
}
}

@@ -139,7 +139,7 @@ AVSValue __cdecl CreateFFVideoSource(AVSValue Args, void* UserData, IScriptEnvir

if (strcmp(Timecodes, "")) {
if (FFMS_WriteTimecodes(FFMS_GetTITrackIndex(Index, Track, ErrorMsg, MsgSize), Timecodes, ErrorMsg, MsgSize)) {
FFMS_DestroyFrameIndex(Index);
FFMS_DestroyFFIndex(Index);
Env->ThrowError("FFVideoSource: %s", ErrorMsg);
}
}

@@ -149,11 +149,11 @@ AVSValue __cdecl CreateFFVideoSource(AVSValue Args, void* UserData, IScriptEnvir
try {
Filter = new AvisynthVideoSource(Source, Track, Index, FPSNum, FPSDen, PP, Threads, SeekMode, Env, ErrorMsg, MsgSize);
} catch (...) {
FFMS_DestroyFrameIndex(Index);
FFMS_DestroyFFIndex(Index);
throw;
}

FFMS_DestroyFrameIndex(Index);
FFMS_DestroyFFIndex(Index);
return Filter;
}

@@ -179,7 +179,7 @@ AVSValue __cdecl CreateFFAudioSource(AVSValue Args, void* UserData, IScriptEnvir
if (!strcmp(CacheFile, ""))
CacheFile = DefaultCache.c_str();

FrameIndex *Index = NULL;
FFIndex *Index = NULL;
if (Cache)
Index = FFMS_ReadIndex(CacheFile, ErrorMsg, MsgSize);
if (!Index) {

@@ -188,7 +188,7 @@ AVSValue __cdecl CreateFFAudioSource(AVSValue Args, void* UserData, IScriptEnvir

if (Cache)
if (FFMS_WriteIndex(CacheFile, Index, ErrorMsg, MsgSize)) {
FFMS_DestroyFrameIndex(Index);
FFMS_DestroyFFIndex(Index);
Env->ThrowError("FFAudioSource: %s", ErrorMsg);
}
}

@@ -203,11 +203,11 @@ AVSValue __cdecl CreateFFAudioSource(AVSValue Args, void* UserData, IScriptEnvir
try {
Filter = new AvisynthAudioSource(Source, Track, Index, Env, ErrorMsg, MsgSize);
} catch (...) {
FFMS_DestroyFrameIndex(Index);
FFMS_DestroyFFIndex(Index);
throw;
}

FFMS_DestroyFrameIndex(Index);
FFMS_DestroyFFIndex(Index);
return Filter;
}
@@ -31,7 +31,7 @@ extern "C" {
#define _snprintf snprintf
#endif

FrameInfo::FrameInfo(int64_t DTS, bool KeyFrame) {
TFrameInfo::TFrameInfo(int64_t DTS, bool KeyFrame) {
this->DTS = DTS;
this->SampleStart = 0;
this->FilePos = 0;

@@ -39,7 +39,7 @@ FrameInfo::FrameInfo(int64_t DTS, bool KeyFrame) {
this->KeyFrame = KeyFrame;
}

FrameInfo::FrameInfo(int64_t DTS, int64_t SampleStart, bool KeyFrame) {
TFrameInfo::TFrameInfo(int64_t DTS, int64_t SampleStart, bool KeyFrame) {
this->DTS = DTS;
this->SampleStart = SampleStart;
this->FilePos = 0;

@@ -47,7 +47,7 @@ FrameInfo::FrameInfo(int64_t DTS, int64_t SampleStart, bool KeyFrame) {
this->KeyFrame = KeyFrame;
}

FrameInfo::FrameInfo(int64_t SampleStart, int64_t FilePos, unsigned int FrameSize, bool KeyFrame) {
TFrameInfo::TFrameInfo(int64_t SampleStart, int64_t FilePos, unsigned int FrameSize, bool KeyFrame) {
this->DTS = 0;
this->SampleStart = SampleStart;
this->FilePos = FilePos;

@@ -72,14 +72,14 @@ FFMS_API(void) FFMS_SetLogLevel(int Level) {
av_log_set_level(AV_LOG_QUIET);
}

FFMS_API(VideoBase *) FFMS_CreateVideoSource(const char *SourceFile, int Track, FrameIndex *TrackIndices, const char *PP, int Threads, int SeekMode, char *ErrorMsg, unsigned MsgSize) {
FFMS_API(FFVideo *) FFMS_CreateVideoSource(const char *SourceFile, int Track, FFIndex *Index, const char *PP, int Threads, int SeekMode, char *ErrorMsg, unsigned MsgSize) {
try {
switch (TrackIndices->Decoder) {
case 0: return new FFVideoSource(SourceFile, Track, TrackIndices, PP, Threads, SeekMode, ErrorMsg, MsgSize);
case 1: return new MatroskaVideoSource(SourceFile, Track, TrackIndices, PP, Threads, ErrorMsg, MsgSize);
switch (Index->Decoder) {
case 0: return new FFVideoSource(SourceFile, Track, Index, PP, Threads, SeekMode, ErrorMsg, MsgSize);
case 1: return new MatroskaVideoSource(SourceFile, Track, Index, PP, Threads, ErrorMsg, MsgSize);
#ifdef HAALISOURCE
case 2: return new HaaliVideoSource(SourceFile, Track, TrackIndices, PP, Threads, 0, ErrorMsg, MsgSize);
case 3: return new HaaliVideoSource(SourceFile, Track, TrackIndices, PP, Threads, 1, ErrorMsg, MsgSize);
case 2: return new HaaliVideoSource(SourceFile, Track, Index, PP, Threads, 0, ErrorMsg, MsgSize);
case 3: return new HaaliVideoSource(SourceFile, Track, Index, PP, Threads, 1, ErrorMsg, MsgSize);
#endif
default:
_snprintf(ErrorMsg, MsgSize, "Unsupported format");

@@ -90,11 +90,11 @@ FFMS_API(VideoBase *) FFMS_CreateVideoSource(const char *SourceFile, int Track,
}
}

FFMS_API(AudioBase *) FFMS_CreateAudioSource(const char *SourceFile, int Track, FrameIndex *TrackIndices, char *ErrorMsg, unsigned MsgSize) {
FFMS_API(FFAudio *) FFMS_CreateAudioSource(const char *SourceFile, int Track, FFIndex *Index, char *ErrorMsg, unsigned MsgSize) {
try {
switch (TrackIndices->Decoder) {
case 0: return new FFAudioSource(SourceFile, Track, TrackIndices, ErrorMsg, MsgSize);
case 1: return new MatroskaAudioSource(SourceFile, Track, TrackIndices, ErrorMsg, MsgSize);
switch (Index->Decoder) {
case 0: return new FFAudioSource(SourceFile, Track, Index, ErrorMsg, MsgSize);
case 1: return new MatroskaAudioSource(SourceFile, Track, Index, ErrorMsg, MsgSize);
default:
_snprintf(ErrorMsg, MsgSize, "Unsupported format");
return NULL;
@@ -104,117 +104,122 @@ FFMS_API(AudioBase *) FFMS_CreateAudioSource(const char *SourceFile, int Track,
}
}

FFMS_API(void) FFMS_DestroyVideoSource(VideoBase *VB) {
delete VB;
FFMS_API(void) FFMS_DestroyVideoSource(FFVideo *V) {
delete V;
}

FFMS_API(void) FFMS_DestroyAudioSource(AudioBase *AB) {
delete AB;
FFMS_API(void) FFMS_DestroyAudioSource(FFAudio *A) {
delete A;
}

FFMS_API(const VideoProperties *) FFMS_GetVideoProperties(VideoBase *VB) {
return &VB->GetVideoProperties();
FFMS_API(const TVideoProperties *) FFMS_GetTVideoProperties(FFVideo *V) {
return &V->GetTVideoProperties();
}

FFMS_API(const AudioProperties *) FFMS_GetAudioProperties(AudioBase *AB) {
return &AB->GetAudioProperties();
FFMS_API(const TAudioProperties *) FFMS_GetTAudioProperties(FFAudio *A) {
return &A->GetTAudioProperties();
}

FFMS_API(const AVFrameLite *) FFMS_GetFrame(VideoBase *VB, int n, char *ErrorMsg, unsigned MsgSize) {
return (AVFrameLite *)VB->GetFrame(n, ErrorMsg, MsgSize);
FFMS_API(const TAVFrameLite *) FFMS_GetFrame(FFVideo *V, int n, char *ErrorMsg, unsigned MsgSize) {
return (TAVFrameLite *)V->GetFrame(n, ErrorMsg, MsgSize);
}

FFMS_API(const AVFrameLite *) FFMS_GetFrameByTime(VideoBase *VB, double Time, char *ErrorMsg, unsigned MsgSize) {
return (AVFrameLite *)VB->GetFrameByTime(Time, ErrorMsg, MsgSize);
FFMS_API(const TAVFrameLite *) FFMS_GetFrameByTime(FFVideo *V, double Time, char *ErrorMsg, unsigned MsgSize) {
return (TAVFrameLite *)V->GetFrameByTime(Time, ErrorMsg, MsgSize);
}

FFMS_API(int) FFMS_GetAudio(AudioBase *AB, void *Buf, int64_t Start, int64_t Count, char *ErrorMsg, unsigned MsgSize) {
return AB->GetAudio(Buf, Start, Count, ErrorMsg, MsgSize);
FFMS_API(int) FFMS_GetAudio(FFAudio *A, void *Buf, int64_t Start, int64_t Count, char *ErrorMsg, unsigned MsgSize) {
return A->GetAudio(Buf, Start, Count, ErrorMsg, MsgSize);
}

FFMS_API(int) FFMS_SetOutputFormat(VideoBase *VB, int TargetFormat, int Width, int Height, char *ErrorMsg, unsigned MsgSize) {
return VB->SetOutputFormat(TargetFormat, Width, Height, ErrorMsg, MsgSize);
FFMS_API(int) FFMS_SetOutputFormat(FFVideo *V, int TargetFormat, int Width, int Height, char *ErrorMsg, unsigned MsgSize) {
return V->SetOutputFormat(TargetFormat, Width, Height, ErrorMsg, MsgSize);
}

FFMS_API(void) FFMS_ResetOutputFormat(VideoBase *VB) {
VB->ResetOutputFormat();
FFMS_API(void) FFMS_ResetOutputFormat(FFVideo *V) {
V->ResetOutputFormat();
}

FFMS_API(void) FFMS_DestroyFrameIndex(FrameIndex *FI) {
delete FI;
FFMS_API(void) FFMS_DestroyFFIndex(FFIndex *Index) {
delete Index;
}

FFMS_API(int) FFMS_GetFirstTrackOfType(FrameIndex *TrackIndices, int TrackType, char *ErrorMsg, unsigned MsgSize) {
for (int i = 0; i < TrackIndices->size(); i++)
if ((*TrackIndices)[i].TT == TrackType)
FFMS_API(int) FFMS_GetFirstTrackOfType(FFIndex *Index, int TrackType, char *ErrorMsg, unsigned MsgSize) {
for (int i = 0; i < static_cast<int>(Index->size()); i++)
if ((*Index)[i].TT == TrackType)
return i;
_snprintf(ErrorMsg, MsgSize, "No suitable track found");
return -1;
}

FFMS_API(int) FFMS_GetNumTracks(FrameIndex *TrackIndices) {
return TrackIndices->size();
FFMS_API(int) FFMS_GetNumTracks(FFIndex *Index) {
return Index->size();
}

FFMS_API(int) FFMS_GetTrackType(FrameInfoVector *FIV) {
return FIV->TT;
FFMS_API(int) FFMS_GetTrackType(FFTrack *T) {
return T->TT;
}

FFMS_API(int) FFMS_GetNumFrames(FrameInfoVector *FIV) {
return FIV->size();
FFMS_API(int) FFMS_GetNumFrames(FFTrack *T) {
return T->size();
}

FFMS_API(const FrameInfo *) FFMS_GetFrameInfo(FrameInfoVector *FIV, int Frame, char *ErrorMsg, unsigned MsgSize) {
if (Frame < 0 || Frame >= FIV->size()) {
FFMS_API(const TFrameInfo *) FFMS_GetTFrameInfo(FFTrack *T, int Frame, char *ErrorMsg, unsigned MsgSize) {
if (Frame < 0 || Frame >= static_cast<int>(T->size())) {
_snprintf(ErrorMsg, MsgSize, "Invalid frame specified");
return NULL;
} else {
return &(*FIV)[Frame];
return &(*T)[Frame];
}
}

FFMS_API(FrameInfoVector *) FFMS_GetTITrackIndex(FrameIndex *TrackIndices, int Track, char *ErrorMsg, unsigned MsgSize) {
if (Track < 0 || Track >= TrackIndices->size()) {
FFMS_API(FFTrack *) FFMS_GetTITrackIndex(FFIndex *Index, int Track, char *ErrorMsg, unsigned MsgSize) {
if (Track < 0 || Track >= static_cast<int>(Index->size())) {
_snprintf(ErrorMsg, MsgSize, "Invalid track specified");
return NULL;
} else {
return &(*TrackIndices)[Track];
return &(*Index)[Track];
}
}

FFMS_API(FrameInfoVector *) FFMS_GetVSTrackIndex(VideoBase *VB) {
return VB->GetFrameInfoVector();
FFMS_API(FFTrack *) FFMS_GetVSTrackIndex(FFVideo *V) {
return V->GetFFTrack();
}

FFMS_API(int) FFMS_FindClosestKeyFrame(FrameInfoVector *FIV, int Frame, char *ErrorMsg, unsigned MsgSize) {
if (Frame < 0 || Frame >= FIV->size()) {
FFMS_API(int) FFMS_FindClosestKeyFrame(FFTrack *T, int Frame, char *ErrorMsg, unsigned MsgSize) {
if (Frame < 0 || Frame >= static_cast<int>(T->size())) {
_snprintf(ErrorMsg, MsgSize, "Out of range frame specified");
return -1;
} else {
return FIV->FindClosestKeyFrame(Frame);
return T->FindClosestKeyFrame(Frame);
}
}

FFMS_API(const TrackTimeBase *) FFMS_GetTimeBase(FrameInfoVector *FIV) {
return &FIV->TB;
FFMS_API(const TTrackTimeBase *) FFMS_GetTimeBase(FFTrack *T) {
return &T->TB;
}

FFMS_API(int) FFMS_WriteTimecodes(FrameInfoVector *FIV, const char *TimecodeFile, char *ErrorMsg, unsigned MsgSize) {
return FIV->WriteTimecodes(TimecodeFile, ErrorMsg, MsgSize);
FFMS_API(int) FFMS_WriteTimecodes(FFTrack *T, const char *TimecodeFile, char *ErrorMsg, unsigned MsgSize) {
return T->WriteTimecodes(TimecodeFile, ErrorMsg, MsgSize);
}

FFMS_API(FrameIndex *) FFMS_MakeIndex(const char *SourceFile, int IndexMask, int DumpMask, const char *AudioFile, bool IgnoreDecodeErrors, IndexCallback IP, void *Private, char *ErrorMsg, unsigned MsgSize) {
FFMS_API(FFIndex *) FFMS_MakeIndex(const char *SourceFile, int IndexMask, int DumpMask, const char *AudioFile, bool IgnoreDecodeErrors, TIndexCallback IP, void *Private, char *ErrorMsg, unsigned MsgSize) {
return MakeIndex(SourceFile, IndexMask, DumpMask, AudioFile, IgnoreDecodeErrors, IP, Private, ErrorMsg, MsgSize);
}

FFMS_API(FrameIndex *) FFMS_ReadIndex(const char *IndexFile, char *ErrorMsg, unsigned MsgSize) {
FFMS_API(FFIndex *) FFMS_ReadIndex(const char *IndexFile, char *ErrorMsg, unsigned MsgSize) {
return ReadIndex(IndexFile, ErrorMsg, MsgSize);
}

FFMS_API(int) FFMS_WriteIndex(const char *IndexFile, FrameIndex *TrackIndices, char *ErrorMsg, unsigned MsgSize) {
return WriteIndex(IndexFile, TrackIndices, ErrorMsg, MsgSize);
FFMS_API(int) FFMS_WriteIndex(const char *IndexFile, FFIndex *Index, char *ErrorMsg, unsigned MsgSize) {
return WriteIndex(IndexFile, Index, ErrorMsg, MsgSize);
}

FFMS_API(int) FFMS_GetPixFmt(const char *Name) {
return avcodec_get_pix_fmt(Name);
}

FFMS_API(int) FFMS_DefaultAudioName(const char *SourceFile, int Track, const TAudioProperties *AP, char *FileName, unsigned FNSize) {

return 0;
}
@@ -41,13 +41,10 @@
# define FFMS_API(ret) EXTERN_C ret FFMS_CC
#endif

class VideoBase;
class AudioBase;
class FrameIndex;
class FrameInfoVector;

typedef int (FFMS_CC *IndexCallback)(int State, int64_t Current, int64_t Total, void *Private);
typedef int (FFMS_CC *IndexCallback)(int State, int64_t Current, int64_t Total, void *Private);
struct FFVideo;
struct FFAudio;
struct FFIndex;
struct FFTrack;

enum FFMS_SeekMode {
FFMS_SEEK_LINEAR_NO_RW = -1,

@@ -65,9 +62,9 @@ enum FFMS_TrackType {
// This is a subset of the original AVFrame only containing the most used parts.
// Even if it might seem like a good idea to cast it back to a full AVFrame to
// access a few more values you really shouldn't do that. Only the values present
// in AVFrameLite are actually updated when postprocessing is used.
// in TAVFrameLite are actually updated when postprocessing is used.

struct AVFrameLite {
struct TAVFrameLite {
uint8_t *Data[4];
int Linesize[4];
uint8_t *Base[4];

@@ -75,24 +72,25 @@ struct AVFrameLite {
int PictType;
};

struct TrackTimeBase {
struct TTrackTimeBase {
int64_t Num;
int64_t Den;
};

class FrameInfo {
public:
struct TFrameInfo {
int64_t DTS;
int64_t SampleStart;
int64_t FilePos;
unsigned int FrameSize;
bool KeyFrame;
FrameInfo(int64_t DTS, bool KeyFrame);
FrameInfo(int64_t DTS, int64_t SampleStart, bool KeyFrame);
FrameInfo(int64_t SampleStart, int64_t FilePos, unsigned int FrameSize, bool KeyFrame);
#ifdef FFMS_EXPORTS
TFrameInfo(int64_t DTS, bool KeyFrame);
TFrameInfo(int64_t DTS, int64_t SampleStart, bool KeyFrame);
TFrameInfo(int64_t SampleStart, int64_t FilePos, unsigned int FrameSize, bool KeyFrame);
#endif
};

struct VideoProperties {
struct TVideoProperties {
int Width;
int Height;
int FPSDenominator;
@@ -109,46 +107,49 @@ struct VideoProperties {
double LastTime;
};

struct AudioProperties {
struct TAudioProperties {
int SampleRate;
int Channels;
int BitsPerSample;
int Delay;
bool Float;
int64_t NumSamples;
double FirstTime;
};

typedef int (FFMS_CC *TIndexCallback)(int State, int64_t Current, int64_t Total, void *Private);
typedef int (FFMS_CC *TAudioNameCallback)(const char *SourceFile, int Track, const TAudioProperties *AP, char *FileName, unsigned FNSize);

// Most functions return 0 on success
// Functions without error message output can be assumed to never fail
FFMS_API(void) FFMS_Init();
FFMS_API(int) FFMS_GetLogLevel();
FFMS_API(void) FFMS_SetLogLevel(int Level);
FFMS_API(VideoBase *) FFMS_CreateVideoSource(const char *SourceFile, int Track, FrameIndex *TrackIndices, const char *PP, int Threads, int SeekMode, char *ErrorMsg, unsigned MsgSize);
FFMS_API(AudioBase *) FFMS_CreateAudioSource(const char *SourceFile, int Track, FrameIndex *TrackIndices, char *ErrorMsg, unsigned MsgSize);
FFMS_API(void) FFMS_DestroyVideoSource(VideoBase *VB);
FFMS_API(void) FFMS_DestroyAudioSource(AudioBase *AB);
FFMS_API(const VideoProperties *) FFMS_GetVideoProperties(VideoBase *VB);
FFMS_API(const AudioProperties *) FFMS_GetAudioProperties(AudioBase *AB);
FFMS_API(const AVFrameLite *) FFMS_GetFrame(VideoBase *VB, int n, char *ErrorMsg, unsigned MsgSize);
FFMS_API(const AVFrameLite *) FFMS_GetFrameByTime(VideoBase *VB, double Time, char *ErrorMsg, unsigned MsgSize);
FFMS_API(int) FFMS_GetAudio(AudioBase *AB, void *Buf, int64_t Start, int64_t Count, char *ErrorMsg, unsigned MsgSize);
FFMS_API(int) FFMS_SetOutputFormat(VideoBase *VB, int TargetFormat, int Width, int Height, char *ErrorMsg, unsigned MsgSize);
FFMS_API(void) FFMS_ResetOutputFormat(VideoBase *VB);
FFMS_API(void) FFMS_DestroyFrameIndex(FrameIndex *FI);
FFMS_API(int) FFMS_GetFirstTrackOfType(FrameIndex *TrackIndices, int TrackType, char *ErrorMsg, unsigned MsgSize);
FFMS_API(int) FFMS_GetNumTracks(FrameIndex *TrackIndices);
FFMS_API(int) FFMS_GetTrackType(FrameInfoVector *FIV);
FFMS_API(int) FFMS_GetNumFrames(FrameInfoVector *FIV);
FFMS_API(const FrameInfo *) FFMS_GetFrameInfo(FrameInfoVector *FIV, int Frame, char *ErrorMsg, unsigned MsgSize);
FFMS_API(FrameInfoVector *) FFMS_GetTITrackIndex(FrameIndex *TrackIndices, int Track, char *ErrorMsg, unsigned MsgSize);
FFMS_API(FrameInfoVector *) FFMS_GetVSTrackIndex(VideoBase *VB);
FFMS_API(FrameInfoVector *) FFMS_GetASTrackIndex(AudioBase *AB);
FFMS_API(int) FFMS_FindClosestKeyFrame(FrameInfoVector *FIV, int Frame, char *ErrorMsg, unsigned MsgSize);
FFMS_API(const TrackTimeBase *) FFMS_GetTimeBase(FrameInfoVector *FIV);
FFMS_API(int) FFMS_WriteTimecodes(FrameInfoVector *FIV, const char *TimecodeFile, char *ErrorMsg, unsigned MsgSize);
FFMS_API(FrameIndex *) FFMS_MakeIndex(const char *SourceFile, int IndexMask, int DumpMask, const char *AudioFile, bool IgnoreDecodeErrors, IndexCallback IP, void *Private, char *ErrorMsg, unsigned MsgSize);
FFMS_API(FrameIndex *) FFMS_ReadIndex(const char *IndexFile, char *ErrorMsg, unsigned MsgSize);
FFMS_API(int) FFMS_WriteIndex(const char *IndexFile, FrameIndex *TrackIndices, char *ErrorMsg, unsigned MsgSize);
FFMS_API(FFVideo *) FFMS_CreateVideoSource(const char *SourceFile, int Track, FFIndex *Index, const char *PP, int Threads, int SeekMode, char *ErrorMsg, unsigned MsgSize);
FFMS_API(FFAudio *) FFMS_CreateAudioSource(const char *SourceFile, int Track, FFIndex *Index, char *ErrorMsg, unsigned MsgSize);
FFMS_API(void) FFMS_DestroyVideoSource(FFVideo *V);
FFMS_API(void) FFMS_DestroyAudioSource(FFAudio *A);
FFMS_API(const TVideoProperties *) FFMS_GetTVideoProperties(FFVideo *V);
FFMS_API(const TAudioProperties *) FFMS_GetTAudioProperties(FFAudio *A);
FFMS_API(const TAVFrameLite *) FFMS_GetFrame(FFVideo *V, int n, char *ErrorMsg, unsigned MsgSize);
FFMS_API(const TAVFrameLite *) FFMS_GetFrameByTime(FFVideo *V, double Time, char *ErrorMsg, unsigned MsgSize);
FFMS_API(int) FFMS_GetAudio(FFAudio *A, void *Buf, int64_t Start, int64_t Count, char *ErrorMsg, unsigned MsgSize);
FFMS_API(int) FFMS_SetOutputFormat(FFVideo *V, int TargetFormat, int Width, int Height, char *ErrorMsg, unsigned MsgSize);
FFMS_API(void) FFMS_ResetOutputFormat(FFVideo *V);
FFMS_API(void) FFMS_DestroyFFIndex(FFIndex *Index);
FFMS_API(int) FFMS_GetFirstTrackOfType(FFIndex *Index, int TrackType, char *ErrorMsg, unsigned MsgSize);
FFMS_API(int) FFMS_GetNumTracks(FFIndex *Index);
FFMS_API(int) FFMS_GetTrackType(FFTrack *T);
FFMS_API(int) FFMS_GetNumFrames(FFTrack *T);
FFMS_API(const TFrameInfo *) FFMS_GetTFrameInfo(FFTrack *T, int Frame, char *ErrorMsg, unsigned MsgSize);
FFMS_API(FFTrack *) FFMS_GetTITrackIndex(FFIndex *Index, int Track, char *ErrorMsg, unsigned MsgSize);
FFMS_API(FFTrack *) FFMS_GetVSTrackIndex(FFVideo *V);
FFMS_API(FFTrack *) FFMS_GetASTrackIndex(FFAudio *A);
FFMS_API(int) FFMS_FindClosestKeyFrame(FFTrack *T, int Frame, char *ErrorMsg, unsigned MsgSize);
FFMS_API(const TTrackTimeBase *) FFMS_GetTimeBase(FFTrack *T);
FFMS_API(int) FFMS_WriteTimecodes(FFTrack *T, const char *TimecodeFile, char *ErrorMsg, unsigned MsgSize);
FFMS_API(FFIndex *) FFMS_MakeIndex(const char *SourceFile, int IndexMask, int DumpMask, const char *AudioFile, bool IgnoreDecodeErrors, TIndexCallback IP, void *Private, char *ErrorMsg, unsigned MsgSize);
FFMS_API(FFIndex *) FFMS_ReadIndex(const char *IndexFile, char *ErrorMsg, unsigned MsgSize);
FFMS_API(int) FFMS_WriteIndex(const char *IndexFile, FFIndex *Index, char *ErrorMsg, unsigned MsgSize);
FFMS_API(int) FFMS_GetPixFmt(const char *Name);
//FFMS_API(int) FFMS_DefaultAudioName(const char *SourceFile, int Track, const AudioProperties *AP, char *FileName, unsigned FNSize);
FFMS_API(int) FFMS_DefaultAudioName(const char *SourceFile, int Track, const TAudioProperties *AP, char *FileName, unsigned FNSize);
#endif
@@ -237,6 +237,11 @@ Note that --enable-w32threads is required for multithreaded decoding to work.
<h2>Changes</h2>
<ul>
<li>2.00 beta 9<ul>
<li>Fixed an access violation occurring when unindexed or empty audio tracks in matroska files were opened</li>
<li>Less type conversion/signedness warnings</li>
<li>When audio track dumping is performed a custom callback can now be supplied to name the tracks</li>
<li>The audio track delay is now exposed in the API in the same way as video tracks</li>
<li>A big type and argument name cleanup in the API, many things have been renamed to be clearer and it should be completely C friendly now</li>
<li>Removed FFNoLog and replaced it with FFSetLogLevel and FFGetLogLevel, the default logging is now also set to quiet, the magical numbers to supply it can be found in avutil/log.h</li>
<li>Updated FFmpeg to rev X</li>
</ul></li>
@@ -66,7 +66,7 @@ int main(int argc, char *argv[]) {
int FMT_YUY2 = FFMS_GetPixFmt("yuv422p");

av_md5_init(ctx);
FrameIndex *FI = FFMS_MakeIndex(argv[1], -1, 0, NULL, false, UpdateProgress, argv[1], ErrorMsg, sizeof(ErrorMsg));
FFIndex *FI = FFMS_MakeIndex(argv[1], -1, 0, NULL, false, UpdateProgress, argv[1], ErrorMsg, sizeof(ErrorMsg));
if (!FI) {
cout << "Indexing error: " << ErrorMsg << endl;
return 1;

@@ -78,16 +78,16 @@ int main(int argc, char *argv[]) {
return 2;
}

VideoBase *FV = FFMS_CreateVideoSource(argv[1], track, FI, "", 1, 1, ErrorMsg, sizeof(ErrorMsg));
FFMS_DestroyFrameIndex(FI);
if (!FV) {
FFVideo *V = FFMS_CreateVideoSource(argv[1], track, FI, "", 1, 1, ErrorMsg, sizeof(ErrorMsg));
FFMS_DestroyFFIndex(FI);
if (!V) {
cout << "Video source error: " << ErrorMsg << endl;
return 3;
}

const VideoProperties *VP = FFMS_GetVideoProperties(FV);
const TVideoProperties *VP = FFMS_GetTVideoProperties(V);
for (int i = 0; i < VP->NumFrames; i++) {
const AVFrameLite *AVF = FFMS_GetFrame(FV, i, ErrorMsg, sizeof(ErrorMsg));
const TAVFrameLite *AVF = FFMS_GetFrame(V, i, ErrorMsg, sizeof(ErrorMsg));
if (!AVF) {
cout << "Frame request error: " << ErrorMsg << " at frame " << i << endl;
return 4;

@@ -133,7 +133,7 @@ int main(int argc, char *argv[]) {
}
}

FFMS_DestroyVideoSource(FV);
FFMS_DestroyVideoSource(V);
av_md5_final(ctx, md5sum);

delete[] reinterpret_cast<uint8_t *>(ctx);
@@ -36,7 +36,7 @@ std::string InputFile;
std::string CacheFile;
std::string AudioFile;

FrameIndex *Index;
FFIndex *Index;


void PrintUsage () {

@@ -195,18 +195,18 @@ int main(int argc, char *argv[]) {
DoIndexing();
} catch (const char *Error) {
std::cout << Error << std::endl;
FFMS_DestroyFrameIndex(Index);
FFMS_DestroyFFIndex(Index);
return 1;
} catch (std::string Error) {
std::cout << std::endl << Error << std::endl;
FFMS_DestroyFrameIndex(Index);
FFMS_DestroyFFIndex(Index);
return 1;
} catch (...) {
std::cout << std::endl << "Unknown error" << std::endl;
FFMS_DestroyFrameIndex(Index);
FFMS_DestroyFFIndex(Index);
return 1;
}

FFMS_DestroyFrameIndex(Index);
FFMS_DestroyFFIndex(Index);
return 0;
}
@@ -25,7 +25,7 @@
#define _snprintf snprintf
#endif

int VideoBase::InitPP(const char *PP, PixelFormat PixelFormat, char *ErrorMsg, unsigned MsgSize) {
int FFVideo::InitPP(const char *PP, PixelFormat PixelFormat, char *ErrorMsg, unsigned MsgSize) {
if (PP == NULL || !strcmp(PP, ""))
return 0;

@@ -66,7 +66,7 @@ int VideoBase::InitPP(const char *PP, PixelFormat PixelFormat, char *ErrorMsg, u
return 0;
}

AVFrameLite *VideoBase::OutputFrame(AVFrame *Frame) {
TAVFrameLite *FFVideo::OutputFrame(AVFrame *Frame) {
if (PPContext) {
pp_postprocess(const_cast<const uint8_t **>(Frame->data), Frame->linesize, PPFrame->data, PPFrame->linesize, VP.Width, VP.Height, Frame->qscale_table, Frame->qstride, PPMode, PPContext, Frame->pict_type | (Frame->qscale_type ? PP_PICT_TYPE_QP2 : 0));
PPFrame->key_frame = Frame->key_frame;

@@ -79,10 +79,10 @@ AVFrameLite *VideoBase::OutputFrame(AVFrame *Frame) {
FinalFrame->pict_type = PPFrame->pict_type;
}

return reinterpret_cast<AVFrameLite *>(FinalFrame);
return reinterpret_cast<TAVFrameLite *>(FinalFrame);
}

VideoBase::VideoBase() {
FFVideo::FFVideo() {
memset(&VP, 0, sizeof(VP));
PPContext = NULL;
PPMode = NULL;

@@ -95,7 +95,7 @@ VideoBase::VideoBase() {
FinalFrame = PPFrame;
}

VideoBase::~VideoBase() {
FFVideo::~FFVideo() {
if (PPMode)
pp_free_mode(PPMode);
if (PPContext)

@@ -113,12 +113,12 @@ VideoBase::~VideoBase() {
av_free(DecodeFrame);
}

AVFrameLite *VideoBase::GetFrameByTime(double Time, char *ErrorMsg, unsigned MsgSize) {
TAVFrameLite *FFVideo::GetFrameByTime(double Time, char *ErrorMsg, unsigned MsgSize) {
int Frame = Frames.ClosestFrameFromDTS((Time * 1000 * Frames.TB.Den) / Frames.TB.Num);
return GetFrame(Frame, ErrorMsg, MsgSize);
}

int VideoBase::SetOutputFormat(int TargetFormats, int Width, int Height, char *ErrorMsg, unsigned MsgSize) {
int FFVideo::SetOutputFormat(int TargetFormats, int Width, int Height, char *ErrorMsg, unsigned MsgSize) {
// FIXME: investigate the possible bug in avcodec_find_best_pix_fmt
// int Loss;
// int OutputFormat = avcodec_find_best_pix_fmt(TargetFormats,

@@ -162,7 +162,7 @@ int VideoBase::SetOutputFormat(int TargetFormats, int Width, int Height, char *E
return 0;
}

void VideoBase::ResetOutputFormat() {
void FFVideo::ResetOutputFormat() {
if (SWS)
sws_freeContext(SWS);
SWS = NULL;
@@ -179,14 +179,14 @@ void FFVideoSource::Free(bool CloseCodec) {
//av_free(FormatContext);
}

FFVideoSource::FFVideoSource(const char *SourceFile, int Track, FrameIndex *TrackIndices,
FFVideoSource::FFVideoSource(const char *SourceFile, int Track, FFIndex *Index,
const char *PP, int Threads, int SeekMode, char *ErrorMsg, unsigned MsgSize) {

FormatContext = NULL;
AVCodec *Codec = NULL;
this->SeekMode = SeekMode;
VideoTrack = Track;
Frames = (*TrackIndices)[VideoTrack];
Frames = (*Index)[VideoTrack];

if (Frames.size() == 0) {
_snprintf(ErrorMsg, MsgSize, "Video track contains no frames");

@@ -315,7 +315,7 @@ Done:
return 0;
}

AVFrameLite *FFVideoSource::GetFrame(int n, char *ErrorMsg, unsigned MsgSize) {
TAVFrameLite *FFVideoSource::GetFrame(int n, char *ErrorMsg, unsigned MsgSize) {
// PPFrame always holds frame LastFrameNum even if no PP is applied
if (LastFrameNum == n)
return OutputFrame(DecodeFrame);

@@ -398,7 +398,7 @@ void MatroskaVideoSource::Free(bool CloseCodec) {
}

MatroskaVideoSource::MatroskaVideoSource(const char *SourceFile, int Track,
FrameIndex *TrackIndices, const char *PP,
FFIndex *Index, const char *PP,
int Threads, char *ErrorMsg, unsigned MsgSize) {

AVCodec *Codec = NULL;

@@ -406,7 +406,7 @@ MatroskaVideoSource::MatroskaVideoSource(const char *SourceFile, int Track,
TrackInfo *TI = NULL;
CS = NULL;
VideoTrack = Track;
Frames = (*TrackIndices)[VideoTrack];
Frames = (*Index)[VideoTrack];

if (Frames.size() == 0) {
_snprintf(ErrorMsg, MsgSize, "Video track contains no frames");

@@ -553,7 +553,7 @@ Done:
return 0;
}

AVFrameLite *MatroskaVideoSource::GetFrame(int n, char *ErrorMsg, unsigned MsgSize) {
TAVFrameLite *MatroskaVideoSource::GetFrame(int n, char *ErrorMsg, unsigned MsgSize) {
// PPFrame always holds frame LastFrameNum even if no PP is applied
if (LastFrameNum == n)
return OutputFrame(DecodeFrame);

@@ -597,13 +597,13 @@ void HaaliVideoSource::Free(bool CloseCodec) {
}

HaaliVideoSource::HaaliVideoSource(const char *SourceFile, int Track,
FrameIndex *TrackIndices, const char *PP,
FFIndex *Index, const char *PP,
int Threads, int SourceMode, char *ErrorMsg, unsigned MsgSize) {

AVCodec *Codec = NULL;
CodecContext = NULL;
VideoTrack = Track;
Frames = (*TrackIndices)[VideoTrack];
Frames = (*Index)[VideoTrack];

if (Frames.size() == 0) {
_snprintf(ErrorMsg, MsgSize, "Video track contains no frames");

@@ -798,7 +798,7 @@ Done:
return 0;
}

AVFrameLite *HaaliVideoSource::GetFrame(int n, char *ErrorMsg, unsigned MsgSize) {
TAVFrameLite *HaaliVideoSource::GetFrame(int n, char *ErrorMsg, unsigned MsgSize) {
// PPFrame always holds frame LastFrameNum even if no PP is applied
if (LastFrameNum == n)
return OutputFrame(DecodeFrame);
@@ -44,36 +44,36 @@ extern "C" {
# include "guids.h"
#endif

class VideoBase {
struct FFVideo {
private:
pp_context_t *PPContext;
pp_mode_t *PPMode;
SwsContext *SWS;
protected:
VideoProperties VP;
TVideoProperties VP;
AVFrame *DecodeFrame;
AVFrame *PPFrame;
AVFrame *FinalFrame;
int LastFrameNum;
FrameInfoVector Frames;
FFTrack Frames;
int VideoTrack;
int CurrentFrame;
AVCodecContext *CodecContext;

VideoBase();
FFVideo();
int InitPP(const char *PP, PixelFormat PixelFormat, char *ErrorMsg, unsigned MsgSize);
AVFrameLite *OutputFrame(AVFrame *Frame);
TAVFrameLite *OutputFrame(AVFrame *Frame);
public:
virtual ~VideoBase();
const VideoProperties& GetVideoProperties() { return VP; }
FrameInfoVector *GetFrameInfoVector() { return &Frames; }
virtual AVFrameLite *GetFrame(int n, char *ErrorMsg, unsigned MsgSize) = 0;
AVFrameLite *GetFrameByTime(double Time, char *ErrorMsg, unsigned MsgSize);
virtual ~FFVideo();
const TVideoProperties& GetTVideoProperties() { return VP; }
FFTrack *GetFFTrack() { return &Frames; }
virtual TAVFrameLite *GetFrame(int n, char *ErrorMsg, unsigned MsgSize) = 0;
TAVFrameLite *GetFrameByTime(double Time, char *ErrorMsg, unsigned MsgSize);
int SetOutputFormat(int TargetFormats, int Width, int Height, char *ErrorMsg, unsigned MsgSize);
void ResetOutputFormat();
};

class FFVideoSource : public VideoBase {
class FFVideoSource : public FFVideo {
private:
AVFormatContext *FormatContext;
int SeekMode;

@@ -81,12 +81,12 @@ private:
void Free(bool CloseCodec);
int DecodeNextFrame(AVFrame *Frame, int64_t *DTS, char *ErrorMsg, unsigned MsgSize);
public:
FFVideoSource(const char *SourceFile, int Track, FrameIndex *TrackIndices, const char *PP, int Threads, int SeekMode, char *ErrorMsg, unsigned MsgSize);
FFVideoSource(const char *SourceFile, int Track, FFIndex *Index, const char *PP, int Threads, int SeekMode, char *ErrorMsg, unsigned MsgSize);
~FFVideoSource();
AVFrameLite *GetFrame(int n, char *ErrorMsg, unsigned MsgSize);
TAVFrameLite *GetFrame(int n, char *ErrorMsg, unsigned MsgSize);
};

class MatroskaVideoSource : public VideoBase {
class MatroskaVideoSource : public FFVideo {
private:
MatroskaFile *MF;
MatroskaReaderContext MC;

@@ -96,23 +96,23 @@ private:
void Free(bool CloseCodec);
int DecodeNextFrame(AVFrame *AFrame, int64_t *AFirstStartTime, char *ErrorMsg, unsigned MsgSize);
public:
MatroskaVideoSource(const char *SourceFile, int Track, FrameIndex *TrackIndices, const char *PP, int Threads, char *ErrorMsg, unsigned MsgSize);
MatroskaVideoSource(const char *SourceFile, int Track, FFIndex *Index, const char *PP, int Threads, char *ErrorMsg, unsigned MsgSize);
~MatroskaVideoSource();
AVFrameLite *GetFrame(int n, char *ErrorMsg, unsigned MsgSize);
TAVFrameLite *GetFrame(int n, char *ErrorMsg, unsigned MsgSize);
};

#ifdef HAALISOURCE

class HaaliVideoSource : public VideoBase {
class HaaliVideoSource : public FFVideo {
private:
CComPtr<IMMContainer> pMMC;
uint8_t * CodecPrivate;
void Free(bool CloseCodec);
int DecodeNextFrame(AVFrame *AFrame, int64_t *AFirstStartTime, char *ErrorMsg, unsigned MsgSize);
public:
HaaliVideoSource(const char *SourceFile, int Track, FrameIndex *TrackIndices, const char *PP, int Threads, int SourceMode, char *ErrorMsg, unsigned MsgSize);
HaaliVideoSource(const char *SourceFile, int Track, FFIndex *Index, const char *PP, int Threads, int SourceMode, char *ErrorMsg, unsigned MsgSize);
~HaaliVideoSource();
AVFrameLite *GetFrame(int n, char *ErrorMsg, unsigned MsgSize);
TAVFrameLite *GetFrame(int n, char *ErrorMsg, unsigned MsgSize);
};

#endif // HAALISOURCE
@ -163,19 +163,19 @@ public:
|
|||
}
|
||||
};
|
||||
|
||||
static bool DTSComparison(FrameInfo FI1, FrameInfo FI2) {
|
||||
static bool DTSComparison(TFrameInfo FI1, TFrameInfo FI2) {
|
||||
return FI1.DTS < FI2.DTS;
|
||||
}
|
||||
|
||||
static void SortTrackIndices(FrameIndex *TrackIndices) {
|
||||
for (FrameIndex::iterator Cur=TrackIndices->begin(); Cur!=TrackIndices->end(); Cur++)
|
||||
static void SortTrackIndices(FFIndex *Index) {
|
||||
for (FFIndex::iterator Cur=Index->begin(); Cur!=Index->end(); Cur++)
|
||||
std::sort(Cur->begin(), Cur->end(), DTSComparison);
|
||||
}
|
||||
|
||||
int WriteIndex(const char *IndexFile, FrameIndex *TrackIndices, char *ErrorMsg, unsigned MsgSize) {
|
||||
std::ofstream Index(IndexFile, std::ios::out | std::ios::binary | std::ios::trunc);
|
||||
int WriteIndex(const char *IndexFile, FFIndex *Index, char *ErrorMsg, unsigned MsgSize) {
|
||||
std::ofstream IndexStream(IndexFile, std::ios::out | std::ios::binary | std::ios::trunc);
|
||||
|
||||
if (!Index.is_open()) {
|
||||
if (!IndexStream.is_open()) {
|
||||
_snprintf(ErrorMsg, MsgSize, "Failed to open '%s' for writing", IndexFile);
|
||||
return 1;
|
||||
}
|
||||
|
@ -184,29 +184,29 @@ int WriteIndex(const char *IndexFile, FrameIndex *TrackIndices, char *ErrorMsg,
|
|||
IndexHeader IH;
|
||||
IH.Id = INDEXID;
|
||||
IH.Version = INDEXVERSION;
|
||||
IH.Tracks = TrackIndices->size();
|
||||
IH.Decoder = TrackIndices->Decoder;
|
||||
Index.write(reinterpret_cast<char *>(&IH), sizeof(IH));
|
||||
IH.Tracks = Index->size();
|
||||
IH.Decoder = Index->Decoder;
|
||||
IndexStream.write(reinterpret_cast<char *>(&IH), sizeof(IH));
|
||||
|
||||
for (unsigned int i = 0; i < IH.Tracks; i++) {
|
||||
int TT = (*TrackIndices)[i].TT;
|
||||
Index.write(reinterpret_cast<char *>(&TT), sizeof(TT));
|
||||
int64_t Num = (*TrackIndices)[i].TB.Num;
|
||||
Index.write(reinterpret_cast<char *>(&Num), sizeof(Num));
|
||||
int64_t Den = (*TrackIndices)[i].TB.Den;
|
||||
Index.write(reinterpret_cast<char *>(&Den), sizeof(Den));
|
||||
size_t Frames = (*TrackIndices)[i].size();
|
||||
Index.write(reinterpret_cast<char *>(&Frames), sizeof(Frames));
|
||||
int TT = (*Index)[i].TT;
|
||||
IndexStream.write(reinterpret_cast<char *>(&TT), sizeof(TT));
|
||||
int64_t Num = (*Index)[i].TB.Num;
|
||||
IndexStream.write(reinterpret_cast<char *>(&Num), sizeof(Num));
|
||||
int64_t Den = (*Index)[i].TB.Den;
|
||||
IndexStream.write(reinterpret_cast<char *>(&Den), sizeof(Den));
|
||||
size_t Frames = (*Index)[i].size();
|
||||
IndexStream.write(reinterpret_cast<char *>(&Frames), sizeof(Frames));
|
||||
|
||||
for (size_t j = 0; j < Frames; j++)
|
||||
Index.write(reinterpret_cast<char *>(&(TrackIndices->at(i)[j])), sizeof(FrameInfo));
|
||||
IndexStream.write(reinterpret_cast<char *>(&(Index->at(i)[j])), sizeof(TFrameInfo));
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#ifdef HAALISOURCE

static FrameIndex *MakeHaaliIndex(const char *SourceFile, int IndexMask, int DumpMask, const char *AudioFile, bool IgnoreDecodeErrors, int SourceMode, IndexCallback IP, void *Private, char *ErrorMsg, unsigned MsgSize) {
static FFIndex *MakeHaaliIndex(const char *SourceFile, int IndexMask, int DumpMask, const char *AudioFile, bool IgnoreDecodeErrors, int SourceMode, TIndexCallback IP, void *Private, char *ErrorMsg, unsigned MsgSize) {
::CoInitializeEx(NULL, COINIT_MULTITHREADED);

CLSID clsid = HAALI_TS_Parser;

@ -260,7 +260,7 @@ static FrameIndex *MakeHaaliIndex(const char *SourceFile, int IndexMask, int Dum
MatroskaAudioContext *AudioContexts;
HaaliIndexMemory IM = HaaliIndexMemory(NumTracks, db, AudioContexts);

FrameIndex *TrackIndices = new FrameIndex();
FFIndex *TrackIndices = new FFIndex();
TrackIndices->Decoder = 2;
if (SourceMode == 1)
TrackIndices->Decoder = 3;

@ -299,7 +299,7 @@ static FrameIndex *MakeHaaliIndex(const char *SourceFile, int IndexMask, int Dum
}
}

TrackIndices->push_back(FrameInfoVector(1, 1000000000, TrackTypes[CurrentTrack] - 1));
TrackIndices->push_back(FFTrack(1, 1000000000, TrackTypes[CurrentTrack] - 1));

if (IndexMask & (1 << CurrentTrack) && TrackTypes[CurrentTrack] == TT_AUDIO) {
AVCodecContext *AudioCodecContext = avcodec_alloc_context();

@ -354,9 +354,9 @@ static FrameIndex *MakeHaaliIndex(const char *SourceFile, int IndexMask, int Dum

// Only create index entries for video for now to save space
if (TrackTypes[CurrentTrack] == TT_VIDEO) {
(*TrackIndices)[CurrentTrack].push_back(FrameInfo(Ts, pMMF->IsSyncPoint() == S_OK));
(*TrackIndices)[CurrentTrack].push_back(TFrameInfo(Ts, pMMF->IsSyncPoint() == S_OK));
} else if (TrackTypes[CurrentTrack] == TT_AUDIO && (IndexMask & (1 << CurrentTrack))) {
(*TrackIndices)[CurrentTrack].push_back(FrameInfo(AudioContexts[CurrentTrack].CurrentSample, 0 /* FIXME? */, pMMF->GetActualDataLength(), pMMF->IsSyncPoint() == S_OK));
(*TrackIndices)[CurrentTrack].push_back(TFrameInfo(AudioContexts[CurrentTrack].CurrentSample, 0 /* FIXME? */, pMMF->GetActualDataLength(), pMMF->IsSyncPoint() == S_OK));
AVCodecContext *AudioCodecContext = AudioContexts[CurrentTrack].CTX;
pMMF->GetPointer(&TempPacket.data);
TempPacket.size = pMMF->GetActualDataLength();

@ -387,6 +387,9 @@ static FrameIndex *MakeHaaliIndex(const char *SourceFile, int IndexMask, int Dum
if (dbsize > 0 && (DumpMask & (1 << CurrentTrack))) {
// Delay writer creation until after an audio frame has been decoded. This ensures that all parameters are known when writing the headers.
if (!AudioContexts[CurrentTrack].W64W) {
TAudioProperties AP;
AVCodecContext *CTX = AudioContexts[CurrentTrack].CTX;
AP.BitsPerSample = CTX->bits_per_coded_sample;
char ABuf[50];
std::string WN(AudioFile);
_snprintf(ABuf, sizeof(ABuf), ".%02d.delay.%d.w64", CurrentTrack, 0);

@ -407,7 +410,7 @@ static FrameIndex *MakeHaaliIndex(const char *SourceFile, int IndexMask, int Dum
}
#endif
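The "delay writer creation" comment above captures the key idea of the dumping path: the Wave64 dump file is only opened once a frame has actually been decoded, because only then are the codec parameters trustworthy. A minimal self-contained sketch of the same pattern, where AudioDumpContext and DumpWriter are hypothetical stand-ins rather than the real FFMS2 classes:

#include <cstddef>
#include <cstdint>

struct DumpWriter {
	DumpWriter(const char *FileName, int Bits, int Channels, int Rate, bool Float) { /* open file, write header */ }
	void WriteData(const uint8_t *Data, size_t Bytes) { /* append decoded samples */ }
};

struct AudioDumpContext {
	DumpWriter *Writer = nullptr;
};

// Only construct the writer after the first successfully decoded frame, so the
// sample rate, channel count and sample format are known to be valid when the
// header is written.
static void DumpDecodedAudio(AudioDumpContext &Ctx, const char *FileName,
                             int Bits, int Channels, int Rate, bool Float,
                             const uint8_t *Data, size_t Bytes) {
	if (Bytes == 0)
		return;                 // nothing decoded yet, keep waiting
	if (!Ctx.Writer)
		Ctx.Writer = new DumpWriter(FileName, Bits, Channels, Rate, Float);
	Ctx.Writer->WriteData(Data, Bytes);
}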
static FrameIndex *MakeMatroskaIndex(const char *SourceFile, int IndexMask, int DumpMask, const char *AudioFile, bool IgnoreDecodeErrors, IndexCallback IP, void *Private, char *ErrorMsg, unsigned MsgSize) {
static FFIndex *MakeMatroskaIndex(const char *SourceFile, int IndexMask, int DumpMask, const char *AudioFile, bool IgnoreDecodeErrors, TIndexCallback IP, void *Private, char *ErrorMsg, unsigned MsgSize) {
MatroskaFile *MF;
char ErrorMessage[256];
MatroskaReaderContext MC;

@ -480,11 +483,11 @@ static FrameIndex *MakeMatroskaIndex(const char *SourceFile, int IndexMask, int
int64_t SourceSize = _ftelli64(MC.ST.fp);
_fseeki64(MC.ST.fp, CurrentPos, SEEK_SET);

FrameIndex *TrackIndices = new FrameIndex();
FFIndex *TrackIndices = new FFIndex();
TrackIndices->Decoder = 1;

for (unsigned int i = 0; i < mkv_GetNumTracks(MF); i++)
TrackIndices->push_back(FrameInfoVector(mkv_TruncFloat(mkv_GetTrackInfo(MF, i)->TimecodeScale), 1000000, mkv_GetTrackInfo(MF, i)->Type - 1));
TrackIndices->push_back(FFTrack(mkv_TruncFloat(mkv_GetTrackInfo(MF, i)->TimecodeScale), 1000000, mkv_GetTrackInfo(MF, i)->Type - 1));

ulonglong StartTime, EndTime, FilePos;
unsigned int Track, FrameFlags, FrameSize;

@ -503,9 +506,9 @@ static FrameIndex *MakeMatroskaIndex(const char *SourceFile, int IndexMask, int

// Only create index entries for video for now to save space
if (mkv_GetTrackInfo(MF, Track)->Type == TT_VIDEO) {
(*TrackIndices)[Track].push_back(FrameInfo(StartTime, (FrameFlags & FRAME_KF) != 0));
(*TrackIndices)[Track].push_back(TFrameInfo(StartTime, (FrameFlags & FRAME_KF) != 0));
} else if (mkv_GetTrackInfo(MF, Track)->Type == TT_AUDIO && (IndexMask & (1 << Track))) {
(*TrackIndices)[Track].push_back(FrameInfo(AudioContexts[Track].CurrentSample, FilePos, FrameSize, (FrameFlags & FRAME_KF) != 0));
(*TrackIndices)[Track].push_back(TFrameInfo(AudioContexts[Track].CurrentSample, FilePos, FrameSize, (FrameFlags & FRAME_KF) != 0));
ReadFrame(FilePos, FrameSize, AudioContexts[Track].CS, MC, ErrorMsg, MsgSize);
AVCodecContext *AudioCodecContext = AudioContexts[Track].CTX;
TempPacket.data = MC.Buffer;

@ -557,7 +560,7 @@ static FrameIndex *MakeMatroskaIndex(const char *SourceFile, int IndexMask, int
return TrackIndices;
}
FrameIndex *MakeIndex(const char *SourceFile, int IndexMask, int DumpMask, const char *AudioFile, bool IgnoreDecodeErrors, IndexCallback IP, void *Private, char *ErrorMsg, unsigned MsgSize) {
FFIndex *MakeIndex(const char *SourceFile, int IndexMask, int DumpMask, const char *AudioFile, bool IgnoreDecodeErrors, TIndexCallback IP, void *Private, char *ErrorMsg, unsigned MsgSize) {
AVFormatContext *FormatContext = NULL;
IndexMask |= DumpMask;

@ -620,11 +623,11 @@ FrameIndex *MakeIndex(const char *SourceFile, int IndexMask, int DumpMask, const

//

FrameIndex *TrackIndices = new FrameIndex();
FFIndex *TrackIndices = new FFIndex();
TrackIndices->Decoder = 0;

for (unsigned int i = 0; i < FormatContext->nb_streams; i++)
TrackIndices->push_back(FrameInfoVector((int64_t)FormatContext->streams[i]->time_base.num * 1000,
TrackIndices->push_back(FFTrack((int64_t)FormatContext->streams[i]->time_base.num * 1000,
FormatContext->streams[i]->time_base.den,
FormatContext->streams[i]->codec->codec_type));

@ -643,9 +646,9 @@ FrameIndex *MakeIndex(const char *SourceFile, int IndexMask, int DumpMask, const

// Only create index entries for video for now to save space
if (FormatContext->streams[Packet.stream_index]->codec->codec_type == CODEC_TYPE_VIDEO) {
(*TrackIndices)[Packet.stream_index].push_back(FrameInfo(Packet.dts, (Packet.flags & PKT_FLAG_KEY) ? 1 : 0));
(*TrackIndices)[Packet.stream_index].push_back(TFrameInfo(Packet.dts, (Packet.flags & PKT_FLAG_KEY) ? 1 : 0));
} else if (FormatContext->streams[Packet.stream_index]->codec->codec_type == CODEC_TYPE_AUDIO && (IndexMask & (1 << Packet.stream_index))) {
(*TrackIndices)[Packet.stream_index].push_back(FrameInfo(Packet.dts, AudioContexts[Packet.stream_index].CurrentSample, (Packet.flags & PKT_FLAG_KEY) ? 1 : 0));
(*TrackIndices)[Packet.stream_index].push_back(TFrameInfo(Packet.dts, AudioContexts[Packet.stream_index].CurrentSample, (Packet.flags & PKT_FLAG_KEY) ? 1 : 0));
AVCodecContext *AudioCodecContext = FormatContext->streams[Packet.stream_index]->codec;
TempPacket.data = Packet.data;
TempPacket.size = Packet.size;

@ -699,7 +702,7 @@ FrameIndex *MakeIndex(const char *SourceFile, int IndexMask, int DumpMask, const
return TrackIndices;
}
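Both mask parameters are per-track bit masks (bit n selects track n), and the IndexMask |= DumpMask line above means any track selected for dumping is indexed as well. A hedged calling sketch; the file names, buffer size and the absence of a progress callback are illustrative only:

char ErrorMsg[1024];
// Dump audio track 1 to a Wave64 file derived from the AudioFile base name while
// indexing; because of IndexMask |= DumpMask the track is also added to the index.
FFIndex *Index = MakeIndex("input.mkv", 0, 1 << 1, "input.mkv", true, NULL, NULL, ErrorMsg, sizeof(ErrorMsg));
if (!Index) {
	// ErrorMsg holds a description of the failure.
}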
FrameIndex *ReadIndex(const char *IndexFile, char *ErrorMsg, unsigned MsgSize) {
FFIndex *ReadIndex(const char *IndexFile, char *ErrorMsg, unsigned MsgSize) {
std::ifstream Index(IndexFile, std::ios::in | std::ios::binary);

if (!Index.is_open()) {

@ -720,7 +723,7 @@ FrameIndex *ReadIndex(const char *IndexFile, char *ErrorMsg, unsigned MsgSize) {
return NULL;
}

FrameIndex *TrackIndices = new FrameIndex();
FFIndex *TrackIndices = new FFIndex();

try {

@ -736,11 +739,11 @@ FrameIndex *ReadIndex(const char *IndexFile, char *ErrorMsg, unsigned MsgSize) {
Index.read(reinterpret_cast<char *>(&Den), sizeof(Den));
size_t Frames;
Index.read(reinterpret_cast<char *>(&Frames), sizeof(Frames));
TrackIndices->push_back(FrameInfoVector(Num, Den, TT));
TrackIndices->push_back(FFTrack(Num, Den, TT));

FrameInfo FI(0, false);
TFrameInfo FI(0, false);
for (size_t j = 0; j < Frames; j++) {
Index.read(reinterpret_cast<char *>(&FI), sizeof(FrameInfo));
Index.read(reinterpret_cast<char *>(&FI), sizeof(TFrameInfo));
TrackIndices->at(i).push_back(FI);
}
}

@ -754,7 +757,7 @@ FrameIndex *ReadIndex(const char *IndexFile, char *ErrorMsg, unsigned MsgSize) {
return TrackIndices;
}
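Taken together, ReadIndex, MakeIndex and WriteIndex support the usual cache-file pattern. A hedged sketch of how a caller might combine them; the file names and the lack of a progress callback are illustrative only:

char ErrorMsg[1024];
FFIndex *Index = ReadIndex("input.mkv.ffindex", ErrorMsg, sizeof(ErrorMsg));
if (!Index) {
	// No usable cached index: build one from the source file and persist it for next time.
	Index = MakeIndex("input.mkv", -1, 0, NULL, true, NULL, NULL, ErrorMsg, sizeof(ErrorMsg));
	if (Index)
		WriteIndex("input.mkv.ffindex", Index, ErrorMsg, sizeof(ErrorMsg));
}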
int FrameInfoVector::WriteTimecodes(const char *TimecodeFile, char *ErrorMsg, unsigned MsgSize) {
int FFTrack::WriteTimecodes(const char *TimecodeFile, char *ErrorMsg, unsigned MsgSize) {
std::ofstream Timecodes(TimecodeFile, std::ios::out | std::ios::trunc);

if (!Timecodes.is_open()) {

@ -770,14 +773,14 @@ int FrameInfoVector::WriteTimecodes(const char *TimecodeFile, char *ErrorMsg, un
return 0;
}

int FrameInfoVector::FrameFromDTS(int64_t DTS) {
int FFTrack::FrameFromDTS(int64_t DTS) {
for (int i = 0; i < static_cast<int>(size()); i++)
if (at(i).DTS == DTS)
return i;
return -1;
}

int FrameInfoVector::ClosestFrameFromDTS(int64_t DTS) {
int FFTrack::ClosestFrameFromDTS(int64_t DTS) {
int Frame = 0;
int64_t BestDiff = 0xFFFFFFFFFFFFFFLL; // big number
for (int i = 0; i < static_cast<int>(size()); i++) {

@ -791,21 +794,21 @@ int FrameInfoVector::ClosestFrameFromDTS(int64_t DTS) {
return Frame;
}

int FrameInfoVector::FindClosestKeyFrame(int Frame) {
Frame = FFMIN(FFMAX(Frame, 0), size() - 1);
int FFTrack::FindClosestKeyFrame(int Frame) {
Frame = FFMIN(FFMAX(Frame, 0), static_cast<int>(size()) - 1);
for (int i = Frame; i > 0; i--)
if (at(i).KeyFrame)
return i;
return 0;
}

FrameInfoVector::FrameInfoVector() {
FFTrack::FFTrack() {
this->TT = 0;
this->TB.Num = 0;
this->TB.Den = 0;
}

FrameInfoVector::FrameInfoVector(int64_t Num, int64_t Den, int TT) {
FFTrack::FFTrack(int64_t Num, int64_t Den, int TT) {
this->TT = TT;
this->TB.Num = Num;
this->TB.Den = Den;
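These helpers are the kind of lookups a frame-accurate source needs when seeking: map a timestamp to a frame, then back up to a keyframe that decoding can safely start from. A minimal sketch, assuming Frames is the FFTrack of the current video track and TargetDTS is the timestamp being sought (both names are placeholders):

// Map a container DTS to a frame number, then back up to the nearest preceding
// keyframe so the decoder can be restarted from a clean point.
int Frame = Frames.ClosestFrameFromDTS(TargetDTS);
int SeekFrame = Frames.FindClosestKeyFrame(Frame);
int64_t SeekDTS = Frames[SeekFrame].DTS;   // timestamp to seek the demuxer to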
@ -35,27 +35,25 @@ struct IndexHeader {
uint32_t Decoder;
};

class FrameInfoVector : public std::vector<FrameInfo> {
public:
struct FFTrack : public std::vector<TFrameInfo> {
int TT;
TrackTimeBase TB;
TTrackTimeBase TB;

int FindClosestKeyFrame(int Frame);
int FrameFromDTS(int64_t DTS);
int ClosestFrameFromDTS(int64_t DTS);
int WriteTimecodes(const char *TimecodeFile, char *ErrorMsg, unsigned MsgSize);

FrameInfoVector();
FrameInfoVector(int64_t Num, int64_t Den, int TT);
FFTrack();
FFTrack(int64_t Num, int64_t Den, int TT);
};

class FrameIndex : public std::vector<FrameInfoVector> {
public:
struct FFIndex : public std::vector<FFTrack> {
int Decoder;
};

FrameIndex *MakeIndex(const char *SourceFile, int IndexMask, int DumpMask, const char *AudioFile, bool IgnoreDecodeErrors, IndexCallback IP, void *Private, char *ErrorMsg, unsigned MsgSize);
FrameIndex *ReadIndex(const char *IndexFile, char *ErrorMsg, unsigned MsgSize);
int WriteIndex(const char *IndexFile, FrameIndex *TrackIndices, char *ErrorMsg, unsigned MsgSize);
FFIndex *MakeIndex(const char *SourceFile, int IndexMask, int DumpMask, const char *AudioFile, bool IgnoreDecodeErrors, TIndexCallback IP, void *Private, char *ErrorMsg, unsigned MsgSize);
FFIndex *ReadIndex(const char *IndexFile, char *ErrorMsg, unsigned MsgSize);
int WriteIndex(const char *IndexFile, FFIndex *Index, char *ErrorMsg, unsigned MsgSize);

#endif
@ -108,7 +108,7 @@ longlong StdIoGetFileSize(StdIoStream *st) {
}

void InitStdIoStream(StdIoStream *st) {
memset(st,0,sizeof(st));
memset(st,0,sizeof(StdIoStream));
st->base.read = StdIoRead;
st->base.scan = StdIoScan;
st->base.getcachesize = StdIoGetCacheSize;
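The memset change above fixes a classic mistake: sizeof(st) is the size of the pointer (4 or 8 bytes), not of the structure it points to, so most of StdIoStream was left uninitialized. A minimal illustration of the difference (Example is a made-up type, not part of this file):

#include <string.h>

struct Example { int a; double b[16]; };

void Wrong(struct Example *e) {
	memset(e, 0, sizeof(e));    /* sizeof(pointer): clears only the first 4 or 8 bytes */
}

void Right(struct Example *e) {
	memset(e, 0, sizeof(*e));   /* sizeof(object): clears the whole struct */
	/* equivalently: memset(e, 0, sizeof(struct Example)); */
}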
@ -19,6 +19,7 @@
// THE SOFTWARE.

#include "utils.h"
#include "indexing.h"
#include <string.h>
#include <errno.h>
#ifdef _MSC_VER

@ -137,6 +138,15 @@ void InitNullPacket(AVPacket *pkt) {
pkt->size = 0;
}

void FillAP(TAudioProperties &AP, AVCodecContext *CTX, FFTrack &Frames) {
AP.BitsPerSample = av_get_bits_per_sample_format(CTX->sample_fmt);
AP.Channels = CTX->channels;
AP.Float = AudioFMTIsFloat(CTX->sample_fmt);
AP.SampleRate = CTX->sample_rate;
AP.NumSamples = (Frames.back()).SampleStart;
AP.FirstTime = ((Frames.front().DTS * Frames.TB.Num) / (double)Frames.TB.Den) / 1000;
}
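FirstTime is derived from the first frame's DTS and the track time base: one DTS tick corresponds to TB.Num/TB.Den milliseconds, so the extra division by 1000 converts the result to seconds. A worked example under assumed values (a hypothetical lavf stream with time_base 1/1000, as set up in MakeIndex):

// TB.Num = time_base.num * 1000 = 1 * 1000 = 1000, TB.Den = time_base.den = 1000
// Frames.front().DTS = 5400 ticks
// FirstTime = ((5400 * 1000) / 1000.0) / 1000 = 5.4
// i.e. the first audio packet starts 5.4 seconds into the file.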
#ifdef HAALISOURCE

unsigned vtSize(VARIANT &vt) {
@ -31,6 +31,8 @@ extern "C" {
#include "stdiostream.h"
}

#include "ffms.h"

#ifdef HAALISOURCE
# define _WIN32_DCOM
# include <windows.h>

@ -63,6 +65,7 @@ int GetCPUFlags();
int ReadFrame(uint64_t FilePos, unsigned int &FrameSize, CompressedStream *CS, MatroskaReaderContext &Context, char *ErrorMsg, unsigned MsgSize);
bool AudioFMTIsFloat(SampleFormat FMT);
void InitNullPacket(AVPacket *pkt);
void FillAP(TAudioProperties &AP, AVCodecContext *CTX, FFTrack &Frames);
#ifdef HAALISOURCE
unsigned vtSize(VARIANT &vt);
void vtCopy(VARIANT& vt,void *dest);