FFmpegSource 1.10

Originally committed to SVN as r1548.
Fredrik Mellbin 2007-08-31 20:26:51 +00:00
parent 89d076c760
commit 36aecac4c3
9 changed files with 1170 additions and 862 deletions

View File

@ -1,19 +1,5 @@
#include "ffmpegsource.h" #include "ffmpegsource.h"
int GetSWSCPUFlags(IScriptEnvironment *Env) {
int Flags = 0;
long CPUFlags = Env->GetCPUFlags();
if (CPUFlags & CPUF_MMX)
CPUFlags |= SWS_CPU_CAPS_MMX;
if (CPUFlags & CPUF_INTEGER_SSE)
CPUFlags |= SWS_CPU_CAPS_MMX2;
if (CPUFlags & CPUF_3DNOW)
CPUFlags |= SWS_CPU_CAPS_3DNOW;
return Flags;
}
int FFBase::FrameFromDTS(int64_t ADTS) {
for (int i = 0; i < (int)FrameToDTS.size(); i++)
if (FrameToDTS[i].DTS == ADTS)
@ -101,37 +87,203 @@ bool FFBase::SaveTimecodesToFile(const char *ATimecodeFile, int64_t ScaleD, int6
return true;
}
bool FFBase::PrepareAudioCache(const char *AAudioCacheFile, const char *ASource, int AAudioTrack, IScriptEnvironment *Env) {
static FLAC__StreamDecoderReadStatus FLACStreamDecoderReadCallback(const FLAC__StreamDecoder *ADecoder, FLAC__byte ABuffer[], size_t *ABytes, FFBase *AOwner) {
if(*ABytes > 0) {
*ABytes = fread(ABuffer, sizeof(FLAC__byte), *ABytes, AOwner->FCFile);
if(ferror(AOwner->FCFile))
return FLAC__STREAM_DECODER_READ_STATUS_ABORT;
else if(*ABytes == 0)
return FLAC__STREAM_DECODER_READ_STATUS_END_OF_STREAM;
else
return FLAC__STREAM_DECODER_READ_STATUS_CONTINUE;
} else {
return FLAC__STREAM_DECODER_READ_STATUS_ABORT;
}
}
static FLAC__StreamDecoderSeekStatus FLACStreamDecoderSeekCallback(const FLAC__StreamDecoder *ADecoder, FLAC__uint64 AAbsoluteByteOffset, FFBase *AOwner) {
if(_fseeki64(AOwner->FCFile, AAbsoluteByteOffset, SEEK_SET) < 0)
return FLAC__STREAM_DECODER_SEEK_STATUS_ERROR;
else
return FLAC__STREAM_DECODER_SEEK_STATUS_OK;
}
static FLAC__StreamDecoderTellStatus FLACStreamDecoderTellCallback(const FLAC__StreamDecoder *ADecoder, FLAC__uint64 *AAbsoluteByteOffset, FFBase *AOwner) {
__int64 Pos;
if ((Pos = _ftelli64(AOwner->FCFile)) < 0) {
return FLAC__STREAM_DECODER_TELL_STATUS_ERROR;
} else {
*AAbsoluteByteOffset = (FLAC__uint64)Pos;
return FLAC__STREAM_DECODER_TELL_STATUS_OK;
}
}
static FLAC__StreamDecoderLengthStatus FLACStreamDecoderLengthCallback(const FLAC__StreamDecoder *ADecoder, FLAC__uint64 *AStreamLength, FFBase *AOwner) {
__int64 OriginalPos;
__int64 Length;
if ((OriginalPos = _ftelli64(AOwner->FCFile)) < 0)
return FLAC__STREAM_DECODER_LENGTH_STATUS_ERROR;
_fseeki64(AOwner->FCFile, 0, SEEK_END);
if ((Length = _ftelli64(AOwner->FCFile)) < 0)
return FLAC__STREAM_DECODER_LENGTH_STATUS_ERROR;
_fseeki64(AOwner->FCFile, OriginalPos, SEEK_SET);
*AStreamLength = Length;
return FLAC__STREAM_DECODER_LENGTH_STATUS_OK;
}
static FLAC__bool FLACStreamDecoderEofCallback(const FLAC__StreamDecoder *ADecoder, FFBase *AOwner) {
return feof(AOwner->FCFile) ? true : false;
}
static FLAC__StreamDecoderWriteStatus FLACStreamDecoderWriteCallback(const FLAC__StreamDecoder *ADecoder, const FLAC__Frame *AFrame, const FLAC__int32 *const ABuffer[], FFBase *AOwner) {
unsigned Blocksize = AFrame->header.blocksize;
const VideoInfo VI = AOwner->GetVideoInfo();
int16_t *Buffer = (int16_t *)AOwner->FCBuffer;
int j = 0;
while (AOwner->FCCount > 0 && Blocksize > 0) {
for (int i = 0; i < VI.nchannels; i++)
*Buffer++ = ABuffer[i][j];
j++;
AOwner->FCCount--;
Blocksize--;
}
AOwner->FCBuffer = Buffer;
return FLAC__STREAM_DECODER_WRITE_STATUS_CONTINUE;
}
static void FLACStreamDecoderMetadataCallback(const FLAC__StreamDecoder *ADecoder, const FLAC__StreamMetadata *AMetadata, FFBase *AOwner) {
AOwner->FCError = (AMetadata->data.stream_info.total_samples <= 0);
}
static void FLACStreamDecoderErrorCallback(const FLAC__StreamDecoder *ADecoder, FLAC__StreamDecoderErrorStatus AStatus, FFBase *AOwner) {
AOwner->FCError = true;
}
bool IsWritable = false;
AudioCache = fopen(AAudioCacheFile, "rb");
if (!AudioCache) {
AudioCache = fopen(AAudioCacheFile, "wb+");
if (!AudioCache)
Env->ThrowError("FFmpegSource: Failed to open the audio cache file for writing");
IsWritable = true;
}
_fseeki64(AudioCache, 0, SEEK_END);
int64_t CacheSize = _ftelli64(AudioCache);
if (CacheSize > 0) {
VI.num_audio_samples = VI.AudioSamplesFromBytes(CacheSize);
return true;
}
if (!IsWritable) {
AudioCache = freopen(AAudioCacheFile, "wb+", AudioCache);
if (!AudioCache)
Env->ThrowError("FFmpegSource: Failed to open the audio cache file for writing");
}
return false;
}

bool FFBase::OpenAudioCache(const char *AAudioCacheFile, const char *ASource, int AAudioTrack, IScriptEnvironment *Env) {
char DefaultCacheFilename[1024];
sprintf(DefaultCacheFilename, "%s.ffa%dcache", ASource, AAudioTrack);
if (!strcmp(AAudioCacheFile, ""))
AAudioCacheFile = DefaultCacheFilename;
// Is an empty file?
FCFile = fopen(AAudioCacheFile, "rb");
int64_t CacheSize;
if (FCFile) {
_fseeki64(FCFile, 0, SEEK_END);
CacheSize = _ftelli64(FCFile);
_fseeki64(FCFile, 0, SEEK_SET);
if (CacheSize <= 0) {
fclose(FCFile);
FCFile = NULL;
return false;
}
} else {
return false;
}
// If FLAC?
FLACAudioCache = FLAC__stream_decoder_new();
if (FLAC__stream_decoder_init_stream(FLACAudioCache,
&(FLAC__StreamDecoderReadCallback)FLACStreamDecoderReadCallback,
&(FLAC__StreamDecoderSeekCallback)FLACStreamDecoderSeekCallback,
&(FLAC__StreamDecoderTellCallback)FLACStreamDecoderTellCallback,
&(FLAC__StreamDecoderLengthCallback)FLACStreamDecoderLengthCallback,
&(FLAC__StreamDecoderEofCallback)FLACStreamDecoderEofCallback,
&(FLAC__StreamDecoderWriteCallback)FLACStreamDecoderWriteCallback,
&(FLAC__StreamDecoderMetadataCallback)FLACStreamDecoderMetadataCallback, &(FLAC__StreamDecoderErrorCallback)FLACStreamDecoderErrorCallback, this) == FLAC__STREAM_DECODER_INIT_STATUS_OK) {
FCError = true;
FLAC__stream_decoder_process_until_end_of_metadata(FLACAudioCache);
if (!FCError) {
VI.num_audio_samples = FLAC__stream_decoder_get_total_samples(FLACAudioCache);
AudioCacheType = acFLAC;
return true;
}
}
FLAC__stream_decoder_delete(FLACAudioCache);
FLACAudioCache = NULL;
// Raw audio
VI.num_audio_samples = VI.AudioSamplesFromBytes(CacheSize);
AudioCacheType = acRaw;
RawAudioCache = FCFile;
FCFile = NULL;
return true;
}
static FLAC__StreamEncoderWriteStatus FLACStreamEncoderWriteCallback(const FLAC__StreamEncoder *AEncoder, const FLAC__byte ABuffer[], size_t ABytes, unsigned ASamples, unsigned ACurrentFrame, FFBase *AOwner) {
fwrite(ABuffer, sizeof(FLAC__byte), ABytes, AOwner->FCFile);
if(ferror(AOwner->FCFile))
return FLAC__STREAM_ENCODER_WRITE_STATUS_FATAL_ERROR;
else
return FLAC__STREAM_ENCODER_WRITE_STATUS_OK;
}
static FLAC__StreamEncoderSeekStatus FLACStreamEncoderSeekCallback(const FLAC__StreamEncoder *AEncoder, FLAC__uint64 AAbsoluteByteOffset, FFBase *AOwner) {
if(_fseeki64(AOwner->FCFile, AAbsoluteByteOffset, SEEK_SET) < 0)
return FLAC__STREAM_ENCODER_SEEK_STATUS_ERROR;
else
return FLAC__STREAM_ENCODER_SEEK_STATUS_OK;
}
static FLAC__StreamEncoderTellStatus FLACStreamEncoderTellCallback(const FLAC__StreamEncoder *AEncoder, FLAC__uint64 *AAbsoluteByteOffset, FFBase *AOwner) {
__int64 Pos;
if((Pos = _ftelli64(AOwner->FCFile)) < 0) {
return FLAC__STREAM_ENCODER_TELL_STATUS_ERROR;
} else {
*AAbsoluteByteOffset = (FLAC__uint64)Pos;
return FLAC__STREAM_ENCODER_TELL_STATUS_OK;
}
}
FLAC__StreamEncoder *FFBase::NewFLACCacheWriter(const char *AAudioCacheFile, const char *ASource, int AAudioTrack, int ACompression, IScriptEnvironment *Env) {
char DefaultCacheFilename[1024];
sprintf(DefaultCacheFilename, "%s.ffa%dcache", ASource, AAudioTrack);
if (!strcmp(AAudioCacheFile, ""))
AAudioCacheFile = DefaultCacheFilename;
FCFile = fopen(AAudioCacheFile, "wb");
if (!FCFile)
Env->ThrowError("FFmpegSource: Failed to open '%s' for writing", AAudioCacheFile);
FLAC__StreamEncoder *FSE;
FSE = FLAC__stream_encoder_new();
FLAC__stream_encoder_set_channels(FSE, VI.nchannels);
FLAC__stream_encoder_set_bits_per_sample(FSE, VI.BytesPerChannelSample() * 8);
FLAC__stream_encoder_set_sample_rate(FSE, VI.audio_samples_per_second);
FLAC__stream_encoder_set_compression_level(FSE, ACompression);
if (FLAC__stream_encoder_init_stream(FSE, &(FLAC__StreamEncoderWriteCallback)FLACStreamEncoderWriteCallback,
&(FLAC__StreamEncoderSeekCallback)FLACStreamEncoderSeekCallback,
&(FLAC__StreamEncoderTellCallback)FLACStreamEncoderTellCallback, NULL, this)
!= FLAC__STREAM_ENCODER_INIT_STATUS_OK)
Env->ThrowError("FFmpegSource: Failed to initialize the FLAC encoder for '%s'", AAudioCacheFile);
return FSE;
}
void FFBase::CloseFLACCacheWriter(FLAC__StreamEncoder *AFSE) {
FLAC__stream_encoder_finish(AFSE);
FLAC__stream_encoder_delete(AFSE);
fclose(FCFile);
FCFile = NULL;
}
FILE *FFBase::NewRawCacheWriter(const char *AAudioCacheFile, const char *ASource, int AAudioTrack, IScriptEnvironment *Env) {
char DefaultCacheFilename[1024];
sprintf(DefaultCacheFilename, "%s.ffa%dcache", ASource, AAudioTrack);
if (!strcmp(AAudioCacheFile, ""))
AAudioCacheFile = DefaultCacheFilename;
FILE *RCF = fopen(AAudioCacheFile, "wb");
if (RCF == NULL)
Env->ThrowError("FFmpegSource: Failed to open '%s' for writing", AAudioCacheFile);
return RCF;
}
void FFBase::CloseRawCacheWriter(FILE *ARawCache) {
fclose(ARawCache);
}
void FFBase::InitPP(int AWidth, int AHeight, const char *APPString, int AQuality, int APixelFormat, IScriptEnvironment *Env) {
@ -164,9 +316,10 @@ void FFBase::InitPP(int AWidth, int AHeight, const char *APPString, int AQuality
void FFBase::SetOutputFormat(int ACurrentFormat, IScriptEnvironment *Env) {
int Loss;
int BestFormat = avcodec_find_best_pix_fmt((1 << PIX_FMT_YUV420P) | (1 << PIX_FMT_YUYV422) | (1 << PIX_FMT_RGB32) | (1 << PIX_FMT_BGR24), ACurrentFormat, 1 /* Required to prevent pointless RGB32 => RGB24 conversion */, &Loss);
int BestFormat = avcodec_find_best_pix_fmt((1 << PIX_FMT_YUVJ420P) | (1 << PIX_FMT_YUV420P) | (1 << PIX_FMT_YUYV422) | (1 << PIX_FMT_RGB32) | (1 << PIX_FMT_BGR24), ACurrentFormat, 1 /* Required to prevent pointless RGB32 => RGB24 conversion */, &Loss);
switch (BestFormat) {
case PIX_FMT_YUVJ420P: // stupid yv12 distinctions, also inexplicably completely undeniably incompatible with all other supported output formats
case PIX_FMT_YUV420P: VI.pixel_type = VideoInfo::CS_I420; break;
case PIX_FMT_YUYV422: VI.pixel_type = VideoInfo::CS_YUY2; break;
case PIX_FMT_RGB32: VI.pixel_type = VideoInfo::CS_BGR32; break;
@ -220,22 +373,47 @@ PVideoFrame FFBase::OutputFrame(AVFrame *AFrame, IScriptEnvironment *Env) {
}

void __stdcall FFBase::GetAudio(void* Buf, __int64 Start, __int64 Count, IScriptEnvironment* Env) {
_fseeki64(AudioCache, VI.BytesFromAudioSamples(Start), SEEK_SET);
fread(Buf, 1, VI.BytesFromAudioSamples(Count), AudioCache);
}

void __stdcall FFBase::GetAudio(void* Buf, __int64 Start, __int64 Count, IScriptEnvironment* Env) {
if (AudioCacheType == acRaw) {
_fseeki64(RawAudioCache, VI.BytesFromAudioSamples(Start), SEEK_SET);
fread(Buf, 1, VI.BytesFromAudioSamples(Count), RawAudioCache);
} else if (AudioCacheType == acFLAC) {
FCCount = Count;
FCBuffer = Buf;
FLAC__stream_decoder_seek_absolute(FLACAudioCache, Start);
while (FCCount > 0)
FLAC__stream_decoder_process_single(FLACAudioCache);
} else {
Env->ThrowError("FFmpegSource: Audio requested but none available");
}
}
FFBase::FFBase() {
memset(&VI, 0, sizeof(VI));
AudioCache = NULL;
AudioCacheType = acNone;
FCError = false;
RawAudioCache = NULL;
FLACAudioCache = NULL;
PPContext = NULL;
PPMode = NULL;
SWS = NULL;
DecodingBuffer = new uint8_t[AVCODEC_MAX_AUDIO_FRAME_SIZE];
FLACBuffer = new FLAC__int32[AVCODEC_MAX_AUDIO_FRAME_SIZE];
ConvertToFormat = PIX_FMT_NONE;
memset(&PPPicture, 0, sizeof(PPPicture));
DecodeFrame = avcodec_alloc_frame();
}
FFBase::~FFBase() {
delete [] DecodingBuffer;
delete [] FLACBuffer;
if (RawAudioCache)
fclose(RawAudioCache);
if (FLACAudioCache) {
FLAC__stream_decoder_finish(FLACAudioCache);
FLAC__stream_decoder_delete(FLACAudioCache);
}
if (FCFile)
fclose(FCFile);
if (SWS)
sws_freeContext(SWS);
if (PPMode)
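The audio cache is read back either as raw PCM or through libFLAC's custom-stream interface above. As an illustration only (not part of this commit), a minimal standalone sketch of the same callback wiring over a plain FILE*, with hypothetical names and most optional callbacks left NULL; the plugin itself passes `this` as the client data and supplies every callback with 64-bit file offsets so caches larger than 2GB keep working:

#include <cstdio>
#include <stream_decoder.h>

// Client data handed to every callback; the plugin uses the FFBase object itself here.
struct CacheReader { FILE *F; };

static FLAC__StreamDecoderReadStatus ReadCB(const FLAC__StreamDecoder *, FLAC__byte Buffer[], size_t *Bytes, void *Client) {
	FILE *F = ((CacheReader *)Client)->F;
	*Bytes = fread(Buffer, 1, *Bytes, F);
	if (ferror(F))
		return FLAC__STREAM_DECODER_READ_STATUS_ABORT;
	return *Bytes == 0 ? FLAC__STREAM_DECODER_READ_STATUS_END_OF_STREAM : FLAC__STREAM_DECODER_READ_STATUS_CONTINUE;
}

static FLAC__StreamDecoderWriteStatus WriteCB(const FLAC__StreamDecoder *, const FLAC__Frame *, const FLAC__int32 *const [], void *) {
	// Decoded samples arrive here, one planar FLAC__int32 array per channel.
	return FLAC__STREAM_DECODER_WRITE_STATUS_CONTINUE;
}

static void ErrorCB(const FLAC__StreamDecoder *, FLAC__StreamDecoderErrorStatus, void *) {}

// Returns true if the file parses as FLAC; read/write/error are the only mandatory callbacks.
bool ProbeFLACCache(CacheReader *R) {
	FLAC__StreamDecoder *D = FLAC__stream_decoder_new();
	bool OK = FLAC__stream_decoder_init_stream(D, ReadCB, NULL, NULL, NULL, NULL, WriteCB, NULL, ErrorCB, R) == FLAC__STREAM_DECODER_INIT_STATUS_OK
		&& FLAC__stream_decoder_process_until_end_of_metadata(D);
	FLAC__stream_decoder_delete(D);
	return OK;
}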

View File

@ -0,0 +1,458 @@
#include "ffmpegsource.h"
#include "stdiostream.c"
class FFMatroskaSource : public FFBase {
private:
StdIoStream ST;
unsigned int BufferSize;
CompressedStream *VideoCS;
CompressedStream *AudioCS;
AVCodecContext *VideoCodecContext;
MatroskaFile *MF;
char ErrorMessage[256];
uint8_t *Buffer;
int CurrentFrame;
int ReadFrame(uint64_t AFilePos, unsigned int AFrameSize, CompressedStream *ACS, IScriptEnvironment *Env);
int DecodeNextFrame(AVFrame *AFrame, int64_t *AFirstStartTime, IScriptEnvironment* Env);
int GetTrackIndex(int Index, unsigned char ATrackType, IScriptEnvironment *Env) {
if (Index == -1)
for (unsigned int i = 0; i < mkv_GetNumTracks(MF); i++)
if (mkv_GetTrackInfo(MF, i)->Type == ATrackType) {
Index = i;
break;
}
if (Index == -1)
Env->ThrowError("FFmpegSource: No %s track found", (ATrackType & TT_VIDEO) ? "video" : "audio");
if (Index <= -2)
return -2;
if (Index >= (int)mkv_GetNumTracks(MF))
Env->ThrowError("FFmpegSource: Invalid %s track number", (ATrackType & TT_VIDEO) ? "video" : "audio");
TrackInfo *TI = mkv_GetTrackInfo(MF, Index);
if (TI->Type != ATrackType)
Env->ThrowError("FFmpegSource: Selected track is not %s", (ATrackType & TT_VIDEO) ? "video" : "audio");
return Index;
}
public:
FFMatroskaSource(const char *ASource, int AVideoTrack, int AAudioTrack, const char *ATimecodes, bool AVCache, const char *AVideoCache, const char *AAudioCache, int AACCompression, const char *APPString, int AQuality, IScriptEnvironment* Env) {
CurrentFrame = 0;
int VideoTrack;
int AudioTrack;
unsigned int TrackMask = ~0;
AVCodecContext *AudioCodecContext = NULL;
AVCodec *AudioCodec = NULL;
VideoCodecContext = NULL;
AVCodec *VideoCodec = NULL;
TrackInfo *VideoTI = NULL;
BufferSize = 0;
Buffer = NULL;
VideoCS = NULL;
AudioCS = NULL;
memset(&ST,0,sizeof(ST));
ST.base.read = (int (__cdecl *)(InputStream *,ulonglong,void *,int))StdIoRead;
ST.base.scan = (longlong (__cdecl *)(InputStream *,ulonglong,unsigned int))StdIoScan;
ST.base.getcachesize = (unsigned int (__cdecl *)(InputStream *))StdIoGetCacheSize;
ST.base.geterror = (const char *(__cdecl *)(InputStream *))StdIoGetLastError;
ST.base.memalloc = (void *(__cdecl *)(InputStream *,size_t))StdIoMalloc;
ST.base.memrealloc = (void *(__cdecl *)(InputStream *,void *,size_t))StdIoRealloc;
ST.base.memfree = (void (__cdecl *)(InputStream *,void *)) StdIoFree;
ST.base.progress = (int (__cdecl *)(InputStream *,ulonglong,ulonglong))StdIoProgress;
ST.fp = fopen(ASource, "rb");
if (ST.fp == NULL)
Env->ThrowError("FFmpegSource: Can't open '%s': %s", ASource, strerror(errno));
setvbuf(ST.fp, NULL, _IOFBF, CACHESIZE);
MF = mkv_OpenEx(&ST.base, 0, 0, ErrorMessage, sizeof(ErrorMessage));
if (MF == NULL) {
fclose(ST.fp);
Env->ThrowError("FFmpegSource: Can't parse Matroska file: %s", ErrorMessage);
}
VideoTrack = GetTrackIndex(AVideoTrack, TT_VIDEO, Env);
AudioTrack = GetTrackIndex(AAudioTrack, TT_AUDIO, Env);
bool VCacheIsValid = true;
bool ACacheIsValid = true;
if (VideoTrack >= 0) {
VCacheIsValid = LoadFrameInfoFromFile(AVideoCache, ASource, VideoTrack);
VideoTI = mkv_GetTrackInfo(MF, VideoTrack);
if (VideoTI->CompEnabled) {
VideoCS = cs_Create(MF, VideoTrack, ErrorMessage, sizeof(ErrorMessage));
if (VideoCS == NULL)
Env->ThrowError("FFmpegSource: Can't create decompressor: %s", ErrorMessage);
}
VideoCodecContext = avcodec_alloc_context();
VideoCodecContext->extradata = (uint8_t *)VideoTI->CodecPrivate;
VideoCodecContext->extradata_size = VideoTI->CodecPrivateSize;
VideoCodec = avcodec_find_decoder(MatroskaToFFCodecID(VideoTI));
if (VideoCodec == NULL)
Env->ThrowError("FFmpegSource: Video codec not found");
if (avcodec_open(VideoCodecContext, VideoCodec) < 0)
Env->ThrowError("FFmpegSource: Could not open video codec");
// Fix for mpeg2 and other formats where decoding a frame is necessary to get information about the stream
if (VideoCodecContext->pix_fmt == PIX_FMT_NONE) {
mkv_SetTrackMask(MF, ~(1 << VideoTrack));
int64_t Dummy;
DecodeNextFrame(DecodeFrame, &Dummy, Env);
mkv_Seek(MF, 0, MKVF_SEEK_TO_PREV_KEYFRAME);
}
VI.image_type = VideoInfo::IT_TFF;
VI.width = VideoTI->AV.Video.PixelWidth;
VI.height = VideoTI->AV.Video.PixelHeight;
VI.fps_denominator = 1;
VI.fps_numerator = 30;
SetOutputFormat(VideoCodecContext->pix_fmt, Env);
InitPP(VI.width, VI.height, APPString, AQuality, VideoCodecContext->pix_fmt, Env);
if (!VCacheIsValid)
TrackMask &= ~(1 << VideoTrack);
}
if (AudioTrack >= 0) {
TrackInfo *AudioTI = mkv_GetTrackInfo(MF, AudioTrack);
if (AudioTI->CompEnabled) {
AudioCS = cs_Create(MF, AudioTrack, ErrorMessage, sizeof(ErrorMessage));
if (AudioCS == NULL)
Env->ThrowError("FFmpegSource: Can't create decompressor: %s", ErrorMessage);
}
AudioCodecContext = avcodec_alloc_context();
AudioCodecContext->extradata = (uint8_t *)AudioTI->CodecPrivate;
AudioCodecContext->extradata_size = AudioTI->CodecPrivateSize;
AudioCodec = avcodec_find_decoder(MatroskaToFFCodecID(AudioTI));
if (AudioCodec == NULL)
Env->ThrowError("FFmpegSource: Audio codec not found");
if (avcodec_open(AudioCodecContext, AudioCodec) < 0)
Env->ThrowError("FFmpegSource: Could not open audio codec");
// Fix for ac3 and other codecs where decoding a block of audio is required to get information about it
if (AudioCodecContext->channels == 0 || AudioCodecContext->sample_rate == 0) {
mkv_SetTrackMask(MF, ~(1 << AudioTrack));
uint64_t StartTime, EndTime, FilePos;
unsigned int Track, FrameFlags, FrameSize;
mkv_ReadFrame(MF, 0, &Track, &StartTime, &EndTime, &FilePos, &FrameSize, &FrameFlags);
uint8_t DecodingBuffer[AVCODEC_MAX_AUDIO_FRAME_SIZE];
int Size = ReadFrame(FilePos, FrameSize, AudioCS, Env);
uint8_t *Data = Buffer;
while (Size > 0) {
int TempOutputBufSize = AVCODEC_MAX_AUDIO_FRAME_SIZE;
int Ret = avcodec_decode_audio2(AudioCodecContext, (int16_t *)DecodingBuffer, &TempOutputBufSize, Data, Size);
if (Ret < 0)
Env->ThrowError("FFmpegSource: Audio decoding error");
Size -= Ret;
Data += Ret;
}
mkv_Seek(MF, 0, MKVF_SEEK_TO_PREV_KEYFRAME);
}
VI.nchannels = AudioCodecContext->channels;
VI.audio_samples_per_second = AudioCodecContext->sample_rate;
switch (AudioCodecContext->sample_fmt) {
case SAMPLE_FMT_U8: VI.sample_type = SAMPLE_INT8; AACCompression = -1; break;
case SAMPLE_FMT_S16: VI.sample_type = SAMPLE_INT16; break;
case SAMPLE_FMT_S24: VI.sample_type = SAMPLE_INT24; AACCompression = -1; break;
case SAMPLE_FMT_S32: VI.sample_type = SAMPLE_INT32; AACCompression = -1; break;
case SAMPLE_FMT_FLT: VI.sample_type = SAMPLE_FLOAT; AACCompression = -1; break;
default:
Env->ThrowError("FFmpegSource: Unsupported/unknown sample format");
}
ACacheIsValid = OpenAudioCache(AAudioCache, ASource, AudioTrack, Env);
if (!ACacheIsValid)
TrackMask &= ~(1 << AudioTrack);
}
mkv_SetTrackMask(MF, TrackMask);
// Needs to be indexed?
if (!ACacheIsValid || !VCacheIsValid) {
FLAC__StreamEncoder *FSE = NULL;
FILE *RawCache = NULL;
if (!ACacheIsValid)
if (AACCompression >= 0)
AudioCacheType = acFLAC;
else
AudioCacheType = acRaw;
switch (AudioCacheType) {
case acFLAC: FSE = NewFLACCacheWriter(AAudioCache, ASource, AudioTrack, AACCompression, Env); break;
case acRaw: RawCache = NewRawCacheWriter(AAudioCache, ASource, AudioTrack, Env); break;
}
uint64_t StartTime, EndTime, FilePos;
unsigned int Track, FrameFlags, FrameSize;
while (mkv_ReadFrame(MF, 0, &Track, &StartTime, &EndTime, &FilePos, &FrameSize, &FrameFlags) == 0)
if (Track == VideoTrack && !VCacheIsValid) {
FrameToDTS.push_back(FrameInfo(StartTime, (FrameFlags & FRAME_KF) != 0));
VI.num_frames++;
} else if (Track == AudioTrack && !ACacheIsValid) {
int Size = ReadFrame(FilePos, FrameSize, AudioCS, Env);
uint8_t *Data = Buffer;
while (Size > 0) {
int TempOutputBufSize = AVCODEC_MAX_AUDIO_FRAME_SIZE;
int Ret = avcodec_decode_audio2(AudioCodecContext, (int16_t *)DecodingBuffer, &TempOutputBufSize, Data, Size);
if (Ret < 0)
Env->ThrowError("FFmpegSource: Audio decoding error");
int DecodedSamples = VI.AudioSamplesFromBytes(TempOutputBufSize);
Size -= Ret;
Data += Ret;
VI.num_audio_samples += DecodedSamples;
if (AudioCacheType == acFLAC) {
for (int i = 0; i < DecodedSamples * VI.nchannels; i++)
FLACBuffer[i] = ((int16_t *)DecodingBuffer)[i];
FLAC__stream_encoder_process_interleaved(FSE, FLACBuffer, DecodedSamples);
} else if (AudioCacheType == acRaw) {
fwrite(DecodingBuffer, 1, TempOutputBufSize, RawCache);
}
}
}
if (!ACacheIsValid) {
switch (AudioCacheType) {
case acFLAC: CloseFLACCacheWriter(FSE); break;
case acRaw: CloseRawCacheWriter(RawCache); break;
}
ACacheIsValid = OpenAudioCache(AAudioCache, ASource, AudioTrack, Env);
if (!ACacheIsValid)
Env->ThrowError("FFmpegSource: Failed to open newly created audio cache for reading");
}
if (VideoTrack >= 0 && VI.num_frames == 0)
Env->ThrowError("FFmpegSource: Video track contains no frames");
if (AudioTrack >= 0 && VI.num_audio_samples == 0)
Env->ThrowError("FFmpegSource: Audio track contains no samples");
if (VideoTrack >= 0)
mkv_Seek(MF, FrameToDTS.front().DTS, MKVF_SEEK_TO_PREV_KEYFRAME);
if (AVCache && !VCacheIsValid)
if (!SaveFrameInfoToFile(AVideoCache, ASource, VideoTrack))
Env->ThrowError("FFmpegSource: Failed to write video cache info");
}
if (AudioTrack >= 0) {
avcodec_close(AudioCodecContext);
av_free(AudioCodecContext);
}
if (VideoTrack >= 0) {
mkv_SetTrackMask(MF, ~(1 << VideoTrack));
// Calculate the average framerate
if (FrameToDTS.size() >= 2) {
double DTSDiff = (double)(FrameToDTS.back().DTS - FrameToDTS.front().DTS);
VI.fps_denominator = (unsigned int)(DTSDiff * mkv_TruncFloat(VideoTI->TimecodeScale) / (double)1000 / (double)(VI.num_frames - 1) + 0.5);
VI.fps_numerator = 1000000;
}
if (!SaveTimecodesToFile(ATimecodes, mkv_TruncFloat(VideoTI->TimecodeScale), 1000000))
Env->ThrowError("FFmpegSource: Failed to write timecodes");
}
}
~FFMatroskaSource() {
free(Buffer);
mkv_Close(MF);
fclose(ST.fp);
if (VideoCodecContext)
avcodec_close(VideoCodecContext);
av_free(VideoCodecContext);
}
PVideoFrame __stdcall GetFrame(int n, IScriptEnvironment* Env);
};
int FFMatroskaSource::ReadFrame(uint64_t AFilePos, unsigned int AFrameSize, CompressedStream *ACS, IScriptEnvironment *Env) {
if (ACS) {
char CSBuffer[4096];
unsigned int DecompressedFrameSize = 0;
cs_NextFrame(ACS, AFilePos, AFrameSize);
for (;;) {
int ReadBytes = cs_ReadData(ACS, CSBuffer, sizeof(CSBuffer));
if (ReadBytes < 0)
Env->ThrowError("FFmpegSource: Error decompressing data: %s", cs_GetLastError(ACS));
if (ReadBytes == 0) {
return DecompressedFrameSize;
}
if (BufferSize < DecompressedFrameSize + ReadBytes) {
BufferSize = AFrameSize;
Buffer = (uint8_t *)realloc(Buffer, BufferSize);
if (Buffer == NULL)
Env->ThrowError("FFmpegSource: Out of memory");
}
memcpy(Buffer + DecompressedFrameSize, CSBuffer, ReadBytes);
DecompressedFrameSize += ReadBytes;
}
} else {
if (_fseeki64(ST.fp, AFilePos, SEEK_SET))
Env->ThrowError("FFmpegSource: fseek(): %s", strerror(errno));
if (BufferSize < AFrameSize) {
BufferSize = AFrameSize;
Buffer = (uint8_t *)realloc(Buffer, BufferSize);
if (Buffer == NULL)
Env->ThrowError("FFmpegSource: Out of memory");
}
size_t ReadBytes = fread(Buffer, 1, AFrameSize, ST.fp);
if (ReadBytes != AFrameSize) {
if (ReadBytes == 0) {
if (feof(ST.fp))
Env->ThrowError("FFmpegSource: Unexpected EOF while reading frame");
else
Env->ThrowError("FFmpegSource: Error reading frame: %s", strerror(errno));
} else
Env->ThrowError("FFmpegSource: Short read while reading frame");
Env->ThrowError("FFmpegSource: Unknown read error");
}
return AFrameSize;
}
return 0;
}
int FFMatroskaSource::DecodeNextFrame(AVFrame *AFrame, int64_t *AFirstStartTime, IScriptEnvironment* Env) {
int FrameFinished = 0;
int Ret = -1;
*AFirstStartTime = -1;
uint64_t StartTime, EndTime, FilePos;
unsigned int Track, FrameFlags, FrameSize;
while (mkv_ReadFrame(MF, 0, &Track, &StartTime, &EndTime, &FilePos, &FrameSize, &FrameFlags) == 0) {
FrameSize = ReadFrame(FilePos, FrameSize, VideoCS, Env);
if (*AFirstStartTime < 0)
*AFirstStartTime = StartTime;
Ret = avcodec_decode_video(VideoCodecContext, AFrame, &FrameFinished, Buffer, FrameSize);
if (FrameFinished)
goto Done;
}
// Flush the last frame
if (CurrentFrame == VI.num_frames - 1 && VideoCodecContext->has_b_frames)
Ret = avcodec_decode_video(VideoCodecContext, AFrame, &FrameFinished, NULL, 0);
Done:
return Ret;
}
PVideoFrame __stdcall FFMatroskaSource::GetFrame(int n, IScriptEnvironment* Env) {
bool HasSeeked = false;
if (n < CurrentFrame || FindClosestKeyFrame(n) > CurrentFrame) {
mkv_Seek(MF, FrameToDTS[n].DTS, MKVF_SEEK_TO_PREV_KEYFRAME);
avcodec_flush_buffers(VideoCodecContext);
HasSeeked = true;
}
do {
int64_t StartTime;
int Ret = DecodeNextFrame(DecodeFrame, &StartTime, Env);
if (HasSeeked) {
HasSeeked = false;
if (StartTime < 0 || (CurrentFrame = FrameFromDTS(StartTime)) < 0)
Env->ThrowError("FFmpegSource: Frame accurate seeking is not possible in this file");
}
CurrentFrame++;
} while (CurrentFrame <= n);
return OutputFrame(DecodeFrame, Env);
}
AVSValue __cdecl CreateFFmpegSource(AVSValue Args, void* UserData, IScriptEnvironment* Env) {
if (!UserData) {
av_register_all();
UserData = (void *)-1;
}
if (!Args[0].Defined())
Env->ThrowError("FFmpegSource: No source specified");
const char *Source = Args[0].AsString();
int VTrack = Args[1].AsInt(-1);
int ATrack = Args[2].AsInt(-2);
const char *Timecodes = Args[3].AsString("");
bool VCache = Args[4].AsBool(true);
const char *VCacheFile = Args[5].AsString("");
const char *ACacheFile = Args[6].AsString("");
int ACCompression = Args[7].AsInt(-1);
const char *PPString = Args[8].AsString("");
int PPQuality = Args[9].AsInt(PP_QUALITY_MAX);
int SeekMode = Args[10].AsInt(1);
if (VTrack <= -2 && ATrack <= -2)
Env->ThrowError("FFmpegSource: No tracks selected");
if (ACCompression < -1 || ACCompression > 8)
Env->ThrowError("FFmpegSource: Invalid audio cache compression selected");
AVFormatContext *FormatContext;
if (av_open_input_file(&FormatContext, Source, NULL, 0, NULL) != 0)
Env->ThrowError("FFmpegSource: Couldn't open %s", Args[0].AsString());
bool IsMatroska = !strcmp(FormatContext->iformat->name, "matroska");
av_close_input_file(FormatContext);
if (IsMatroska)
return new FFMatroskaSource(Source, VTrack, ATrack, Timecodes, VCache, VCacheFile, ACacheFile, ACCompression, PPString, PPQuality, Env);
else
return new FFmpegSource(Source, VTrack, ATrack, Timecodes, VCache, VCacheFile, ACacheFile, ACCompression, PPString, PPQuality, SeekMode, Env);
}
AVSValue __cdecl CreateFFPP(AVSValue Args, void* UserData, IScriptEnvironment* Env) {
return new FFPP(Args[0].AsClip(), Args[1].AsString(""), Args[2].AsInt(PP_QUALITY_MAX), Env);
}
extern "C" __declspec(dllexport) const char* __stdcall AvisynthPluginInit2(IScriptEnvironment* Env) {
Env->AddFunction("FFmpegSource", "[source]s[vtrack]i[atrack]i[timecodes]s[vcache]b[vcachefile]s[acachefile]s[accompression]i[pp]s[ppquality]i[seekmode]i", CreateFFmpegSource, 0);
Env->AddFunction("FFPP", "c[pp]s[ppquality]i", CreateFFPP, 0);
return "FFmpegSource";
};
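One detail worth calling out from the indexing loop above: decoded 16-bit samples are widened into the FLAC__int32 FLACBuffer before FLAC__stream_encoder_process_interleaved() is called, which is also why the FLAC cache path is limited to 16-bit audio. A reduced sketch of just that step (illustrative names; the encoder is assumed to be set up as in NewFLACCacheWriter):

#include <stdint.h>
#include <vector>
#include <stream_encoder.h>

// Widen interleaved 16-bit PCM to the FLAC__int32 layout the encoder expects and
// submit one block; Samples is the per-channel count, like DecodedSamples above.
bool WriteFLACBlock(FLAC__StreamEncoder *FSE, const int16_t *PCM, int Samples, int Channels) {
	std::vector<FLAC__int32> Widened(Samples * Channels);
	for (int i = 0; i < Samples * Channels; i++)
		Widened[i] = PCM[i];
	return FLAC__stream_encoder_process_interleaved(FSE, &Widened[0], Samples) != 0;
}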

View File

@ -1,166 +1,133 @@
#include "ffmpegsource.h" #include "ffmpegsource.h"
#include "stdiostream.c"
#include "matroskacodecs.c"
static DWORD WINAPI AVFindStreamInfoExecute(AVFormatContext *FormatContext) {
return av_find_stream_info(FormatContext);
}
int FFmpegSource::GetTrackIndex(int Index, CodecType ATrackType, IScriptEnvironment *Env) {
if (Index == -1)
for (unsigned int i = 0; i < FormatContext->nb_streams; i++)
if (FormatContext->streams[i]->codec->codec_type == ATrackType) {
Index = i;
break;

class FFMatroskaSource : public FFBase {
private:
StdIoStream ST;
unsigned int BufferSize;
CompressedStream *VideoCS;
CompressedStream *AudioCS;
AVCodecContext *VideoCodecContext;
MatroskaFile *MF;
char ErrorMessage[256];
uint8_t *Buffer;
int CurrentFrame;
int ReadFrame(uint64_t AFilePos, unsigned int AFrameSize, CompressedStream *ACS, IScriptEnvironment *Env);
int DecodeNextFrame(AVFrame *AFrame, int64_t *AFirstStartTime, IScriptEnvironment* Env);
int GetTrackIndex(int Index, unsigned char ATrackType, IScriptEnvironment *Env) {
if (Index == -1)
for (unsigned int i = 0; i < mkv_GetNumTracks(MF); i++)
if (mkv_GetTrackInfo(MF, i)->Type == ATrackType) {
Index = i;
break;
}
if (Index == -1)
Env->ThrowError("FFmpegSource: No %s track found", (ATrackType & TT_VIDEO) ? "video" : "audio");
if (Index <= -2)
return -2;
if (Index >= (int)mkv_GetNumTracks(MF))
Env->ThrowError("FFmpegSource: Invalid %s track number", (ATrackType & TT_VIDEO) ? "video" : "audio");
TrackInfo *TI = mkv_GetTrackInfo(MF, Index);
if (TI->Type != ATrackType)
Env->ThrowError("FFmpegSource: Selected track is not %s", (ATrackType & TT_VIDEO) ? "video" : "audio");
return Index;
}
public:
FFMatroskaSource(const char *ASource, int AVideoTrack, int AAudioTrack, const char *ATimecodes, bool AVCache, const char *AVideoCache, const char *AAudioCache, const char *APPString, int AQuality, IScriptEnvironment* Env) {
CurrentFrame = 0;
int VideoTrack;
int AudioTrack;
unsigned int TrackMask = ~0;
AVCodecContext *AudioCodecContext = NULL;
AVCodec *AudioCodec = NULL;
VideoCodecContext = NULL;
AVCodec *VideoCodec = NULL;
TrackInfo *VideoTI = NULL;
BufferSize = 0;
Buffer = NULL;
VideoCS = NULL;
AudioCS = NULL;
memset(&ST,0,sizeof(ST));
ST.base.read = (int (__cdecl *)(InputStream *,ulonglong,void *,int))StdIoRead;
ST.base.scan = (longlong (__cdecl *)(InputStream *,ulonglong,unsigned int))StdIoScan;
ST.base.getcachesize = (unsigned int (__cdecl *)(InputStream *))StdIoGetCacheSize;
ST.base.geterror = (const char *(__cdecl *)(InputStream *))StdIoGetLastError;
ST.base.memalloc = (void *(__cdecl *)(InputStream *,size_t))StdIoMalloc;
ST.base.memrealloc = (void *(__cdecl *)(InputStream *,void *,size_t))StdIoRealloc;
ST.base.memfree = (void (__cdecl *)(InputStream *,void *)) StdIoFree;
ST.base.progress = (int (__cdecl *)(InputStream *,ulonglong,ulonglong))StdIoProgress;
ST.fp = fopen(ASource, "rb");
if (ST.fp == NULL)
Env->ThrowError("FFmpegSource: Can't open '%s': %s", ASource, strerror(errno));
setvbuf(ST.fp, NULL, _IOFBF, CACHESIZE);
MF = mkv_OpenEx(&ST.base, 0, 0, ErrorMessage, sizeof(ErrorMessage));
if (MF == NULL) {
fclose(ST.fp);
Env->ThrowError("FFmpegSource: Can't parse Matroska file: %s", ErrorMessage);
}
VideoTrack = GetTrackIndex(AVideoTrack, TT_VIDEO, Env);
AudioTrack = GetTrackIndex(AAudioTrack, TT_AUDIO, Env);
bool VCacheIsValid = true;
bool ACacheIsValid = true;
if (VideoTrack >= 0) {
VCacheIsValid = LoadFrameInfoFromFile(AVideoCache, ASource, VideoTrack);
VideoTI = mkv_GetTrackInfo(MF, VideoTrack);
if (VideoTI->CompEnabled) {
VideoCS = cs_Create(MF, VideoTrack, ErrorMessage, sizeof(ErrorMessage));
if (VideoCS == NULL)
Env->ThrowError("FFmpegSource: Can't create decompressor: %s", ErrorMessage);
}
VideoCodecContext = avcodec_alloc_context();
VideoCodecContext->extradata = (uint8_t *)VideoTI->CodecPrivate;
VideoCodecContext->extradata_size = VideoTI->CodecPrivateSize;
VideoCodec = avcodec_find_decoder(MatroskaToFFCodecID(VideoTI));
if (VideoCodec == NULL)
Env->ThrowError("FFmpegSource: Video codec not found");
if (avcodec_open(VideoCodecContext, VideoCodec) < 0)
Env->ThrowError("FFmpegSource: Could not open video codec");
// Fix for mpeg2 and other formats where decoding a frame is necessary to get information about the stream
if (VideoCodecContext->pix_fmt == PIX_FMT_NONE) {
mkv_SetTrackMask(MF, ~(1 << VideoTrack));
int64_t Dummy;
DecodeNextFrame(DecodeFrame, &Dummy, Env);
mkv_Seek(MF, 0, MKVF_SEEK_TO_PREV_KEYFRAME);
}
VI.image_type = VideoInfo::IT_TFF;
VI.width = VideoTI->AV.Video.PixelWidth;
VI.height = VideoTI->AV.Video.PixelHeight;
VI.fps_denominator = 1;
VI.fps_numerator = 30;
SetOutputFormat(VideoCodecContext->pix_fmt, Env);
InitPP(VI.width, VI.height, APPString, AQuality, VideoCodecContext->pix_fmt, Env);
if (!VCacheIsValid)
TrackMask &= ~(1 << VideoTrack);
}
if (AudioTrack >= 0) {
TrackInfo *AudioTI = mkv_GetTrackInfo(MF, AudioTrack);
if (AudioTI->CompEnabled) {
AudioCS = cs_Create(MF, AudioTrack, ErrorMessage, sizeof(ErrorMessage));
if (AudioCS == NULL)
Env->ThrowError("FFmpegSource: Can't create decompressor: %s", ErrorMessage);
}
AudioCodecContext = avcodec_alloc_context();
AudioCodecContext->extradata = (uint8_t *)AudioTI->CodecPrivate;
AudioCodecContext->extradata_size = AudioTI->CodecPrivateSize;
AudioCodec = avcodec_find_decoder(MatroskaToFFCodecID(AudioTI));
if (AudioCodec == NULL)
Env->ThrowError("FFmpegSource: Audio codec not found");
if (avcodec_open(AudioCodecContext, AudioCodec) < 0)
Env->ThrowError("FFmpegSource: Could not open audio codec");
// Fix for ac3 and other codecs where decoding a block of audio is required to get information about it
if (AudioCodecContext->channels == 0 || AudioCodecContext->sample_rate == 0) {
mkv_SetTrackMask(MF, ~(1 << AudioTrack));
uint64_t StartTime, EndTime, FilePos;
unsigned int Track, FrameFlags, FrameSize;
mkv_ReadFrame(MF, 0, &Track, &StartTime, &EndTime, &FilePos, &FrameSize, &FrameFlags);
uint8_t DecodingBuffer[AVCODEC_MAX_AUDIO_FRAME_SIZE];
int Size = ReadFrame(FilePos, FrameSize, AudioCS, Env);
uint8_t *Data = Buffer;
while (Size > 0) {
int TempOutputBufSize = AVCODEC_MAX_AUDIO_FRAME_SIZE;

}
if (Index == -1)
Env->ThrowError("FFmpegSource: No %s track found", (ATrackType == CODEC_TYPE_VIDEO) ? "video" : "audio");
if (Index <= -2)
return -2;
if (Index >= (int)FormatContext->nb_streams)
Env->ThrowError("FFmpegSource: Invalid %s track number", (ATrackType == CODEC_TYPE_VIDEO) ? "video" : "audio");
if (FormatContext->streams[Index]->codec->codec_type != ATrackType)
Env->ThrowError("FFmpegSource: Selected track is not %s", (ATrackType == CODEC_TYPE_VIDEO) ? "video" : "audio");
return Index;
}
FFmpegSource::FFmpegSource(const char *ASource, int AVideoTrack, int AAudioTrack, const char *ATimecodes, bool AVCache, const char *AVideoCache, const char *AAudioCache, int AACCompression, const char *APPString, int AQuality, int ASeekMode, IScriptEnvironment* Env) {
CurrentFrame = 0;
SeekMode = ASeekMode;
AVCodecContext *AudioCodecContext = NULL;
AVCodec *AudioCodec;
AVCodec *VideoCodec;
FormatContext = NULL;
VideoCodecContext = NULL;
VideoCodec = NULL;
if (av_open_input_file(&FormatContext, ASource, NULL, 0, NULL) != 0)
Env->ThrowError("FFmpegSource: Couldn't open '%s'", ASource);
if (av_find_stream_info(FormatContext) < 0)
Env->ThrowError("FFmpegSource: Couldn't find stream information");
VideoTrack = GetTrackIndex(AVideoTrack, CODEC_TYPE_VIDEO, Env);
int AudioTrack = GetTrackIndex(AAudioTrack, CODEC_TYPE_AUDIO, Env);
bool VCacheIsValid = true;
bool ACacheIsValid = true;
if (VideoTrack >= 0) {
VCacheIsValid = LoadFrameInfoFromFile(AVideoCache, ASource, VideoTrack);
VideoCodecContext = FormatContext->streams[VideoTrack]->codec;
VideoCodec = avcodec_find_decoder(VideoCodecContext->codec_id);
if (VideoCodec == NULL)
Env->ThrowError("FFmpegSource: Video codec not found");
if (avcodec_open(VideoCodecContext, VideoCodec) < 0)
Env->ThrowError("FFmpegSource: Could not open video codec");
VI.image_type = VideoInfo::IT_TFF;
VI.width = VideoCodecContext->width;
VI.height = VideoCodecContext->height;
VI.fps_denominator = FormatContext->streams[VideoTrack]->time_base.num;
VI.fps_numerator = FormatContext->streams[VideoTrack]->time_base.den;
// sanity check framerate
if (VI.fps_denominator > VI.fps_numerator || VI.fps_denominator <= 0 || VI.fps_numerator <= 0) {
VI.fps_denominator = 1;
VI.fps_numerator = 30;
}
SetOutputFormat(VideoCodecContext->pix_fmt, Env);
InitPP(VI.width, VI.height, APPString, AQuality, VideoCodecContext->pix_fmt, Env);
}
if (AudioTrack >= 0) {
AudioCodecContext = FormatContext->streams[AudioTrack]->codec;
AudioCodec = avcodec_find_decoder(AudioCodecContext->codec_id);
if (AudioCodec == NULL)
Env->ThrowError("FFmpegSource: Audio codec not found");
if (avcodec_open(AudioCodecContext, AudioCodec) < 0)
Env->ThrowError("FFmpegSource: Could not open audio codec");
switch (AudioCodecContext->sample_fmt) {
case SAMPLE_FMT_U8: VI.sample_type = SAMPLE_INT8; AACCompression = -1; break;
case SAMPLE_FMT_S16: VI.sample_type = SAMPLE_INT16; break;
case SAMPLE_FMT_S24: VI.sample_type = SAMPLE_INT24; AACCompression = -1; break;
case SAMPLE_FMT_S32: VI.sample_type = SAMPLE_INT32; AACCompression = -1; break;
case SAMPLE_FMT_FLT: VI.sample_type = SAMPLE_FLOAT; AACCompression = -1; break;
default:
Env->ThrowError("FFmpegSource: Unsupported/unknown sample format");
}
VI.nchannels = AudioCodecContext->channels;
VI.audio_samples_per_second = AudioCodecContext->sample_rate;
ACacheIsValid = OpenAudioCache(AAudioCache, ASource, AudioTrack, Env);
}
// Needs to be indexed?
if (!ACacheIsValid || !VCacheIsValid) {
FLAC__StreamEncoder *FSE = NULL;
FILE *RawCache = NULL;
if (!ACacheIsValid)
if (AACCompression >= 0)
AudioCacheType = acFLAC;
else
AudioCacheType = acRaw;
switch (AudioCacheType) {
case acFLAC: FSE = NewFLACCacheWriter(AAudioCache, ASource, AudioTrack, AACCompression, Env); break;
case acRaw: RawCache = NewRawCacheWriter(AAudioCache, ASource, AudioTrack, Env); break;
}
AVPacket Packet;
while (av_read_frame(FormatContext, &Packet) >= 0) {
if (Packet.stream_index == VideoTrack && !VCacheIsValid) {
FrameToDTS.push_back(FrameInfo(Packet.dts, (Packet.flags & PKT_FLAG_KEY) ? 1 : 0));
VI.num_frames++;
} else if (Packet.stream_index == AudioTrack && !ACacheIsValid) {
int Size = Packet.size;
uint8_t *Data = Packet.data;
while (Size > 0) {
int TempOutputBufSize = AVCODEC_MAX_AUDIO_FRAME_SIZE;
@ -168,396 +135,71 @@ public:
if (Ret < 0)
Env->ThrowError("FFmpegSource: Audio decoding error");
int DecodedSamples = VI.AudioSamplesFromBytes(TempOutputBufSize);
Size -= Ret;
Data += Ret;
VI.num_audio_samples += DecodedSamples;
if (AudioCacheType == acFLAC) {
for (int i = 0; i < DecodedSamples * VI.nchannels; i++)
FLACBuffer[i] = ((int16_t *)DecodingBuffer)[i];
FLAC__stream_encoder_process_interleaved(FSE, FLACBuffer, DecodedSamples);
} else if (AudioCacheType == acRaw) {
fwrite(DecodingBuffer, 1, TempOutputBufSize, RawCache);
}
}
}
av_free_packet(&Packet);
}
if (!ACacheIsValid) {
switch (AudioCacheType) {
case acFLAC: CloseFLACCacheWriter(FSE); break;
case acRaw: CloseRawCacheWriter(RawCache); break;
}
ACacheIsValid = OpenAudioCache(AAudioCache, ASource, AudioTrack, Env);
if (!ACacheIsValid)
Env->ThrowError("FFmpegSource: Failed to open newly created audio cache for reading");
}
if (VideoTrack >= 0 && VI.num_frames == 0)
Env->ThrowError("FFmpegSource: Video track contains no frames");
if (AudioTrack >= 0 && VI.num_audio_samples == 0)
Env->ThrowError("FFmpegSource: Audio track contains no samples");

if (Ret < 0)
Env->ThrowError("FFmpegSource: Audio decoding error");
Size -= Ret;
Data += Ret;
}
mkv_Seek(MF, 0, MKVF_SEEK_TO_PREV_KEYFRAME);
}
VI.nchannels = AudioCodecContext->channels;
VI.audio_samples_per_second = AudioCodecContext->sample_rate;
switch (AudioCodecContext->sample_fmt) {
case SAMPLE_FMT_U8: VI.sample_type = SAMPLE_INT8; break;
case SAMPLE_FMT_S16: VI.sample_type = SAMPLE_INT16; break;
case SAMPLE_FMT_S24: VI.sample_type = SAMPLE_INT24; break;
case SAMPLE_FMT_S32: VI.sample_type = SAMPLE_INT32; break;
case SAMPLE_FMT_FLT: VI.sample_type = SAMPLE_FLOAT; break;
default:
Env->ThrowError("FFmpegSource: Unsupported/unknown sample format");
}
ACacheIsValid = PrepareAudioCache(AAudioCache, ASource, AudioTrack, Env);
if (!ACacheIsValid)
TrackMask &= ~(1 << AudioTrack);
}
mkv_SetTrackMask(MF, TrackMask);
uint8_t DecodingBuffer[AVCODEC_MAX_AUDIO_FRAME_SIZE];
// Needs to be indexed?
if (!ACacheIsValid || !VCacheIsValid) {
uint64_t StartTime, EndTime, FilePos;
unsigned int Track, FrameFlags, FrameSize;
while (mkv_ReadFrame(MF, 0, &Track, &StartTime, &EndTime, &FilePos, &FrameSize, &FrameFlags) == 0)
if (Track == VideoTrack && !VCacheIsValid) {
FrameToDTS.push_back(FrameInfo(StartTime, (FrameFlags & FRAME_KF) != 0));
VI.num_frames++;
} else if (Track == AudioTrack && !ACacheIsValid) {
int Size = ReadFrame(FilePos, FrameSize, AudioCS, Env);
uint8_t *Data = Buffer;
while (Size > 0) {
int TempOutputBufSize = AVCODEC_MAX_AUDIO_FRAME_SIZE;
int Ret = avcodec_decode_audio2(AudioCodecContext, (int16_t *)DecodingBuffer, &TempOutputBufSize, Data, Size);
if (Ret < 0)
Env->ThrowError("FFmpegSource: Audio decoding error");
Size -= Ret;
Data += Ret;
VI.num_audio_samples += VI.AudioSamplesFromBytes(TempOutputBufSize);
fwrite(DecodingBuffer, 1, TempOutputBufSize, AudioCache);
}
}
if (VideoTrack >= 0 && VI.num_frames == 0)
Env->ThrowError("FFmpegSource: Video track contains no frames");
if (AudioTrack >= 0 && VI.num_audio_samples == 0)
Env->ThrowError("FFmpegSource: Audio track contains no samples");
if (VideoTrack >= 0)
mkv_Seek(MF, FrameToDTS.front().DTS, MKVF_SEEK_TO_PREV_KEYFRAME);
if (AVCache && !VCacheIsValid)
if (!SaveFrameInfoToFile(AVideoCache, ASource, VideoTrack))
Env->ThrowError("FFmpegSource: Failed to write video cache info");
}
if (AudioTrack >= 0) {
avcodec_close(AudioCodecContext);
av_free(AudioCodecContext);
}
if (VideoTrack >= 0) {
mkv_SetTrackMask(MF, ~(1 << VideoTrack));
// Calculate the average framerate
if (FrameToDTS.size() >= 2) {
double DTSDiff = (double)(FrameToDTS.back().DTS - FrameToDTS.front().DTS);
VI.fps_denominator = (unsigned int)(DTSDiff * mkv_TruncFloat(VideoTI->TimecodeScale) / (double)1000 / (double)(VI.num_frames - 1) + 0.5);
VI.fps_numerator = 1000000;
}
if (!SaveTimecodesToFile(ATimecodes, mkv_TruncFloat(VideoTI->TimecodeScale), 1000000))
Env->ThrowError("FFmpegSource: Failed to write timecodes");
}
}
~FFMatroskaSource() {
free(Buffer);
mkv_Close(MF);
fclose(ST.fp);
if (AudioCache)
fclose(AudioCache);
if (VideoCodecContext)
avcodec_close(VideoCodecContext);
av_free(VideoCodecContext);
}
PVideoFrame __stdcall GetFrame(int n, IScriptEnvironment* Env);
};
int FFMatroskaSource::ReadFrame(uint64_t AFilePos, unsigned int AFrameSize, CompressedStream *ACS, IScriptEnvironment *Env) {
if (ACS) {
char CSBuffer[4096];
unsigned int DecompressedFrameSize = 0;
cs_NextFrame(ACS, AFilePos, AFrameSize);
for (;;) {
int ReadBytes = cs_ReadData(ACS, CSBuffer, sizeof(CSBuffer));
if (ReadBytes < 0)
Env->ThrowError("FFmpegSource: Error decompressing data: %s", cs_GetLastError(ACS));
if (ReadBytes == 0) {
return DecompressedFrameSize;
}
if (BufferSize < DecompressedFrameSize + ReadBytes) {
BufferSize = AFrameSize;
Buffer = (uint8_t *)realloc(Buffer, BufferSize);
if (Buffer == NULL)
Env->ThrowError("FFmpegSource: Out of memory");
}
memcpy(Buffer + DecompressedFrameSize, CSBuffer, ReadBytes);
DecompressedFrameSize += ReadBytes;
}
} else {
if (_fseeki64(ST.fp, AFilePos, SEEK_SET))
Env->ThrowError("FFmpegSource: fseek(): %s", strerror(errno));
if (BufferSize < AFrameSize) {
BufferSize = AFrameSize;
Buffer = (uint8_t *)realloc(Buffer, BufferSize);
if (Buffer == NULL)
Env->ThrowError("FFmpegSource: Out of memory");
}
size_t ReadBytes = fread(Buffer, 1, AFrameSize, ST.fp);
if (ReadBytes != AFrameSize) {
if (ReadBytes == 0) {
if (feof(ST.fp))
Env->ThrowError("FFmpegSource: Unexpected EOF while reading frame");
else
Env->ThrowError("FFmpegSource: Error reading frame: %s", strerror(errno));
} else
Env->ThrowError("FFmpegSource: Short read while reading frame");
Env->ThrowError("FFmpegSource: Unknown read error");
}
return AFrameSize;
}
return 0;
}
int FFMatroskaSource::DecodeNextFrame(AVFrame *AFrame, int64_t *AFirstStartTime, IScriptEnvironment* Env) {
int FrameFinished = 0;
int Ret = -1;
*AFirstStartTime = -1;
uint64_t StartTime, EndTime, FilePos;
unsigned int Track, FrameFlags, FrameSize;
while (mkv_ReadFrame(MF, 0, &Track, &StartTime, &EndTime, &FilePos, &FrameSize, &FrameFlags) == 0) {
FrameSize = ReadFrame(FilePos, FrameSize, VideoCS, Env);
if (*AFirstStartTime < 0)
*AFirstStartTime = StartTime;
Ret = avcodec_decode_video(VideoCodecContext, AFrame, &FrameFinished, Buffer, FrameSize);
if (FrameFinished)
goto Done;
}
// Flush the last frame
if (CurrentFrame == VI.num_frames - 1 && VideoCodecContext->has_b_frames)
Ret = avcodec_decode_video(VideoCodecContext, AFrame, &FrameFinished, NULL, 0);
Done:
return Ret;
}
PVideoFrame __stdcall FFMatroskaSource::GetFrame(int n, IScriptEnvironment* Env) {
bool HasSeeked = false;
if (n < CurrentFrame || FindClosestKeyFrame(n) > CurrentFrame) {
mkv_Seek(MF, FrameToDTS[n].DTS, MKVF_SEEK_TO_PREV_KEYFRAME);
avcodec_flush_buffers(VideoCodecContext);
HasSeeked = true;
}
do {
int64_t StartTime;
int Ret = DecodeNextFrame(DecodeFrame, &StartTime, Env);
if (HasSeeked) {
HasSeeked = false;
if (StartTime < 0 || (CurrentFrame = FrameFromDTS(StartTime)) < 0)
Env->ThrowError("FFmpegSource: Frame accurate seeking is not possible in this file");
}
CurrentFrame++;
} while (CurrentFrame <= n);
return OutputFrame(DecodeFrame, Env);
}
class FFmpegSource : public FFBase {
private:
AVFormatContext *FormatContext;
AVCodecContext *VideoCodecContext;
int VideoTrack;
int CurrentFrame;
int SeekMode;
int DecodeNextFrame(AVFrame *Frame, int64_t *DTS);
int GetTrackIndex(int Index, CodecType ATrackType, IScriptEnvironment *Env) {
if (Index == -1)
for (unsigned int i = 0; i < FormatContext->nb_streams; i++)
if (FormatContext->streams[i]->codec->codec_type == ATrackType) {
Index = i;
break;
}
if (Index == -1)
Env->ThrowError("FFmpegSource: No %s track found", (ATrackType == CODEC_TYPE_VIDEO) ? "video" : "audio");
if (Index <= -2)
return -2;
if (Index >= (int)FormatContext->nb_streams)
Env->ThrowError("FFmpegSource: Invalid %s track number", (ATrackType == CODEC_TYPE_VIDEO) ? "video" : "audio");
if (FormatContext->streams[Index]->codec->codec_type != ATrackType)
Env->ThrowError("FFmpegSource: Selected track is not %s", (ATrackType == CODEC_TYPE_VIDEO) ? "video" : "audio");
return Index;
}
public:
FFmpegSource(const char *ASource, int AVideoTrack, int AAudioTrack, const char *ATimecodes, bool AVCache, const char *AVideoCache, const char *AAudioCache, const char *APPString, int AQuality, int ASeekMode, IScriptEnvironment* Env) {
CurrentFrame = 0;
SeekMode = ASeekMode;
AVCodecContext *AudioCodecContext = NULL;
AVCodec *AudioCodec;
AVCodec *VideoCodec;
FormatContext = NULL;
VideoCodecContext = NULL;
VideoCodec = NULL;
if (av_open_input_file(&FormatContext, ASource, NULL, 0, NULL) != 0)
Env->ThrowError("FFmpegSource: Couldn't open '%s'", ASource);
if (av_find_stream_info(FormatContext) < 0)
Env->ThrowError("FFmpegSource: Couldn't find stream information");
VideoTrack = GetTrackIndex(AVideoTrack, CODEC_TYPE_VIDEO, Env);
int AudioTrack = GetTrackIndex(AAudioTrack, CODEC_TYPE_AUDIO, Env);
bool VCacheIsValid = true;
bool ACacheIsValid = true;
if (VideoTrack >= 0) {
VCacheIsValid = LoadFrameInfoFromFile(AVideoCache, ASource, VideoTrack);
VideoCodecContext = FormatContext->streams[VideoTrack]->codec;
VideoCodec = avcodec_find_decoder(VideoCodecContext->codec_id);
if (VideoCodec == NULL)
Env->ThrowError("FFmpegSource: Video codec not found");
if (avcodec_open(VideoCodecContext, VideoCodec) < 0)
Env->ThrowError("FFmpegSource: Could not open video codec");
VI.image_type = VideoInfo::IT_TFF;
VI.width = VideoCodecContext->width;
VI.height = VideoCodecContext->height;
VI.fps_denominator = FormatContext->streams[VideoTrack]->time_base.num;
VI.fps_numerator = FormatContext->streams[VideoTrack]->time_base.den;
// sanity check framerate
if (VI.fps_denominator > VI.fps_numerator || VI.fps_denominator <= 0 || VI.fps_numerator <= 0) {
VI.fps_denominator = 1;
VI.fps_numerator = 30;
}
SetOutputFormat(VideoCodecContext->pix_fmt, Env);
InitPP(VI.width, VI.height, APPString, AQuality, VideoCodecContext->pix_fmt, Env);
}
if (AudioTrack >= 0) {
AudioCodecContext = FormatContext->streams[AudioTrack]->codec;
AudioCodec = avcodec_find_decoder(AudioCodecContext->codec_id);
if (AudioCodec == NULL)
Env->ThrowError("FFmpegSource: Audio codec not found");
if (avcodec_open(AudioCodecContext, AudioCodec) < 0)
Env->ThrowError("FFmpegSource: Could not open audio codec");
switch (AudioCodecContext->sample_fmt) {
case SAMPLE_FMT_U8: VI.sample_type = SAMPLE_INT8; break;
case SAMPLE_FMT_S16: VI.sample_type = SAMPLE_INT16; break;
case SAMPLE_FMT_S24: VI.sample_type = SAMPLE_INT24; break;
case SAMPLE_FMT_S32: VI.sample_type = SAMPLE_INT32; break;
case SAMPLE_FMT_FLT: VI.sample_type = SAMPLE_FLOAT; break;
default:
Env->ThrowError("FFmpegSource: Unsupported/unknown sample format");
}
VI.nchannels = AudioCodecContext->channels;
VI.audio_samples_per_second = AudioCodecContext->sample_rate;
ACacheIsValid = PrepareAudioCache(AAudioCache, ASource, AudioTrack, Env);
}
uint8_t DecodingBuffer[AVCODEC_MAX_AUDIO_FRAME_SIZE];
// Needs to be indexed?
if (!ACacheIsValid || !VCacheIsValid) {
AVPacket Packet;
while (av_read_frame(FormatContext, &Packet) >= 0) {
if (Packet.stream_index == VideoTrack && !VCacheIsValid) {
FrameToDTS.push_back(FrameInfo(Packet.dts, (Packet.flags & PKT_FLAG_KEY) ? 1 : 0));
VI.num_frames++;
} else if (Packet.stream_index == AudioTrack && !ACacheIsValid) {
int Size = Packet.size;
uint8_t *Data = Packet.data;
while (Size > 0) {
int TempOutputBufSize = AVCODEC_MAX_AUDIO_FRAME_SIZE;
int Ret = avcodec_decode_audio2(AudioCodecContext, (int16_t *)DecodingBuffer, &TempOutputBufSize, Data, Size);
if (Ret < 0)
Env->ThrowError("FFmpegSource: Audio decoding error");
Size -= Ret;
Data += Ret;
VI.num_audio_samples += VI.AudioSamplesFromBytes(TempOutputBufSize);
fwrite(DecodingBuffer, 1, TempOutputBufSize, AudioCache);
}
}
av_free_packet(&Packet);
}
if (VideoTrack >= 0 && VI.num_frames == 0)
Env->ThrowError("FFmpegSource: Video track contains no frames");
if (AudioTrack >= 0 && VI.num_audio_samples == 0)
Env->ThrowError("FFmpegSource: Audio track contains no samples");
if (VideoTrack >= 0)
av_seek_frame(FormatContext, VideoTrack, FrameToDTS.front().DTS, AVSEEK_FLAG_BACKWARD);
if (AVCache)
if (!SaveFrameInfoToFile(AVideoCache, ASource, VideoTrack))
Env->ThrowError("FFmpegSource: Failed to write video cache info");
}
if (AudioTrack >= 0)
avcodec_close(AudioCodecContext);
if (VideoTrack >= 0) {
if (!SaveTimecodesToFile(ATimecodes, FormatContext->streams[VideoTrack]->time_base.num * 1000, FormatContext->streams[VideoTrack]->time_base.den))
Env->ThrowError("FFmpegSource: Failed to write timecodes");
// Adjust framerate to match the duration of the first frame
if (FrameToDTS.size() >= 2) {
int64_t DTSDiff = (double)(FrameToDTS[1].DTS - FrameToDTS[0].DTS);
VI.fps_denominator *= DTSDiff;
}
}
}
~FFmpegSource() {
if (VideoTrack >= 0)
avcodec_close(VideoCodecContext);
av_close_input_file(FormatContext);
}
PVideoFrame __stdcall GetFrame(int n, IScriptEnvironment* Env);
};

if (VideoTrack >= 0)
av_seek_frame(FormatContext, VideoTrack, FrameToDTS.front().DTS, AVSEEK_FLAG_BACKWARD);
if (AVCache && !VCacheIsValid)
if (!SaveFrameInfoToFile(AVideoCache, ASource, VideoTrack))
Env->ThrowError("FFmpegSource: Failed to write video cache info");
}
if (AudioTrack >= 0)
avcodec_close(AudioCodecContext);
if (VideoTrack >= 0) {
if (!SaveTimecodesToFile(ATimecodes, FormatContext->streams[VideoTrack]->time_base.num * 1000, FormatContext->streams[VideoTrack]->time_base.den))
Env->ThrowError("FFmpegSource: Failed to write timecodes");
// Adjust framerate to match the duration of the first frame
if (FrameToDTS.size() >= 2) {
int64_t DTSDiff = (double)(FrameToDTS[1].DTS - FrameToDTS[0].DTS);
VI.fps_denominator *= DTSDiff;
}
}
}
FFmpegSource::~FFmpegSource() {
if (VideoTrack >= 0)
avcodec_close(VideoCodecContext);
av_close_input_file(FormatContext);
}
int FFmpegSource::DecodeNextFrame(AVFrame *AFrame, int64_t *AStartTime) {
AVPacket Packet;
@ -633,50 +275,3 @@ PVideoFrame __stdcall FFmpegSource::GetFrame(int n, IScriptEnvironment* Env) {
return OutputFrame(DecodeFrame, Env);
}
AVSValue __cdecl CreateFFmpegSource(AVSValue Args, void* UserData, IScriptEnvironment* Env) {
if (!UserData) {
av_register_all();
UserData = (void *)-1;
}
if (!Args[0].Defined())
Env->ThrowError("FFmpegSource: No source specified");
const char *Source = Args[0].AsString();
int VTrack = Args[1].AsInt(-1);
int ATrack = Args[2].AsInt(-2);
const char *Timecodes = Args[3].AsString("");
bool VCache = Args[4].AsBool(true);
const char *VCacheFile = Args[5].AsString("");
const char *ACacheFile = Args[6].AsString("");
const char *PPString = Args[7].AsString("");
int PPQuality = Args[8].AsInt(PP_QUALITY_MAX);
int SeekMode = Args[9].AsInt(1);
if (VTrack <= -2 && ATrack <= -2)
Env->ThrowError("FFmpegSource: No tracks selected");
AVFormatContext *FormatContext;
if (av_open_input_file(&FormatContext, Source, NULL, 0, NULL) != 0)
Env->ThrowError("FFmpegSource: Couldn't open %s", Args[0].AsString());
bool IsMatroska = !strcmp(FormatContext->iformat->name, "matroska");
av_close_input_file(FormatContext);
if (IsMatroska)
return new FFMatroskaSource(Source, VTrack, ATrack, Timecodes, VCache, VCacheFile, ACacheFile, PPString, PPQuality, Env);
else
return new FFmpegSource(Source, VTrack, ATrack, Timecodes, VCache, VCacheFile, ACacheFile, PPString, PPQuality, SeekMode, Env);
}
AVSValue __cdecl CreateFFPP(AVSValue Args, void* UserData, IScriptEnvironment* Env) {
return new FFPP(Args[0].AsClip(), Args[1].AsString(""), Args[2].AsInt(PP_QUALITY_MAX), Env);
}
extern "C" __declspec(dllexport) const char* __stdcall AvisynthPluginInit2(IScriptEnvironment* Env) {
Env->AddFunction("FFmpegSource", "[source]s[vtrack]i[atrack]i[timecodes]s[vcache]b[vcachefile]s[acachefile]s[pp]s[ppquality]i[seekmode]i", CreateFFmpegSource, 0);
Env->AddFunction("FFPP", "c[pp]s[ppquality]i", CreateFFPP, 0);
return "FFmpegSource";
};
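Both constructors above build their seek index the same way: read every packet once, record its timestamp and keyframe flag, and decode audio into the cache as it goes by. A condensed sketch of the video-index half of that pattern, using the same 2007-era libavformat names that appear in this commit (av_free_packet, PKT_FLAG_KEY) and a hypothetical IndexEntry type:

#include <stdint.h>
#include <vector>
extern "C" {
#include <ffmpeg\avformat.h>
}

struct IndexEntry { int64_t DTS; bool KeyFrame; };

// Walk every packet once; remembering each video packet's DTS and keyframe flag is
// what later allows seeking to the closest preceding keyframe and decoding forward.
static void IndexVideoTrack(AVFormatContext *FormatContext, int VideoTrack, std::vector<IndexEntry> &Index) {
	AVPacket Packet;
	while (av_read_frame(FormatContext, &Packet) >= 0) {
		if (Packet.stream_index == VideoTrack) {
			IndexEntry E;
			E.DTS = Packet.dts;
			E.KeyFrame = (Packet.flags & PKT_FLAG_KEY) != 0;
			Index.push_back(E);
		}
		av_free_packet(&Packet);
	}
}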

View File

@ -7,8 +7,8 @@
#include <fcntl.h>
#include <io.h>
#include "MatroskaParser.h"
#include "avisynth.h"
#include <stream_decoder.h>
#include <stream_encoder.h>
extern "C" {
#include <ffmpeg\avformat.h>
@ -17,6 +17,11 @@ extern "C" {
#include <postproc\postprocess.h>
}
#include "MatroskaParser.h"
#include "avisynth.h"
enum AudioCacheFormat {acNone, acRaw, acFLAC};
int GetPPCPUFlags(IScriptEnvironment *Env);
int GetSWSCPUFlags(IScriptEnvironment *Env);
CodecID MatroskaToFFCodecID(TrackInfo *TI);
@ -45,7 +50,12 @@ private:
protected:
VideoInfo VI;
AVFrame *DecodeFrame;
FILE *AudioCache;
FILE *RawAudioCache;
FLAC__StreamDecoder *FLACAudioCache;
AudioCacheFormat AudioCacheType;
uint8_t *DecodingBuffer;
FLAC__int32 *FLACBuffer;
struct FrameInfo {
int64_t DTS;
@ -61,12 +71,23 @@ protected:
bool LoadFrameInfoFromFile(const char *AVideoCacheFile, const char *ASource, int AVideoTrack);
bool SaveFrameInfoToFile(const char *AVideoCacheFile, const char *ASource, int AVideoTrack);
bool SaveTimecodesToFile(const char *ATimecodeFile, int64_t ScaleD, int64_t ScaleN);
bool PrepareAudioCache(const char *AAudioCacheFile, const char *ASource, int AAudioTrack, IScriptEnvironment *Env);
bool OpenAudioCache(const char *AAudioCacheFile, const char *ASource, int AAudioTrack, IScriptEnvironment *Env);
FLAC__StreamEncoder *FFBase::NewFLACCacheWriter(const char *AAudioCacheFile, const char *ASource, int AAudioTrack, int ACompression, IScriptEnvironment *Env);
void FFBase::CloseFLACCacheWriter(FLAC__StreamEncoder *AFSE);
FILE *FFBase::NewRawCacheWriter(const char *AAudioCacheFile, const char *ASource, int AAudioTrack, IScriptEnvironment *Env);
void FFBase::CloseRawCacheWriter(FILE *ARawCache);
void InitPP(int AWidth, int AHeight, const char *APPString, int AQuality, int APixelFormat, IScriptEnvironment *Env);
void SetOutputFormat(int ACurrentFormat, IScriptEnvironment *Env);
PVideoFrame OutputFrame(AVFrame *AFrame, IScriptEnvironment *Env);
public:
// FLAC decoder variables, have to be public
FILE *FCFile;
__int64 FCCount;
void *FCBuffer;
bool FCError;
FFBase(); FFBase();
~FFBase(); ~FFBase();
@ -74,4 +95,21 @@ public:
void __stdcall SetCacheHints(int cachehints, int frame_range) { } void __stdcall SetCacheHints(int cachehints, int frame_range) { }
const VideoInfo& __stdcall GetVideoInfo() { return VI; } const VideoInfo& __stdcall GetVideoInfo() { return VI; }
void __stdcall GetAudio(void* Buf, __int64 Start, __int64 Count, IScriptEnvironment* Env); void __stdcall GetAudio(void* Buf, __int64 Start, __int64 Count, IScriptEnvironment* Env);
}; };
class FFmpegSource : public FFBase {
private:
AVFormatContext *FormatContext;
AVCodecContext *VideoCodecContext;
int VideoTrack;
int CurrentFrame;
int SeekMode;
int GetTrackIndex(int Index, CodecType ATrackType, IScriptEnvironment *Env);
int DecodeNextFrame(AVFrame *Frame, int64_t *DTS);
public:
FFmpegSource(const char *ASource, int AVideoTrack, int AAudioTrack, const char *ATimecodes, bool AVCache, const char *AVideoCache, const char *AAudioCache, int AACCompression, const char *APPString, int AQuality, int ASeekMode, IScriptEnvironment* Env);
~FFmpegSource();
PVideoFrame __stdcall GetFrame(int n, IScriptEnvironment* Env);
};


@ -9,6 +9,24 @@ FFmpegSource Documentation
<h2>Changes</h2> <h2>Changes</h2>
<ul> <ul>
<li>1.10<ul>
<li>The audio cache compression level is now ignored if the source isn't 16-bit, and the raw format is used instead</li>
<li>FLAC is now initialized properly, so the cache actually works for files that aren't stereo (the 16-bit limit still applies)</li>
<li>Now uses proper callbacks for FLAC so it works with files larger than 2 GB</li>
<li>No longer (over)writes the video cache with an empty one in certain cases when avformat is used for the source</li>
</ul></li>
<li>1.9<ul>
<li>Added the possibility to compress the audio cache with FLAC (currently only works with 16-bit audio)</li>
<li>Added another planar YUV 4:2:0 format to the supported output formats (fixes certain mov files)</li>
<li>Less memory is now allocated on the stack, which makes av_find_stream_info() work for all files (fixes certain mov files)</li>
<li>Updated FFmpeg to rev 10186</li>
</ul></li>
<li>1.8<ul>
<li>Updated FFmpeg to rev 10141</li>
</ul></li>
<li>1.7<ul> <li>1.7<ul>
<li>Updated FFmpeg</li> <li>Updated FFmpeg</li>
<li>Fixed an error with mkv for codecs without codec private data when the first packet doesn't belong to them</li> <li>Fixed an error with mkv for codecs without codec private data when the first packet doesn't belong to them</li>
@ -85,7 +103,7 @@ FFmpegSource Documentation
<h2>Usage</h2> <h2>Usage</h2>
<p> <p>
<b>FFmpegSource(string source, int vtrack = -1, int atrack = -2, string timecodes, bool vcache = true, string vcachefile, string acachefile, string pp, int ppquality = 6, int seekmode = 1)</b><br /> <b>FFmpegSource(string source, int vtrack = -1, int atrack = -2, string timecodes, bool vcache = true, string vcachefile, string acachefile, int accompression = -1, string pp, int ppquality = 6, int seekmode = 1)</b><br />
</p> </p>
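As a hedged illustration of the parameter list above (not code from this commit), a host filter could invoke the registered function through Avisynth's IScriptEnvironment::Invoke; the file name, option values, and helper name below are placeholders.

#include "avisynth.h"

// Hypothetical host-side call using a few of the documented named arguments;
// "input.mkv" and the chosen values are illustrative only.
PClip OpenWithFFmpegSource(IScriptEnvironment *Env) {
    AVSValue Args[] = { "input.mkv", -1, -1, true };
    const char *ArgNames[] = { 0, "vtrack", "atrack", "vcache" };
    AVSValue Result = Env->Invoke("FFmpegSource", AVSValue(Args, 4), ArgNames);
    return Result.AsClip();
}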
<p> <p>
@ -97,7 +115,6 @@ Note that the audio cache will always be created when opening files with audio a
Separate postprocessing which also seems to include a few simple deinterlacers Separate postprocessing which also seems to include a few simple deinterlacers
</p> </p>
<p> <p>
<b>source:</b> <b>source:</b>
Source file. Source file.
@ -116,7 +133,7 @@ Separate postprocessing which also seems to include a few simple deinterlacers
<p> <p>
<b>vcache:</b> <b>vcache:</b>
Write video indexing information to a file for later use. Write video indexing information to a file for later use. This setting does not control whether the video index is loaded; it is always loaded if it exists.
</p> </p>
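A minimal sketch of the rule just described, assuming hypothetical helper names that do not exist in the plugin: an existing index is always loaded, and vcache only decides whether a freshly built index gets written back.

#include <cstdio>

// Illustrative only; these helpers are not part of the plugin.
static bool CacheFileExists(const char *Path) {
    if (FILE *F = std::fopen(Path, "rb")) { std::fclose(F); return true; }
    return false;
}

static void PrepareVideoIndex(const char *CacheFile, bool VCache) {
    if (CacheFileExists(CacheFile)) {
        std::puts("loading existing index");           // always done when the file exists
    } else {
        std::puts("building index by decoding");       // slow path
        if (VCache)
            std::puts("writing index to cache file");  // only when vcache = true
    }
}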
<p> <p>
@ -124,6 +141,11 @@ Separate postprocessing which also seems to include a few simple deinterlacers
Specifies the file to store the index information or raw audio in; if nothing is specified, (source).ffv(tracknumber)cache is used for video and (source).ffa(tracknumber)cache for audio. Specifies the file to store the index information or raw audio in; if nothing is specified, (source).ffv(tracknumber)cache is used for video and (source).ffa(tracknumber)cache for audio.
</p> </p>
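The default naming scheme above can be written out as a small sketch; the helper name is hypothetical and only mirrors the documented pattern.

#include <cstdio>
#include <string>

// (source).ffv(tracknumber)cache for video, (source).ffa(tracknumber)cache for audio.
static std::string DefaultCacheFile(const std::string &Source, int Track, bool Video) {
    char Suffix[32];
    std::snprintf(Suffix, sizeof(Suffix), ".ff%c%dcache", Video ? 'v' : 'a', Track);
    return Source + Suffix;
}

For example, DefaultCacheFile("clip.mkv", 0, true) would yield clip.mkv.ffv0cache.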
<p>
<b>accompression:</b>
Audio cache compression; -1 means raw audio, and 0-8 uses FLAC at that compression level.
</p>
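To make the mapping concrete, here is a minimal sketch, assuming the AudioCacheFormat values from ffmpegsource.h and folding in the 1.10 rule that FLAC is only used for 16-bit sources; the helper name is illustrative.

// Mirrors the enum declared in ffmpegsource.h.
enum AudioCacheFormat { acNone, acRaw, acFLAC };

// -1 selects raw audio; 0-8 selects FLAC, but only for 16-bit sources,
// otherwise the compression level is ignored and raw audio is cached.
static AudioCacheFormat ChooseAudioCacheFormat(int ACCompression, int BitsPerSample) {
    if (ACCompression < 0 || BitsPerSample != 16)
        return acRaw;
    return acFLAC;   // levels 0-8 are passed on to the FLAC encoder
}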
<p> <p>
<b>pp:</b> <b>pp:</b>
See the table below for a full description; an empty string means no processing. It is recommended to avoid the autoq option since it's currently unknown what effect it will have on the processing. See the table below for a full description; an empty string means no processing. It is recommended to avoid the autoq option since it's currently unknown what effect it will have on the processing.


@ -1,19 +1,5 @@
#include "ffmpegsource.h" #include "ffmpegsource.h"
int GetPPCPUFlags(IScriptEnvironment *Env) {
int Flags = 0;
long CPUFlags = Env->GetCPUFlags();
if (CPUFlags & CPUF_MMX)
CPUFlags |= PP_CPU_CAPS_MMX;
if (CPUFlags & CPUF_INTEGER_SSE)
CPUFlags |= PP_CPU_CAPS_MMX2;
if (CPUFlags & CPUF_3DNOW)
CPUFlags |= PP_CPU_CAPS_3DNOW;
return Flags;
}
FFPP::FFPP(PClip AChild, const char *APPString, int AQuality, IScriptEnvironment *Env) : GenericVideoFilter(AChild) { FFPP::FFPP(PClip AChild, const char *APPString, int AQuality, IScriptEnvironment *Env) : GenericVideoFilter(AChild) {
if (!strcmp(APPString, "")) if (!strcmp(APPString, ""))
Env->ThrowError("FFPP: PP argument is empty"); Env->ThrowError("FFPP: PP argument is empty");

FFmpegSource/ffshared.cpp Normal file

@ -0,0 +1,29 @@
#include "ffmpegsource.h"
int GetPPCPUFlags(IScriptEnvironment *Env) {
int Flags = 0;
long CPUFlags = Env->GetCPUFlags();
if (CPUFlags & CPUF_MMX)
Flags |= PP_CPU_CAPS_MMX;
if (CPUFlags & CPUF_INTEGER_SSE)
Flags |= PP_CPU_CAPS_MMX2;
if (CPUFlags & CPUF_3DNOW)
Flags |= PP_CPU_CAPS_3DNOW;
return Flags;
}
int GetSWSCPUFlags(IScriptEnvironment *Env) {
int Flags = 0;
long CPUFlags = Env->GetCPUFlags();
if (CPUFlags & CPUF_MMX)
Flags |= SWS_CPU_CAPS_MMX;
if (CPUFlags & CPUF_INTEGER_SSE)
Flags |= SWS_CPU_CAPS_MMX2;
if (CPUFlags & CPUF_3DNOW)
Flags |= SWS_CPU_CAPS_3DNOW;
return Flags;
}


@ -1,238 +0,0 @@
CodecID MatroskaToFFCodecID(TrackInfo *TI) {
char *Codec = TI->CodecID;
/* Video Codecs */
if (!strcmp(Codec, "V_MS/VFW/FOURCC")) {
// fourcc list from ffdshow
switch (((BITMAPINFOHEADER *)TI->CodecPrivate)->biCompression) {
case MAKEFOURCC('F', 'F', 'D', 'S'):
case MAKEFOURCC('F', 'V', 'F', 'W'):
case MAKEFOURCC('X', 'V', 'I', 'D'):
case MAKEFOURCC('D', 'I', 'V', 'X'):
case MAKEFOURCC('D', 'X', '5', '0'):
case MAKEFOURCC('M', 'P', '4', 'V'):
case MAKEFOURCC('3', 'I', 'V', 'X'):
case MAKEFOURCC('W', 'V', '1', 'F'):
case MAKEFOURCC('F', 'M', 'P', '4'):
case MAKEFOURCC('S', 'M', 'P', '4'):
return CODEC_ID_MPEG4;
case MAKEFOURCC('D', 'I', 'V', '3'):
case MAKEFOURCC('D', 'V', 'X', '3'):
case MAKEFOURCC('M', 'P', '4', '3'):
return CODEC_ID_MSMPEG4V3;
case MAKEFOURCC('M', 'P', '4', '2'):
return CODEC_ID_MSMPEG4V2;
case MAKEFOURCC('M', 'P', '4', '1'):
return CODEC_ID_MSMPEG4V1;
case MAKEFOURCC('W', 'M', 'V', '1'):
return CODEC_ID_WMV1;
case MAKEFOURCC('W', 'M', 'V', '2'):
return CODEC_ID_WMV2;
case MAKEFOURCC('W', 'M', 'V', '3'):
return CODEC_ID_WMV3;
/*
case MAKEFOURCC('M', 'S', 'S', '1'):
case MAKEFOURCC('M', 'S', 'S', '2'):
case MAKEFOURCC('W', 'V', 'P', '2'):
case MAKEFOURCC('W', 'M', 'V', 'P'):
return CODEC_ID_WMV9_LIB;
*/
case MAKEFOURCC('W', 'V', 'C', '1'):
return CODEC_ID_VC1;
case MAKEFOURCC('V', 'P', '5', '0'):
return CODEC_ID_VP5;
case MAKEFOURCC('V', 'P', '6', '0'):
case MAKEFOURCC('V', 'P', '6', '1'):
case MAKEFOURCC('V', 'P', '6', '2'):
return CODEC_ID_VP6;
case MAKEFOURCC('V', 'P', '6', 'F'):
case MAKEFOURCC('F', 'L', 'V', '4'):
return CODEC_ID_VP6F;
case MAKEFOURCC('C', 'A', 'V', 'S'):
return CODEC_ID_CAVS;
case MAKEFOURCC('M', 'P', 'G', '1'):
case MAKEFOURCC('M', 'P', 'E', 'G'):
return CODEC_ID_MPEG2VIDEO; // not a typo
case MAKEFOURCC('M', 'P', 'G', '2'):
case MAKEFOURCC('E', 'M', '2', 'V'):
case MAKEFOURCC('M', 'M', 'E', 'S'):
return CODEC_ID_MPEG2VIDEO;
case MAKEFOURCC('H', '2', '6', '3'):
case MAKEFOURCC('S', '2', '6', '3'):
case MAKEFOURCC('L', '2', '6', '3'):
case MAKEFOURCC('M', '2', '6', '3'):
case MAKEFOURCC('U', '2', '6', '3'):
case MAKEFOURCC('X', '2', '6', '3'):
return CODEC_ID_H263;
case MAKEFOURCC('H', '2', '6', '4'):
case MAKEFOURCC('X', '2', '6', '4'):
case MAKEFOURCC('V', 'S', 'S', 'H'):
case MAKEFOURCC('D', 'A', 'V', 'C'):
case MAKEFOURCC('P', 'A', 'V', 'C'):
case MAKEFOURCC('A', 'V', 'C', '1'):
return CODEC_ID_H264;
case MAKEFOURCC('M', 'J', 'P', 'G'):
case MAKEFOURCC('L', 'J', 'P', 'G'):
case MAKEFOURCC('M', 'J', 'L', 'S'):
case MAKEFOURCC('J', 'P', 'E', 'G'): // questionable fourcc?
case MAKEFOURCC('A', 'V', 'R', 'N'):
case MAKEFOURCC('M', 'J', 'P', 'A'):
return CODEC_ID_MJPEG;
case MAKEFOURCC('D', 'V', 'S', 'D'):
case MAKEFOURCC('D', 'V', '2', '5'):
case MAKEFOURCC('D', 'V', '5', '0'):
case MAKEFOURCC('C', 'D', 'V', 'C'):
case MAKEFOURCC('C', 'D', 'V', '5'):
case MAKEFOURCC('D', 'V', 'I', 'S'):
case MAKEFOURCC('P', 'D', 'V', 'C'):
return CODEC_ID_DVVIDEO;
case MAKEFOURCC('H', 'F', 'Y', 'U'):
case MAKEFOURCC('F', 'F', 'V', 'H'):
return CODEC_ID_HUFFYUV;
case MAKEFOURCC('C', 'Y', 'U', 'V'):
return CODEC_ID_CYUV;
case MAKEFOURCC('A', 'S', 'V', '1'):
return CODEC_ID_ASV1;
case MAKEFOURCC('A', 'S', 'V', '2'):
return CODEC_ID_ASV2;
case MAKEFOURCC('V', 'C', 'R', '1'):
return CODEC_ID_VCR1;
case MAKEFOURCC('T', 'H', 'E', 'O'):
return CODEC_ID_THEORA;
case MAKEFOURCC('S', 'V', 'Q', '1'):
return CODEC_ID_SVQ1;
case MAKEFOURCC('S', 'V', 'Q', '3'):
return CODEC_ID_SVQ3;
case MAKEFOURCC('R', 'P', 'Z', 'A'):
return CODEC_ID_RPZA;
case MAKEFOURCC('F', 'F', 'V', '1'):
return CODEC_ID_FFV1;
case MAKEFOURCC('V', 'P', '3', '1'):
return CODEC_ID_VP3;
case MAKEFOURCC('R', 'L', 'E', '8'):
return CODEC_ID_MSRLE;
case MAKEFOURCC('M', 'S', 'Z', 'H'):
return CODEC_ID_MSZH;
case MAKEFOURCC('Z', 'L', 'I', 'B'):
return CODEC_ID_ZLIB;
case MAKEFOURCC('F', 'L', 'V', '1'):
return CODEC_ID_FLV1;
/*
case MAKEFOURCC('P', 'N', 'G', '1'):
return CODEC_ID_COREPNG;
*/
case MAKEFOURCC('M', 'P', 'N', 'G'):
return CODEC_ID_PNG;
/*
case MAKEFOURCC('A', 'V', 'I', 'S'):
return CODEC_ID_AVISYNTH;
*/
case MAKEFOURCC('C', 'R', 'A', 'M'):
return CODEC_ID_MSVIDEO1;
case MAKEFOURCC('R', 'T', '2', '1'):
return CODEC_ID_INDEO2;
case MAKEFOURCC('I', 'V', '3', '2'):
case MAKEFOURCC('I', 'V', '3', '1'):
return CODEC_ID_INDEO3;
case MAKEFOURCC('C', 'V', 'I', 'D'):
return CODEC_ID_CINEPAK;
case MAKEFOURCC('R', 'V', '1', '0'):
return CODEC_ID_RV10;
case MAKEFOURCC('R', 'V', '2', '0'):
return CODEC_ID_RV20;
case MAKEFOURCC('8', 'B', 'P', 'S'):
return CODEC_ID_8BPS;
case MAKEFOURCC('Q', 'R', 'L', 'E'):
return CODEC_ID_QTRLE;
case MAKEFOURCC('D', 'U', 'C', 'K'):
return CODEC_ID_TRUEMOTION1;
case MAKEFOURCC('T', 'M', '2', '0'):
return CODEC_ID_TRUEMOTION2;
case MAKEFOURCC('T', 'S', 'C', 'C'):
return CODEC_ID_TSCC;
case MAKEFOURCC('S', 'N', 'O', 'W'):
return CODEC_ID_SNOW;
case MAKEFOURCC('Q', 'P', 'E', 'G'):
case MAKEFOURCC('Q', '1', '_', '0'):
case MAKEFOURCC('Q', '1', '_', '1'):
return CODEC_ID_QPEG;
case MAKEFOURCC('H', '2', '6', '1'):
case MAKEFOURCC('M', '2', '6', '1'):
return CODEC_ID_H261;
case MAKEFOURCC('L', 'O', 'C', 'O'):
return CODEC_ID_LOCO;
case MAKEFOURCC('W', 'N', 'V', '1'):
return CODEC_ID_WNV1;
case MAKEFOURCC('C', 'S', 'C', 'D'):
return CODEC_ID_CSCD;
case MAKEFOURCC('Z', 'M', 'B', 'V'):
return CODEC_ID_ZMBV;
case MAKEFOURCC('U', 'L', 'T', 'I'):
return CODEC_ID_ULTI;
case MAKEFOURCC('V', 'I', 'X', 'L'):
return CODEC_ID_VIXL;
case MAKEFOURCC('A', 'A', 'S', 'C'):
return CODEC_ID_AASC;
case MAKEFOURCC('F', 'P', 'S', '1'):
return CODEC_ID_FRAPS;
default:
return CODEC_ID_NONE;
}
} else if (!strcmp(Codec, "V_MPEG4/ISO/AVC"))
return CODEC_ID_H264;
else if (!strcmp(Codec, "V_MPEG4/ISO/ASP"))
return CODEC_ID_MPEG4;
else if (!strcmp(Codec, "V_MPEG2"))
return CODEC_ID_MPEG2VIDEO;
else if (!strcmp(Codec, "V_MPEG1"))
return CODEC_ID_MPEG2VIDEO; // still not a typo
else if (!strcmp(Codec, "V_SNOW"))
return CODEC_ID_SNOW;
else if (!strcmp(Codec, "V_THEORA"))
return CODEC_ID_THEORA;
else if (!strncmp(Codec, "V_REAL/RV", 9)) {
switch (Codec[9]) {
case '1':
return CODEC_ID_RV10;
case '2':
return CODEC_ID_RV20;
case '3':
return CODEC_ID_RV30;
case '4':
return CODEC_ID_RV40;
default:
return CODEC_ID_NONE;
}
/* Audio Codecs */
} else if (!strcmp(Codec, "A_AC3"))
return CODEC_ID_AC3;
else if (!strcmp(Codec, "A_MPEG/L3"))
return CODEC_ID_MP3;
else if (!strcmp(Codec, "A_MPEG/L2"))
return CODEC_ID_MP2;
else if (!strcmp(Codec, "A_MPEG/L1"))
return CODEC_ID_MP2; // correct?
else if (!strcmp(Codec, "A_DTS"))
return CODEC_ID_DTS;
/*
else if (!strcmp(Codec, "A_PCM/INT/LIT"))
return CODEC_ID_PCM_S16LE;
else if (!strcmp(Codec, "A_PCM/FLOAT/IEEE"))
return CODEC_ID_PCM_S16LE;
*/
else if (!strcmp(Codec, "A_TTA1"))
return CODEC_ID_TTA;
else if (!strcmp(Codec, "A_WAVPACK4"))
return CODEC_ID_WAVPACK;
else if (!strcmp(Codec, "A_VORBIS"))
return CODEC_ID_VORBIS;
else if (!strncmp(Codec, "A_REAL/", 7)) {
// not supported
return CODEC_ID_NONE;
} else if (!strncmp(Codec, "A_AAC", 5))
return CODEC_ID_AAC;
else if (!strcmp(Codec, "A_MS/ACM")) {
// not supported
return CODEC_ID_NONE;
} else
return CODEC_ID_NONE;
}


@ -0,0 +1,240 @@
#include "ffmpegsource.h"
CodecID MatroskaToFFCodecID(TrackInfo *TI) {
char *Codec = TI->CodecID;
/* Video Codecs */
if (!strcmp(Codec, "V_MS/VFW/FOURCC")) {
// fourcc list from ffdshow
switch (((BITMAPINFOHEADER *)TI->CodecPrivate)->biCompression) {
case MAKEFOURCC('F', 'F', 'D', 'S'):
case MAKEFOURCC('F', 'V', 'F', 'W'):
case MAKEFOURCC('X', 'V', 'I', 'D'):
case MAKEFOURCC('D', 'I', 'V', 'X'):
case MAKEFOURCC('D', 'X', '5', '0'):
case MAKEFOURCC('M', 'P', '4', 'V'):
case MAKEFOURCC('3', 'I', 'V', 'X'):
case MAKEFOURCC('W', 'V', '1', 'F'):
case MAKEFOURCC('F', 'M', 'P', '4'):
case MAKEFOURCC('S', 'M', 'P', '4'):
return CODEC_ID_MPEG4;
case MAKEFOURCC('D', 'I', 'V', '3'):
case MAKEFOURCC('D', 'V', 'X', '3'):
case MAKEFOURCC('M', 'P', '4', '3'):
return CODEC_ID_MSMPEG4V3;
case MAKEFOURCC('M', 'P', '4', '2'):
return CODEC_ID_MSMPEG4V2;
case MAKEFOURCC('M', 'P', '4', '1'):
return CODEC_ID_MSMPEG4V1;
case MAKEFOURCC('W', 'M', 'V', '1'):
return CODEC_ID_WMV1;
case MAKEFOURCC('W', 'M', 'V', '2'):
return CODEC_ID_WMV2;
case MAKEFOURCC('W', 'M', 'V', '3'):
return CODEC_ID_WMV3;
/*
case MAKEFOURCC('M', 'S', 'S', '1'):
case MAKEFOURCC('M', 'S', 'S', '2'):
case MAKEFOURCC('W', 'V', 'P', '2'):
case MAKEFOURCC('W', 'M', 'V', 'P'):
return CODEC_ID_WMV9_LIB;
*/
case MAKEFOURCC('W', 'V', 'C', '1'):
return CODEC_ID_VC1;
case MAKEFOURCC('V', 'P', '5', '0'):
return CODEC_ID_VP5;
case MAKEFOURCC('V', 'P', '6', '0'):
case MAKEFOURCC('V', 'P', '6', '1'):
case MAKEFOURCC('V', 'P', '6', '2'):
return CODEC_ID_VP6;
case MAKEFOURCC('V', 'P', '6', 'F'):
case MAKEFOURCC('F', 'L', 'V', '4'):
return CODEC_ID_VP6F;
case MAKEFOURCC('C', 'A', 'V', 'S'):
return CODEC_ID_CAVS;
case MAKEFOURCC('M', 'P', 'G', '1'):
case MAKEFOURCC('M', 'P', 'E', 'G'):
return CODEC_ID_MPEG2VIDEO; // not a typo
case MAKEFOURCC('M', 'P', 'G', '2'):
case MAKEFOURCC('E', 'M', '2', 'V'):
case MAKEFOURCC('M', 'M', 'E', 'S'):
return CODEC_ID_MPEG2VIDEO;
case MAKEFOURCC('H', '2', '6', '3'):
case MAKEFOURCC('S', '2', '6', '3'):
case MAKEFOURCC('L', '2', '6', '3'):
case MAKEFOURCC('M', '2', '6', '3'):
case MAKEFOURCC('U', '2', '6', '3'):
case MAKEFOURCC('X', '2', '6', '3'):
return CODEC_ID_H263;
case MAKEFOURCC('H', '2', '6', '4'):
case MAKEFOURCC('X', '2', '6', '4'):
case MAKEFOURCC('V', 'S', 'S', 'H'):
case MAKEFOURCC('D', 'A', 'V', 'C'):
case MAKEFOURCC('P', 'A', 'V', 'C'):
case MAKEFOURCC('A', 'V', 'C', '1'):
return CODEC_ID_H264;
case MAKEFOURCC('M', 'J', 'P', 'G'):
case MAKEFOURCC('L', 'J', 'P', 'G'):
case MAKEFOURCC('M', 'J', 'L', 'S'):
case MAKEFOURCC('J', 'P', 'E', 'G'): // questionable fourcc?
case MAKEFOURCC('A', 'V', 'R', 'N'):
case MAKEFOURCC('M', 'J', 'P', 'A'):
return CODEC_ID_MJPEG;
case MAKEFOURCC('D', 'V', 'S', 'D'):
case MAKEFOURCC('D', 'V', '2', '5'):
case MAKEFOURCC('D', 'V', '5', '0'):
case MAKEFOURCC('C', 'D', 'V', 'C'):
case MAKEFOURCC('C', 'D', 'V', '5'):
case MAKEFOURCC('D', 'V', 'I', 'S'):
case MAKEFOURCC('P', 'D', 'V', 'C'):
return CODEC_ID_DVVIDEO;
case MAKEFOURCC('H', 'F', 'Y', 'U'):
case MAKEFOURCC('F', 'F', 'V', 'H'):
return CODEC_ID_HUFFYUV;
case MAKEFOURCC('C', 'Y', 'U', 'V'):
return CODEC_ID_CYUV;
case MAKEFOURCC('A', 'S', 'V', '1'):
return CODEC_ID_ASV1;
case MAKEFOURCC('A', 'S', 'V', '2'):
return CODEC_ID_ASV2;
case MAKEFOURCC('V', 'C', 'R', '1'):
return CODEC_ID_VCR1;
case MAKEFOURCC('T', 'H', 'E', 'O'):
return CODEC_ID_THEORA;
case MAKEFOURCC('S', 'V', 'Q', '1'):
return CODEC_ID_SVQ1;
case MAKEFOURCC('S', 'V', 'Q', '3'):
return CODEC_ID_SVQ3;
case MAKEFOURCC('R', 'P', 'Z', 'A'):
return CODEC_ID_RPZA;
case MAKEFOURCC('F', 'F', 'V', '1'):
return CODEC_ID_FFV1;
case MAKEFOURCC('V', 'P', '3', '1'):
return CODEC_ID_VP3;
case MAKEFOURCC('R', 'L', 'E', '8'):
return CODEC_ID_MSRLE;
case MAKEFOURCC('M', 'S', 'Z', 'H'):
return CODEC_ID_MSZH;
case MAKEFOURCC('Z', 'L', 'I', 'B'):
return CODEC_ID_ZLIB;
case MAKEFOURCC('F', 'L', 'V', '1'):
return CODEC_ID_FLV1;
/*
case MAKEFOURCC('P', 'N', 'G', '1'):
return CODEC_ID_COREPNG;
*/
case MAKEFOURCC('M', 'P', 'N', 'G'):
return CODEC_ID_PNG;
/*
case MAKEFOURCC('A', 'V', 'I', 'S'):
return CODEC_ID_AVISYNTH;
*/
case MAKEFOURCC('C', 'R', 'A', 'M'):
return CODEC_ID_MSVIDEO1;
case MAKEFOURCC('R', 'T', '2', '1'):
return CODEC_ID_INDEO2;
case MAKEFOURCC('I', 'V', '3', '2'):
case MAKEFOURCC('I', 'V', '3', '1'):
return CODEC_ID_INDEO3;
case MAKEFOURCC('C', 'V', 'I', 'D'):
return CODEC_ID_CINEPAK;
case MAKEFOURCC('R', 'V', '1', '0'):
return CODEC_ID_RV10;
case MAKEFOURCC('R', 'V', '2', '0'):
return CODEC_ID_RV20;
case MAKEFOURCC('8', 'B', 'P', 'S'):
return CODEC_ID_8BPS;
case MAKEFOURCC('Q', 'R', 'L', 'E'):
return CODEC_ID_QTRLE;
case MAKEFOURCC('D', 'U', 'C', 'K'):
return CODEC_ID_TRUEMOTION1;
case MAKEFOURCC('T', 'M', '2', '0'):
return CODEC_ID_TRUEMOTION2;
case MAKEFOURCC('T', 'S', 'C', 'C'):
return CODEC_ID_TSCC;
case MAKEFOURCC('S', 'N', 'O', 'W'):
return CODEC_ID_SNOW;
case MAKEFOURCC('Q', 'P', 'E', 'G'):
case MAKEFOURCC('Q', '1', '_', '0'):
case MAKEFOURCC('Q', '1', '_', '1'):
return CODEC_ID_QPEG;
case MAKEFOURCC('H', '2', '6', '1'):
case MAKEFOURCC('M', '2', '6', '1'):
return CODEC_ID_H261;
case MAKEFOURCC('L', 'O', 'C', 'O'):
return CODEC_ID_LOCO;
case MAKEFOURCC('W', 'N', 'V', '1'):
return CODEC_ID_WNV1;
case MAKEFOURCC('C', 'S', 'C', 'D'):
return CODEC_ID_CSCD;
case MAKEFOURCC('Z', 'M', 'B', 'V'):
return CODEC_ID_ZMBV;
case MAKEFOURCC('U', 'L', 'T', 'I'):
return CODEC_ID_ULTI;
case MAKEFOURCC('V', 'I', 'X', 'L'):
return CODEC_ID_VIXL;
case MAKEFOURCC('A', 'A', 'S', 'C'):
return CODEC_ID_AASC;
case MAKEFOURCC('F', 'P', 'S', '1'):
return CODEC_ID_FRAPS;
default:
return CODEC_ID_NONE;
}
} else if (!strcmp(Codec, "V_MPEG4/ISO/AVC"))
return CODEC_ID_H264;
else if (!strcmp(Codec, "V_MPEG4/ISO/ASP"))
return CODEC_ID_MPEG4;
else if (!strcmp(Codec, "V_MPEG2"))
return CODEC_ID_MPEG2VIDEO;
else if (!strcmp(Codec, "V_MPEG1"))
return CODEC_ID_MPEG2VIDEO; // still not a typo
else if (!strcmp(Codec, "V_SNOW"))
return CODEC_ID_SNOW;
else if (!strcmp(Codec, "V_THEORA"))
return CODEC_ID_THEORA;
else if (!strncmp(Codec, "V_REAL/RV", 9)) {
switch (Codec[9]) {
case '1':
return CODEC_ID_RV10;
case '2':
return CODEC_ID_RV20;
case '3':
return CODEC_ID_RV30;
case '4':
return CODEC_ID_RV40;
default:
return CODEC_ID_NONE;
}
/* Audio Codecs */
} else if (!strcmp(Codec, "A_AC3"))
return CODEC_ID_AC3;
else if (!strcmp(Codec, "A_MPEG/L3"))
return CODEC_ID_MP3;
else if (!strcmp(Codec, "A_MPEG/L2"))
return CODEC_ID_MP2;
else if (!strcmp(Codec, "A_MPEG/L1"))
return CODEC_ID_MP2; // correct?
else if (!strcmp(Codec, "A_DTS"))
return CODEC_ID_DTS;
/*
else if (!strcmp(Codec, "A_PCM/INT/LIT"))
return CODEC_ID_PCM_S16LE;
else if (!strcmp(Codec, "A_PCM/FLOAT/IEEE"))
return CODEC_ID_PCM_S16LE;
*/
else if (!strcmp(Codec, "A_TTA1"))
return CODEC_ID_TTA;
else if (!strcmp(Codec, "A_WAVPACK4"))
return CODEC_ID_WAVPACK;
else if (!strcmp(Codec, "A_VORBIS"))
return CODEC_ID_VORBIS;
else if (!strncmp(Codec, "A_REAL/", 7)) {
// not supported
return CODEC_ID_NONE;
} else if (!strncmp(Codec, "A_AAC", 5))
return CODEC_ID_AAC;
else if (!strcmp(Codec, "A_MS/ACM")) {
// not supported
return CODEC_ID_NONE;
} else
return CODEC_ID_NONE;
}