FFmpegSource 1.3

Originally committed to SVN as r1421.
Fredrik Mellbin 2007-07-13 05:15:28 +00:00
parent fabfcd2a73
commit 02e15b0ac5
3 changed files with 651 additions and 143 deletions

View File

@ -1,6 +1,5 @@
#include <windows.h>
#include <stdio.h>
#include <map>
#include <vector>
#include <stdlib.h>
#include <errno.h>
@ -20,13 +19,26 @@ extern "C" {
#include "matroskacodecs.c"
class FFBase : public IClip {
protected:
VideoInfo VI;
public:
bool __stdcall GetParity(int n) { return false; }
void __stdcall GetAudio(void* buf, __int64 start, __int64 count, IScriptEnvironment* env) { }
void __stdcall SetCacheHints(int cachehints, int frame_range) { }
PVideoFrame __stdcall GetFrame(int n, IScriptEnvironment* env) { return NULL; }
const VideoInfo& __stdcall GetVideoInfo() { return VI; }
FFBase() {
memset(&VI, 0, sizeof(VI));
}
};
class FFVideoBase : public FFBase {
private:
SwsContext *SWS;
int ConvertToFormat;
int ConvertFromFormat;
protected:
VideoInfo VI;
struct FrameInfo {
int64_t DTS;
bool KeyFrame;
@ -134,43 +146,76 @@ protected:
}
public:
virtual bool __stdcall GetParity(int n) { return 0; }
virtual void __stdcall GetAudio(void* buf, __int64 start, __int64 count, IScriptEnvironment* env) { }
virtual void __stdcall SetCacheHints(int cachehints, int frame_range) { }
virtual const VideoInfo& __stdcall GetVideoInfo() { return VI; }
FFBase() {
FFVideoBase() {
SWS = NULL;
ConvertToFormat = PIX_FMT_NONE;
ConvertFromFormat = PIX_FMT_NONE;
memset(&VI, 0, sizeof(VI));
}
};
class FFMKVSource : public FFBase {
class FFAudioBase : public FFBase {
protected:
struct SampleInfo {
int64_t SampleStart;
int64_t DTS;
int64_t FilePos;
bool KeyFrame;
SampleInfo(int64_t _SampleStart, int64_t _DTS, int64_t _FilePos, bool _KeyFrame) {
DTS = _DTS;
SampleStart = _SampleStart;
FilePos = _FilePos;
KeyFrame = _KeyFrame;
}
};
std::vector<SampleInfo> SI;
int FindClosestAudioKeyFrame(int64_t Sample) {
for (unsigned int i = SI.size() - 1; i > 0; i--)
if (SI[i].SampleStart <= Sample && SI[i].KeyFrame)
return i;
return 0;
}
int SampleFromDTS(int64_t DTS) {
for (unsigned int i = 0; i < SI.size(); i++)
if (SI[i].DTS == DTS)
return i;
return -1;
}
int ClosestSampleFromDTS(int64_t DTS) {
int Index = 0;
int64_t BestDiff = 0xFFFFFFFFFFFFFF;
for (unsigned int i = 0; i < SI.size(); i++) {
int64_t CurrentDiff = FFABS(SI[i].DTS - DTS);
if (CurrentDiff < BestDiff) {
BestDiff = CurrentDiff;
Index = i;
}
}
return Index;
}
};
class FFMatroskaBase {
private:
StdIoStream ST;
unsigned int BufferSize;
CompressedStream *CS;
protected:
AVCodecContext CodecContext;
AVCodec *Codec;
AVFrame *Frame;
int CurrentFrame;
StdIoStream ST;
MatroskaFile *MF;
int Track;
char ErrorMessage[256];
unsigned int BufferSize;
uint8_t *Buffer;
CompressedStream *CS;
unsigned int ReadNextFrame(int64_t *FirstStartTime, IScriptEnvironment *Env);
int DecodeNextFrame(AVFrame *Frame, int64_t *FirstStartTime, IScriptEnvironment* Env);
public:
FFMKVSource(const char *Source, int Track, FILE *Timecodes, bool Cache, FILE *CacheFile, IScriptEnvironment* Env) {
FFMatroskaBase(const char *Source, int _Track, unsigned char TrackType, IScriptEnvironment* Env) {
Track = _Track;
BufferSize = 0;
Buffer = NULL;
Frame = NULL;
CS = NULL;
CurrentFrame = 0;
memset(&ST,0,sizeof(ST));
ST.base.read = (int (__cdecl *)(InputStream *,ulonglong,void *,int))StdIoRead;
@ -196,21 +241,21 @@ public:
if (Track < 0)
for (unsigned int i = 0; i < mkv_GetNumTracks(MF); i++)
if (mkv_GetTrackInfo(MF, i)->Type == TT_VIDEO) {
if (mkv_GetTrackInfo(MF, i)->Type == TrackType) {
Track = i;
break;
}
if (Track < 0)
Env->ThrowError("FFmpegSource: No video track found");
Env->ThrowError("FFmpegSource: No suitable track found");
if ((unsigned int)Track >= mkv_GetNumTracks(MF))
Env->ThrowError("FFmpegSource: Invalid track number: %d", Track);
Env->ThrowError("FFmpegSource: Invalid track number");
TrackInfo *TI = mkv_GetTrackInfo(MF, Track);
if (TI->Type != TT_VIDEO)
Env->ThrowError("FFmpegSource: Selected track is not video");
if (TI->Type != TrackType)
Env->ThrowError("FFmpegSource: Selected track is not of the right type");
mkv_SetTrackMask(MF, ~(1 << Track));
@ -230,6 +275,211 @@ public:
if (avcodec_open(&CodecContext, Codec) < 0)
Env->ThrowError("FFmpegSource: Could not open codec");
}
~FFMatroskaBase() {
free(Buffer);
mkv_Close(MF);
fclose(ST.fp);
avcodec_close(&CodecContext);
}
unsigned int ReadNextFrame(int64_t *FirstStartTime, IScriptEnvironment *Env) {
unsigned int TrackNumber, FrameFlags, FrameSize;
uint64_t EndTime, FilePos, StartTime;
*FirstStartTime = -1;
while (mkv_ReadFrame(MF, 0, &TrackNumber, &StartTime, &EndTime, &FilePos, &FrameSize, &FrameFlags) == 0) {
if (*FirstStartTime < 0)
*FirstStartTime = StartTime;
if (CS) {
char CSBuffer[4096];
int DecompressedFrameSize = 0;
cs_NextFrame(CS, FilePos, FrameSize);
for (;;) {
int ReadBytes = cs_ReadData(CS, CSBuffer, sizeof(CSBuffer));
if (ReadBytes < 0)
Env->ThrowError("FFmpegSource: Error decompressing data: %s", cs_GetLastError(CS));
if (ReadBytes == 0)
return DecompressedFrameSize;
if (BufferSize < DecompressedFrameSize + ReadBytes) {
BufferSize = DecompressedFrameSize + ReadBytes;
Buffer = (uint8_t *)realloc(Buffer, BufferSize);
if (Buffer == NULL)
Env->ThrowError("FFmpegSource: Out of memory");
}
memcpy(Buffer + DecompressedFrameSize, CSBuffer, ReadBytes);
DecompressedFrameSize += ReadBytes;
}
} else {
if (fseek(ST.fp, FilePos, SEEK_SET))
Env->ThrowError("FFmpegSource: fseek(): %s", strerror(errno));
if (BufferSize < FrameSize) {
BufferSize = FrameSize;
Buffer = (uint8_t *)realloc(Buffer, BufferSize);
if (Buffer == NULL)
Env->ThrowError("FFmpegSource: Out of memory");
}
size_t ReadBytes = fread(Buffer, 1, FrameSize, ST.fp);
if (ReadBytes != FrameSize) {
if (ReadBytes == 0) {
if (feof(ST.fp))
Env->ThrowError("FFmpegSource: Unexpected EOF while reading frame");
else
Env->ThrowError("FFmpegSource: Error reading frame: %s", strerror(errno));
} else
Env->ThrowError("FFmpegSource: Short read while reading frame");
return 0;
}
return FrameSize;
}
}
return 0;
}
};
class FFMKASource : public FFAudioBase, private FFMatroskaBase {
private:
int CurrentSample;
int DecodeNextAudioBlock(uint8_t *Buf, int64_t *FirstStartTime, int64_t *Count, IScriptEnvironment* Env);
public:
FFMKASource(const char *Source, int _Track, IScriptEnvironment* Env) : FFMatroskaBase(Source, _Track, TT_AUDIO, Env) {
CurrentSample = 0;
TrackInfo *TI = mkv_GetTrackInfo(MF, Track);
switch (CodecContext.sample_fmt) {
case SAMPLE_FMT_U8: VI.sample_type = SAMPLE_INT8; break;
case SAMPLE_FMT_S16: VI.sample_type = SAMPLE_INT16; break;
case SAMPLE_FMT_S24: VI.sample_type = SAMPLE_INT24; break;
case SAMPLE_FMT_S32: VI.sample_type = SAMPLE_INT32; break;
case SAMPLE_FMT_FLT: VI.sample_type = SAMPLE_FLOAT; break;
default:
Env->ThrowError("FFmpegSource: Unsupported/unknown sample format");
}
VI.nchannels = TI->AV.Audio.Channels;
VI.audio_samples_per_second = mkv_TruncFloat(TI->AV.Audio.SamplingFreq);
unsigned TrackNumber, FrameSize, FrameFlags;
ulonglong StartTime, EndTime, FilePos;
while (mkv_ReadFrame(MF, 0, &TrackNumber, &StartTime, &EndTime, &FilePos, &FrameSize, &FrameFlags) == 0) {
if (!(FrameFlags & FRAME_UNKNOWN_START))
SI.push_back(SampleInfo(VI.num_audio_samples, StartTime, FilePos, (FrameFlags & FRAME_KF) != 0));
switch (CodecContext.codec_id) {
case CODEC_ID_MP3:
case CODEC_ID_MP2:
VI.num_audio_samples += 1152; break;
default:
Env->ThrowError("FFmpegSource: Only MP2 and MP3 are currently supported in Matroska");
}
}
mkv_Seek(MF, SI[0].DTS, MKVF_SEEK_TO_PREV_KEYFRAME);
avcodec_flush_buffers(&CodecContext);
}
void __stdcall GetAudio(void* Buf, __int64 Start, __int64 Count, IScriptEnvironment* Env);
};
int FFMKASource::DecodeNextAudioBlock(uint8_t *Buf, int64_t *FirstStartTime, int64_t *Count, IScriptEnvironment* Env) {
int Ret = -1;
int FrameSize;
int64_t TempStartTime = -1;
*FirstStartTime = -1;
*Count = 0;
while (FrameSize = ReadNextFrame(&TempStartTime, Env)) {
if (*FirstStartTime < 0)
*FirstStartTime = TempStartTime;
uint8_t *Data = (uint8_t *)Buffer;
int Size = FrameSize;
while (Size > 0) {
int TempOutputBufSize = AVCODEC_MAX_AUDIO_FRAME_SIZE;
Ret = avcodec_decode_audio2(&CodecContext, (int16_t *)Buf, &TempOutputBufSize, Data, Size);
if (Ret < 0)
goto Done;
Size -= Ret;
Data += Ret;
Buf += TempOutputBufSize;
*Count += VI.AudioSamplesFromBytes(TempOutputBufSize);
if (Size <= 0)
goto Done;
}
}
Done:
return Ret;
}
void __stdcall FFMKASource::GetAudio(void* Buf, __int64 Start, __int64 Count, IScriptEnvironment* Env) {
bool HasSeeked = false;
int ClosestKF = FindClosestAudioKeyFrame(Start);
memset(Buf, 0, VI.BytesFromAudioSamples(Count));
if (Start < CurrentSample || SI[ClosestKF].SampleStart > CurrentSample) {
mkv_Seek(MF, SI[max(ClosestKF - 10, 0)].DTS, MKVF_SEEK_TO_PREV_KEYFRAME);
avcodec_flush_buffers(&CodecContext);
HasSeeked = true;
}
uint8_t *DstBuf = (uint8_t *)Buf;
int64_t RemainingSamples = Count;
int64_t DecodeCount;
do {
int64_t StartTime;
uint8_t DecodingBuffer[AVCODEC_MAX_AUDIO_FRAME_SIZE];
int64_t DecodeStart = CurrentSample;
int Ret = DecodeNextAudioBlock(DecodingBuffer, &StartTime, &DecodeCount, Env);
if (HasSeeked) {
HasSeeked = false;
int CurrentSampleIndex = SampleFromDTS(StartTime);
if (StartTime < 0 || CurrentSampleIndex < 0 || (CurrentSample = DecodeStart = SI[max(CurrentSampleIndex, 0)].SampleStart) < 0)
Env->ThrowError("FFmpegSource: Sample accurate seeking is not possible in this file");
}
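// Copy from the freshly decoded block into the output buffer: skip the
// samples that precede Start (OffsetBytes) and copy no more than the caller
// still needs or than this block actually contains (CopyBytes).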
int OffsetBytes = VI.BytesFromAudioSamples(max(0, Start - DecodeStart));
int CopyBytes = max(0, VI.BytesFromAudioSamples(min(RemainingSamples, DecodeCount - max(0, Start - DecodeStart))));
memcpy(DstBuf, DecodingBuffer + OffsetBytes, CopyBytes);
DstBuf += CopyBytes;
CurrentSample += DecodeCount;
RemainingSamples -= VI.AudioSamplesFromBytes(CopyBytes);
} while (RemainingSamples > 0 && DecodeCount > 0);
}
class FFMKVSource : public FFVideoBase, private FFMatroskaBase {
private:
AVFrame *Frame;
int CurrentFrame;
int DecodeNextFrame(AVFrame *Frame, int64_t *FirstStartTime, IScriptEnvironment* Env);
public:
FFMKVSource(const char *Source, int _Track, FILE *Timecodes, bool Cache, FILE *CacheFile, IScriptEnvironment* Env) : FFMatroskaBase(Source, _Track, TT_VIDEO, Env) {
Frame = NULL;
CurrentFrame = 0;
TrackInfo *TI = mkv_GetTrackInfo(MF, Track);
VI.image_type = VideoInfo::IT_TFF;
VI.width = TI->AV.Video.PixelWidth;
@ -249,6 +499,9 @@ public:
VI.num_frames++;
}
if (VI.num_frames == 0)
Env->ThrowError("FFmpegSource: Video track contains no frames");
mkv_Seek(MF, FrameToDTS[0].DTS, MKVF_SEEK_TO_PREV_KEYFRAME);
if (Cache) {
@ -266,78 +519,12 @@ public:
}
~FFMKVSource() {
free(Buffer);
mkv_Close(MF);
fclose(ST.fp);
av_free(Frame);
avcodec_close(&CodecContext);
}
PVideoFrame __stdcall GetFrame(int n, IScriptEnvironment* Env);
};
unsigned int FFMKVSource::ReadNextFrame(int64_t *FirstStartTime, IScriptEnvironment *Env) {
unsigned int TrackNumber, FrameFlags, FrameSize;
uint64_t EndTime, FilePos, StartTime;
*FirstStartTime = -1;
while (mkv_ReadFrame(MF, 0, &TrackNumber, &StartTime, &EndTime, &FilePos, &FrameSize, &FrameFlags) == 0) {
if (*FirstStartTime < 0)
*FirstStartTime = StartTime;
if (CS) {
char CSBuffer[4096];
int DecompressedFrameSize = 0;
cs_NextFrame(CS, FilePos, FrameSize);
for (;;) {
int ReadBytes = cs_ReadData(CS, CSBuffer, sizeof(CSBuffer));
if (ReadBytes < 0)
Env->ThrowError("FFmpegSource: Error decompressing data: %s", cs_GetLastError(CS));
if (ReadBytes == 0)
return DecompressedFrameSize;
if (BufferSize < DecompressedFrameSize + ReadBytes) {
BufferSize = FrameSize;
Buffer = (uint8_t *)realloc(Buffer, BufferSize);
if (Buffer == NULL)
Env->ThrowError("FFmpegSource: Out of memory");
}
memcpy(Buffer + DecompressedFrameSize, CSBuffer, ReadBytes);
DecompressedFrameSize += ReadBytes;
}
} else {
if (fseek(ST.fp, FilePos, SEEK_SET))
Env->ThrowError("FFmpegSource: fseek(): %s", strerror(errno));
if (BufferSize < FrameSize) {
BufferSize = FrameSize;
Buffer = (uint8_t *)realloc(Buffer, BufferSize);
if (Buffer == NULL)
Env->ThrowError("FFmpegSource: Out of memory");
}
size_t ReadBytes = fread(Buffer, 1, FrameSize, ST.fp);
if (ReadBytes != FrameSize) {
if (ReadBytes == 0) {
if (feof(ST.fp))
Env->ThrowError("FFmpegSource: Unexpected EOF while reading frame");
else
Env->ThrowError("FFmpegSource: Error reading frame: %s", strerror(errno));
} else
Env->ThrowError("FFmpegSource: Short read while reading frame");
return 0;
}
return FrameSize;
}
}
return 0;
}
int FFMKVSource::DecodeNextFrame(AVFrame *Frame, int64_t *FirstStartTime, IScriptEnvironment* Env) {
int FrameFinished = 0;
int Ret = -1;
@ -350,9 +537,14 @@ int FFMKVSource::DecodeNextFrame(AVFrame *Frame, int64_t *FirstStartTime, IScrip
Ret = avcodec_decode_video(&CodecContext, Frame, &FrameFinished, Buffer, FrameSize);
if (FrameFinished)
break;
goto Done;
}
// Flush the last frame
if (CurrentFrame == VI.num_frames - 1)
Ret = avcodec_decode_video(&CodecContext, Frame, &FrameFinished, NULL, 0);
Done:
return Ret;
}
@ -373,7 +565,7 @@ PVideoFrame __stdcall FFMKVSource::GetFrame(int n, IScriptEnvironment* Env) {
HasSeeked = false;
if (StartTime < 0 || (CurrentFrame = FrameFromDTS(StartTime)) < 0)
Env->ThrowError("FFmpegSource: Frame accurate seeking not possible in this file");
Env->ThrowError("FFmpegSource: Frame accurate seeking is not possible in this file");
}
CurrentFrame++;
@ -382,21 +574,17 @@ PVideoFrame __stdcall FFMKVSource::GetFrame(int n, IScriptEnvironment* Env) {
return OutputFrame(Frame, Env);
}
class FFmpegSource : public FFBase {
private:
class FFAVFormatBase {
protected:
AVFormatContext *FormatContext;
AVCodecContext *CodecContext;
AVCodec *Codec;
AVFrame *Frame;
int Track;
int CurrentFrame;
int SeekMode;
int DecodeNextFrame(AVFrame *Frame, int64_t *DTS);
public:
FFmpegSource(const char *Source, int _Track, int _SeekMode, FILE *Timecodes, bool Cache, FILE *CacheFile, IScriptEnvironment* Env) : Track(_Track), SeekMode(_SeekMode) {
CurrentFrame = 0;
Frame = NULL;
FFAVFormatBase(const char *Source, int _Track, CodecType TrackType, IScriptEnvironment* Env) {
FormatContext = NULL;
Codec = NULL;
Track = _Track;
if (av_open_input_file(&FormatContext, Source, NULL, 0, NULL) != 0)
Env->ThrowError("FFmpegSource: Couldn't open \"%s\"", Source);
@ -404,21 +592,21 @@ public:
if (av_find_stream_info(FormatContext) < 0)
Env->ThrowError("FFmpegSource: Couldn't find stream information");
if (Track >= (int)FormatContext->nb_streams)
Env->ThrowError("FFmpegSource: Invalid track number");
if (Track < 0)
for(unsigned int i = 0; i < FormatContext->nb_streams; i++)
if(FormatContext->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO) {
if(FormatContext->streams[i]->codec->codec_type == TrackType) {
Track = i;
break;
}
if(Track < -1)
Env->ThrowError("FFmpegSource: Couldn't find a video stream");
Env->ThrowError("FFmpegSource: No suitable track found");
if (FormatContext->streams[Track]->codec->codec_type != CODEC_TYPE_VIDEO)
Env->ThrowError("FFmpegSource: Selected stream doesn't contain video");
if (Track >= (int)FormatContext->nb_streams)
Env->ThrowError("FFmpegSource: Invalid track number");
if (FormatContext->streams[Track]->codec->codec_type != TrackType)
Env->ThrowError("FFmpegSource: Selected track is not of the right type");
CodecContext = FormatContext->streams[Track]->codec;
@ -428,6 +616,151 @@ public:
if(avcodec_open(CodecContext, Codec) < 0)
Env->ThrowError("FFmpegSource: Could not open codec");
}
~FFAVFormatBase() {
avcodec_close(CodecContext);
av_close_input_file(FormatContext);
}
};
class FFAudioSource : public FFAudioBase, private FFAVFormatBase {
private:
int CurrentSample;
int DecodeNextAudioBlock(uint8_t *Buf, int64_t *DTS, int64_t *Count, IScriptEnvironment* Env);
public:
FFAudioSource(const char *Source, int _Track, IScriptEnvironment* Env) : FFAVFormatBase(Source, _Track, CODEC_TYPE_AUDIO, Env) {
CurrentSample = 0;
switch (CodecContext->sample_fmt) {
case SAMPLE_FMT_U8: VI.sample_type = SAMPLE_INT8; break;
case SAMPLE_FMT_S16: VI.sample_type = SAMPLE_INT16; break;
case SAMPLE_FMT_S24: VI.sample_type = SAMPLE_INT24; break;
case SAMPLE_FMT_S32: VI.sample_type = SAMPLE_INT32; break;
case SAMPLE_FMT_FLT: VI.sample_type = SAMPLE_FLOAT; break;
default:
Env->ThrowError("FFmpegSource: Unsupported/unknown sample format");
}
VI.nchannels = CodecContext->channels;
VI.audio_samples_per_second = CodecContext->sample_rate;
if (CodecContext->frame_size == 0)
Env->ThrowError("Variable frame sizes currently unsupported");
AVPacket Packet;
while (av_read_frame(FormatContext, &Packet) >= 0) {
if (Packet.stream_index == Track) {
SI.push_back(SampleInfo(VI.num_audio_samples, Packet.dts, Packet.pos, (Packet.flags & PKT_FLAG_KEY) != 0));
VI.num_audio_samples += CodecContext->frame_size;
}
av_free_packet(&Packet);
}
if (VI.num_audio_samples == 0)
Env->ThrowError("FFmpegSource: Audio track contains no samples");
av_seek_frame(FormatContext, Track, SI[0].DTS, AVSEEK_FLAG_BACKWARD);
avcodec_flush_buffers(CodecContext);
}
void __stdcall GetAudio(void* Buf, __int64 Start, __int64 Count, IScriptEnvironment* Env);
};
int FFAudioSource::DecodeNextAudioBlock(uint8_t *Buf, int64_t *DTS, int64_t *Count, IScriptEnvironment* Env) {
AVPacket Packet;
int Ret = -1;
*Count = 0;
*DTS = -1;
while (av_read_frame(FormatContext, &Packet) >= 0) {
if (Packet.stream_index == Track) {
if (*DTS < 0)
*DTS = Packet.dts;
uint8_t *Data = (uint8_t *)Packet.data;
int Size = Packet.size;
while (Size > 0) {
int TempOutputBufSize = AVCODEC_MAX_AUDIO_FRAME_SIZE;
Ret = avcodec_decode_audio2(CodecContext, (int16_t *)Buf, &TempOutputBufSize, Data, Size);
if (Ret < 0) {
av_free_packet(&Packet);
goto Done;
}
Size -= Ret;
Data += Ret;
Buf += TempOutputBufSize;
*Count += VI.AudioSamplesFromBytes(TempOutputBufSize);
if (Size <= 0) {
av_free_packet(&Packet);
goto Done;
}
}
}
av_free_packet(&Packet);
}
Done:
return Ret;
}
void __stdcall FFAudioSource::GetAudio(void* Buf, __int64 Start, __int64 Count, IScriptEnvironment* Env) {
bool HasSeeked = false;
int ClosestKF = FindClosestAudioKeyFrame(Start);
memset(Buf, 0, VI.BytesFromAudioSamples(Count));
if (Start < CurrentSample || SI[ClosestKF].SampleStart > CurrentSample) {
av_seek_frame(FormatContext, Track, SI[max(ClosestKF - 10, 0)].DTS, AVSEEK_FLAG_BACKWARD);
avcodec_flush_buffers(CodecContext);
HasSeeked = true;
}
uint8_t *DstBuf = (uint8_t *)Buf;
int64_t RemainingSamples = Count;
int64_t DecodeCount;
do {
int64_t StartTime;
uint8_t DecodingBuffer[AVCODEC_MAX_AUDIO_FRAME_SIZE];
int64_t DecodeStart = CurrentSample;
int Ret = DecodeNextAudioBlock(DecodingBuffer, &StartTime, &DecodeCount, Env);
if (HasSeeked) {
HasSeeked = false;
if (StartTime < 0 || (CurrentSample = DecodeStart = SI[max(ClosestSampleFromDTS(StartTime), 0)].SampleStart) < 0)
Env->ThrowError("FFmpegSource: Sample accurate seeking is not possible in this file");
}
int OffsetBytes = VI.BytesFromAudioSamples(max(0, Start - DecodeStart));
int CopyBytes = max(0, VI.BytesFromAudioSamples(min(RemainingSamples, DecodeCount - max(0, Start - DecodeStart))));
memcpy(DstBuf, DecodingBuffer + OffsetBytes, CopyBytes);
DstBuf += CopyBytes;
CurrentSample += DecodeCount;
RemainingSamples -= VI.AudioSamplesFromBytes(CopyBytes);
} while (RemainingSamples > 0 && DecodeCount > 0);
}
class FFVideoSource : public FFVideoBase, private FFAVFormatBase {
private:
AVFrame *Frame;
int CurrentFrame;
int SeekMode;
int DecodeNextFrame(AVFrame *Frame, int64_t *DTS);
public:
FFVideoSource(const char *Source, int _Track, int _SeekMode, FILE *Timecodes, bool Cache, FILE *CacheFile, IScriptEnvironment* Env) : FFAVFormatBase(Source, _Track, CODEC_TYPE_VIDEO, Env) {
CurrentFrame = 0;
Frame = NULL;
SeekMode = _SeekMode;
VI.image_type = VideoInfo::IT_TFF;
VI.width = CodecContext->width;
@ -456,7 +789,10 @@ public:
av_free_packet(&Packet);
}
av_seek_frame(FormatContext, Track, 0, AVSEEK_FLAG_BACKWARD);
if (VI.num_frames == 0)
Env->ThrowError("FFmpegSource: Video track contains no frames");
av_seek_frame(FormatContext, Track, FrameToDTS[0].DTS, AVSEEK_FLAG_BACKWARD);
if (Cache) {
fprintf(CacheFile, "%d\n", VI.num_frames);
@ -472,16 +808,14 @@ public:
Frame = avcodec_alloc_frame();
}
~FFmpegSource() {
~FFVideoSource() {
av_free(Frame);
avcodec_close(CodecContext);
av_close_input_file(FormatContext);
}
PVideoFrame __stdcall GetFrame(int n, IScriptEnvironment* Env);
};
int FFmpegSource::DecodeNextFrame(AVFrame *Frame, int64_t *DTS) {
int FFVideoSource::DecodeNextFrame(AVFrame *Frame, int64_t *DTS) {
AVPacket Packet;
int FrameFinished = 0;
int Ret = -1;
@ -498,24 +832,31 @@ int FFmpegSource::DecodeNextFrame(AVFrame *Frame, int64_t *DTS) {
av_free_packet(&Packet);
if (FrameFinished)
break;
goto Done;
}
// Flush the last frame
if (CurrentFrame == VI.num_frames - 1)
Ret = avcodec_decode_video(CodecContext, Frame, &FrameFinished, NULL, 0);
Done:
return Ret;
}
PVideoFrame __stdcall FFmpegSource::GetFrame(int n, IScriptEnvironment* Env) {
PVideoFrame __stdcall FFVideoSource::GetFrame(int n, IScriptEnvironment* Env) {
bool HasSeeked = false;
int ClosestKF = FindClosestKeyFrame(n);
if (SeekMode == 0) {
if (n < CurrentFrame) {
av_seek_frame(FormatContext, Track, 0, AVSEEK_FLAG_BACKWARD);
av_seek_frame(FormatContext, Track, FrameToDTS[0].DTS, AVSEEK_FLAG_BACKWARD);
avcodec_flush_buffers(CodecContext);
CurrentFrame = 0;
}
} else {
if (n < CurrentFrame || FindClosestKeyFrame(n) > CurrentFrame || (SeekMode == 3 && n > CurrentFrame + 10)) {
av_seek_frame(FormatContext, Track, (SeekMode == 3) ? FrameToDTS[n].DTS : FrameToDTS[FindClosestKeyFrame(n)].DTS, AVSEEK_FLAG_BACKWARD);
// 10 frames is used as a margin to prevent excessive seeking since the predicted best keyframe isn't always selected by avformat
if (n < CurrentFrame || ClosestKF > CurrentFrame + 10 || (SeekMode == 3 && n > CurrentFrame + 10)) {
av_seek_frame(FormatContext, Track, (SeekMode == 3) ? FrameToDTS[n].DTS : FrameToDTS[ClosestKF].DTS, AVSEEK_FLAG_BACKWARD);
avcodec_flush_buffers(CodecContext);
HasSeeked = true;
}
@ -525,6 +866,9 @@ PVideoFrame __stdcall FFmpegSource::GetFrame(int n, IScriptEnvironment* Env) {
int64_t DTS;
int Ret = DecodeNextFrame(Frame, &DTS);
if (SeekMode == 0 && Frame->key_frame)
FrameToDTS[CurrentFrame].KeyFrame = true;
if (HasSeeked) {
HasSeeked = false;
@ -532,7 +876,7 @@ PVideoFrame __stdcall FFmpegSource::GetFrame(int n, IScriptEnvironment* Env) {
if (DTS < 0 || (CurrentFrame = FrameFromDTS(DTS)) < 0) {
switch (SeekMode) {
case 1:
Env->ThrowError("FFmpegSource: Frame accurate seeking not possible in this file");
Env->ThrowError("FFmpegSource: Frame accurate seeking is not possible in this file");
case 2:
case 3:
CurrentFrame = ClosestFrameFromDTS(DTS);
@ -549,20 +893,85 @@ PVideoFrame __stdcall FFmpegSource::GetFrame(int n, IScriptEnvironment* Env) {
return OutputFrame(Frame, Env);
}
AVSValue __cdecl CreateFFmpegSource(AVSValue Args, void* UserData, IScriptEnvironment* Env) {
class FFAudioRefSource : public FFBase, private FFAVFormatBase {
private:
FILE *AudioCache;
public:
FFAudioRefSource(const char *Source, int _Track, const char *CacheFile, IScriptEnvironment* Env) : FFAVFormatBase(Source, _Track, CODEC_TYPE_AUDIO, Env) {
switch (CodecContext->sample_fmt) {
case SAMPLE_FMT_U8: VI.sample_type = SAMPLE_INT8; break;
case SAMPLE_FMT_S16: VI.sample_type = SAMPLE_INT16; break;
case SAMPLE_FMT_S24: VI.sample_type = SAMPLE_INT24; break;
case SAMPLE_FMT_S32: VI.sample_type = SAMPLE_INT32; break;
case SAMPLE_FMT_FLT: VI.sample_type = SAMPLE_FLOAT; break;
default:
Env->ThrowError("FFmpegSource: Unsupported/unknown sample format");
}
VI.nchannels = CodecContext->channels;
VI.audio_samples_per_second = CodecContext->sample_rate;
AudioCache = fopen(CacheFile, "ab+");
if (AudioCache == NULL)
Env->ThrowError("FFmpegSource: Failed to open the cache file");
_fseeki64(AudioCache, 0, SEEK_END);
int64_t CacheSize = _ftelli64(AudioCache);
if (CacheSize > 0) {
VI.num_audio_samples = VI.AudioSamplesFromBytes(CacheSize);
} else {
AVPacket Packet;
uint8_t DecodingBuffer[AVCODEC_MAX_AUDIO_FRAME_SIZE];
while (av_read_frame(FormatContext, &Packet) >= 0) {
if (Packet.stream_index == Track) {
uint8_t *Data = (uint8_t *)Packet.data;
int Size = Packet.size;
while (Size > 0) {
int TempOutputBufSize = AVCODEC_MAX_AUDIO_FRAME_SIZE;
int Ret = avcodec_decode_audio2(CodecContext, (int16_t *)DecodingBuffer, &TempOutputBufSize, Data, Size);
if (Ret < 0) {
av_free_packet(&Packet);
Env->ThrowError("FFmpegSource: Audio decoding error");
}
Size -= Ret;
Data += Ret;
VI.num_audio_samples += VI.AudioSamplesFromBytes(TempOutputBufSize);
fwrite(DecodingBuffer, 1, TempOutputBufSize, AudioCache);
}
}
av_free_packet(&Packet);
}
}
}
~FFAudioRefSource() {
fclose(AudioCache);
}
void __stdcall GetAudio(void* Buf, __int64 Start, __int64 Count, IScriptEnvironment* Env) {
_fseeki64(AudioCache, VI.BytesFromAudioSamples(Start), SEEK_SET);
fread(Buf, 1, VI.BytesFromAudioSamples(Count), AudioCache);
}
};
AVSValue __cdecl CreateFFVideoSource(AVSValue Args, void* UserData, IScriptEnvironment* Env) {
if (!Args[0].Defined())
Env->ThrowError("FFmpegSource: No source specified");
// Generate default cache filename
char DefaultCacheFilename[MAX_PATH];
strcpy(DefaultCacheFilename, Args[0].AsString());
strcat(DefaultCacheFilename, ".ffcache");
const char *Source = Args[0].AsString();
int Track = Args[1].AsInt(-1);
int SeekMode = Args[2].AsInt(1);
const char *Timecodes = Args[3].AsString("");
bool Cache = Args[4].AsBool(true);
// Generate default cache filename
char DefaultCacheFilename[1024];
strcpy(DefaultCacheFilename, Source);
strcat(DefaultCacheFilename, ".ffcache");
const char *CacheFilename = Args[5].AsString(DefaultCacheFilename);
if (SeekMode < 0 || SeekMode > 3)
@ -597,7 +1006,7 @@ AVSValue __cdecl CreateFFmpegSource(AVSValue Args, void* UserData, IScriptEnviro
if (IsMatroska)
Ret = new FFMKVSource(Source, Track, TCFile, Cache, CacheFile, Env);
else
Ret = new FFmpegSource(Source, Track, SeekMode, TCFile, Cache, CacheFile, Env);
Ret = new FFVideoSource(Source, Track, SeekMode, TCFile, Cache, CacheFile, Env);
if (TCFile)
fclose(TCFile);
@ -608,8 +1017,49 @@ AVSValue __cdecl CreateFFmpegSource(AVSValue Args, void* UserData, IScriptEnviro
return Ret;
}
AVSValue __cdecl CreateFFAudioSource(AVSValue Args, void* UserData, IScriptEnvironment* Env) {
if (!Args[0].Defined())
Env->ThrowError("FFmpegSource: No source specified");
const char *Source = Args[0].AsString();
int Track = Args[1].AsInt(-1);
av_register_all();
AVFormatContext *FormatContext;
if (av_open_input_file(&FormatContext, Source, NULL, 0, NULL) != 0)
Env->ThrowError("FFmpegSource: Couldn't open \"%s\"", Source);
bool IsMatroska = !strcmp(FormatContext->iformat->name, "matroska");
av_close_input_file(FormatContext);
if (IsMatroska)
return new FFMKASource(Source, Track, Env);
else
return new FFAudioSource(Source, Track, Env);
}
AVSValue __cdecl CreateFFAudioRefSource(AVSValue Args, void* UserData, IScriptEnvironment* Env) {
if (!Args[0].Defined())
Env->ThrowError("FFmpegSource: No source specified");
const char *Source = Args[0].AsString();
int Track = Args[1].AsInt(-1);
// Generate default cache filename
char DefaultCacheFilename[1024];
strcpy(DefaultCacheFilename, Source);
strcat(DefaultCacheFilename, ".ffracache");
const char *CacheFilename = Args[2].AsString(DefaultCacheFilename);
av_register_all();
return new FFAudioRefSource(Source, Track, CacheFilename, Env);
}
extern "C" __declspec(dllexport) const char* __stdcall AvisynthPluginInit2(IScriptEnvironment* Env) {
Env->AddFunction("FFmpegSource", "[source]s[track]i[seekmode]i[timecodes]s[cache]b[cachefile]s", CreateFFmpegSource, 0);
Env->AddFunction("FFVideoSource", "[source]s[track]i[seekmode]i[timecodes]s[cache]b[cachefile]s", CreateFFVideoSource, 0);
Env->AddFunction("FFAudioSource", "[source]s[track]i", CreateFFAudioSource, 0);
Env->AddFunction("FFAudioRefSource", "[source]s[track]i[cachefile]s", CreateFFAudioRefSource, 0);
return "FFmpegSource";
};

View File

@ -10,6 +10,15 @@ FFmpegSource Documentation
<h2>Changes</h2>
<ul>
<li>1.3<ul>
<li>Compiled against ffmpeg rev9620</li>
<li>Added FFAudioRefSource</li>
<li>Added FFAudioSource (has big issues)</li>
<li>Renamed FFmpegSource to FFVideoSource</li>
<li>Forward seeking is now only performed when the requested frame is more than 10 frames away, to reduce unnecessary seeking</li>
<li>Now outputs the last frame properly when there are decoding delays</li>
</ul></li>
<li>1.2<ul>
<li>Compiled against ffmpeg rev9451</li>
<li>Somewhat cleaner source code</li>
@ -29,7 +38,18 @@ FFmpegSource Documentation
<h2>Usage</h2>
<p>
<b>FFmpegSource(string source, int track = -1, int seekmode = 1, string timecodes, bool cache = true, string CacheFile)</b>
<b>FFVideoSource(string source, int track = -1, int seekmode = 1, string timecodes, bool cache = true, string cachefile)</b><br />
A video source that is frame accurate for most popular containers
</p>
<p>
<b>FFAudioSource(string source, int track = -1)</b><br />
Very unreliable attempt at sample accurate seeking in audio files
</p>
<p>
<b>FFAudioRefSource(string source, int track = -1, string cachefile)</b><br />
Decodes all audio to a temporary file, which ensures that it will always be sample accurate
</p>
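<p>
A minimal usage sketch (the filename is only a placeholder); it pairs the frame accurate video with the cached audio using AviSynth's AudioDub:<br />
Video = FFVideoSource("video.mkv")<br />
Audio = FFAudioRefSource("video.mkv")<br />
AudioDub(Video, Audio)
</p>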
<p>
@ -39,7 +59,7 @@ FFmpegSource Documentation
<p>
<b>track:</b>
track number as seen by the relevant demuxer, starts from 0, -1 means will pick the first video track
track number as seen by the relevant demuxer, starting from 0; -1 means the first suitable track will be picked
</p>
<p>
@ -63,19 +83,25 @@ FFmpegSource Documentation
<p>
<b>cachefile:</b>
if specified the file to store the index information in, if nothing is specified <source>.ffcache is used
if specified, the file to store the index information or raw audio in; if nothing is specified, (source).ffcache is used for video and (source).ffracache for audio
</p>
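<p>
For example (the filenames and track number below are placeholders only), the track and cache file can be given explicitly:<br />
FFVideoSource("video.mkv", track=0, cachefile="video.idx")
</p>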
<h2>Compatibility</h2>
<h2>Compatibility - Video</h2>
<ul>
<li>AVI, MKV, MP4, FLV: Frame accurate</li>
<li>OGM: Messed up first frame and seeking produces smearing with seekmode=3, incredibly slow seeking without</li>
<li>WMV: No seeking</li>
<li>WMV: Frame accurate(?) but avformat seems to pick keyframes relatively far away</li>
<li>OGM: Messed up first frame, and seeking produces smearing with seekmode=3 and is incredibly slow without it; remux to MKV or AVI instead</li>
<li>VOB: No rff flags applied, frame accurate?</li>
<li>MPG: Seeking seems to be off by one or two frames now and then</li>
<li>TS: Anything except seekmode=0 is a complete waste of time</li>
<li>TS: don't bother</li>
</ul>
<h2>Compatibility - Audio</h2>
<ul>
<li>Not sample accurate ever?</li>
</ul>
<h2>Compiling</h2>
<p><b>zlib</b> from http://www.zlib.net/</p>
@ -83,10 +109,10 @@ FFmpegSource Documentation
<p><b>FFmpeg svn</b> from http://ffmpeg.mplayerhq.hu/</p>
<p><b>Required Configuration:</b>
./configure --enable-shared --disable-static --enable-memalign-hack --enable-swscaler --enable-gpl</p>
./configure --enable-shared --disable-static --enable-memalign-hack --enable-gpl --enable-swscaler</p>
<p><b>Suggested Additional Options:</b>
--disable-encoders --disable-muxers</p>
--disable-encoders --disable-muxers --enable-small</p>
</body>
</html>

View File

@ -1,7 +1,8 @@
CodecID MatroskaToFFCodecID(TrackInfo *TI) {
char *Codec = TI->CodecID;
// fourcc list from ffdshow
/* Video Codecs */
if (!strcmp(Codec, "V_MS/VFW/FOURCC")) {
// fourcc list from ffdshow
switch (((BITMAPINFOHEADER *)TI->CodecPrivate)->biCompression) {
case MAKEFOURCC('F', 'F', 'D', 'S'):
case MAKEFOURCC('F', 'V', 'F', 'W'):
@ -201,6 +202,37 @@
default:
return CODEC_ID_NONE;
}
/* Audio Codecs */
} else if (!strcmp(Codec, "A_AC3"))
return CODEC_ID_AC3;
else if (!strcmp(Codec, "A_MPEG/L3"))
return CODEC_ID_MP3;
else if (!strcmp(Codec, "A_MPEG/L2"))
return CODEC_ID_MP2;
else if (!strcmp(Codec, "A_MPEG/L1"))
return CODEC_ID_MP2; // correct?
else if (!strcmp(Codec, "A_DTS"))
return CODEC_ID_DTS;
/*
else if (!strcmp(Codec, "A_PCM/INT/LIT"))
return CODEC_ID_PCM_S16LE;
else if (!strcmp(Codec, "A_PCM/FLOAT/IEEE"))
return CODEC_ID_PCM_S16LE;
*/
else if (!strcmp(Codec, "A_TTA1"))
return CODEC_ID_TTA;
else if (!strcmp(Codec, "A_WAVPACK4"))
return CODEC_ID_WAVPACK;
else if (!strcmp(Codec, "A_VORBIS"))
return CODEC_ID_VORBIS;
else if (!strncmp(Codec, "A_REAL/", 7)) {
// not supported
return CODEC_ID_NONE;
} else if (!strncmp(Codec, "A_AAC", 5))
return CODEC_ID_AAC;
else if (!strcmp(Codec, "A_MS/ACM")) {
// not supported
return CODEC_ID_NONE;
} else
return CODEC_ID_NONE;
}