FFmpegSource 1.14

Originally committed to SVN as r1665.
Fredrik Mellbin 2008-01-05 13:27:12 +00:00
parent f27241bd3f
commit 6afff8a422
12 changed files with 808 additions and 61 deletions

View File

@@ -0,0 +1,92 @@
// Copyright (c) 2007 Fredrik Mellbin
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
#include "ffmpegsource.h"
FFAudioBase::FFAudioBase() {
memset(&VI, 0, sizeof(VI));
DecodingBuffer = new uint8_t[AVCODEC_MAX_AUDIO_FRAME_SIZE];
};
FFAudioBase::~FFAudioBase() {
delete[] DecodingBuffer;
};
size_t FFAudioBase::FindClosestAudioKeyFrame(int64_t Sample) {
for (size_t i = 0; i < SI.size(); i++) {
if (SI[i].SampleStart == Sample && SI[i].KeyFrame)
return i;
else if (SI[i].SampleStart > Sample && SI[i].KeyFrame)
return i - 1;
}
return SI.size() - 1;
}
bool FFAudioBase::LoadSampleInfoFromFile(const char *AAudioCacheFile, const char *ASource, int AAudioTrack) {
char DefaultCacheFilename[1024];
sprintf(DefaultCacheFilename, "%s.ffas%dcache", ASource, AAudioTrack);
if (!strcmp(AAudioCacheFile, ""))
AAudioCacheFile = DefaultCacheFilename;
FILE *CacheFile = fopen(AAudioCacheFile, "r");
if (!CacheFile)
return false;
size_t AudioBlocks = 0;
if (fscanf(CacheFile, "%lld %u\r\n", &VI.num_audio_samples, &AudioBlocks) <= 0 || VI.num_audio_samples <= 0 || AudioBlocks <= 0) {
VI.num_audio_samples = 0;
fclose(CacheFile);
return false;
}
for (size_t i = 0; i < AudioBlocks; i++) {
int64_t SampleStart;
int64_t FilePos;
unsigned int FrameSize;
int Flags;
fscanf(CacheFile, "%lld %lld %u %d\r\n", &SampleStart, &FilePos, &FrameSize, &Flags);
SI.push_back(SampleInfo(SampleStart, FilePos, FrameSize, (Flags & 1) != 0));
}
fclose(CacheFile);
return true;
}
bool FFAudioBase::SaveSampleInfoToFile(const char *AAudioCacheFile, const char *ASource, int AAudioTrack) {
char DefaultCacheFilename[1024];
sprintf(DefaultCacheFilename, "%s.ffas%dcache", ASource, AAudioTrack);
if (!strcmp(AAudioCacheFile, ""))
AAudioCacheFile = DefaultCacheFilename;
FILE *CacheFile = fopen(AAudioCacheFile, "wb");
if (!CacheFile)
return false;
fprintf(CacheFile, "%lld %u\r\n", VI.num_audio_samples, SI.size());
for (size_t i = 0; i < SI.size(); i++) {
int Flags = SI[i].KeyFrame ? 1 : 0;
fprintf(CacheFile, "%lld %lld %u %d\r\n", SI[i].SampleStart, SI[i].FilePos, SI[i].FrameSize, Flags);
}
fclose(CacheFile);
return true;
}
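
For reference, the index that SaveSampleInfoToFile writes above (and LoadSampleInfoFromFile parses) is a small text file: a header line with the total number of audio samples and the number of indexed blocks, followed by one line per block giving the starting sample, the byte position of the block, its size in bytes, and a flags value whose lowest bit marks a keyframe. A made-up three-block index, assuming 1152 samples per block, would look like this:

3456 3
0 5423 768 1
1152 6191 770 1
2304 6961 765 1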

View File

@@ -21,8 +21,8 @@
 #include "ffmpegsource.h"
 int FFBase::FrameFromDTS(int64_t ADTS) {
-    for (int i = 0; i < (int)FrameToDTS.size(); i++)
-        if (FrameToDTS[i].DTS == ADTS)
+    for (int i = 0; i < (int)Frames.size(); i++)
+        if (Frames[i].DTS == ADTS)
             return i;
     return -1;
 }
@@ -30,8 +30,8 @@ int FFBase::FrameFromDTS(int64_t ADTS) {
 int FFBase::ClosestFrameFromDTS(int64_t ADTS) {
     int Frame = 0;
     int64_t BestDiff = 0xFFFFFFFFFFFFFFLL;
-    for (int i = 0; i < (int)FrameToDTS.size(); i++) {
-        int64_t CurrentDiff = FFABS(FrameToDTS[i].DTS - ADTS);
+    for (int i = 0; i < (int)Frames.size(); i++) {
+        int64_t CurrentDiff = FFABS(Frames[i].DTS - ADTS);
         if (CurrentDiff < BestDiff) {
             BestDiff = CurrentDiff;
             Frame = i;
@@ -42,7 +42,7 @@ int FFBase::ClosestFrameFromDTS(int64_t ADTS) {
 int FFBase::FindClosestKeyFrame(int AFrame) {
     for (int i = AFrame; i > 0; i--)
-        if (FrameToDTS[i].KeyFrame)
+        if (Frames[i].KeyFrame)
             return i;
     return 0;
 }
@@ -67,7 +67,7 @@ bool FFBase::LoadFrameInfoFromFile(const char *AVideoCacheFile, const char *ASou
         int64_t DTSTemp;
         int KFTemp;
         fscanf(CacheFile, "%lld %d\r\n", &DTSTemp, &KFTemp);
-        FrameToDTS.push_back(FrameInfo(DTSTemp, KFTemp != 0));
+        Frames.push_back(FrameInfo(DTSTemp, KFTemp != 0));
     }
     fclose(CacheFile);
@@ -86,7 +86,7 @@ bool FFBase::SaveFrameInfoToFile(const char *AVideoCacheFile, const char *ASourc
     fprintf(CacheFile, "%d\r\n", VI.num_frames);
     for (int i = 0; i < VI.num_frames; i++)
-        fprintf(CacheFile, "%lld %d\r\n", FrameToDTS[i].DTS, (int)(FrameToDTS[i].KeyFrame ? 1 : 0));
+        fprintf(CacheFile, "%lld %d\r\n", Frames[i].DTS, (int)(Frames[i].KeyFrame ? 1 : 0));
     fclose(CacheFile);
     return true;
@@ -102,7 +102,7 @@ bool FFBase::SaveTimecodesToFile(const char *ATimecodeFile, int64_t ScaleD, int6
     std::set<int64_t> Timecodes;
     for (int i = 0; i < VI.num_frames; i++)
-        Timecodes.insert(FrameToDTS[i].DTS);
+        Timecodes.insert(Frames[i].DTS);
     fprintf(TimecodeFile, "# timecode format v2\r\n");
@@ -364,6 +364,15 @@ void FFBase::SetOutputFormat(int ACurrentFormat, IScriptEnvironment *Env) {
         ConvertToFormat = BestFormat;
         SWS = sws_getContext(VI.width, VI.height, ACurrentFormat, VI.width, VI.height, ConvertToFormat, GetSWSCPUFlags(Env) | SWS_BICUBIC, NULL, NULL, NULL);
     }
+
+    if (BestFormat == PIX_FMT_YUVJ420P || BestFormat == PIX_FMT_YUV420P) {
+        VI.height -= VI.height & 1;
+        VI.width -= VI.width & 1;
+    }
+
+    if (BestFormat == PIX_FMT_YUYV422) {
+        VI.width -= VI.width & 1;
+    }
 }
 PVideoFrame FFBase::OutputFrame(AVFrame *AFrame, IScriptEnvironment *Env) {
@@ -407,7 +416,7 @@ PVideoFrame FFBase::OutputFrame(AVFrame *AFrame, IScriptEnvironment *Env) {
 void FFBase::GetAudio(void* Buf, __int64 Start, __int64 Count, IScriptEnvironment* Env) {
     if (AudioCacheType == acRaw) {
         _fseeki64(RawAudioCache, VI.BytesFromAudioSamples(Start), SEEK_SET);
-        fread(Buf, 1, VI.BytesFromAudioSamples(Count), RawAudioCache);
+        fread(Buf, 1, (size_t)VI.BytesFromAudioSamples(Count), RawAudioCache);
 #ifdef FLAC_CACHE
     } else if (AudioCacheType == acFLAC) {
         FCCount = Count;

View File

@@ -0,0 +1,297 @@
// Copyright (c) 2007 Fredrik Mellbin
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
#include "ffmpegsource.h"
int FFMatroskaAudioSource::GetTrackIndex(int Index, unsigned char ATrackType, IScriptEnvironment *Env) {
if (Index == -1)
for (unsigned int i = 0; i < mkv_GetNumTracks(MF); i++)
if (mkv_GetTrackInfo(MF, i)->Type == ATrackType) {
Index = i;
break;
}
if (Index == -1)
Env->ThrowError("FFmpegSource: No %s track found", (ATrackType & TT_VIDEO) ? "video" : "audio");
if (Index <= -2)
return -2;
if (Index >= (int)mkv_GetNumTracks(MF))
Env->ThrowError("FFmpegSource: Invalid %s track number", (ATrackType & TT_VIDEO) ? "video" : "audio");
TrackInfo *TI = mkv_GetTrackInfo(MF, Index);
if (TI->Type != ATrackType)
Env->ThrowError("FFmpegSource: Selected track is not %s", (ATrackType & TT_VIDEO) ? "video" : "audio");
return Index;
}
FFMatroskaAudioSource::FFMatroskaAudioSource(const char *ASource, int AAudioTrack, const char *AAudioCache, IScriptEnvironment *Env) {
int AudioTrack;
AudioCodecContext = NULL;
AVCodec *AudioCodec = NULL;
TrackInfo *VideoTI = NULL;
BufferSize = 0;
Buffer = NULL;
AudioCS = NULL;
memset(&ST,0,sizeof(ST));
ST.base.read = (int (__cdecl *)(InputStream *,ulonglong,void *,int))StdIoRead;
ST.base.scan = (longlong (__cdecl *)(InputStream *,ulonglong,unsigned int))StdIoScan;
ST.base.getcachesize = (unsigned int (__cdecl *)(InputStream *))StdIoGetCacheSize;
ST.base.geterror = (const char *(__cdecl *)(InputStream *))StdIoGetLastError;
ST.base.memalloc = (void *(__cdecl *)(InputStream *,size_t))StdIoMalloc;
ST.base.memrealloc = (void *(__cdecl *)(InputStream *,void *,size_t))StdIoRealloc;
ST.base.memfree = (void (__cdecl *)(InputStream *,void *)) StdIoFree;
ST.base.progress = (int (__cdecl *)(InputStream *,ulonglong,ulonglong))StdIoProgress;
ST.fp = fopen(ASource, "rb");
if (ST.fp == NULL)
Env->ThrowError("FFmpegSource: Can't open '%s': %s", ASource, strerror(errno));
setvbuf(ST.fp, NULL, _IOFBF, CACHESIZE);
MF = mkv_OpenEx(&ST.base, 0, 0, ErrorMessage, sizeof(ErrorMessage));
if (MF == NULL) {
fclose(ST.fp);
Env->ThrowError("FFmpegSource: Can't parse Matroska file: %s", ErrorMessage);
}
AudioTrack = GetTrackIndex(AAudioTrack, TT_AUDIO, Env);
mkv_SetTrackMask(MF, ~(1 << AudioTrack));
TrackInfo *AudioTI = mkv_GetTrackInfo(MF, AudioTrack);
if (AudioTI->CompEnabled) {
AudioCS = cs_Create(MF, AudioTrack, ErrorMessage, sizeof(ErrorMessage));
if (AudioCS == NULL)
Env->ThrowError("FFmpegSource: Can't create decompressor: %s", ErrorMessage);
}
AudioCodecContext = avcodec_alloc_context();
AudioCodecContext->extradata = (uint8_t *)AudioTI->CodecPrivate;
AudioCodecContext->extradata_size = AudioTI->CodecPrivateSize;
AudioCodec = avcodec_find_decoder(MatroskaToFFCodecID(AudioTI));
if (AudioCodec == NULL)
Env->ThrowError("FFmpegSource: Audio codec not found");
if (avcodec_open(AudioCodecContext, AudioCodec) < 0)
Env->ThrowError("FFmpegSource: Could not open audio codec");
// Fix for ac3 and other codecs where decoding a block of audio is required to get information about it
if (AudioCodecContext->channels == 0 || AudioCodecContext->sample_rate == 0) {
uint64_t StartTime, EndTime, FilePos;
unsigned int Track, FrameFlags, FrameSize;
mkv_ReadFrame(MF, 0, &Track, &StartTime, &EndTime, &FilePos, &FrameSize, &FrameFlags);
int Size = ReadFrame(FilePos, FrameSize, AudioCS, Env);
uint8_t *Data = Buffer;
while (Size > 0) {
int TempOutputBufSize = AVCODEC_MAX_AUDIO_FRAME_SIZE;
int Ret = avcodec_decode_audio2(AudioCodecContext, (int16_t *)DecodingBuffer, &TempOutputBufSize, Data, Size);
if (Ret < 0)
Env->ThrowError("FFmpegSource: Audio decoding error");
Size -= Ret;
Data += Ret;
}
mkv_Seek(MF, 0, MKVF_SEEK_TO_PREV_KEYFRAME);
avcodec_flush_buffers(AudioCodecContext);
}
VI.nchannels = AudioCodecContext->channels;
VI.audio_samples_per_second = AudioCodecContext->sample_rate;
switch (AudioCodecContext->sample_fmt) {
case SAMPLE_FMT_U8: VI.sample_type = SAMPLE_INT8; break;
case SAMPLE_FMT_S16: VI.sample_type = SAMPLE_INT16; break;
case SAMPLE_FMT_S24: VI.sample_type = SAMPLE_INT24; break;
case SAMPLE_FMT_S32: VI.sample_type = SAMPLE_INT32; break;
case SAMPLE_FMT_FLT: VI.sample_type = SAMPLE_FLOAT; break;
default:
Env->ThrowError("FFmpegSource: Unsupported/unknown sample format");
}
//load audio cache
bool ACacheIsValid = LoadSampleInfoFromFile(AAudioCache, ASource, AudioTrack);
// Needs to be indexed?
if (!ACacheIsValid) {
uint64_t StartTime, EndTime, FilePos;
unsigned int Track, FrameFlags, FrameSize;
while (mkv_ReadFrame(MF, 0, &Track, &StartTime, &EndTime, &FilePos, &FrameSize, &FrameFlags) == 0) {
SI.push_back(SampleInfo(VI.num_audio_samples, FilePos, FrameSize, (FrameFlags & FRAME_KF) != 0));
if (AudioCodecContext->frame_size > 0) {
VI.num_audio_samples += AudioCodecContext->frame_size;
} else {
int Size = ReadFrame(FilePos, FrameSize, AudioCS, Env);
uint8_t *Data = Buffer;
while (Size > 0) {
int TempOutputBufSize = AVCODEC_MAX_AUDIO_FRAME_SIZE;
int Ret = avcodec_decode_audio2(AudioCodecContext, (int16_t *)DecodingBuffer, &TempOutputBufSize, Data, Size);
if (Ret < 0)
Env->ThrowError("FFmpegSource: Audio decoding error");
if (Ret > 0) {
int DecodedSamples = (int)VI.AudioSamplesFromBytes(TempOutputBufSize);
Size -= Ret;
Data += Ret;
VI.num_audio_samples += DecodedSamples;
}
}
}
}
mkv_Seek(MF, 0, MKVF_SEEK_TO_PREV_KEYFRAME);
avcodec_flush_buffers(AudioCodecContext);
if (!SaveSampleInfoToFile(AAudioCache, ASource, AudioTrack))
Env->ThrowError("FFmpegSource: Failed to save audio cache index");
}
if (VI.num_audio_samples == 0)
Env->ThrowError("FFmpegSource: Audio track contains no samples");
}
FFMatroskaAudioSource::~FFMatroskaAudioSource() {
free(Buffer);
mkv_Close(MF);
fclose(ST.fp);
if (AudioCodecContext)
avcodec_close(AudioCodecContext);
av_free(AudioCodecContext);
}
int FFMatroskaAudioSource::ReadFrame(uint64_t AFilePos, unsigned int AFrameSize, CompressedStream *ACS, IScriptEnvironment *Env) {
if (ACS) {
char CSBuffer[4096];
unsigned int DecompressedFrameSize = 0;
cs_NextFrame(ACS, AFilePos, AFrameSize);
for (;;) {
int ReadBytes = cs_ReadData(ACS, CSBuffer, sizeof(CSBuffer));
if (ReadBytes < 0)
Env->ThrowError("FFmpegSource: Error decompressing data: %s", cs_GetLastError(ACS));
if (ReadBytes == 0) {
return DecompressedFrameSize;
}
if (BufferSize < DecompressedFrameSize + ReadBytes) {
BufferSize = AFrameSize;
Buffer = (uint8_t *)realloc(Buffer, BufferSize);
if (Buffer == NULL)
Env->ThrowError("FFmpegSource: Out of memory");
}
memcpy(Buffer + DecompressedFrameSize, CSBuffer, ReadBytes);
DecompressedFrameSize += ReadBytes;
}
} else {
if (_fseeki64(ST.fp, AFilePos, SEEK_SET))
Env->ThrowError("FFmpegSource: fseek(): %s", strerror(errno));
if (BufferSize < AFrameSize) {
BufferSize = AFrameSize;
Buffer = (uint8_t *)realloc(Buffer, BufferSize);
if (Buffer == NULL)
Env->ThrowError("FFmpegSource: Out of memory");
}
size_t ReadBytes = fread(Buffer, 1, AFrameSize, ST.fp);
if (ReadBytes != AFrameSize) {
if (ReadBytes == 0) {
if (feof(ST.fp))
Env->ThrowError("FFmpegSource: Unexpected EOF while reading frame");
else
Env->ThrowError("FFmpegSource: Error reading frame: %s", strerror(errno));
} else
Env->ThrowError("FFmpegSource: Short read while reading frame");
Env->ThrowError("FFmpegSource: Unknown read error");
}
return AFrameSize;
}
return 0;
}
void __stdcall FFMatroskaAudioSource::GetAudio(void* Buf, __int64 Start, __int64 Count, IScriptEnvironment *Env) {
size_t CurrentAudioBlock = FFMAX((int64_t)FindClosestAudioKeyFrame(Start) - 10, (int64_t)0);
avcodec_flush_buffers(AudioCodecContext);
memset(Buf, 0, VI.BytesFromAudioSamples(Count));
uint8_t *DstBuf = (uint8_t *)Buf;
int64_t RemainingSamples = Count;
int64_t DecodeCount;
do {
int64_t DecodeStart = SI[CurrentAudioBlock].SampleStart;
int Ret = DecodeNextAudioBlock(DecodingBuffer, &DecodeCount, SI[CurrentAudioBlock].FilePos, SI[CurrentAudioBlock].FrameSize, Env);
if (Ret < 0)
Env->ThrowError("Bleh, bad audio decoding");
CurrentAudioBlock++;
int64_t OffsetBytes = VI.BytesFromAudioSamples(FFMAX(0, Start - DecodeStart));
int64_t CopyBytes = FFMAX(0, VI.BytesFromAudioSamples(FFMIN(RemainingSamples, DecodeCount - FFMAX(0, Start - DecodeStart))));
memcpy(DstBuf, DecodingBuffer + OffsetBytes, CopyBytes);
DstBuf += CopyBytes;
RemainingSamples -= VI.AudioSamplesFromBytes(CopyBytes);
} while (RemainingSamples > 0 && CurrentAudioBlock < SI.size());
}
int FFMatroskaAudioSource::DecodeNextAudioBlock(uint8_t *ABuf, int64_t *ACount, uint64_t AFilePos, unsigned int AFrameSize, IScriptEnvironment *Env) {
int Ret = -1;
*ACount = 0;
int FrameSize = ReadFrame(AFilePos, AFrameSize, AudioCS, Env);
uint8_t *Data = Buffer;
int Size = FrameSize;
while (Size > 0) {
int TempOutputBufSize = AVCODEC_MAX_AUDIO_FRAME_SIZE;
Ret = avcodec_decode_audio2(AudioCodecContext, (int16_t *)ABuf, &TempOutputBufSize, Data, Size);
if (Ret < 0) // throw error or something?
goto Done;
if (Ret > 0) {
Size -= Ret;
Data += Ret;
ABuf += TempOutputBufSize;
*ACount += VI.AudioSamplesFromBytes(TempOutputBufSize);
}
}
Done:
return Ret;
}
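
GetAudio above starts decoding ten indexed blocks before the block closest to the requested start, so the decoder has a run-up after avcodec_flush_buffers, and then trims every decoded block with the OffsetBytes/CopyBytes arithmetic. A worked example with invented numbers, assuming 16-bit stereo (4 bytes per sample), 1152-sample blocks, Start = 5000 and Count = 2000:

block starting at sample 4608: OffsetBytes = (5000 - 4608) * 4 = 1568, CopyBytes = min(2000, 1152 - 392) * 4 = 3040, 1240 samples remain
block starting at sample 5760: OffsetBytes = 0, CopyBytes = min(1240, 1152) * 4 = 4608, 88 samples remain
block starting at sample 6912: OffsetBytes = 0, CopyBytes = min(88, 1152) * 4 = 352, nothing remains and the loop exits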

View File

@@ -46,8 +46,9 @@ int FFMatroskaSource::GetTrackIndex(int Index, unsigned char ATrackType, IScript
 FFMatroskaSource::FFMatroskaSource(const char *ASource, int AVideoTrack, int AAudioTrack, const char *ATimecodes,
     bool AVCache, const char *AVideoCache, const char *AAudioCache, int AACCompression, const char *APPString,
-    int AQuality, IScriptEnvironment* Env) {
+    int AQuality, IScriptEnvironment *Env, FrameInfoVector *AFrames) {
+    AFrames = &Frames;
     CurrentFrame = 0;
     int VideoTrack;
     int AudioTrack;
@@ -121,8 +122,8 @@ FFMatroskaSource::FFMatroskaSource(const char *ASource, int AVideoTrack, int AAu
     }
     VI.image_type = VideoInfo::IT_TFF;
-    VI.width = VideoTI->AV.Video.PixelWidth;
-    VI.height = VideoTI->AV.Video.PixelHeight;
+    VI.width = VideoCodecContext->width;
+    VI.height = VideoCodecContext->height;;
     VI.fps_denominator = 1;
     VI.fps_numerator = 30;
@@ -163,7 +164,6 @@ FFMatroskaSource::FFMatroskaSource(const char *ASource, int AVideoTrack, int AAu
         unsigned int Track, FrameFlags, FrameSize;
         mkv_ReadFrame(MF, 0, &Track, &StartTime, &EndTime, &FilePos, &FrameSize, &FrameFlags);
-        uint8_t DecodingBuffer[AVCODEC_MAX_AUDIO_FRAME_SIZE];
         int Size = ReadFrame(FilePos, FrameSize, AudioCS, Env);
         uint8_t *Data = Buffer;
@@ -224,7 +224,7 @@ FFMatroskaSource::FFMatroskaSource(const char *ASource, int AVideoTrack, int AAu
     while (mkv_ReadFrame(MF, 0, &Track, &StartTime, &EndTime, &FilePos, &FrameSize, &FrameFlags) == 0)
         if (Track == VideoTrack && !VCacheIsValid) {
-            FrameToDTS.push_back(FrameInfo(StartTime, (FrameFlags & FRAME_KF) != 0));
+            Frames.push_back(FrameInfo(StartTime, (FrameFlags & FRAME_KF) != 0));
             VI.num_frames++;
         } else if (Track == AudioTrack && !ACacheIsValid) {
             int Size = ReadFrame(FilePos, FrameSize, AudioCS, Env);
@@ -236,7 +236,7 @@ FFMatroskaSource::FFMatroskaSource(const char *ASource, int AVideoTrack, int AAu
                 if (Ret < 0)
                     Env->ThrowError("FFmpegSource: Audio decoding error");
-                int DecodedSamples = VI.AudioSamplesFromBytes(TempOutputBufSize);
+                int DecodedSamples = (int)VI.AudioSamplesFromBytes(TempOutputBufSize);
                 Size -= Ret;
                 Data += Ret;
@@ -274,7 +274,7 @@ FFMatroskaSource::FFMatroskaSource(const char *ASource, int AVideoTrack, int AAu
         Env->ThrowError("FFmpegSource: Audio track contains no samples");
     if (VideoTrack >= 0)
-        mkv_Seek(MF, FrameToDTS.front().DTS, MKVF_SEEK_TO_PREV_KEYFRAME);
+        mkv_Seek(MF, Frames.front().DTS, MKVF_SEEK_TO_PREV_KEYFRAME);
     if (AVCache && !VCacheIsValid)
         if (!SaveFrameInfoToFile(AVideoCache, ASource, VideoTrack))
@@ -290,8 +290,8 @@ FFMatroskaSource::FFMatroskaSource(const char *ASource, int AVideoTrack, int AAu
         mkv_SetTrackMask(MF, ~(1 << VideoTrack));
         // Calculate the average framerate
-        if (FrameToDTS.size() >= 2) {
-            double DTSDiff = (double)(FrameToDTS.back().DTS - FrameToDTS.front().DTS);
+        if (Frames.size() >= 2) {
+            double DTSDiff = (double)(Frames.back().DTS - Frames.front().DTS);
             VI.fps_denominator = (unsigned int)(DTSDiff * mkv_TruncFloat(VideoTI->TimecodeScale) / (double)1000 / (double)(VI.num_frames - 1) + 0.5);
             VI.fps_numerator = 1000000;
         }
@@ -398,7 +398,7 @@ PVideoFrame FFMatroskaSource::GetFrame(int n, IScriptEnvironment* Env) {
     bool HasSeeked = false;
     if (n < CurrentFrame || FindClosestKeyFrame(n) > CurrentFrame) {
-        mkv_Seek(MF, FrameToDTS[n].DTS, MKVF_SEEK_TO_PREV_KEYFRAME);
+        mkv_Seek(MF, Frames[n].DTS, MKVF_SEEK_TO_PREV_KEYFRAME);
         avcodec_flush_buffers(VideoCodecContext);
         HasSeeked = true;
     }

View File

@@ -0,0 +1,222 @@
// Copyright (c) 2007 Fredrik Mellbin
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
#include "ffmpegsource.h"
int FFmpegAudioSource::GetTrackIndex(int Index, CodecType ATrackType, IScriptEnvironment *Env) {
if (Index == -1)
for (unsigned int i = 0; i < FormatContext->nb_streams; i++)
if (FormatContext->streams[i]->codec->codec_type == ATrackType) {
Index = i;
break;
}
if (Index == -1)
Env->ThrowError("FFmpegSource: No %s track found", (ATrackType == CODEC_TYPE_VIDEO) ? "video" : "audio");
if (Index <= -2)
return -2;
if (Index >= (int)FormatContext->nb_streams)
Env->ThrowError("FFmpegSource: Invalid %s track number", (ATrackType == CODEC_TYPE_VIDEO) ? "video" : "audio");
if (FormatContext->streams[Index]->codec->codec_type != ATrackType)
Env->ThrowError("FFmpegSource: Selected track is not %s", (ATrackType == CODEC_TYPE_VIDEO) ? "video" : "audio");
return Index;
}
bool FFmpegAudioSource::LoadSampleInfoFromFile(const char *AAudioCacheFile, const char *ADemuxedAudioFile, const char *ASource, int AAudioTrack) {
if (!FFAudioBase::LoadSampleInfoFromFile(AAudioCacheFile, ASource, AAudioTrack))
return false;
char DefaultCacheFilename[1024];
sprintf(DefaultCacheFilename, "%s.ffasd%dcache", ASource, AAudioTrack);
if (!strcmp(ADemuxedAudioFile, ""))
ADemuxedAudioFile = DefaultCacheFilename;
RawCache = fopen(ADemuxedAudioFile, "rb");
if (!RawCache)
return false;
return true;
}
FFmpegAudioSource::FFmpegAudioSource(const char *ASource, int AAudioTrack, const char *AAudioCache, const char *ADemuxedAudioFile, IScriptEnvironment *Env) {
BufferSize = 0;
Buffer = NULL;
RawCache = NULL;
FormatContext = NULL;
AudioCodecContext = NULL;
AVCodec *AudioCodec = NULL;
if (av_open_input_file(&FormatContext, ASource, NULL, 0, NULL) != 0)
Env->ThrowError("FFmpegSource: Couldn't open '%s'", ASource);
if (av_find_stream_info(FormatContext) < 0)
Env->ThrowError("FFmpegSource: Couldn't find stream information");
AudioTrack = GetTrackIndex(AAudioTrack, CODEC_TYPE_AUDIO, Env);
AudioCodecContext = FormatContext->streams[AudioTrack]->codec;
AudioCodec = avcodec_find_decoder(AudioCodecContext->codec_id);
if (AudioCodec == NULL)
Env->ThrowError("FFmpegSource: Audio codec not found");
if (avcodec_open(AudioCodecContext, AudioCodec) < 0)
Env->ThrowError("FFmpegSource: Could not open audio codec");
VI.nchannels = AudioCodecContext->channels;
VI.audio_samples_per_second = AudioCodecContext->sample_rate;
switch (AudioCodecContext->sample_fmt) {
case SAMPLE_FMT_U8: VI.sample_type = SAMPLE_INT8; break;
case SAMPLE_FMT_S16: VI.sample_type = SAMPLE_INT16; break;
case SAMPLE_FMT_S24: VI.sample_type = SAMPLE_INT24; break;
case SAMPLE_FMT_S32: VI.sample_type = SAMPLE_INT32; break;
case SAMPLE_FMT_FLT: VI.sample_type = SAMPLE_FLOAT; break;
default:
Env->ThrowError("FFmpegSource: Unsupported/unknown sample format");
}
//load cache
bool ACacheIsValid = LoadSampleInfoFromFile(AAudioCache, ADemuxedAudioFile, ASource, AudioTrack);
char DefaultCacheFilename[1024];
sprintf(DefaultCacheFilename, "%s.ffasd%dcache", ASource, AudioTrack);
if (!strcmp(ADemuxedAudioFile, ""))
ADemuxedAudioFile = DefaultCacheFilename;
if (!RawCache)
RawCache = fopen(ADemuxedAudioFile, "wb+");
// Needs to be indexed?
if (!ACacheIsValid) {
AVPacket Packet;
while (av_read_frame(FormatContext, &Packet) >= 0) {
if (Packet.stream_index == AudioTrack) {
SI.push_back(SampleInfo(VI.num_audio_samples, _ftelli64(RawCache), Packet.size, (Packet.flags & PKT_FLAG_KEY) ? 1 : 0));
fwrite(Packet.data, 1, Packet.size, RawCache);
if (AudioCodecContext->frame_size > 0) {
VI.num_audio_samples += AudioCodecContext->frame_size;
} else {
int Size = Packet.size;
uint8_t *Data = Packet.data;
while (Size > 0) {
int TempOutputBufSize = AVCODEC_MAX_AUDIO_FRAME_SIZE;
int Ret = avcodec_decode_audio2(AudioCodecContext, (int16_t *)DecodingBuffer, &TempOutputBufSize, Data, Size);
if (Ret < 0)
Env->ThrowError("FFmpegSource: Audio decoding error");
if (Ret > 0) {
int DecodedSamples = (int)VI.AudioSamplesFromBytes(TempOutputBufSize);
Size -= Ret;
Data += Ret;
VI.num_audio_samples += DecodedSamples;
}
}
}
}
av_free_packet(&Packet);
}
av_seek_frame(FormatContext, AudioTrack, 0, AVSEEK_FLAG_BACKWARD);
avcodec_flush_buffers(AudioCodecContext);
if (!SaveSampleInfoToFile(AAudioCache, ASource, AudioTrack))
Env->ThrowError("FFmpegSource: Failed to save audio cache index");
}
if (VI.num_audio_samples == 0)
Env->ThrowError("FFmpegSource: Audio track contains no samples");
}
int FFmpegAudioSource::DecodeNextAudioBlock(uint8_t *ABuf, int64_t *ACount, uint64_t AFilePos, unsigned int AFrameSize, IScriptEnvironment *Env) {
int Ret = -1;
*ACount = 0;
_fseeki64(RawCache, AFilePos, SEEK_SET);
if (AFrameSize > BufferSize) {
Buffer = (uint8_t *)realloc(Buffer, AFrameSize);
BufferSize = AFrameSize;
}
fread(Buffer, 1, AFrameSize, RawCache);
uint8_t *Data = Buffer;
int Size = AFrameSize;
while (Size > 0) {
int TempOutputBufSize = AVCODEC_MAX_AUDIO_FRAME_SIZE;
Ret = avcodec_decode_audio2(AudioCodecContext, (int16_t *)ABuf, &TempOutputBufSize, Data, Size);
if (Ret < 0) // throw error or something?
goto Done;
if (Ret > 0) {
Size -= Ret;
Data += Ret;
ABuf += TempOutputBufSize;
*ACount += VI.AudioSamplesFromBytes(TempOutputBufSize);
}
}
Done:
return Ret;
}
void FFmpegAudioSource::GetAudio(void* Buf, __int64 Start, __int64 Count, IScriptEnvironment *Env) {
size_t CurrentAudioBlock = FFMAX((int64_t)FindClosestAudioKeyFrame(Start) - 10, (int64_t)0);
avcodec_flush_buffers(AudioCodecContext);
memset(Buf, 0, VI.BytesFromAudioSamples(Count));
uint8_t *DstBuf = (uint8_t *)Buf;
int64_t RemainingSamples = Count;
int64_t DecodeCount;
do {
int64_t DecodeStart = SI[CurrentAudioBlock].SampleStart;
int Ret = DecodeNextAudioBlock(DecodingBuffer, &DecodeCount, SI[CurrentAudioBlock].FilePos, SI[CurrentAudioBlock].FrameSize, Env);
if (Ret < 0)
Env->ThrowError("Bleh, bad audio decoding");
CurrentAudioBlock++;
int64_t OffsetBytes = VI.BytesFromAudioSamples(FFMAX(0, Start - DecodeStart));
int64_t CopyBytes = FFMAX(0, VI.BytesFromAudioSamples(FFMIN(RemainingSamples, DecodeCount - FFMAX(0, Start - DecodeStart))));
memcpy(DstBuf, DecodingBuffer + OffsetBytes, CopyBytes);
DstBuf += CopyBytes;
RemainingSamples -= VI.AudioSamplesFromBytes(CopyBytes);
} while (RemainingSamples > 0 && DecodeCount > 0);
}
FFmpegAudioSource::~FFmpegAudioSource() {
if (RawCache)
fclose(RawCache);
if (AudioCodecContext)
avcodec_close(AudioCodecContext);
av_close_input_file(FormatContext);
}
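
The demuxed cache used above mirrors the Matroska path, except that the compressed packets are copied out of the container once: while indexing, every audio packet is appended to the demuxed file and SI stores the _ftelli64 offset taken just before the fwrite, so DecodeNextAudioBlock can later _fseeki64 straight to any packet without re-running the demuxer. As an invented example, three packets of 768, 770 and 765 bytes would end up back to back with FilePos values 0, 768 and 1538.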

View File

@@ -1 +0,0 @@
-loadplugin("ffmpegsource.dll")

View File

@@ -42,10 +42,13 @@ int FFmpegSource::GetTrackIndex(int Index, CodecType ATrackType, IScriptEnvironm
     return Index;
 }
 FFmpegSource::FFmpegSource(const char *ASource, int AVideoTrack, int AAudioTrack, const char *ATimecodes,
     bool AVCache, const char *AVideoCache, const char *AAudioCache, int AACCompression, const char *APPString,
-    int AQuality, int ASeekMode, IScriptEnvironment* Env) {
+    int AQuality, int ASeekMode, IScriptEnvironment *Env, FrameInfoVector *AFrames) {
+    AFrames = &Frames;
     CurrentFrame = 0;
     SeekMode = ASeekMode;
@@ -161,7 +164,7 @@ FFmpegSource::FFmpegSource(const char *ASource, int AVideoTrack, int AAudioTrack
     AVPacket Packet;
     while (av_read_frame(FormatContext, &Packet) >= 0) {
         if (Packet.stream_index == VideoTrack && !VCacheIsValid) {
-            FrameToDTS.push_back(FrameInfo(Packet.dts, (Packet.flags & PKT_FLAG_KEY) ? 1 : 0));
+            Frames.push_back(FrameInfo(Packet.dts, (Packet.flags & PKT_FLAG_KEY) ? 1 : 0));
             VI.num_frames++;
         } else if (Packet.stream_index == AudioTrack && !ACacheIsValid) {
             int Size = Packet.size;
@@ -173,7 +176,7 @@ FFmpegSource::FFmpegSource(const char *ASource, int AVideoTrack, int AAudioTrack
                 if (Ret < 0)
                     Env->ThrowError("FFmpegSource: Audio decoding error");
-                int DecodedSamples = VI.AudioSamplesFromBytes(TempOutputBufSize);
+                int DecodedSamples = (int)VI.AudioSamplesFromBytes(TempOutputBufSize);
                 Size -= Ret;
                 Data += Ret;
@@ -214,7 +217,7 @@ FFmpegSource::FFmpegSource(const char *ASource, int AVideoTrack, int AAudioTrack
         Env->ThrowError("FFmpegSource: Audio track contains no samples");
     if (VideoTrack >= 0)
-        av_seek_frame(FormatContext, VideoTrack, FrameToDTS.front().DTS, AVSEEK_FLAG_BACKWARD);
+        av_seek_frame(FormatContext, VideoTrack, Frames.front().DTS, AVSEEK_FLAG_BACKWARD);
     if (AVCache && !VCacheIsValid)
         if (!SaveFrameInfoToFile(AVideoCache, ASource, VideoTrack))
@@ -229,8 +232,8 @@ FFmpegSource::FFmpegSource(const char *ASource, int AVideoTrack, int AAudioTrack
         Env->ThrowError("FFmpegSource: Failed to write timecodes");
     // Adjust framerate to match the duration of the first frame
-    if (FrameToDTS.size() >= 2) {
-        int64_t DTSDiff = FFMAX(FrameToDTS[1].DTS - FrameToDTS[0].DTS, 1);
+    if (Frames.size() >= 2) {
+        unsigned int DTSDiff = (unsigned int)FFMAX(Frames[1].DTS - Frames[0].DTS, 1);
         VI.fps_denominator *= DTSDiff;
     }
 }
@@ -289,7 +292,7 @@ PVideoFrame FFmpegSource::GetFrame(int n, IScriptEnvironment* Env) {
     } else {
         // 10 frames is used as a margin to prevent excessive seeking since the predicted best keyframe isn't always selected by avformat
         if (n < CurrentFrame || ClosestKF > CurrentFrame + 10 || (SeekMode == 3 && n > CurrentFrame + 10)) {
-            av_seek_frame(FormatContext, VideoTrack, (SeekMode == 3) ? FrameToDTS[n].DTS : FrameToDTS[ClosestKF].DTS, AVSEEK_FLAG_BACKWARD);
+            av_seek_frame(FormatContext, VideoTrack, (SeekMode == 3) ? Frames[n].DTS : Frames[ClosestKF].DTS, AVSEEK_FLAG_BACKWARD);
             avcodec_flush_buffers(VideoCodecContext);
             HasSeeked = true;
         }

View File

@@ -22,7 +22,7 @@
 #define FFMPEGSOURCE_H
 #ifndef NO_FLAC_CACHE
-#define FLAC_CACHE
+//#define FLAC_CACHE
 #endif
 #include <windows.h>
@@ -54,6 +54,29 @@ extern "C" {
 enum AudioCacheFormat {acNone, acRaw, acFLAC};
+
+struct FrameInfo {
+    int64_t DTS;
+    bool KeyFrame;
+    FrameInfo(int64_t ADTS, bool AKeyFrame) : DTS(ADTS), KeyFrame(AKeyFrame) {};
+};
+
+typedef std::vector<FrameInfo> FrameInfoVector;
+
+struct SampleInfo {
+    int64_t SampleStart;
+    int64_t FilePos;
+    unsigned int FrameSize;
+    bool KeyFrame;
+    SampleInfo(int64_t ASampleStart, int64_t AFilePos, unsigned int AFrameSize, bool AKeyFrame) {
+        SampleStart = ASampleStart;
+        FilePos = AFilePos;
+        FrameSize = AFrameSize;
+        KeyFrame = AKeyFrame;
+    }
+};
+
+typedef std::vector<SampleInfo> SampleInfoVector;
+
 int GetPPCPUFlags(IScriptEnvironment *Env);
 int GetSWSCPUFlags(IScriptEnvironment *Env);
 CodecID MatroskaToFFCodecID(TrackInfo *TI);
@@ -93,15 +116,7 @@ protected:
     FLAC__int32 *FLACBuffer;
 #endif // FLAC_CACHE
-    struct FrameInfo {
-        int64_t DTS;
-        bool KeyFrame;
-        FrameInfo(int64_t ADTS, bool AKeyFrame) : DTS(ADTS), KeyFrame(AKeyFrame) {};
-    };
-    std::vector<FrameInfo> FrameToDTS;
+    FrameInfoVector Frames;
     int FindClosestKeyFrame(int AFrame);
     int FrameFromDTS(int64_t ADTS);
@@ -149,7 +164,7 @@ private:
     int GetTrackIndex(int Index, CodecType ATrackType, IScriptEnvironment *Env);
     int DecodeNextFrame(AVFrame *Frame, int64_t *DTS);
 public:
-    FFmpegSource(const char *ASource, int AVideoTrack, int AAudioTrack, const char *ATimecodes, bool AVCache, const char *AVideoCache, const char *AAudioCache, int AACCompression, const char *APPString, int AQuality, int ASeekMode, IScriptEnvironment* Env);
+    FFmpegSource(const char *ASource, int AVideoTrack, int AAudioTrack, const char *ATimecodes, bool AVCache, const char *AVideoCache, const char *AAudioCache, int AACCompression, const char *APPString, int AQuality, int ASeekMode, IScriptEnvironment *Env, FrameInfoVector *AFrames);
     ~FFmpegSource();
     PVideoFrame __stdcall GetFrame(int n, IScriptEnvironment *Env);
 };
@@ -170,9 +185,68 @@ private:
     int DecodeNextFrame(AVFrame *AFrame, int64_t *AFirstStartTime, IScriptEnvironment* Env);
     int GetTrackIndex(int Index, unsigned char ATrackType, IScriptEnvironment *Env);
 public:
-    FFMatroskaSource(const char *ASource, int AVideoTrack, int AAudioTrack, const char *ATimecodes, bool AVCache, const char *AVideoCache, const char *AAudioCache, int AACCompression, const char *APPString, int AQuality, IScriptEnvironment* Env);
+    FFMatroskaSource(const char *ASource, int AVideoTrack, int AAudioTrack, const char *ATimecodes, bool AVCache, const char *AVideoCache, const char *AAudioCache, int AACCompression, const char *APPString, int AQuality, IScriptEnvironment *Env, FrameInfoVector *AFrames);
     ~FFMatroskaSource();
     PVideoFrame __stdcall GetFrame(int n, IScriptEnvironment *Env);
 };
+
+class FFAudioBase : public IClip{
+protected:
+    VideoInfo VI;
+    uint8_t *DecodingBuffer;
+    SampleInfoVector SI;
+
+    size_t FindClosestAudioKeyFrame(int64_t Sample);
+    bool LoadSampleInfoFromFile(const char *AAudioCacheFile, const char *ASource, int AAudioTrack);
+    bool SaveSampleInfoToFile(const char *AAudioCacheFile, const char *ASource, int AAudioTrack);
+public:
+    FFAudioBase();
+    ~FFAudioBase();
+
+    bool __stdcall GetParity(int n) { return false; }
+    void __stdcall SetCacheHints(int cachehints, int frame_range) { }
+    const VideoInfo& __stdcall GetVideoInfo() { return VI; }
+    PVideoFrame __stdcall GetFrame(int n, IScriptEnvironment *Env) { return NULL; }
+};
+
+class FFmpegAudioSource : public FFAudioBase {
+private:
+    AVFormatContext *FormatContext;
+    AVCodecContext *AudioCodecContext;
+    int AudioTrack;
+    FILE *RawCache;
+    unsigned int BufferSize;
+    uint8_t *Buffer;
+
+    bool LoadSampleInfoFromFile(const char *AAudioCacheFile, const char *ADemuxedAudioFile, const char *ASource, int AAudioTrack);
+    int DecodeNextAudioBlock(uint8_t *ABuf, int64_t *ACount, uint64_t AFilePos, unsigned int AFrameSize, IScriptEnvironment *Env);
+    int GetTrackIndex(int Index, CodecType ATrackType, IScriptEnvironment *Env);
+public:
+    FFmpegAudioSource(const char *ASource, int AAudioTrack, const char *AAudioCache, const char *ADemuxedAudioFile, IScriptEnvironment *Env);
+    ~FFmpegAudioSource();
+
+    void __stdcall GetAudio(void* Buf, __int64 Start, __int64 Count, IScriptEnvironment *Env);
+};
+
+class FFMatroskaAudioSource : public FFAudioBase {
+private:
+    StdIoStream ST;
+    CompressedStream *AudioCS;
+    AVCodecContext *AudioCodecContext;
+    MatroskaFile *MF;
+    char ErrorMessage[256];
+    unsigned int BufferSize;
+    uint8_t *Buffer;
+
+    int ReadFrame(uint64_t AFilePos, unsigned int AFrameSize, CompressedStream *ACS, IScriptEnvironment *Env);
+    int DecodeNextAudioBlock(uint8_t *ABuf, int64_t *ACount, uint64_t AFilePos, unsigned int AFrameSize, IScriptEnvironment *Env);
+    int GetTrackIndex(int Index, unsigned char ATrackType, IScriptEnvironment *Env);
+public:
+    FFMatroskaAudioSource(const char *ASource, int AAudioTrack, const char *AAudioCache, IScriptEnvironment *Env);
+    ~FFMatroskaAudioSource();
+
+    void __stdcall GetAudio(void* Buf, __int64 Start, __int64 Count, IScriptEnvironment *Env);
+};
+
 #endif

View File

@@ -23,15 +23,9 @@ Loads video files without sucking
 <h2>Compatibility - Audio</h2>
 <ul>
-<li>Sample accurate in all containers</li>
+<li>Should be sample accurate in all containers with audio cache</li>
 </ul>
-<h2>Loading the Plugin - Dll Hell</h2>
-<p>
-In order to load FFmpegSource all included dlls except ffmpegsource.dll must be located in the path or in the current working directory.
-It is also possible to autoload it if you put all dlls and the avsi file in avisynth's autoloading directory (usually C:\Program Files\AviSynth 2.5\plugins).
-</p>
 <h2>Usage</h2>
 <p>
 <b>FFmpegSource(string source, int vtrack = -1, int atrack = -2, string timecodes, bool vcache = true, string vcachefile, string acachefile, int accompression = -1, string pp, int ppquality = 6, int seekmode = 1)</b><br />
@@ -41,6 +35,14 @@ It is also possible to autoload it if you put all dlls and the avsi file in avis
 Note that the audio cache will always be created when opening files with audio and that it will be huge since it stores all audio as raw signed 16/24/32 bit pcm, unsigned 8 bit pcm or as float, using flac to compress it is also possible to reduce the size by half.
 </p>
+<p>
+<b>FFAudioSource(string source, int atrack = -1, string acachefile, string ademuxedfile)</b><br />
+</p>
+
+<p>
+Experimental, may or may not be accurate enough for real usage.
+</p>
+
 <p>
 <b>FFPP(clip, string pp, int ppquality = 6)</b><br />
 Separate postprocessing which also seems to include a few simple deinterlacers
@@ -69,7 +71,12 @@ Separate postprocessing which also seems to include a few simple deinterlacers
 <p>
 <b>vcachefile &amp; acachefile:</b>
-Specifies the file to store the index information or raw audio in, if nothing is specified (source).ffv(tracknumber)cache is used for video and (source).ffa(tracknumber)cache for audio.
+Specifies the file to store the index information or raw audio in, if nothing is specified (source).ffv(tracknumber)cache is used for video and (source).ffa(d if FFAudioSource is used)(tracknumber)cache for audio.
+</p>
+
+<p>
+<b>ademuxedfile:</b>
+Specifies the file to store the demuxed audio stream in. Only used by FFAudioSource if the stream isn't matroska. If nothing is specified (source).ffasd(tracknumber)cache is used.
 </p>
 <p>
@@ -155,6 +162,16 @@ tn:64:128:256
 <p><b>Suggested Additional Options:</b>
 --disable-encoders --disable-muxers --enable-small</p>
+<h2>Changes</h2>
+<ul>
+<li>1.14<ul>
+<li>If the output colorspace is YV12 or YUY2 the width and height may be automatically cropped by one pixel to make it an even number</li>
+<li>FLAC cache is disabled because the static FLAC lib doesn't want to link</li>
+<li>Added the experimental FFAudioSource which doesn't need a huge uncompressed cache</li>
+<li>The plugin is now statically compiled</li>
+<li>Updated FFmpeg to rev 11413</li>
+</ul></li>
+
 <h2>Changes</h2>
 <ul>
 <li>1.13<ul>
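
Putting the documentation above together, a minimal script for this version could look like the sketch below; the file name is a placeholder, and splitting video between FFmpegSource and audio between FFAudioSource is just one plausible way to use the new function (it avoids the large raw audio cache mentioned in the changelog):

LoadPlugin("ffmpegsource.dll")
video = FFmpegSource("clip.mkv")
audio = FFAudioSource("clip.mkv")
AudioDub(video, audio)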

View File

@@ -90,13 +90,46 @@ AVSValue __cdecl CreateFFmpegSource(AVSValue Args, void* UserData, IScriptEnviro
     bool IsMatroska = !strcmp(FormatContext->iformat->name, "matroska");
     av_close_input_file(FormatContext);
+    FrameInfoVector Frames;
+
     if (IsMatroska) {
-        return new FFMatroskaSource(Source, VTrack, ATrack, Timecodes, VCache, VCacheFile, ACacheFile, ACCompression, PPString, PPQuality, Env);
+        return new FFMatroskaSource(Source, VTrack, ATrack, Timecodes, VCache, VCacheFile, ACacheFile, ACCompression, PPString, PPQuality, Env, &Frames);
     } else {
         // Do a separate indexing pass, enjoy the constructor sideeffects
         if (SeekMode == -1)
-            delete new FFmpegSource(Source, VTrack, ATrack, Timecodes, VCache, VCacheFile, ACacheFile, ACCompression, PPString, PPQuality, -2, Env);
-        return new FFmpegSource(Source, VTrack, ATrack, Timecodes, VCache, VCacheFile, ACacheFile, ACCompression, PPString, PPQuality, SeekMode, Env);
+            delete new FFmpegSource(Source, VTrack, ATrack, Timecodes, VCache, VCacheFile, ACacheFile, ACCompression, PPString, PPQuality, -2, Env, &Frames);
+        return new FFmpegSource(Source, VTrack, ATrack, Timecodes, VCache, VCacheFile, ACacheFile, ACCompression, PPString, PPQuality, SeekMode, Env, &Frames);
+    }
+}
+
+AVSValue __cdecl CreateFFAudioSource(AVSValue Args, void* UserData, IScriptEnvironment* Env) {
+    if (!UserData) {
+        av_register_all();
+        UserData = (void *)-1;
+    }
+
+    if (!Args[0].Defined())
+        Env->ThrowError("FFmpegSource: No source specified");
+
+    const char *Source = Args[0].AsString();
+    int ATrack = Args[1].AsInt(-1);
+    const char *ACacheFile = Args[2].AsString("");
+    const char *ADemuxedFile = Args[3].AsString("");
+
+    if (ATrack <= -2)
+        Env->ThrowError("FFmpegSource: No tracks selected");
+
+    AVFormatContext *FormatContext;
+
+    if (av_open_input_file(&FormatContext, Source, NULL, 0, NULL) != 0)
+        Env->ThrowError("FFmpegSource: Couldn't open %s", Args[0].AsString());
+
+    bool IsMatroska = !strcmp(FormatContext->iformat->name, "matroska");
+    av_close_input_file(FormatContext);
+
+    if (IsMatroska) {
+        return new FFMatroskaAudioSource(Source, ATrack, ACacheFile, Env);
+    } else {
+        return new FFmpegAudioSource(Source, ATrack, ACacheFile, ADemuxedFile, Env);
     }
 }
@@ -106,6 +139,7 @@ AVSValue __cdecl CreateFFPP(AVSValue Args, void* UserData, IScriptEnvironment* E
 extern "C" __declspec(dllexport) const char* __stdcall AvisynthPluginInit2(IScriptEnvironment* Env) {
     Env->AddFunction("FFmpegSource", "[source]s[vtrack]i[atrack]i[timecodes]s[vcache]b[vcachefile]s[acachefile]s[accompression]i[pp]s[ppquality]i[seekmode]i", CreateFFmpegSource, 0);
+    Env->AddFunction("FFAudioSource", "[source]s[atrack]i[acachefile]s[ademuxedfile]s", CreateFFAudioSource, 0);
     Env->AddFunction("FFPP", "c[pp]s[ppquality]i", CreateFFPP, 0);
     return "FFmpegSource";
 };