FFMS2 beta 6 (the real commit)

Requires a recent FFmpeg
Changes the HAALITS define to HAALISOURCE

Originally committed to SVN as r2780.
Fredrik Mellbin 2009-04-11 18:45:40 +00:00
parent 5f5960ef00
commit 6878e5884a
11 changed files with 223 additions and 113 deletions
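
Most of the diff below is a mechanical migration from the deprecated data/size decode calls to the AVPacket-based avcodec_decode_audio3()/avcodec_decode_video2() API introduced around this FFmpeg revision. A minimal sketch of the new calling pattern as the sources use it, assuming period-appropriate libavcodec/libavformat headers; the DecodeAudioPacket helper is illustrative and not part of the commit:

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}

// Decode one demuxed audio packet the way the updated sources do: wrap the
// payload in a temporary AVPacket and advance data/size by the bytes consumed.
static int DecodeAudioPacket(AVCodecContext *CodecContext, AVPacket &Packet, int16_t *Buf) {
	AVPacket TempPacket;
	av_init_packet(&TempPacket);
	TempPacket.data = Packet.data;
	TempPacket.size = Packet.size;

	while (TempPacket.size > 0) {
		int OutSize = AVCODEC_MAX_AUDIO_FRAME_SIZE;
		// Old API: avcodec_decode_audio2(CodecContext, Buf, &OutSize, Data, Size);
		int Ret = avcodec_decode_audio3(CodecContext, Buf, &OutSize, &TempPacket);
		if (Ret < 0)
			return Ret; // decode error; the real sources report or ignore it
		TempPacket.size -= Ret;
		TempPacket.data += Ret;
	}
	return 0;
}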


@@ -111,16 +111,18 @@ int FFAudioSource::DecodeNextAudioBlock(uint8_t *Buf, int64_t *Count, char *Erro
 	const size_t SizeConst = (av_get_bits_per_sample_format(CodecContext->sample_fmt) * CodecContext->channels) / 8;
 	int Ret = -1;
 	*Count = 0;
-	AVPacket Packet;
+	AVPacket Packet, TempPacket;
+	av_init_packet(&Packet);
+	av_init_packet(&TempPacket);

 	while (av_read_frame(FormatContext, &Packet) >= 0) {
 		if (Packet.stream_index == AudioTrack) {
-			uint8_t *Data = Packet.data;
-			int Size = Packet.size;
+			TempPacket.data = Packet.data;
+			TempPacket.size = Packet.size;

-			while (Size > 0) {
+			while (TempPacket.size > 0) {
 				int TempOutputBufSize = AVCODEC_MAX_AUDIO_FRAME_SIZE * 10;
-				Ret = avcodec_decode_audio2(CodecContext, (int16_t *)Buf, &TempOutputBufSize, Data, Size);
+				Ret = avcodec_decode_audio3(CodecContext, (int16_t *)Buf, &TempOutputBufSize, &TempPacket);

 				if (Ret < 0) {// throw error or something?
 					av_free_packet(&Packet);
@@ -128,8 +130,8 @@ int FFAudioSource::DecodeNextAudioBlock(uint8_t *Buf, int64_t *Count, char *Erro
 				}

 				if (Ret > 0) {
-					Size -= Ret;
-					Data += Ret;
+					TempPacket.size -= Ret;
+					TempPacket.data += Ret;
 					Buf += TempOutputBufSize;
 					if (SizeConst)
 						*Count += TempOutputBufSize / SizeConst;
@@ -341,22 +343,24 @@ int MatroskaAudioSource::DecodeNextAudioBlock(uint8_t *Buf, int64_t *Count, uint
 	const size_t SizeConst = (av_get_bits_per_sample_format(CodecContext->sample_fmt) * CodecContext->channels) / 8;
 	int Ret = -1;
 	*Count = 0;
+	AVPacket TempPacket;
+	av_init_packet(&TempPacket);

 	// FIXME check return
 	ReadFrame(FilePos, FrameSize, CS, MC, ErrorMsg, MsgSize);
-	int Size = FrameSize;
-	uint8_t *Data = MC.Buffer;
+	TempPacket.size = FrameSize;
+	TempPacket.data = MC.Buffer;

-	while (Size > 0) {
+	while (TempPacket.size > 0) {
 		int TempOutputBufSize = AVCODEC_MAX_AUDIO_FRAME_SIZE;
-		Ret = avcodec_decode_audio2(CodecContext, (int16_t *)Buf, &TempOutputBufSize, Data, Size);
+		Ret = avcodec_decode_audio3(CodecContext, (int16_t *)Buf, &TempOutputBufSize, &TempPacket);

 		if (Ret < 0) // throw error or something?
 			goto Done;

 		if (Ret > 0) {
-			Size -= Ret;
-			Data += Ret;
+			TempPacket.size -= Ret;
+			TempPacket.data += Ret;
 			Buf += TempOutputBufSize;
 			if (SizeConst)
 				*Count += TempOutputBufSize / SizeConst;


@@ -31,6 +31,17 @@ extern "C" {
 #include "utils.h"
 #include "ffms.h"

+#ifdef HAALISOURCE
+#	define _WIN32_DCOM
+#	include <windows.h>
+#	include <tchar.h>
+#	include <atlbase.h>
+#	include <dshow.h>
+#	include "CoParser.h"
+#	include <initguid.h>
+#	include "guids.h"
+#endif
+
 class AudioBase {
 protected:
 	uint8_t *DecodingBuffer;
@@ -77,4 +88,21 @@ public:
 	int GetAudio(void *Buf, int64_t Start, int64_t Count, char *ErrorMsg, unsigned MsgSize);
 };

+#ifdef HAALISOURCE
+
+class HaaliAudioSource : public AudioBase {
+private:
+	CComPtr<IMMContainer> pMMC;
+
+	int DecodeNextAudioBlock(uint8_t *Buf, int64_t *Count, char *ErrorMsg, unsigned MsgSize);
+	void Free(bool CloseCodec);
+public:
+	HaaliAudioSource(const char *SourceFile, int Track, FrameIndex *TrackIndices, char *ErrorMsg, unsigned MsgSize);
+	~HaaliAudioSource();
+	int GetAudio(void *Buf, int64_t Start, int64_t Count, char *ErrorMsg, unsigned MsgSize);
+};
+
+#endif // HAALISOURCE
+
 #endif


@@ -58,7 +58,8 @@ AvisynthVideoSource::AvisynthVideoSource(const char *SourceFile, int Track, Fram
 	// Set AR variables
 	Env->SetVar("FFSAR_NUM", VP.SARNum);
 	Env->SetVar("FFSAR_DEN", VP.SARDen);
-	Env->SetVar("FFSAR", VP.SARNum / (double)VP.SARDen);
+	if (VP.SARNum > 0 && VP.SARDen > 0)
+		Env->SetVar("FFSAR", VP.SARNum / (double)VP.SARDen);

 	// Set crop variables
 	Env->SetVar("FFCROP_LEFT", VP.CropLeft);


@@ -67,8 +67,9 @@ FFMS_API(VideoBase *) FFMS_CreateVideoSource(const char *SourceFile, int Track,
 	switch (TrackIndices->Decoder) {
 		case 0: return new FFVideoSource(SourceFile, Track, TrackIndices, PP, Threads, SeekMode, ErrorMsg, MsgSize);
 		case 1: return new MatroskaVideoSource(SourceFile, Track, TrackIndices, PP, Threads, ErrorMsg, MsgSize);
-#ifdef HAALITS
-		case 2: return new HaaliTSVideoSource(SourceFile, Track, TrackIndices, PP, Threads, ErrorMsg, MsgSize);
+#ifdef HAALISOURCE
+		case 2: return new HaaliVideoSource(SourceFile, Track, TrackIndices, PP, Threads, 0, ErrorMsg, MsgSize);
+		case 3: return new HaaliVideoSource(SourceFile, Track, TrackIndices, PP, Threads, 1, ErrorMsg, MsgSize);
 #endif
 		default:
 			_snprintf(ErrorMsg, MsgSize, "Unsupported format");


@@ -49,7 +49,15 @@ class FrameInfoVector;

 typedef int (FFMS_CC *IndexCallback)(int State, int64_t Current, int64_t Total, void *Private);

-enum TrackType {
+enum FFMS_SeekMode {
+	FFMS_SEEK_LINEAR_NO_RW = -1,
+	FFMS_SEEK_LINEAR = 0,
+	FFMS_SEEK_NORMAL = 1,
+	FFMS_SEEK_UNSAFE = 2,
+	FFMS_SEEK_AGGRESSIVE = 3,
+};
+
+enum FFMS_TrackType {
 	FFMS_TYPE_VIDEO = 0,
 	FFMS_TYPE_AUDIO = 1,
 };
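
The new FFMS_SeekMode values are named equivalents of the seekmode numbers already used in the documentation (-1 linear without rewinding through 3 aggressive). An illustrative call using the FFMS_CreateVideoSource() signature visible in this commit; "Index" is assumed to be a FrameIndex* produced by the indexing API and "clip.mkv" is a placeholder:

// Illustrative only: pick a seek mode by name instead of a bare integer.
char ErrorMsg[1024];
VideoBase *Video = FFMS_CreateVideoSource("clip.mkv", /* Track */ 0, Index,
	/* PP */ "", /* Threads */ 1, FFMS_SEEK_NORMAL, ErrorMsg, sizeof(ErrorMsg));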


@@ -10,14 +10,21 @@ FFmpegSource2 Documentation
 Opens files using ffmpeg and nothing else. May be frame accurate on good days. The source is MIT licensed and can be obtained from "http://svn.aegisub.net/trunk/aegisub/FFmpegSource2/". The precompiled binary is GPL licensed. If you are religious you may consider this the second coming.
 </p>

+<h2>Known issues</h2>
+<ul>
+<li>Requires <a href='http://haali.cs.msu.ru/mkv/'>Haali's Media Splitter</a> if ogm or mpeg ps/ts is to be opened.</li>
+<li>Avi files with NVOPs (rarely occurs in xvid and such) will desync when these frames are encountered. Remux to mkv/mp4 before opening to solve it for now.</li>
+<li>The audio sources still aren't sample accurate and sometimes exhibit many interesting issues. Dumping the audio during indexing is the only workaround.
+</ul>
+
 <h2>Compatibility - Video</h2>
 <ul>
 <li>AVI, MKV, MP4, FLV: Frame accurate</li>
 <li>WMV: Frame accurate(?) but avformat seems to pick keyframes relatively far away</li>
-<li>OGM: Messed up first frame and seeking produces smearing with seekmode=3, incredibly slow seeking without, remux to mkv or avi</li>
+<li>OGM: Frame accurate(?)</li>
 <li>VOB: No rff flags applied, frame accurate?</li>
 <li>MPG: Seeking seems to be off by one or two frames now and then</li>
-<li>M2TS, TS: Linear access only (seekmode=-1)</li>
+<li>M2TS, TS: Seeking seems to be off a few frames here and there</li>
 <li>Image files: Most formats can be opened if seekmode=-1 is set</li>
 </ul>
@@ -227,8 +234,11 @@ Note that --enable-w32threads is required for multithreaded decoding to work.
 <h2>Changes</h2>
 <ul>
 <li>2.00 beta 6<ul>
+<li>Haali's splitters have been improved for video and now have audio dumping during indexing implemented</li>
+<li>SeekMode=1 has improved logic which will make it go back and decode more frames if necessary to figure out where it is, in theory SeekMode=0 should now be mostly obsolete</li>
+<li>Haali's splitters are now used to open mpeg ps and ogm in addition to mpeg ts, only ogm is frame accurate at this time</li>
 <li>Negative timecodes and other bugs caused by an integer overflow fixed</li>
-<li>Updated FFmpeg to rev X (once again compilation fixes for the changes)</li>
+<li>Updated FFmpeg to rev 18442 (once again compilation fixes for the changes)</li>
 </ul></li>
 <li>2.00 beta 5<ul>


@@ -282,6 +282,7 @@ FFVideoSource::~FFVideoSource() {
 int FFVideoSource::DecodeNextFrame(AVFrame *AFrame, int64_t *AStartTime, char *ErrorMsg, unsigned MsgSize) {
 	AVPacket Packet;
+	av_init_packet(&Packet);
 	int FrameFinished = 0;
 	*AStartTime = -1;
@@ -290,7 +291,7 @@ int FFVideoSource::DecodeNextFrame(AVFrame *AFrame, int64_t *AStartTime, char *E
 			if (*AStartTime < 0)
 				*AStartTime = Packet.dts;

-			avcodec_decode_video(CodecContext, AFrame, &FrameFinished, Packet.data, Packet.size);
+			avcodec_decode_video2(CodecContext, AFrame, &FrameFinished, &Packet);
 		}

 		av_free_packet(&Packet);
@@ -300,8 +301,11 @@ int FFVideoSource::DecodeNextFrame(AVFrame *AFrame, int64_t *AStartTime, char *E
 	}

 	// Flush the last frames
-	if (CodecContext->has_b_frames)
-		avcodec_decode_video(CodecContext, AFrame, &FrameFinished, NULL, 0);
+	if (CodecContext->has_b_frames) {
+		AVPacket NullPacket;
+		av_init_packet(&NullPacket);
+		avcodec_decode_video2(CodecContext, AFrame, &FrameFinished, &NullPacket);
+	}

 	if (!FrameFinished)
 		goto Error;
@@ -318,6 +322,7 @@ AVFrameLite *FFVideoSource::GetFrame(int n, char *ErrorMsg, unsigned MsgSize) {
 		return OutputFrame(DecodeFrame);

 	bool HasSeeked = false;
+	int SeekOffset = 0;

 	if (SeekMode >= 0) {
 		int ClosestKF = Frames.FindClosestKeyFrame(n);
@@ -331,7 +336,8 @@ AVFrameLite *FFVideoSource::GetFrame(int n, char *ErrorMsg, unsigned MsgSize) {
 		} else {
 			// 10 frames is used as a margin to prevent excessive seeking since the predicted best keyframe isn't always selected by avformat
 			if (n < CurrentFrame || ClosestKF > CurrentFrame + 10 || (SeekMode == 3 && n > CurrentFrame + 10)) {
-				av_seek_frame(FormatContext, VideoTrack, (SeekMode == 3) ? Frames[n].DTS : Frames[ClosestKF].DTS, AVSEEK_FLAG_BACKWARD);
+ReSeek:
+				av_seek_frame(FormatContext, VideoTrack, (SeekMode == 3) ? Frames[n].DTS : Frames[ClosestKF + SeekOffset].DTS, AVSEEK_FLAG_BACKWARD);
 				avcodec_flush_buffers(CodecContext);
 				HasSeeked = true;
 			}
@@ -353,8 +359,14 @@ AVFrameLite *FFVideoSource::GetFrame(int n, char *ErrorMsg, unsigned MsgSize) {
 		if (StartTime < 0 || (CurrentFrame = Frames.FrameFromDTS(StartTime)) < 0) {
 			switch (SeekMode) {
 				case 1:
-					_snprintf(ErrorMsg, MsgSize, "Frame accurate seeking is not possible in this file");
-					return NULL;
+					// No idea where we are so go back a bit further
+					if (n + SeekOffset == 0) {
+						_snprintf(ErrorMsg, MsgSize, "Frame accurate seeking is not possible in this file\n");
+						return NULL;
+					}
+
+					SeekOffset -= FFMIN(20, n + SeekOffset);
+					goto ReSeek;
 				case 2:
 				case 3:
 					CurrentFrame = Frames.ClosestFrameFromDTS(StartTime);
@@ -502,6 +514,8 @@ MatroskaVideoSource::~MatroskaVideoSource() {
 int MatroskaVideoSource::DecodeNextFrame(AVFrame *AFrame, int64_t *AFirstStartTime, char *ErrorMsg, unsigned MsgSize) {
 	int FrameFinished = 0;
 	*AFirstStartTime = -1;
+	AVPacket Packet;
+	av_init_packet(&Packet);

 	ulonglong StartTime, EndTime, FilePos;
 	unsigned int Track, FrameFlags, FrameSize;
@@ -513,15 +527,24 @@ int MatroskaVideoSource::DecodeNextFrame(AVFrame *AFrame, int64_t *AFirstStartTi
 		if (ReadFrame(FilePos, FrameSize, CS, MC, ErrorMsg, MsgSize))
 			return 1;

-		avcodec_decode_video(CodecContext, AFrame, &FrameFinished, MC.Buffer, FrameSize);
+		Packet.data = MC.Buffer;
+		Packet.size = FrameSize;
+		if (FrameFlags & FRAME_KF)
+			Packet.flags = PKT_FLAG_KEY;
+		else
+			Packet.flags = 0;
+
+		avcodec_decode_video2(CodecContext, AFrame, &FrameFinished, &Packet);

 		if (FrameFinished)
 			goto Done;
 	}

 	// Flush the last frames
-	if (CodecContext->has_b_frames)
-		avcodec_decode_video(CodecContext, AFrame, &FrameFinished, NULL, 0);
+	if (CodecContext->has_b_frames) {
+		AVPacket NullPacket;
+		av_init_packet(&NullPacket);
+		avcodec_decode_video2(CodecContext, AFrame, &FrameFinished, &NullPacket);
+	}

 	if (!FrameFinished)
 		goto Error;
@@ -565,17 +588,17 @@ AVFrameLite *MatroskaVideoSource::GetFrame(int n, char *ErrorMsg, unsigned MsgSi
 	return OutputFrame(DecodeFrame);
 }

-#ifdef HAALITS
+#ifdef HAALISOURCE

-void HaaliTSVideoSource::Free(bool CloseCodec) {
+void HaaliVideoSource::Free(bool CloseCodec) {
 	if (CloseCodec)
 		avcodec_close(CodecContext);
 	av_free(CodecContext);
 }

-HaaliTSVideoSource::HaaliTSVideoSource(const char *SourceFile, int Track,
+HaaliVideoSource::HaaliVideoSource(const char *SourceFile, int Track,
 	FrameIndex *TrackIndices, const char *PP,
-	int Threads, char *ErrorMsg, unsigned MsgSize) {
+	int Threads, int SourceMode, char *ErrorMsg, unsigned MsgSize) {

 	AVCodec *Codec = NULL;
 	CodecContext = NULL;
@@ -589,20 +612,22 @@ HaaliTSVideoSource::HaaliTSVideoSource(const char *SourceFile, int Track,
 	::CoInitializeEx(NULL, COINIT_MULTITHREADED);

-	CLSID clsid = Haali_TS_Parser;
+	CLSID clsid = HAALI_TS_Parser;
+	if (SourceMode == 1)
+		clsid = HAALI_OGM_Parser;

 	if (FAILED(pMMC.CoCreateInstance(clsid))) {
 		_snprintf(ErrorMsg, MsgSize, "Can't create parser");
 		throw ErrorMsg;
 	}

 	CComPtr<IMemAlloc> pMA;
 	if (FAILED(pMA.CoCreateInstance(CLSID_MemAlloc))) {
 		_snprintf(ErrorMsg, MsgSize, "Can't create memory allocator");
 		throw ErrorMsg;
 	}

 	CComPtr<IMMStream> pMS;
 	if (FAILED(pMS.CoCreateInstance(CLSID_DiskFile))) {
 		_snprintf(ErrorMsg, MsgSize, "Can't create disk file reader");
 		throw ErrorMsg;
@@ -626,11 +651,13 @@ HaaliTSVideoSource::HaaliTSVideoSource(const char *SourceFile, int Track,
 	int CodecPrivateSize = 0;
 	int CurrentTrack = 0;
 	CComPtr<IEnumUnknown> pEU;
+	CComQIPtr<IPropertyBag> pBag;
 	if (SUCCEEDED(pMMC->EnumTracks(&pEU))) {
 		CComPtr<IUnknown> pU;
 		while (pEU->Next(1, &pU, NULL) == S_OK) {
 			if (CurrentTrack++ == Track) {
-				CComQIPtr<IPropertyBag> pBag = pU;
+				pBag = pU;
 				if (pBag) {
 					VARIANT pV;
@@ -700,7 +727,7 @@ HaaliTSVideoSource::HaaliTSVideoSource(const char *SourceFile, int Track,
 	if (Frames.size() >= 2) {
 		double DTSDiff = (double)(Frames.back().DTS - Frames.front().DTS);
 		// FIXME
-		VP.FPSDenominator = (unsigned int)((DTSDiff * 1000000000) / (double)1000 / (double)(VP.NumFrames - 1) + 0.5);
+		VP.FPSDenominator = (unsigned int)((DTSDiff * 1000000) / (double)(VP.NumFrames - 1) + 0.5);
 		VP.FPSNumerator = 1000000;
 	}
@@ -709,23 +736,22 @@ HaaliTSVideoSource::HaaliTSVideoSource(const char *SourceFile, int Track,
 	LastFrameNum = 0;

 	// Set AR variables
-//	VP.SARNum = TI->AV.Video.DisplayWidth * TI->AV.Video.PixelHeight;
-//	VP.SARDen = TI->AV.Video.DisplayHeight * TI->AV.Video.PixelWidth;
-
-	// Set crop variables
-//	VP.CropLeft = TI->AV.Video.CropL;
-//	VP.CropRight = TI->AV.Video.CropR;
-//	VP.CropTop = TI->AV.Video.CropT;
-//	VP.CropBottom = TI->AV.Video.CropB;
+	VARIANT pV;
+	if (pBag->Read(L"Video.DisplayWidth", &pV, NULL) == S_OK)
+		VP.SARNum = pV.uiVal;
+	if (pBag->Read(L"Video.DisplayHeight", &pV, NULL) == S_OK)
+		VP.SARDen = pV.uiVal;
 }

-HaaliTSVideoSource::~HaaliTSVideoSource() {
+HaaliVideoSource::~HaaliVideoSource() {
 	Free(true);
 }

-int HaaliTSVideoSource::DecodeNextFrame(AVFrame *AFrame, int64_t *AFirstStartTime, char *ErrorMsg, unsigned MsgSize) {
+int HaaliVideoSource::DecodeNextFrame(AVFrame *AFrame, int64_t *AFirstStartTime, char *ErrorMsg, unsigned MsgSize) {
 	int FrameFinished = 0;
 	*AFirstStartTime = -1;
+	AVPacket Packet;
+	av_init_packet(&Packet);

 	for (;;) {
 		CComPtr<IMMFrame> pMMF;
@@ -741,7 +767,14 @@ int HaaliTSVideoSource::DecodeNextFrame(AVFrame *AFrame, int64_t *AFirstStartTim
 		if (FAILED(pMMF->GetPointer(&Data)))
 			goto Error;

-		avcodec_decode_video(CodecContext, AFrame, &FrameFinished, Data, pMMF->GetActualDataLength());
+		Packet.data = Data;
+		Packet.size = pMMF->GetActualDataLength();
+		if (pMMF->IsSyncPoint() == S_OK)
+			Packet.flags = PKT_FLAG_KEY;
+		else
+			Packet.flags = 0;
+
+		avcodec_decode_video2(CodecContext, AFrame, &FrameFinished, &Packet);

 		if (FrameFinished)
 			goto Done;
@@ -749,8 +782,11 @@ int HaaliTSVideoSource::DecodeNextFrame(AVFrame *AFrame, int64_t *AFirstStartTim
 	}

 	// Flush the last frames
-	if (CodecContext->has_b_frames)
-		avcodec_decode_video(CodecContext, AFrame, &FrameFinished, NULL, 0);
+	if (CodecContext->has_b_frames) {
+		AVPacket NullPacket;
+		av_init_packet(&NullPacket);
+		avcodec_decode_video2(CodecContext, AFrame, &FrameFinished, &NullPacket);
+	}

 	if (!FrameFinished)
 		goto Error;
@@ -760,18 +796,18 @@ Done:
 	return 0;
 }

-AVFrameLite *HaaliTSVideoSource::GetFrame(int n, char *ErrorMsg, unsigned MsgSize) {
+AVFrameLite *HaaliVideoSource::GetFrame(int n, char *ErrorMsg, unsigned MsgSize) {
 	// PPFrame always holds frame LastFrameNum even if no PP is applied
 	if (LastFrameNum == n)
 		return OutputFrame(DecodeFrame);

 	bool HasSeeked = false;
+	int SeekOffset = 0;

-	if (n < CurrentFrame || Frames.FindClosestKeyFrame(n) > CurrentFrame) {
-		int64_t dtsp = Frames[n].DTS;
-		pMMC->Seek(Frames[n].DTS, MMSF_PREV_KF);
-		// FIXME for some reason required to make it seek properly
-		//avcodec_flush_buffers(CodecContext);
+	if (n < CurrentFrame || Frames.FindClosestKeyFrame(n) > CurrentFrame + 10) {
+ReSeek:
+		pMMC->Seek(Frames[n + SeekOffset].DTS, MMSF_PREV_KF);
+		avcodec_flush_buffers(CodecContext);
 		HasSeeked = true;
 	}
@@ -784,8 +820,14 @@ AVFrameLite *HaaliTSVideoSource::GetFrame(int n, char *ErrorMsg, unsigned MsgSiz
 		HasSeeked = false;

 		if (StartTime < 0 || (CurrentFrame = Frames.FrameFromDTS(StartTime)) < 0) {
-			_snprintf(ErrorMsg, MsgSize, "Frame accurate seeking is not possible in this file\n");
-			return NULL;
+			// No idea where we are so go back a bit further
+			if (n + SeekOffset == 0) {
+				_snprintf(ErrorMsg, MsgSize, "Frame accurate seeking is not possible in this file\n");
+				return NULL;
+			}
+
+			SeekOffset -= FFMIN(20, n + SeekOffset);
+			goto ReSeek;
 		}
 	}
@@ -796,4 +838,4 @@ AVFrameLite *HaaliTSVideoSource::GetFrame(int n, char *ErrorMsg, unsigned MsgSiz
 	return OutputFrame(DecodeFrame);
 }

-#endif // HAALITS
+#endif // HAALISOURCE
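
The GetFrame() changes above replace the old hard failure in SeekMode=1 with a retry loop: when the first DTS seen after a seek cannot be mapped back to a known frame, the seek target is pushed up to 20 frames further back and the seek is repeated, and the error is only reported once the retry has backed all the way to frame 0. A standalone sketch of that policy, written as a loop instead of a goto; TrySeek stands in for the real seek/flush/decode/lookup sequence and is not part of the commit:

#include <algorithm>
#include <functional>

// Illustrative only: the ReSeek/SeekOffset retry policy added in this commit.
// TrySeek(TargetFrame) should seek to that frame's keyframe DTS, flush the
// decoder, decode forward, and return the frame number actually reached, or
// -1 if the resulting DTS could not be mapped to a frame.
static int SeekWithRetry(int n, int ClosestKF, const std::function<int(int)> &TrySeek) {
	int SeekOffset = 0;
	for (;;) {
		int Landed = TrySeek(std::max(0, ClosestKF + SeekOffset));
		if (Landed >= 0)
			return Landed;                // position identified, decode forward from here
		if (n + SeekOffset == 0)
			return -1;                    // already retried back to frame 0, give up
		// No idea where we are, so move the target up to 20 frames further back.
		SeekOffset -= std::min(20, n + SeekOffset);
	}
}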


@@ -33,7 +33,7 @@ extern "C" {
 #include "utils.h"
 #include "ffms.h"

-#ifdef HAALITS
+#ifdef HAALISOURCE
 #	define _WIN32_DCOM
 #	include <windows.h>
 #	include <tchar.h>
@@ -101,20 +101,20 @@ public:
 	AVFrameLite *GetFrame(int n, char *ErrorMsg, unsigned MsgSize);
 };

-#ifdef HAALITS
+#ifdef HAALISOURCE

-class HaaliTSVideoSource : public VideoBase {
+class HaaliVideoSource : public VideoBase {
 private:
 	CComPtr<IMMContainer> pMMC;

 	void Free(bool CloseCodec);
 	int DecodeNextFrame(AVFrame *AFrame, int64_t *AFirstStartTime, char *ErrorMsg, unsigned MsgSize);
 public:
-	HaaliTSVideoSource(const char *SourceFile, int Track, FrameIndex *TrackIndices, const char *PP, int Threads, char *ErrorMsg, unsigned MsgSize);
-	~HaaliTSVideoSource();
+	HaaliVideoSource(const char *SourceFile, int Track, FrameIndex *TrackIndices, const char *PP, int Threads, int SourceMode, char *ErrorMsg, unsigned MsgSize);
+	~HaaliVideoSource();
 	AVFrameLite *GetFrame(int n, char *ErrorMsg, unsigned MsgSize);
 };

-#endif // HAALITS
+#endif // HAALISOURCE

 #endif


@@ -116,9 +116,14 @@ DEFINE_GUID(MEDIASUBTYPE_WMV3,
 	0x33564d57, 0x0000, 0x0010, 0x80, 0x00, 0x00, 0xAA, 0x00, 0x38, 0x9B, 0x71);

 // FIXME: move somewhere else?
-DEFINE_GUID(Haali_TS_Parser,
+DEFINE_GUID(HAALI_TS_Parser,
 	0xB841F346, 0x4835, 0x4de8, 0xAA, 0x5E, 0x2E, 0x7C, 0xD2, 0xD4, 0xC4, 0x35);

+DEFINE_GUID(HAALI_OGM_Parser,
+	0xDB43B405, 0x43AA, 0x4F01, 0x82, 0xD8, 0xD8, 0x4D, 0x47, 0xE6, 0x01, 0x9C);
+//DB43B405-43AA-4F01-82D8-D84D47E6019C
+
 typedef struct tagVORBISFORMAT2
 {
 	DWORD Channels;


@@ -95,20 +95,20 @@ public:
 	}
 };

-#ifdef HAALITS
-class HaaliTSIndexMemory {
+#ifdef HAALISOURCE
+class HaaliIndexMemory {
 private:
 	int16_t *DecodingBuffer;
 	MatroskaAudioContext *AudioContexts;
 public:
-	HaaliTSIndexMemory(int Tracks, int16_t *&DecodingBuffer, MatroskaAudioContext *&AudioContexts) {
+	HaaliIndexMemory(int Tracks, int16_t *&DecodingBuffer, MatroskaAudioContext *&AudioContexts) {
 		DecodingBuffer = new int16_t[AVCODEC_MAX_AUDIO_FRAME_SIZE*10];
 		AudioContexts = new MatroskaAudioContext[Tracks];
 		this->DecodingBuffer = DecodingBuffer;
 		this->AudioContexts = AudioContexts;
 	}

-	~HaaliTSIndexMemory() {
+	~HaaliIndexMemory() {
 		delete [] DecodingBuffer;
 		delete [] AudioContexts;
 	}
@@ -202,11 +202,13 @@ int WriteIndex(const char *IndexFile, FrameIndex *TrackIndices, char *ErrorMsg,
 	return 0;
 }

-#ifdef HAALITS
-static FrameIndex *MakeHaaliTSIndex(const char *SourceFile, int IndexMask, int DumpMask, const char *AudioFile, bool IgnoreDecodeErrors, IndexCallback IP, void *Private, char *ErrorMsg, unsigned MsgSize) {
+#ifdef HAALISOURCE
+static FrameIndex *MakeHaaliIndex(const char *SourceFile, int IndexMask, int DumpMask, const char *AudioFile, bool IgnoreDecodeErrors, int SourceMode, IndexCallback IP, void *Private, char *ErrorMsg, unsigned MsgSize) {
 	::CoInitializeEx(NULL, COINIT_MULTITHREADED);

-	CLSID clsid = Haali_TS_Parser;
+	CLSID clsid = HAALI_TS_Parser;
+	if (SourceMode == 1)
+		clsid = HAALI_OGM_Parser;

 	CComPtr<IMMContainer> pMMC;
 	if (FAILED(pMMC.CoCreateInstance(clsid))) {
@@ -253,10 +255,12 @@ static FrameIndex *MakeHaaliTSIndex(const char *SourceFile, int IndexMask, int D
 	int16_t *db;
 	MatroskaAudioContext *AudioContexts;
-	HaaliTSIndexMemory IM = HaaliTSIndexMemory(NumTracks, db, AudioContexts);
+	HaaliIndexMemory IM = HaaliIndexMemory(NumTracks, db, AudioContexts);

 	FrameIndex *TrackIndices = new FrameIndex();
 	TrackIndices->Decoder = 2;
+	if (SourceMode == 1)
+		TrackIndices->Decoder = 3;

 	int TrackTypes[32];
 	int CurrentTrack = 0;
@@ -322,6 +326,9 @@ static FrameIndex *MakeHaaliTSIndex(const char *SourceFile, int IndexMask, int D
 	}

 	//
+	AVPacket TempPacket;
+	av_init_packet(&TempPacket);
+
 	for (;;) {
 		if (IP) {
 			if ((*IP)(0, 0, 1, Private)) {
@@ -344,20 +351,18 @@ static FrameIndex *MakeHaaliTSIndex(const char *SourceFile, int IndexMask, int D
 			if (TrackTypes[CurrentTrack] == TT_VIDEO) {
 				(*TrackIndices)[CurrentTrack].push_back(FrameInfo(Ts, pMMF->IsSyncPoint() == S_OK));
 			} else if (TrackTypes[CurrentTrack] == TT_AUDIO && (IndexMask & (1 << CurrentTrack))) {
-/*				(*TrackIndices)[Track].push_back(FrameInfo(AudioContexts[Track].CurrentSample, FilePos, FrameSize, (FrameFlags & FRAME_KF) != 0));
-				ReadFrame(FilePos, FrameSize, AudioContexts[Track].CS, MC, ErrorMsg, MsgSize);
-
-				int Size = FrameSize;
-				uint8_t *Data = MC.Buffer;
-				AVCodecContext *AudioCodecContext = AudioContexts[Track].CTX;
-
-				while (Size > 0) {
+				(*TrackIndices)[CurrentTrack].push_back(FrameInfo(AudioContexts[CurrentTrack].CurrentSample, 0 /* FIXME? */, pMMF->GetActualDataLength(), pMMF->IsSyncPoint() == S_OK));
+				AVCodecContext *AudioCodecContext = AudioContexts[CurrentTrack].CTX;
+				pMMF->GetPointer(&TempPacket.data);
+				TempPacket.size = pMMF->GetActualDataLength();
+
+				while (TempPacket.size > 0) {
 					int dbsize = AVCODEC_MAX_AUDIO_FRAME_SIZE*10;
-					int Ret = avcodec_decode_audio2(AudioCodecContext, db, &dbsize, Data, Size);
+					int Ret = avcodec_decode_audio3(AudioCodecContext, db, &dbsize, &TempPacket);
 					if (Ret < 0) {
 						if (IgnoreDecodeErrors) {
-							(*TrackIndices)[Track].clear();
-							IndexMask &= ~(1 << Track);
+							(*TrackIndices)[CurrentTrack].clear();
+							IndexMask &= ~(1 << CurrentTrack);
 							break;
 						} else {
 							_snprintf(ErrorMsg, MsgSize, "Audio decoding error");
@@ -367,30 +372,28 @@ static FrameIndex *MakeHaaliTSIndex(const char *SourceFile, int IndexMask, int D
 					}

 					if (Ret > 0) {
-						Size -= Ret;
-						Data += Ret;
+						TempPacket.size -= Ret;
+						TempPacket.data += Ret;
 					}

 					if (dbsize > 0)
-						AudioContexts[Track].CurrentSample += (dbsize * 8) / (av_get_bits_per_sample_format(AudioCodecContext->sample_fmt) * AudioCodecContext->channels);
+						AudioContexts[CurrentTrack].CurrentSample += (dbsize * 8) / (av_get_bits_per_sample_format(AudioCodecContext->sample_fmt) * AudioCodecContext->channels);

-					if (dbsize > 0 && (DumpMask & (1 << Track))) {
+					if (dbsize > 0 && (DumpMask & (1 << CurrentTrack))) {
 						// Delay writer creation until after an audio frame has been decoded. This ensures that all parameters are known when writing the headers.
-						if (!AudioContexts[Track].W64W) {
+						if (!AudioContexts[CurrentTrack].W64W) {
 							char ABuf[50];
 							std::string WN(AudioFile);
-							int Offset = StartTime * mkv_TruncFloat(mkv_GetTrackInfo(MF, Track)->TimecodeScale) / (double)1000000;
-							_snprintf(ABuf, sizeof(ABuf), ".%02d.delay.%d.w64", Track, Offset);
+							_snprintf(ABuf, sizeof(ABuf), ".%02d.delay.%d.w64", CurrentTrack, 0);
 							WN += ABuf;

-							AudioContexts[Track].W64W = new Wave64Writer(WN.c_str(), av_get_bits_per_sample_format(AudioCodecContext->sample_fmt),
+							AudioContexts[CurrentTrack].W64W = new Wave64Writer(WN.c_str(), av_get_bits_per_sample_format(AudioCodecContext->sample_fmt),
 								AudioCodecContext->channels, AudioCodecContext->sample_rate, AudioFMTIsFloat(AudioCodecContext->sample_fmt));
 						}

-						AudioContexts[Track].W64W->WriteData(db, dbsize);
+						AudioContexts[CurrentTrack].W64W->WriteData(db, dbsize);
 					}
 				}
-*/
 			}
 		}
@@ -480,6 +483,8 @@ static FrameIndex *MakeMatroskaIndex(const char *SourceFile, int IndexMask, int
 	ulonglong StartTime, EndTime, FilePos;
 	unsigned int Track, FrameFlags, FrameSize;
+	AVPacket TempPacket;
+	av_init_packet(&TempPacket);

 	while (mkv_ReadFrame(MF, 0, &Track, &StartTime, &EndTime, &FilePos, &FrameSize, &FrameFlags) == 0) {
 		// Update progress
@@ -497,14 +502,13 @@ static FrameIndex *MakeMatroskaIndex(const char *SourceFile, int IndexMask, int
 		} else if (mkv_GetTrackInfo(MF, Track)->Type == TT_AUDIO && (IndexMask & (1 << Track))) {
 			(*TrackIndices)[Track].push_back(FrameInfo(AudioContexts[Track].CurrentSample, FilePos, FrameSize, (FrameFlags & FRAME_KF) != 0));
 			ReadFrame(FilePos, FrameSize, AudioContexts[Track].CS, MC, ErrorMsg, MsgSize);
-
-			int Size = FrameSize;
-			uint8_t *Data = MC.Buffer;
 			AVCodecContext *AudioCodecContext = AudioContexts[Track].CTX;
+			TempPacket.data = MC.Buffer;
+			TempPacket.size = FrameSize;

-			while (Size > 0) {
+			while (TempPacket.size > 0) {
 				int dbsize = AVCODEC_MAX_AUDIO_FRAME_SIZE*10;
-				int Ret = avcodec_decode_audio2(AudioCodecContext, db, &dbsize, Data, Size);
+				int Ret = avcodec_decode_audio3(AudioCodecContext, db, &dbsize, &TempPacket);
 				if (Ret < 0) {
 					if (IgnoreDecodeErrors) {
 						(*TrackIndices)[Track].clear();
@@ -518,8 +522,8 @@ static FrameIndex *MakeMatroskaIndex(const char *SourceFile, int IndexMask, int
 				}

 				if (Ret > 0) {
-					Size -= Ret;
-					Data += Ret;
+					TempPacket.size -= Ret;
+					TempPacket.data += Ret;
 				}

 				if (dbsize > 0)
@@ -563,11 +567,16 @@ FrameIndex *MakeIndex(const char *SourceFile, int IndexMask, int DumpMask, const
 		return MakeMatroskaIndex(SourceFile, IndexMask, DumpMask, AudioFile, IgnoreDecodeErrors, IP, Private, ErrorMsg, MsgSize);
 	}

-#ifdef HAALITS
+#ifdef HAALISOURCE
 	// Do haali ts indexing instead?
-	if (!strcmp(FormatContext->iformat->name, "mpegts")) {
+	if (!strcmp(FormatContext->iformat->name, "mpeg") || !strcmp(FormatContext->iformat->name, "mpegts")) {
 		av_close_input_file(FormatContext);
-		return MakeHaaliTSIndex(SourceFile, IndexMask, DumpMask, AudioFile, IgnoreDecodeErrors, IP, Private, ErrorMsg, MsgSize);
+		return MakeHaaliIndex(SourceFile, IndexMask, DumpMask, AudioFile, IgnoreDecodeErrors, 0, IP, Private, ErrorMsg, MsgSize);
+	}
+
+	if (!strcmp(FormatContext->iformat->name, "ogg")) {
+		av_close_input_file(FormatContext);
+		return MakeHaaliIndex(SourceFile, IndexMask, DumpMask, AudioFile, IgnoreDecodeErrors, 1, IP, Private, ErrorMsg, MsgSize);
 	}
 #endif
@@ -614,7 +623,9 @@ FrameIndex *MakeIndex(const char *SourceFile, int IndexMask, int DumpMask, const
 			FormatContext->streams[i]->time_base.den,
 			FormatContext->streams[i]->codec->codec_type));

-	AVPacket Packet;
+	AVPacket Packet, TempPacket;
+	av_init_packet(&Packet);
+	av_init_packet(&TempPacket);
 	while (av_read_frame(FormatContext, &Packet) >= 0) {
 		// Update progress
 		if (IP) {
@@ -631,12 +642,12 @@ FrameIndex *MakeIndex(const char *SourceFile, int IndexMask, int DumpMask, const
 		} else if (FormatContext->streams[Packet.stream_index]->codec->codec_type == CODEC_TYPE_AUDIO && (IndexMask & (1 << Packet.stream_index))) {
 			(*TrackIndices)[Packet.stream_index].push_back(FrameInfo(Packet.dts, AudioContexts[Packet.stream_index].CurrentSample, (Packet.flags & PKT_FLAG_KEY) ? 1 : 0));
 			AVCodecContext *AudioCodecContext = FormatContext->streams[Packet.stream_index]->codec;
-			int Size = Packet.size;
-			uint8_t *Data = Packet.data;
+			TempPacket.data = Packet.data;
+			TempPacket.size = Packet.size;

-			while (Size > 0) {
+			while (TempPacket.size > 0) {
 				int dbsize = AVCODEC_MAX_AUDIO_FRAME_SIZE*10;
-				int Ret = avcodec_decode_audio2(AudioCodecContext, db, &dbsize, Data, Size);
+				int Ret = avcodec_decode_audio3(AudioCodecContext, db, &dbsize, &TempPacket);
 				if (Ret < 0) {
 					if (IgnoreDecodeErrors) {
 						(*TrackIndices)[Packet.stream_index].clear();
@@ -650,8 +661,8 @@ FrameIndex *MakeIndex(const char *SourceFile, int IndexMask, int DumpMask, const
 				}

 				if (Ret > 0) {
-					Size -= Ret;
-					Data += Ret;
+					TempPacket.size -= Ret;
+					TempPacket.data += Ret;
 				}

 				if (dbsize > 0)
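
With the new SourceMode parameter, MakeIndex() hands mpeg ps/ts off to Haali's TS parser and ogg to the OGM parser, and records which backend built the index in TrackIndices->Decoder (2 and 3 respectively) so FFMS_CreateVideoSource() can later construct the matching HaaliVideoSource. A condensed sketch of that dispatch; the helper itself is illustrative, with FormatName standing in for FormatContext->iformat->name:

#include <cstring>

// Illustrative only: the container dispatch added to MakeIndex(), condensed.
// Return value follows the TrackIndices->Decoder convention
// (0 = generic libavformat indexing, 2 = Haali TS/PS, 3 = Haali OGM).
static int PickHaaliIndexer(const char *FormatName) {
#ifdef HAALISOURCE
	if (!strcmp(FormatName, "mpeg") || !strcmp(FormatName, "mpegts"))
		return 2;   // MakeHaaliIndex(..., SourceMode = 0, ...)
	if (!strcmp(FormatName, "ogg"))
		return 3;   // MakeHaaliIndex(..., SourceMode = 1, ...)
#endif
	return 0;       // fall through to the libavformat indexing loop
}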


@@ -25,7 +25,7 @@
 #include "utils.h"
 #include "ffms.h"

-#define INDEXVERSION 15
+#define INDEXVERSION 17
 #define INDEXID 0x53920873

 struct IndexHeader {