Fix an old bug that would drop the first or second video frame
Greatly improve audio decoding by adding a small internal cache

Originally committed to SVN as r2894.
Fredrik Mellbin 2009-05-03 19:25:54 +00:00
parent c84a79fb7f
commit b55460ede5
8 changed files with 216 additions and 78 deletions
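
The audio half of the change is the new TAudioCache added in the first file below: GetAudio() now serves as much of a request as possible from recently decoded blocks and only seeks when the decoder is not already positioned at the first missing sample. As a rough sketch, condensed from the diff (simplified, error handling omitted, all names are the ones this commit introduces):

	// Once per source, after the first block has been decoded and the format is known:
	AudioCache.Initialize((AP.Channels * AP.BitsPerSample) / 8, 50);  // bytes per sample, keep up to 50 blocks

	// On every GetAudio(Buf, Start, Count):
	int64_t CacheEnd = AudioCache.FillRequest(Start, Count, DstBuf);  // copy whatever is already cached
	if (CacheEnd == Start + Count)
		return 0;                                                 // fully served: no seek, no decode
	if (CurrentSample != CacheEnd)
		; // seek back a few blocks and resynchronize, as in the old code
	do {                                                              // decode forward from CacheEnd
		DecodeNextAudioBlock(DecodingBuffer, &DecodeCount, ErrorMsg, MsgSize);
		AudioCache.CacheBlock(Frames[CurrentAudioBlock].SampleStart, DecodeCount, DecodingBuffer);
		CacheEnd = AudioCache.FillRequest(CacheEnd, Start + Count - CacheEnd, DstBuf + (CacheEnd - Start) * SizeConst);
	} while (Start + Count - CacheEnd > 0 && CurrentAudioBlock < Frames.size());

The real code also holds back caching for 15 warm-up blocks after a seek (PreDecBlocks) so that garbage produced right after avcodec_flush_buffers() never ends up in the cache.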

View File

@@ -25,7 +25,79 @@
#define _snprintf snprintf
#endif
TAudioBlock::TAudioBlock(int64_t Start, int64_t Samples, uint8_t *SrcData, int64_t SrcBytes) {
this->Start = Start;
this->Samples = Samples;
Data = new uint8_t[SrcBytes];
memcpy(Data, SrcData, SrcBytes);
}
TAudioBlock::~TAudioBlock() {
delete[] Data;
}
TAudioCache::TAudioCache() {
MaxCacheBlocks = 0;
BytesPerSample = 0;
}
TAudioCache::~TAudioCache() {
for (TAudioCache::iterator it=begin(); it != end(); it++)
delete *it;
}
void TAudioCache::Initialize(int BytesPerSample, int MaxCacheBlocks) {
this->BytesPerSample = BytesPerSample;
this->MaxCacheBlocks = MaxCacheBlocks;
}
void TAudioCache::CacheBlock(int64_t Start, int64_t Samples, uint8_t *SrcData) {
if (BytesPerSample > 0) {
for (TAudioCache::iterator it=begin(); it != end(); it++) {
if ((*it)->Start == Start) {
delete *it;
erase(it);
break;
}
}
push_front(new TAudioBlock(Start, Samples, SrcData, Samples * BytesPerSample));
if (size() >= MaxCacheBlocks) {
delete back();
pop_back();
}
}
}
bool TAudioCache::AudioBlockComp(TAudioBlock *A, TAudioBlock *B) {
return A->Start < B->Start;
}
int64_t TAudioCache::FillRequest(int64_t Start, int64_t Samples, uint8_t *Dst) {
// May be better to move used blocks to the front
std::list<TAudioBlock *> UsedBlocks;
for (TAudioCache::iterator it=begin(); it != end(); it++) {
int64_t SrcOffset = FFMAX(0, Start - (*it)->Start);
int64_t DstOffset = FFMAX(0, (*it)->Start - Start);
int64_t CopySamples = FFMIN((*it)->Samples - SrcOffset, Samples - DstOffset);
if (CopySamples > 0) {
memcpy(Dst + DstOffset * BytesPerSample, (*it)->Data + SrcOffset * BytesPerSample, CopySamples * BytesPerSample);
UsedBlocks.push_back(*it);
}
}
UsedBlocks.sort(AudioBlockComp);
int64_t Ret = Start;
for (std::list<TAudioBlock *>::iterator it = UsedBlocks.begin(); it != UsedBlocks.end(); it++) {
if (it == UsedBlocks.begin() || Ret == (*it)->Start)
Ret = (*it)->Start + (*it)->Samples;
else
break;
}
return FFMIN(Ret, Start + Samples);
}
AudioBase::AudioBase() {
CurrentSample = 0;
DecodingBuffer = new uint8_t[AVCODEC_MAX_AUDIO_FRAME_SIZE * 10];
};
@@ -88,11 +160,14 @@ FFAudioSource::FFAudioSource(const char *SourceFile, int Track, FrameIndex *Trac
}
// Always try to decode a frame to make sure all required parameters are known
-uint8_t DummyBuf[512];
-if (GetAudio(DummyBuf, 0, 1, ErrorMsg, MsgSize)) {
int64_t Dummy;
if (DecodeNextAudioBlock(DecodingBuffer, &Dummy, ErrorMsg, MsgSize) < 0) {
Free(true);
throw ErrorMsg;
}
av_seek_frame(FormatContext, AudioTrack, Frames[0].DTS, AVSEEK_FLAG_BACKWARD);
avcodec_flush_buffers(CodecContext);
AP.BitsPerSample = av_get_bits_per_sample_format(CodecContext->sample_fmt);
AP.Channels = CodecContext->channels;;
@@ -105,6 +180,8 @@ FFAudioSource::FFAudioSource(const char *SourceFile, int Track, FrameIndex *Trac
_snprintf(ErrorMsg, MsgSize, "Codec returned zero size audio");
throw ErrorMsg;
}
AudioCache.Initialize((AP.Channels *AP.BitsPerSample) / 8, 50);
}
int FFAudioSource::DecodeNextAudioBlock(uint8_t *Buf, int64_t *Count, char *ErrorMsg, unsigned MsgSize) {
@@ -112,8 +189,8 @@ int FFAudioSource::DecodeNextAudioBlock(uint8_t *Buf, int64_t *Count, char *Erro
int Ret = -1;
*Count = 0;
AVPacket Packet, TempPacket;
-init_null_packet(&Packet);
-init_null_packet(&TempPacket);
InitNullPacket(&Packet);
InitNullPacket(&TempPacket);
while (av_read_frame(FormatContext, &Packet) >= 0) {
if (Packet.stream_index == AudioTrack) {
@@ -151,59 +228,76 @@ Done:
int FFAudioSource::GetAudio(void *Buf, int64_t Start, int64_t Count, char *ErrorMsg, unsigned MsgSize) {
const int64_t SizeConst = (av_get_bits_per_sample_format(CodecContext->sample_fmt) * CodecContext->channels) / 8;
-size_t CurrentAudioBlock = FFMAX((int64_t)FindClosestAudioKeyFrame(Start) - 50, (int64_t)0);
memset(Buf, 0, SizeConst * Count);
-AVPacket Packet;
-avcodec_flush_buffers(CodecContext);
-av_seek_frame(FormatContext, AudioTrack, Frames[CurrentAudioBlock].DTS, AVSEEK_FLAG_BACKWARD);
-// Establish where we actually are
-// Trigger on packet dts difference since groups can otherwise be indistinguishable
-int64_t LastDTS = - 1;
-while (av_read_frame(FormatContext, &Packet) >= 0) {
-if (Packet.stream_index == AudioTrack) {
-if (LastDTS < 0) {
-LastDTS = Packet.dts;
-} else if (LastDTS != Packet.dts) {
-for (size_t i = 0; i < Frames.size(); i++)
-if (Frames[i].DTS == Packet.dts) {
-// The current match was consumed
-CurrentAudioBlock = i + 1;
-break;
-}
-av_free_packet(&Packet);
-break;
-}
-}
-av_free_packet(&Packet);
-}
-uint8_t *DstBuf = (uint8_t *)Buf;
-int64_t RemainingSamples = Count;
int PreDecBlocks = 0;
uint8_t *DstBuf = static_cast<uint8_t *>(Buf);
// Fill with everything in the cache
int64_t CacheEnd = AudioCache.FillRequest(Start, Count, DstBuf);
// Was everything in the cache?
if (CacheEnd == Start + Count)
return 0;
size_t CurrentAudioBlock;
// Is seeking required to decode the requested samples?
// if (!(CurrentSample >= Start && CurrentSample <= CacheEnd)) {
if (CurrentSample != CacheEnd) {
PreDecBlocks = 15;
CurrentAudioBlock = FFMAX((int64_t)FindClosestAudioKeyFrame(CacheEnd) - PreDecBlocks - 20, (int64_t)0);
av_seek_frame(FormatContext, AudioTrack, Frames[CurrentAudioBlock].DTS, AVSEEK_FLAG_BACKWARD);
avcodec_flush_buffers(CodecContext);
AVPacket Packet;
InitNullPacket(&Packet);
// Establish where we actually are
// Trigger on packet dts difference since groups can otherwise be indistinguishable
int64_t LastDTS = - 1;
while (av_read_frame(FormatContext, &Packet) >= 0) {
if (Packet.stream_index == AudioTrack) {
if (LastDTS < 0) {
LastDTS = Packet.dts;
} else if (LastDTS != Packet.dts) {
for (size_t i = 0; i < Frames.size(); i++)
if (Frames[i].DTS == Packet.dts) {
// The current match was consumed
CurrentAudioBlock = i + 1;
break;
}
av_free_packet(&Packet);
break;
}
}
av_free_packet(&Packet);
}
} else {
CurrentAudioBlock = FindClosestAudioKeyFrame(CurrentSample);
}
int64_t DecodeCount;
do {
-int64_t DecodeStart = Frames[CurrentAudioBlock].SampleStart;
int Ret = DecodeNextAudioBlock(DecodingBuffer, &DecodeCount, ErrorMsg, MsgSize);
if (Ret < 0) {
// FIXME
//Env->ThrowError("Bleh, bad audio decoding");
}
// Cache the block if enough blocks before it have been decoded to avoid garbage
if (PreDecBlocks == 0) {
AudioCache.CacheBlock(Frames[CurrentAudioBlock].SampleStart, DecodeCount, DecodingBuffer);
CacheEnd = AudioCache.FillRequest(CacheEnd, Start + Count - CacheEnd, DstBuf + (CacheEnd - Start) * SizeConst);
} else {
PreDecBlocks--;
}
CurrentAudioBlock++;
-int64_t OffsetBytes = SizeConst * FFMAX(0, Start - DecodeStart);
-int64_t CopyBytes = FFMAX(0, SizeConst * FFMIN(RemainingSamples, DecodeCount - FFMAX(0, Start - DecodeStart)));
-memcpy(DstBuf, DecodingBuffer + OffsetBytes, CopyBytes);
-DstBuf += CopyBytes;
-if (SizeConst)
-RemainingSamples -= CopyBytes / SizeConst;
-} while (RemainingSamples > 0 && CurrentAudioBlock < Frames.size());
if (CurrentAudioBlock < Frames.size())
CurrentSample = Frames[CurrentAudioBlock].SampleStart;
} while (Start + Count - CacheEnd > 0 && CurrentAudioBlock < Frames.size());
return 0;
}
@@ -282,11 +376,12 @@ MatroskaAudioSource::MatroskaAudioSource(const char *SourceFile, int Track, Fram
}
// Always try to decode a frame to make sure all required parameters are known
-uint8_t DummyBuf[512];
-if (GetAudio(DummyBuf, 0, 1, ErrorMsg, MsgSize)) {
int64_t Dummy;
if (DecodeNextAudioBlock(DecodingBuffer, &Dummy, Frames[0].FilePos, Frames[0].FrameSize, ErrorMsg, MsgSize) < 0) {
Free(true);
throw ErrorMsg;
}
avcodec_flush_buffers(CodecContext);
AP.BitsPerSample = av_get_bits_per_sample_format(CodecContext->sample_fmt);
AP.Channels = CodecContext->channels;;
@@ -299,6 +394,8 @@ MatroskaAudioSource::MatroskaAudioSource(const char *SourceFile, int Track, Fram
_snprintf(ErrorMsg, MsgSize, "Codec returned zero size audio");
throw ErrorMsg;
}
AudioCache.Initialize((AP.Channels *AP.BitsPerSample) / 8, 50);
}
MatroskaAudioSource::~MatroskaAudioSource() {
@@ -307,34 +404,49 @@ MatroskaAudioSource::~MatroskaAudioSource() {
int MatroskaAudioSource::GetAudio(void *Buf, int64_t Start, int64_t Count, char *ErrorMsg, unsigned MsgSize) {
const int64_t SizeConst = (av_get_bits_per_sample_format(CodecContext->sample_fmt) * CodecContext->channels) / 8;
-size_t CurrentAudioBlock = FFMAX((int64_t)FindClosestAudioKeyFrame(Start) - 10, (int64_t)0);
-avcodec_flush_buffers(CodecContext);
memset(Buf, 0, SizeConst * Count);
-uint8_t *DstBuf = (uint8_t *)Buf;
-int64_t RemainingSamples = Count;
int PreDecBlocks = 0;
uint8_t *DstBuf = static_cast<uint8_t *>(Buf);
// Fill with everything in the cache
int64_t CacheEnd = AudioCache.FillRequest(Start, Count, DstBuf);
// Was everything in the cache?
if (CacheEnd == Start + Count)
return 0;
size_t CurrentAudioBlock;
// Is seeking required to decode the requested samples?
// if (!(CurrentSample >= Start && CurrentSample <= CacheEnd)) {
if (CurrentSample != CacheEnd) {
PreDecBlocks = 15;
CurrentAudioBlock = FFMAX((int64_t)FindClosestAudioKeyFrame(CacheEnd) - PreDecBlocks, (int64_t)0);
avcodec_flush_buffers(CodecContext);
} else {
CurrentAudioBlock = FindClosestAudioKeyFrame(CurrentSample);
}
int64_t DecodeCount;
do {
-int64_t DecodeStart = Frames[CurrentAudioBlock].SampleStart;
int Ret = DecodeNextAudioBlock(DecodingBuffer, &DecodeCount, Frames[CurrentAudioBlock].FilePos, Frames[CurrentAudioBlock].FrameSize, ErrorMsg, MsgSize);
if (Ret < 0) {
// FIXME
//Env->ThrowError("Bleh, bad audio decoding");
}
// Cache the block if enough blocks before it have been decoded to avoid garbage
if (PreDecBlocks == 0) {
AudioCache.CacheBlock(Frames[CurrentAudioBlock].SampleStart, DecodeCount, DecodingBuffer);
CacheEnd = AudioCache.FillRequest(CacheEnd, Start + Count - CacheEnd, DstBuf + (CacheEnd - Start) * SizeConst);
} else {
PreDecBlocks--;
}
CurrentAudioBlock++;
-int64_t OffsetBytes = SizeConst * FFMAX(0, Start - DecodeStart);
-int64_t CopyBytes = FFMAX(0, SizeConst * FFMIN(RemainingSamples, DecodeCount - FFMAX(0, Start - DecodeStart)));
-memcpy(DstBuf, DecodingBuffer + OffsetBytes, CopyBytes);
-DstBuf += CopyBytes;
-if (SizeConst)
-RemainingSamples -= CopyBytes / SizeConst;
-} while (RemainingSamples > 0 && CurrentAudioBlock < Frames.size());
if (CurrentAudioBlock < Frames.size())
CurrentSample = Frames[CurrentAudioBlock].SampleStart;
} while (Start + Count - CacheEnd > 0 && CurrentAudioBlock < Frames.size());
return 0;
}
@@ -344,7 +456,7 @@ int MatroskaAudioSource::DecodeNextAudioBlock(uint8_t *Buf, int64_t *Count, uint
int Ret = -1;
*Count = 0;
AVPacket TempPacket;
-init_null_packet(&TempPacket);
InitNullPacket(&TempPacket);
// FIXME check return
ReadFrame(FilePos, FrameSize, CS, MC, ErrorMsg, MsgSize);
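
A note on the FillRequest() implementation at the top of this file: it copies every cached block that overlaps the request into Dst, then walks the used blocks in start order and returns the first sample that is not contiguously filled from Start onwards, capped at Start + Samples; GetAudio() compares that value against Start + Count to decide whether decoding is still needed. A small illustration, assuming the TAudioCache defined above plus <vector>, with invented block sizes and positions:

	TAudioCache Cache;
	Cache.Initialize(2, 50);                  // e.g. 16-bit mono, keep at most 50 blocks
	std::vector<uint8_t> Block(1000 * 2, 0);  // one 1000-sample block of silence
	Cache.CacheBlock(0, 1000, &Block[0]);
	Cache.CacheBlock(1000, 1000, &Block[0]);
	Cache.CacheBlock(3000, 1000, &Block[0]);  // gap: samples 2000-2999 were never cached

	std::vector<uint8_t> Dst(3000 * 2);
	int64_t End = Cache.FillRequest(500, 3000, &Dst[0]);
	// End == 2000: samples 500-1999 are contiguously filled. The block at 3000 was also
	// copied into Dst, but the caller still has to decode from sample 2000 onwards.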

View File

@@ -27,6 +27,8 @@ extern "C" {
}
#include <vector>
#include <list>
#include <memory>
#include "indexing.h" #include "indexing.h"
#include "utils.h" #include "utils.h"
#include "ffms.h" #include "ffms.h"
@ -42,8 +44,33 @@ extern "C" {
# include "guids.h" # include "guids.h"
#endif #endif
class TAudioBlock {
public:
int64_t Start;
int64_t Samples;
uint8_t *Data;
TAudioBlock(int64_t Start, int64_t Samples, uint8_t *SrcData, int64_t SrcBytes);
~TAudioBlock();
};
class TAudioCache : protected std::list<TAudioBlock *> {
private:
int MaxCacheBlocks;
int BytesPerSample;
static bool AudioBlockComp(TAudioBlock *A, TAudioBlock *B);
public:
TAudioCache();
~TAudioCache();
void Initialize(int BytesPerSample, int MaxCacheBlocks);
void CacheBlock(int64_t Start, int64_t Samples, uint8_t *SrcData);
int64_t FillRequest(int64_t Start, int64_t Samples, uint8_t *Dst);
};
class AudioBase {
protected:
TAudioCache AudioCache;
int64_t CurrentSample;
uint8_t *DecodingBuffer;
FrameInfoVector Frames;
AVCodecContext *CodecContext;
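
One detail of the class declared here: the default constructor leaves BytesPerSample at 0 and CacheBlock() is guarded on it, so the cache silently ignores blocks until Initialize() has been called. That is why both audio source constructors call Initialize() only after the first block has been decoded and the sample format is known. A tiny sketch with made-up numbers:

	uint8_t Silence[1024] = {0};
	TAudioCache Cache;
	Cache.CacheBlock(0, 256, Silence);  // dropped: Initialize() has not been called yet
	Cache.Initialize(4, 50);            // e.g. 16-bit stereo: 4 bytes per sample, at most 50 blocks
	Cache.CacheBlock(0, 256, Silence);  // now stored: 256 samples * 4 bytes, starting at sample 0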

View File

@@ -234,6 +234,8 @@ Note that --enable-w32threads is required for multithreaded decoding to work.
<h2>Changes</h2>
<ul>
<li>2.00 beta 8<ul>
<li>Improved the audio decoding quality a lot by adding a simple cache, no more seeking is done when playing a file linearly and pops and other artifacts should be much more uncommon</li>
<li>Fixed a bug that would most of the time drop frame 0 and sometimes frame 1</li>
<li>Updated Haali's matroska parser code to the latest version</li> <li>Updated Haali's matroska parser code to the latest version</li>
<li>Updated FFmpeg to rev X</li> <li>Updated FFmpeg to rev X</li>
</ul></li> </ul></li>

View File

@@ -87,8 +87,8 @@ VideoBase::VideoBase() {
PPContext = NULL;
PPMode = NULL;
SWS = NULL;
-LastFrameNum = -1;
-CurrentFrame = 0;
LastFrameNum = 0;
CurrentFrame = 1;
CodecContext = NULL;
DecodeFrame = avcodec_alloc_frame();
PPFrame = DecodeFrame;
@@ -269,7 +269,6 @@ FFVideoSource::FFVideoSource(const char *SourceFile, int Track, FrameIndex *Trac
// Cannot "output" to PPFrame without doing all other initialization
// This is the additional mess required for seekmode=-1 to work in a reasonable way
OutputFrame(DecodeFrame);
-LastFrameNum = 0;
// Set AR variables
VP.SARNum = CodecContext->sample_aspect_ratio.num;
@@ -282,7 +281,7 @@ FFVideoSource::~FFVideoSource() {
int FFVideoSource::DecodeNextFrame(AVFrame *AFrame, int64_t *AStartTime, char *ErrorMsg, unsigned MsgSize) {
AVPacket Packet;
-init_null_packet(&Packet);
InitNullPacket(&Packet);
int FrameFinished = 0;
*AStartTime = -1;
@@ -303,7 +302,7 @@ int FFVideoSource::DecodeNextFrame(AVFrame *AFrame, int64_t *AStartTime, char *E
// Flush the last frames
if (CodecContext->has_b_frames) {
AVPacket NullPacket;
-init_null_packet(&NullPacket);
InitNullPacket(&NullPacket);
avcodec_decode_video2(CodecContext, AFrame, &FrameFinished, &NullPacket);
}
@@ -495,7 +494,6 @@ MatroskaVideoSource::MatroskaVideoSource(const char *SourceFile, int Track,
// Output the already decoded frame so it isn't wasted
OutputFrame(DecodeFrame);
-LastFrameNum = 0;
// Set AR variables
VP.SARNum = TI->AV.Video.DisplayWidth * TI->AV.Video.PixelHeight;
@@ -516,7 +514,7 @@ int MatroskaVideoSource::DecodeNextFrame(AVFrame *AFrame, int64_t *AFirstStartTi
int FrameFinished = 0;
*AFirstStartTime = -1;
AVPacket Packet;
-init_null_packet(&Packet);
InitNullPacket(&Packet);
ulonglong StartTime, EndTime, FilePos;
unsigned int Track, FrameFlags, FrameSize;
@@ -543,7 +541,7 @@ int MatroskaVideoSource::DecodeNextFrame(AVFrame *AFrame, int64_t *AFirstStartTi
// Flush the last frames
if (CodecContext->has_b_frames) {
AVPacket NullPacket;
-init_null_packet(&NullPacket);
InitNullPacket(&NullPacket);
avcodec_decode_video2(CodecContext, AFrame, &FrameFinished, &NullPacket);
}
@@ -735,7 +733,6 @@ HaaliVideoSource::HaaliVideoSource(const char *SourceFile, int Track,
// Output the already decoded frame so it isn't wasted
OutputFrame(DecodeFrame);
-LastFrameNum = 0;
// Set AR variables
CComVariant pV;
@@ -756,7 +753,7 @@ int HaaliVideoSource::DecodeNextFrame(AVFrame *AFrame, int64_t *AFirstStartTime,
int FrameFinished = 0;
*AFirstStartTime = -1;
AVPacket Packet;
-init_null_packet(&Packet);
InitNullPacket(&Packet);
for (;;) {
CComPtr<IMMFrame> pMMF;
@@ -789,7 +786,7 @@ int HaaliVideoSource::DecodeNextFrame(AVFrame *AFrame, int64_t *AFirstStartTime,
// Flush the last frames
if (CodecContext->has_b_frames) {
AVPacket NullPacket;
-init_null_packet(&NullPacket);
InitNullPacket(&NullPacket);
avcodec_decode_video2(CodecContext, AFrame, &FrameFinished, &NullPacket);
}
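
The video part of the commit is the counter change at the top of this file: the source constructors already decode and output frame 0 (the OutputFrame(DecodeFrame) calls above), so VideoBase now starts with LastFrameNum = 0 and CurrentFrame = 1 instead of -1 and 0, and the per-source LastFrameNum = 0 assignments become redundant. The GetFrame seek logic itself is not in this diff; the sketch below is a hypothetical reconstruction of the kind of bookkeeping these counters feed, only to show how the old initial values could lose the first or second frame:

	// Hypothetical, not code from this project.
	int LastFrameNum = 0;  // frame already sitting in the output buffer after construction
	int CurrentFrame = 1;  // next frame the decoder will hand out

	void Request(int N) {
		if (N == LastFrameNum)
			return;            // reuse the buffered frame, nothing to decode
		while (CurrentFrame <= N) {
			DecodeNext();      // hypothetical stand-in for DecodeNextFrame()
			CurrentFrame++;
		}
		LastFrameNum = N;
	}

	// With the old values (LastFrameNum = -1, CurrentFrame = 0) this bookkeeping does not
	// know that frame 0 was already consumed during construction, so the first requests
	// can come back one frame off, matching the "drops frame 0, sometimes frame 1"
	// symptom described above.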

View File

@@ -332,7 +332,7 @@ static FrameIndex *MakeHaaliIndex(const char *SourceFile, int IndexMask, int Dum
//
AVPacket TempPacket;
-init_null_packet(&TempPacket);
InitNullPacket(&TempPacket);
for (;;) {
if (IP) {
@@ -489,7 +489,7 @@ static FrameIndex *MakeMatroskaIndex(const char *SourceFile, int IndexMask, int
ulonglong StartTime, EndTime, FilePos;
unsigned int Track, FrameFlags, FrameSize;
AVPacket TempPacket;
-init_null_packet(&TempPacket);
InitNullPacket(&TempPacket);
while (mkv_ReadFrame(MF, 0, &Track, &StartTime, &EndTime, &FilePos, &FrameSize, &FrameFlags) == 0) {
// Update progress
@@ -629,8 +629,8 @@ FrameIndex *MakeIndex(const char *SourceFile, int IndexMask, int DumpMask, const
FormatContext->streams[i]->codec->codec_type));
AVPacket Packet, TempPacket;
-init_null_packet(&Packet);
-init_null_packet(&TempPacket);
InitNullPacket(&Packet);
InitNullPacket(&TempPacket);
while (av_read_frame(FormatContext, &Packet) >= 0) {
// Update progress
if (IP) {

View File

@@ -25,7 +25,7 @@
#include "utils.h"
#include "ffms.h"
-#define INDEXVERSION 18
#define INDEXVERSION 20
#define INDEXID 0x53920873
struct IndexHeader {

View File

@@ -131,7 +131,7 @@ bool AudioFMTIsFloat(SampleFormat FMT){
}
}
-void init_null_packet(AVPacket *pkt) {
void InitNullPacket(AVPacket *pkt) {
av_init_packet(pkt);
pkt->data = NULL;
pkt->size = 0;

View File

@@ -62,7 +62,7 @@ public:
int GetCPUFlags();
int ReadFrame(uint64_t FilePos, unsigned int &FrameSize, CompressedStream *CS, MatroskaReaderContext &Context, char *ErrorMsg, unsigned MsgSize);
bool AudioFMTIsFloat(SampleFormat FMT);
-void init_null_packet(AVPacket *pkt);
void InitNullPacket(AVPacket *pkt);
#ifdef HAALISOURCE
unsigned vtSize(VARIANT &vt);
void vtCopy(VARIANT& vt,void *dest);