mirror of https://github.com/odrling/Aegisub
FFmpegSource2: more audio sourcing
Originally committed to SVN as r2392.
parent 7d75796ee1
commit fc878f0027

@@ -41,6 +41,208 @@ size_t AudioBase::FindClosestAudioKeyFrame(int64_t Sample) {
    return Frames.size() - 1;
}

int FFAudioSource::GetTrackIndex(int &Index, char *ErrorMsg, unsigned MsgSize) {
    if (Index < 0) {
        Index = -1;
        for (unsigned int i = 0; i < FormatContext->nb_streams; i++)
            if (FormatContext->streams[i]->codec->codec_type == CODEC_TYPE_AUDIO) {
                Index = i;
                break;
            }
    }

    if (Index < 0) {
        _snprintf(ErrorMsg, MsgSize, "No audio track found");
        return 1;
    }

    if (Index >= (int)FormatContext->nb_streams) {
        _snprintf(ErrorMsg, MsgSize, "Invalid audio track number");
        return 2;
    }

    if (FormatContext->streams[Index]->codec->codec_type != CODEC_TYPE_AUDIO) {
        _snprintf(ErrorMsg, MsgSize, "Selected track is not audio");
        return 3;
    }

    return 0;
}

void FFAudioSource::Free(bool CloseCodec) {
    if (CloseCodec)
        avcodec_close(CodecContext);
    av_close_input_file(FormatContext);
}

FFAudioSource::FFAudioSource(const char *SourceFile, int Track, FrameIndex *TrackIndices, char *ErrorMsg, unsigned MsgSize) {
    FormatContext = NULL;
    AVCodec *Codec = NULL;

    if (av_open_input_file(&FormatContext, SourceFile, NULL, 0, NULL) != 0) {
        _snprintf(ErrorMsg, MsgSize, "Couldn't open '%s'", SourceFile);
        throw ErrorMsg;
    }

    if (av_find_stream_info(FormatContext) < 0) {
        Free(false);
        _snprintf(ErrorMsg, MsgSize, "Couldn't find stream information");
        throw ErrorMsg;
    }

    AudioTrack = Track;
    if (GetTrackIndex(AudioTrack, ErrorMsg, MsgSize)) {
        Free(false);
        throw ErrorMsg;
    }

    Frames = (*TrackIndices)[AudioTrack];

    if (Frames.size() == 0) {
        Free(false);
        _snprintf(ErrorMsg, MsgSize, "Audio track contains no frames");
        throw ErrorMsg;
    }

    CodecContext = FormatContext->streams[AudioTrack]->codec;

    Codec = avcodec_find_decoder(CodecContext->codec_id);
    if (Codec == NULL) {
        Free(false);
        _snprintf(ErrorMsg, MsgSize, "Audio codec not found");
        throw ErrorMsg;
    }

    if (avcodec_open(CodecContext, Codec) < 0) {
        Free(false);
        _snprintf(ErrorMsg, MsgSize, "Could not open audio codec");
        throw ErrorMsg;
    }

    // Always try to decode a frame to make sure all required parameters are known
    uint8_t DummyBuf[512];
    if (GetAudio(DummyBuf, 0, 1, ErrorMsg, MsgSize)) {
        Free(true);
        throw ErrorMsg;
    }

    AP.BitsPerSample = av_get_bits_per_sample_format(CodecContext->sample_fmt);
    AP.Channels = CodecContext->channels;
    AP.Float = AudioFMTIsFloat(CodecContext->sample_fmt);
    AP.SampleRate = CodecContext->sample_rate;
    AP.NumSamples = (Frames.back()).SampleStart;

    if (AP.SampleRate <= 0 || AP.BitsPerSample <= 0) {
        Free(true);
        _snprintf(ErrorMsg, MsgSize, "Codec returned zero size audio");
        throw ErrorMsg;
    }
}
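
The properties filled in above determine how large a caller's buffer must be for a given sample count. A minimal standalone sketch of that arithmetic (plain C++, no FFmpeg dependency; the struct and helper names below are made up for illustration):

    #include <cstdint>
    #include <cstdio>

    // Hypothetical mirror of the fields filled in by the constructor above.
    struct AudioProps {
        int BitsPerSample;   // e.g. 16
        int Channels;        // e.g. 2
        int SampleRate;      // e.g. 48000
        int64_t NumSamples;  // taken from the last indexed frame's SampleStart
    };

    // Bytes occupied by one interleaved sample across all channels
    // (the same quantity the sources call SizeConst).
    static int64_t BytesPerSample(const AudioProps &AP) {
        return (int64_t)AP.BitsPerSample * AP.Channels / 8;
    }

    // Size of the buffer a caller must pass to GetAudio() for Count samples.
    static int64_t RequestBytes(const AudioProps &AP, int64_t Count) {
        return BytesPerSample(AP) * Count;
    }

    int main() {
        AudioProps AP = {16, 2, 48000, 48000 * 60};
        std::printf("one second of audio needs %lld bytes\n",
                    (long long)RequestBytes(AP, AP.SampleRate));
        return 0;
    }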

int FFAudioSource::DecodeNextAudioBlock(uint8_t *Buf, int64_t *Count, char *ErrorMsg, unsigned MsgSize) {
    const size_t SizeConst = (av_get_bits_per_sample_format(CodecContext->sample_fmt) * CodecContext->channels) / 8;
    int Ret = -1;
    *Count = 0;
    AVPacket Packet;

    while (av_read_frame(FormatContext, &Packet) >= 0) {
        if (Packet.stream_index == AudioTrack) {
            uint8_t *Data = Packet.data;
            int Size = Packet.size;

            while (Size > 0) {
                int TempOutputBufSize = AVCODEC_MAX_AUDIO_FRAME_SIZE * 10;
                Ret = avcodec_decode_audio2(CodecContext, (int16_t *)Buf, &TempOutputBufSize, Data, Size);

                if (Ret < 0) { // throw error or something?
                    av_free_packet(&Packet);
                    goto Done;
                }

                if (Ret > 0) {
                    Size -= Ret;
                    Data += Ret;
                    Buf += TempOutputBufSize;
                    if (SizeConst)
                        *Count += TempOutputBufSize / SizeConst;
                }
            }

            av_free_packet(&Packet);
            goto Done;
        }

        av_free_packet(&Packet);
    }

Done:
    return Ret;
}
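
The inner loop above follows the usual libavcodec pattern: the decoder reports how many compressed bytes it consumed, the data pointer and remaining size are advanced by that amount, and the produced output bytes are converted into a sample count. A self-contained sketch of the same control flow, with a fake decoder standing in for avcodec_decode_audio2 (numbers are assumed; no FFmpeg required):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Stand-in for avcodec_decode_audio2: consumes up to 3 input bytes per call,
    // produces 4 output bytes per consumed byte, and reports both amounts.
    // Purely illustrative; a real decoder's numbers depend on the codec.
    static int FakeDecode(const uint8_t *In, int InSize, std::vector<uint8_t> &Out, int &OutBytes) {
        (void)In;                       // a real decoder would read the compressed bytes
        int Consumed = InSize < 3 ? InSize : 3;
        OutBytes = Consumed * 4;
        Out.insert(Out.end(), (size_t)OutBytes, (uint8_t)0);
        return Consumed;                // like Ret: input bytes consumed, <0 would mean error
    }

    int main() {
        const int BytesPerSample = 4;   // 16-bit stereo, like SizeConst above
        uint8_t Packet[10] = {0};       // one "packet" of compressed data
        const uint8_t *Data = Packet;
        int Size = sizeof(Packet);

        std::vector<uint8_t> Decoded;
        int64_t Samples = 0;

        // Same shape as the loop above: keep calling the decoder until the whole
        // packet is consumed, advancing Data/Size by the return value and counting
        // output samples from the produced byte count.
        while (Size > 0) {
            int OutBytes = 0;
            int Ret = FakeDecode(Data, Size, Decoded, OutBytes);
            if (Ret < 0)
                break;                  // decode error: the source bails out here
            if (Ret > 0) {
                Size -= Ret;
                Data += Ret;
                Samples += OutBytes / BytesPerSample;
            }
        }

        std::printf("decoded %lld samples (%zu bytes)\n", (long long)Samples, Decoded.size());
        return 0;
    }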

int FFAudioSource::GetAudio(void *Buf, int64_t Start, int64_t Count, char *ErrorMsg, unsigned MsgSize) {
    const size_t SizeConst = (av_get_bits_per_sample_format(CodecContext->sample_fmt) * CodecContext->channels) / 8;
    size_t CurrentAudioBlock = FFMAX((int64_t)FindClosestAudioKeyFrame(Start) - 50, (int64_t)0);
    memset(Buf, 0, SizeConst * Count);
    AVPacket Packet;

    avcodec_flush_buffers(CodecContext);
    av_seek_frame(FormatContext, AudioTrack, Frames[CurrentAudioBlock].DTS, AVSEEK_FLAG_BACKWARD);

    // Establish where we actually are
    // Trigger on packet dts difference since groups can otherwise be indistinguishable
    int64_t LastDTS = -1;
    while (av_read_frame(FormatContext, &Packet) >= 0) {
        if (Packet.stream_index == AudioTrack) {
            if (LastDTS < 0) {
                LastDTS = Packet.dts;
            } else if (LastDTS != Packet.dts) {
                for (size_t i = 0; i < Frames.size(); i++)
                    if (Frames[i].DTS == Packet.dts) {
                        // The current match was consumed
                        CurrentAudioBlock = i + 1;
                        break;
                    }

                av_free_packet(&Packet);
                break;
            }
        }

        av_free_packet(&Packet);
    }

    uint8_t *DstBuf = (uint8_t *)Buf;
    int64_t RemainingSamples = Count;
    int64_t DecodeCount;

    do {
        int64_t DecodeStart = Frames[CurrentAudioBlock].SampleStart;
        int Ret = DecodeNextAudioBlock(DecodingBuffer, &DecodeCount, ErrorMsg, MsgSize);
        if (Ret < 0) {
            // FIXME
            //Env->ThrowError("Bleh, bad audio decoding");
        }
        CurrentAudioBlock++;

        int64_t OffsetBytes = SizeConst * FFMAX(0, Start - DecodeStart);
        int64_t CopyBytes = FFMAX(0, SizeConst * FFMIN(RemainingSamples, DecodeCount - FFMAX(0, Start - DecodeStart)));

        memcpy(DstBuf, DecodingBuffer + OffsetBytes, CopyBytes);
        DstBuf += CopyBytes;

        if (SizeConst)
            RemainingSamples -= CopyBytes / SizeConst;

    } while (RemainingSamples > 0 && CurrentAudioBlock < Frames.size());

    return 0;
}
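
The memcpy above clamps each decoded block against the requested sample window via OffsetBytes and CopyBytes. A standalone check of that clamping arithmetic (the names mirror the locals above; nothing here touches the real sources):

    #include <cstdint>
    #include <cstdio>
    #include <algorithm>

    // Given a request for [Start, Start + Remaining) samples and a decoded block
    // that begins at DecodeStart and holds DecodeCount samples, compute how many
    // bytes to skip at the front of the block and how many to copy out of it.
    static void CopyWindow(int64_t Start, int64_t Remaining,
                           int64_t DecodeStart, int64_t DecodeCount,
                           int64_t BytesPerSample,
                           int64_t &OffsetBytes, int64_t &CopyBytes) {
        int64_t Skip = std::max<int64_t>(0, Start - DecodeStart);
        OffsetBytes = BytesPerSample * Skip;
        CopyBytes = std::max<int64_t>(0, BytesPerSample * std::min(Remaining, DecodeCount - Skip));
    }

    int main() {
        int64_t Off, Copy;
        // Request starts 100 samples into a 1000-sample block; ask for 250 samples.
        CopyWindow(1100, 250, 1000, 1000, 4, Off, Copy);
        std::printf("skip %lld bytes, copy %lld bytes\n", (long long)Off, (long long)Copy); // skip 400, copy 1000
        return 0;
    }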

FFAudioSource::~FFAudioSource() {
    Free(true);
}

int MatroskaAudioSource::GetTrackIndex(int &Index, char *ErrorMsg, unsigned MsgSize) {
    if (Index < 0) {
        Index = -1;

@@ -69,8 +271,16 @@ int MatroskaAudioSource::GetTrackIndex(int &Index, char *ErrorMsg, unsigned MsgS
    return 0;
}

void MatroskaAudioSource::Free(bool CloseAudio) {

void MatroskaAudioSource::Free(bool CloseCodec) {
    if (CS)
        cs_Destroy(CS);
    if (MC.ST.fp) {
        mkv_Close(MF);
        fclose(MC.ST.fp);
    }
    if (CloseCodec)
        avcodec_close(CodecContext);
    av_free(CodecContext);
}

MatroskaAudioSource::MatroskaAudioSource(const char *SourceFile, int Track, FrameIndex *TrackIndices, char *ErrorMsg, unsigned MsgSize) {

@@ -157,7 +367,7 @@ MatroskaAudioSource::MatroskaAudioSource(const char *SourceFile, int Track, Fram
}

MatroskaAudioSource::~MatroskaAudioSource() {

    Free(true);
}

int MatroskaAudioSource::GetAudio(void *Buf, int64_t Start, int64_t Count, char *ErrorMsg, unsigned MsgSize) {

@@ -175,6 +385,7 @@ int MatroskaAudioSource::GetAudio(void *Buf, int64_t Start, int64_t Count, char
    int64_t DecodeStart = Frames[CurrentAudioBlock].SampleStart;
    int Ret = DecodeNextAudioBlock(DecodingBuffer, &DecodeCount, Frames[CurrentAudioBlock].FilePos, Frames[CurrentAudioBlock].FrameSize, ErrorMsg, MsgSize);
    if (Ret < 0) {
        // FIXME
        //Env->ThrowError("Bleh, bad audio decoding");
    }
    CurrentAudioBlock++;

@@ -47,27 +47,20 @@ public:
    virtual int GetAudio(void *Buf, int64_t Start, int64_t Count, char *ErrorMsg, unsigned MsgSize) = 0;
};

/*
class FFmpegAudioSource : public FFAudioBase {
class FFAudioSource : public AudioBase {
private:
    AVFormatContext *FormatContext;
    AVCodecContext *AudioCodecContext;

    int AudioTrack;
    FILE *RawCache;
    unsigned int BufferSize;
    uint8_t *Buffer;

    bool LoadSampleInfoFromFile(const char *AAudioCacheFile, const char *AAudioCacheFile2, const char *ASource, int AAudioTrack);
    int DecodeNextAudioBlock(uint8_t *ABuf, int64_t *ACount, uint64_t AFilePos, unsigned int AFrameSize, IScriptEnvironment *Env);
    int GetTrackIndex(int Index, CodecType ATrackType, IScriptEnvironment *Env);
    int DecodeNextAudioBlock(uint8_t *Buf, int64_t *Count, char *ErrorMsg, unsigned MsgSize);
    int GetTrackIndex(int &Index, char *ErrorMsg, unsigned MsgSize);
    void Free(bool CloseCodec);
public:
    FFmpegAudioSource(const char *ASource, int AAudioTrack, const char *AAudioCache, const char *AAudioCacheFile2, IScriptEnvironment *Env);
    ~FFmpegAudioSource();
    FFAudioSource(const char *SourceFile, int Track, FrameIndex *TrackIndices, char *ErrorMsg, unsigned MsgSize);
    ~FFAudioSource();

    void __stdcall GetAudio(void* Buf, __int64 Start, __int64 Count, IScriptEnvironment *Env);
    int GetAudio(void *Buf, int64_t Start, int64_t Count, char *ErrorMsg, unsigned MsgSize);
};
*/

class MatroskaAudioSource : public AudioBase {
private:

@@ -78,7 +71,7 @@ private:

    int DecodeNextAudioBlock(uint8_t *Buf, int64_t *Count, uint64_t FilePos, unsigned int FrameSize, char *ErrorMsg, unsigned MsgSize);
    int GetTrackIndex(int &Index, char *ErrorMsg, unsigned MsgSize);
    void Free(bool CloseAudio);
    void Free(bool CloseCodec);
public:
    MatroskaAudioSource(const char *SourceFile, int Track, FrameIndex *TrackIndices, char *ErrorMsg, unsigned MsgSize);
    ~MatroskaAudioSource();

@@ -160,8 +160,7 @@ AvisynthAudioSource::AvisynthAudioSource(const char *SourceFile, int Track, Fram
            case 16: VI.sample_type = SAMPLE_INT16; break;
            case 24: VI.sample_type = SAMPLE_INT24; break;
            case 32: VI.sample_type = SAMPLE_INT32; break;
            default:;
            // FIXME error here
            default: Env->ThrowError("FFAudioSource: Bad audio format");
        }
    }
}
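
The switch above maps the indexed track's integer bit depths to Avisynth sample types. A standalone sketch of the same mapping (local enum values stand in for the Avisynth constants; the float branch is an assumption, since only the integer cases appear in this hunk):

    #include <cstdio>
    #include <stdexcept>

    // Local stand-ins for Avisynth's sample_type constants; only the integer
    // cases are visible in the diff above, the float value here is assumed.
    enum SampleType { S_INT16, S_INT24, S_INT32, S_FLOAT };

    static SampleType PickSampleType(int BitsPerSample, bool IsFloat) {
        if (IsFloat)
            return S_FLOAT;          // assumed handling; not shown in the diff
        switch (BitsPerSample) {
            case 16: return S_INT16;
            case 24: return S_INT24;
            case 32: return S_INT32;
            default: throw std::runtime_error("Bad audio format"); // mirrors Env->ThrowError above
        }
    }

    int main() {
        std::printf("24-bit int maps to %d\n", (int)PickSampleType(24, false));
        return 0;
    }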

@@ -23,6 +23,30 @@
#include "ffaudiosource.h"
#include "indexing.h"

FrameInfo::FrameInfo(int64_t DTS, bool KeyFrame) {
    this->DTS = DTS;
    this->SampleStart = 0;
    this->FilePos = 0;
    this->FrameSize = 0;
    this->KeyFrame = KeyFrame;
}

FrameInfo::FrameInfo(int64_t DTS, int64_t SampleStart, bool KeyFrame) {
    this->DTS = DTS;
    this->SampleStart = SampleStart;
    this->FilePos = 0;
    this->FrameSize = 0;
    this->KeyFrame = KeyFrame;
}

FrameInfo::FrameInfo(int64_t SampleStart, int64_t FilePos, unsigned int FrameSize, bool KeyFrame) {
    this->DTS = 0;
    this->SampleStart = SampleStart;
    this->FilePos = FilePos;
    this->FrameSize = FrameSize;
    this->KeyFrame = KeyFrame;
}

FFMS_API(void) FFMS_Init() {
    static bool InitDone = false;
    if (!InitDone)

@@ -47,7 +71,7 @@ FFMS_API(VideoBase *) FFMS_CreateVideoSource(const char *SourceFile, int Track,
FFMS_API(AudioBase *) FFMS_CreateAudioSource(const char *SourceFile, int Track, FrameIndex *TrackIndices, char *ErrorMsg, unsigned MsgSize) {
    try {
        switch (TrackIndices->Decoder) {
            //case 0: return new FFVideoSource(SourceFile, Track, TrackIndices, ErrorMsg, MsgSize);
            case 0: return new FFAudioSource(SourceFile, Track, TrackIndices, ErrorMsg, MsgSize);
            case 1: return new MatroskaAudioSource(SourceFile, Track, TrackIndices, ErrorMsg, MsgSize);
            default:
                _snprintf(ErrorMsg, MsgSize, "Unsupported format");

@@ -88,6 +112,10 @@ FFMS_API(const AVFrameLite *) FFMS_GetFrame(VideoBase *VB, int n, char *ErrorMsg
    return (AVFrameLite *)VB->GetFrame(n, ErrorMsg, MsgSize);
}

FFMS_API(const AVFrameLite *) FFMS_GetFrameByTime(VideoBase *VB, double Time, char *ErrorMsg, unsigned MsgSize) {
    return (AVFrameLite *)VB->GetFrameByTime(Time, ErrorMsg, MsgSize);
}

FFMS_API(int) FFMS_GetAudio(AudioBase *AB, void *Buf, int64_t Start, int64_t Count, char *ErrorMsg, unsigned MsgSize) {
    return AB->GetAudio(Buf, Start, Count, ErrorMsg, MsgSize);
}
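
Taken together, the exported functions above form a small C API for pulling decoded audio. A hypothetical usage sketch built only from the prototypes visible in this diff (index creation and object cleanup are omitted because those entry points are not shown in these hunks; the NULL-on-error return and the AudioProperties field names are assumptions based on the source code above):

    #include <cstdint>
    #include <cstdio>
    #include <vector>
    #include "ffms.h"

    int DumpFirstSecond(const char *SourceFile, FrameIndex *Index) {
        char Err[1024];
        FFMS_Init();

        // Track = -1 lets GetTrackIndex() pick the first audio track.
        AudioBase *AB = FFMS_CreateAudioSource(SourceFile, -1, Index, Err, sizeof(Err));
        if (!AB) {                                  // assumed error convention
            std::printf("open failed: %s\n", Err);
            return 1;
        }

        const AudioProperties *AP = FFMS_GetAudioProperties(AB);
        int64_t Count = AP->SampleRate;             // one second of samples
        std::vector<uint8_t> Buf(Count * AP->Channels * AP->BitsPerSample / 8);

        if (FFMS_GetAudio(AB, Buf.data(), 0, Count, Err, sizeof(Err))) {
            std::printf("decode failed: %s\n", Err);
            return 1;
        }
        return 0;
    }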

@@ -122,14 +122,13 @@ struct TrackTimeBase {

class FrameInfo {
public:
    union {
        int64_t DTS;
        int64_t SampleStart;
    };
    int64_t DTS;
    int64_t SampleStart;
    int64_t FilePos;
    unsigned int FrameSize;
    bool KeyFrame;
    FrameInfo(int64_t DTS, bool KeyFrame);
    FrameInfo(int64_t DTS, int64_t SampleStart, bool KeyFrame);
    FrameInfo(int64_t SampleStart, int64_t FilePos, unsigned int FrameSize, bool KeyFrame);
};
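
Replacing the DTS/SampleStart union with separate fields lets an audio frame carry a timestamp and a running sample position at the same time, which the three constructors reflect: video entries store only a DTS, libavformat audio entries store DTS plus SampleStart, and Matroska audio entries store SampleStart plus file position and frame size. A condensed standalone illustration (the struct below is a stand-in, not the real class):

    #include <cstdint>
    #include <vector>

    // Condensed stand-in for the class above, for illustration only; the real
    // definition and its constructors live in ffms.h.
    struct FrameInfoSketch {
        int64_t DTS;
        int64_t SampleStart;
        int64_t FilePos;
        unsigned int FrameSize;
        bool KeyFrame;
    };

    int main() {
        std::vector<FrameInfoSketch> Index;

        // Video packet: DTS and keyframe flag (first constructor).
        Index.push_back({ 9000, 0, 0, 0, true });

        // libavformat audio packet: DTS plus running sample position
        // (second constructor), as pushed in MakeIndex().
        Index.push_back({ 9000, 48000, 0, 0, true });

        // Matroska audio frame: sample position plus file position and size
        // (third constructor), as pushed in MakeMatroskaIndex().
        Index.push_back({ 0, 48000, 123456, 512, true });

        return (int)Index.size();
    }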

@@ -166,6 +165,7 @@ FFMS_API(int) FFMS_GetASTrack(AudioBase *AB);
FFMS_API(const VideoProperties *) FFMS_GetVideoProperties(VideoBase *VB);
FFMS_API(const AudioProperties *) FFMS_GetAudioProperties(AudioBase *AB);
FFMS_API(const AVFrameLite *) FFMS_GetFrame(VideoBase *VB, int n, char *ErrorMsg, unsigned MsgSize);
FFMS_API(const AVFrameLite *) FFMS_GetFrameByTime(VideoBase *VB, double Time, char *ErrorMsg, unsigned MsgSize);
FFMS_API(int) FFMS_GetAudio(AudioBase *AB, void *Buf, int64_t Start, int64_t Count, char *ErrorMsg, unsigned MsgSize);
FFMS_API(int) FFMS_SetOutputFormat(VideoBase *VB, int TargetFormat, int Width, int Height);
FFMS_API(void) FFMS_ResetOutputFormat(VideoBase *VB);

@@ -109,11 +109,9 @@ VideoBase::~VideoBase() {
    av_free(DecodeFrame);
}

AVFrame *GetFrameByTime(double Time, char *ErrorMsg, unsigned MsgSize) {
    // FIXME
    //Frames.ClosestFrameFromDTS();
    //return GetFrame(, ErrorMsg, MsgSize);
    return NULL;
AVFrameLite *VideoBase::GetFrameByTime(double Time, char *ErrorMsg, unsigned MsgSize) {
    int Frame = Frames.ClosestFrameFromDTS(Time * Frames.TB.Den / Frames.TB.Num);
    return GetFrame(Frame, ErrorMsg, MsgSize);
}
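
The new GetFrameByTime converts a time in seconds into the track's DTS time base (Time * TB.Den / TB.Num) and then asks the frame index for the closest frame. A standalone check of that conversion, assuming a millisecond time base for the example:

    #include <cstdint>
    #include <cstdio>

    // Convert a time in seconds to a DTS value in the track's time base,
    // using the same expression as the implementation above: Time * Den / Num.
    static int64_t SecondsToDTS(double Time, int64_t Num, int64_t Den) {
        return (int64_t)(Time * Den / Num);
    }

    int main() {
        // Assumed millisecond time base (Num = 1, Den = 1000), common for Matroska.
        std::printf("42.5s -> DTS %lld\n", (long long)SecondsToDTS(42.5, 1, 1000)); // 42500
        return 0;
    }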

int VideoBase::SetOutputFormat(int TargetFormats, int Width, int Height) {

@@ -27,21 +27,21 @@
#include "indexing.h"
#include "wave64writer.h"

class AudioContext {
class MatroskaAudioContext {
public:
    Wave64Writer *W64W;
    AVCodecContext *CTX;
    CompressedStream *CS;
    int64_t CurrentSample;

    AudioContext() {
    MatroskaAudioContext() {
        W64W = NULL;
        CTX = NULL;
        CS = NULL;
        CurrentSample = 0;
    }

    ~AudioContext() {
    ~MatroskaAudioContext() {
        delete W64W;
        if (CTX) {
            avcodec_close(CTX);

@@ -52,19 +52,56 @@ public:
    }
};

class IndexMemory {
class FFAudioContext {
public:
    Wave64Writer *W64W;
    AVCodecContext *CTX;
    int64_t CurrentSample;

    FFAudioContext() {
        W64W = NULL;
        CTX = 0;
        CurrentSample = 0;
    }

    ~FFAudioContext() {
        delete W64W;
        if (CTX)
            avcodec_close(CTX);
    }
};

class MatroskaIndexMemory {
private:
    int16_t *DecodingBuffer;
    AudioContext *AudioContexts;
    MatroskaAudioContext *AudioContexts;
public:
    IndexMemory(int Tracks, int16_t *&DecodingBuffer, AudioContext *&AudioContexts) {
    MatroskaIndexMemory(int Tracks, int16_t *&DecodingBuffer, MatroskaAudioContext *&AudioContexts) {
        DecodingBuffer = new int16_t[AVCODEC_MAX_AUDIO_FRAME_SIZE*10];
        AudioContexts = new AudioContext[Tracks];
        AudioContexts = new MatroskaAudioContext[Tracks];
        this->DecodingBuffer = DecodingBuffer;
        this->AudioContexts = AudioContexts;
    }

    ~IndexMemory() {
    ~MatroskaIndexMemory() {
        delete [] DecodingBuffer;
        delete [] AudioContexts;
    }
};

class FFIndexMemory {
private:
    int16_t *DecodingBuffer;
    FFAudioContext *AudioContexts;
public:
    FFIndexMemory(int Tracks, int16_t *&DecodingBuffer, FFAudioContext *&AudioContexts) {
        DecodingBuffer = new int16_t[AVCODEC_MAX_AUDIO_FRAME_SIZE*10];
        AudioContexts = new FFAudioContext[Tracks];
        this->DecodingBuffer = DecodingBuffer;
        this->AudioContexts = AudioContexts;
    }

    ~FFIndexMemory() {
        delete [] DecodingBuffer;
        delete [] AudioContexts;
    }
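
MatroskaIndexMemory and FFIndexMemory exist only to tie the decoding buffer and the per-track context array to a stack object, so every early return inside the indexing loops still releases them. A minimal sketch of the same RAII idea using standard containers (an alternative illustration, not how the code above is written):

    #include <cstdint>
    #include <vector>

    // Hypothetical per-track state, standing in for FFAudioContext above.
    struct TrackState {
        int64_t CurrentSample = 0;
    };

    // Same RAII idea as FFIndexMemory: everything allocated here is released
    // automatically when the object goes out of scope, no matter which early
    // return the indexing loop takes.
    struct IndexScratch {
        std::vector<int16_t> DecodingBuffer;
        std::vector<TrackState> Tracks;
        IndexScratch(size_t BufferSamples, size_t TrackCount)
            : DecodingBuffer(BufferSamples), Tracks(TrackCount) {}
    };

    int main() {
        IndexScratch Scratch(192000 * 10, 4);   // decoding buffer plus one state per track
        Scratch.Tracks[0].CurrentSample += 1024;
        return 0;                               // both allocations freed here
    }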
|
||||
|
@ -86,7 +123,6 @@ public:
|
|||
}
|
||||
};
|
||||
|
||||
|
||||
static bool DTSComparison(FrameInfo FI1, FrameInfo FI2) {
|
||||
return FI1.DTS < FI2.DTS;
|
||||
}
|
||||
|
@ -157,8 +193,8 @@ static FrameIndex *MakeMatroskaIndex(const char *SourceFile, int IndexMask, int
|
|||
// Audio stuff
|
||||
|
||||
int16_t *db;
|
||||
AudioContext *AudioContexts;
|
||||
IndexMemory IM = IndexMemory(mkv_GetNumTracks(MF), db, AudioContexts);
|
||||
MatroskaAudioContext *AudioContexts;
|
||||
MatroskaIndexMemory IM = MatroskaIndexMemory(mkv_GetNumTracks(MF), db, AudioContexts);
|
||||
|
||||
for (unsigned int i = 0; i < mkv_GetNumTracks(MF); i++) {
|
||||
if (IndexMask & (1 << i) && mkv_GetTrackInfo(MF, i)->Type == TT_AUDIO) {
|
||||
|
@ -217,12 +253,11 @@ static FrameIndex *MakeMatroskaIndex(const char *SourceFile, int IndexMask, int
|
|||
}
|
||||
|
||||
// Only create index entries for video for now to save space
|
||||
if (mkv_GetTrackInfo(MF, Track)->Type == TT_VIDEO)
|
||||
if (mkv_GetTrackInfo(MF, Track)->Type == TT_VIDEO) {
|
||||
(*TrackIndices)[Track].push_back(FrameInfo(StartTime, (FrameFlags & FRAME_KF) != 0));
|
||||
|
||||
if (IndexMask & (1 << Track)) {
|
||||
ReadFrame(FilePos, FrameSize, AudioContexts[Track].CS, MC, ErrorMsg, MsgSize);
|
||||
} else if (mkv_GetTrackInfo(MF, Track)->Type == TT_AUDIO && (IndexMask & (1 << Track))) {
|
||||
(*TrackIndices)[Track].push_back(FrameInfo(AudioContexts[Track].CurrentSample, FilePos, FrameSize, (FrameFlags & FRAME_KF) != 0));
|
||||
ReadFrame(FilePos, FrameSize, AudioContexts[Track].CS, MC, ErrorMsg, MsgSize);
|
||||
|
||||
int Size = FrameSize;
|
||||
uint8_t *Data = MC.Buffer;
|
||||
|
@ -298,8 +333,8 @@ FrameIndex *MakeIndex(const char *SourceFile, int IndexMask, int DumpMask, const
|
|||
// Audio stuff
|
||||
|
||||
int16_t *db;
|
||||
AudioContext *AudioContexts;
|
||||
IndexMemory IM = IndexMemory(FormatContext->nb_streams, db, AudioContexts);
|
||||
FFAudioContext *AudioContexts;
|
||||
FFIndexMemory IM = FFIndexMemory(FormatContext->nb_streams, db, AudioContexts);
|
||||
|
||||
for (unsigned int i = 0; i < FormatContext->nb_streams; i++) {
|
||||
if (IndexMask & (1 << i) && FormatContext->streams[i]->codec->codec_type == CODEC_TYPE_AUDIO) {
|
||||
|
@ -342,55 +377,54 @@ FrameIndex *MakeIndex(const char *SourceFile, int IndexMask, int DumpMask, const
|
|||
}
|
||||
|
||||
// Only create index entries for video for now to save space
|
||||
if (FormatContext->streams[Packet.stream_index]->codec->codec_type == CODEC_TYPE_VIDEO)
|
||||
if (FormatContext->streams[Packet.stream_index]->codec->codec_type == CODEC_TYPE_VIDEO) {
|
||||
(*TrackIndices)[Packet.stream_index].push_back(FrameInfo(Packet.dts, (Packet.flags & PKT_FLAG_KEY) ? 1 : 0));
|
||||
} else if (FormatContext->streams[Packet.stream_index]->codec->codec_type == CODEC_TYPE_AUDIO && (IndexMask & (1 << Packet.stream_index))) {
|
||||
(*TrackIndices)[Packet.stream_index].push_back(FrameInfo(Packet.dts, AudioContexts[Packet.stream_index].CurrentSample, (Packet.flags & PKT_FLAG_KEY) ? 1 : 0));
|
||||
AVCodecContext *AudioCodecContext = FormatContext->streams[Packet.stream_index]->codec;
|
||||
int Size = Packet.size;
|
||||
uint8_t *Data = Packet.data;
|
||||
|
||||
if (IndexMask & (1 << Packet.stream_index)) {
|
||||
AVCodecContext *AudioCodecContext = FormatContext->streams[Packet.stream_index]->codec;
|
||||
int Size = Packet.size;
|
||||
uint8_t *Data = Packet.data;
|
||||
|
||||
while (Size > 0) {
|
||||
int dbsize = AVCODEC_MAX_AUDIO_FRAME_SIZE*10;
|
||||
int Ret = avcodec_decode_audio2(AudioCodecContext, db, &dbsize, Data, Size);
|
||||
if (Ret < 0) {
|
||||
if (IgnoreDecodeErrors) {
|
||||
(*TrackIndices)[Packet.stream_index].clear();
|
||||
IndexMask &= ~(1 << Packet.stream_index);
|
||||
break;
|
||||
} else {
|
||||
_snprintf(ErrorMsg, MsgSize, "Audio decoding error");
|
||||
delete TrackIndices;
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
if (Ret > 0) {
|
||||
Size -= Ret;
|
||||
Data += Ret;
|
||||
}
|
||||
|
||||
// FIXME currentsample calculation here
|
||||
if (dbsize > 0)
|
||||
dbsize = dbsize;
|
||||
|
||||
if (dbsize > 0 && (DumpMask & (1 << Packet.stream_index))) {
|
||||
// Delay writer creation until after an audio frame has been decoded. This ensures that all parameters are known when writing the headers.
|
||||
if (!AudioContexts[Packet.stream_index].W64W) {
|
||||
char ABuf[50];
|
||||
std::string WN(AudioFile);
|
||||
int Offset = (Packet.dts * FormatContext->streams[Packet.stream_index]->time_base.num)
|
||||
/ (double)(FormatContext->streams[Packet.stream_index]->time_base.den * 1000);
|
||||
_snprintf(ABuf, sizeof(ABuf), ".%02d.delay.%d.w64", Packet.stream_index, Offset);
|
||||
WN += ABuf;
|
||||
|
||||
AudioContexts[Packet.stream_index].W64W = new Wave64Writer(WN.c_str(), av_get_bits_per_sample_format(AudioCodecContext->sample_fmt),
|
||||
AudioCodecContext->channels, AudioCodecContext->sample_rate, AudioFMTIsFloat(AudioCodecContext->sample_fmt));
|
||||
}
|
||||
|
||||
AudioContexts[Packet.stream_index].W64W->WriteData(db, dbsize);
|
||||
while (Size > 0) {
|
||||
int dbsize = AVCODEC_MAX_AUDIO_FRAME_SIZE*10;
|
||||
int Ret = avcodec_decode_audio2(AudioCodecContext, db, &dbsize, Data, Size);
|
||||
if (Ret < 0) {
|
||||
if (IgnoreDecodeErrors) {
|
||||
(*TrackIndices)[Packet.stream_index].clear();
|
||||
IndexMask &= ~(1 << Packet.stream_index);
|
||||
break;
|
||||
} else {
|
||||
_snprintf(ErrorMsg, MsgSize, "Audio decoding error");
|
||||
delete TrackIndices;
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
if (Ret > 0) {
|
||||
Size -= Ret;
|
||||
Data += Ret;
|
||||
}
|
||||
|
||||
if (dbsize > 0)
|
||||
AudioContexts[Packet.stream_index].CurrentSample += (dbsize * 8) / (av_get_bits_per_sample_format(AudioCodecContext->sample_fmt) * AudioCodecContext->channels);
|
||||
|
||||
if (dbsize > 0 && (DumpMask & (1 << Packet.stream_index))) {
|
||||
// Delay writer creation until after an audio frame has been decoded. This ensures that all parameters are known when writing the headers.
|
||||
if (!AudioContexts[Packet.stream_index].W64W) {
|
||||
char ABuf[50];
|
||||
std::string WN(AudioFile);
|
||||
int Offset = (Packet.dts * FormatContext->streams[Packet.stream_index]->time_base.num)
|
||||
/ (double)(FormatContext->streams[Packet.stream_index]->time_base.den * 1000);
|
||||
_snprintf(ABuf, sizeof(ABuf), ".%02d.delay.%d.w64", Packet.stream_index, Offset);
|
||||
WN += ABuf;
|
||||
|
||||
AudioContexts[Packet.stream_index].W64W = new Wave64Writer(WN.c_str(), av_get_bits_per_sample_format(AudioCodecContext->sample_fmt),
|
||||
AudioCodecContext->channels, AudioCodecContext->sample_rate, AudioFMTIsFloat(AudioCodecContext->sample_fmt));
|
||||
}
|
||||
|
||||
AudioContexts[Packet.stream_index].W64W->WriteData(db, dbsize);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
av_free_packet(&Packet);
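
The new indexer advances a per-track CurrentSample counter by converting each decode call's output byte count into samples. A standalone check of that conversion, with values assumed for illustration:

    #include <cstdint>
    #include <cstdio>

    // Same conversion the indexer applies after each decode call:
    // output bytes -> samples, using the sample format's bit width and channel count.
    static int64_t BytesToSamples(int64_t OutputBytes, int BitsPerSample, int Channels) {
        return (OutputBytes * 8) / ((int64_t)BitsPerSample * Channels);
    }

    int main() {
        // 4096 bytes of decoded 16-bit stereo audio -> 1024 sample positions.
        std::printf("%lld samples\n", (long long)BytesToSamples(4096, 16, 2));
        return 0;
    }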

@@ -457,20 +491,6 @@ FrameIndex *ReadIndex(const char *IndexFile, char *ErrorMsg, unsigned MsgSize) {
    return TrackIndices;
}

FrameInfo::FrameInfo(int64_t DTS, bool KeyFrame) {
    this->DTS = DTS;
    this->FilePos = 0;
    this->FrameSize = 0;
    this->KeyFrame = KeyFrame;
}

FrameInfo::FrameInfo(int64_t SampleStart, int64_t FilePos, unsigned int FrameSize, bool KeyFrame) {
    this->SampleStart = SampleStart;
    this->FilePos = FilePos;
    this->FrameSize = FrameSize;
    this->KeyFrame = KeyFrame;
}

int FrameInfoVector::WriteTimecodes(const char *TimecodeFile, char *ErrorMsg, unsigned MsgSize) {
    std::ofstream Timecodes(TimecodeFile, std::ios::out | std::ios::trunc);

@@ -516,13 +536,13 @@ int FrameInfoVector::FindClosestKeyFrame(int Frame) {
}

FrameInfoVector::FrameInfoVector() {
    TT = 0;
    TB.Num = 0;
    TB.Den = 0;
    this->TT = 0;
    this->TB.Num = 0;
    this->TB.Den = 0;
}

FrameInfoVector::FrameInfoVector(int Num, int Den, int TT) {
    this->TT = TT;
    TB.Num = Num;
    TB.Den = Den;
    this->TB.Num = Num;
    this->TB.Den = Den;
}

@@ -33,7 +33,7 @@ extern "C" {
#include "utils.h"
#include "ffms.h"

#define INDEXVERSION 7
#define INDEXVERSION 8
#define INDEXID 0x53920873

struct IndexHeader {