FFmpegSource 1.4

Originally committed to SVN as r1432.
This commit is contained in:
Fredrik Mellbin 2007-07-18 13:00:15 +00:00
parent fefa76b22c
commit a446253e72
5 changed files with 985 additions and 953 deletions

FFmpegSource/ffbase.cpp Normal file

@@ -0,0 +1,239 @@
#include "ffmpegsource.h"
int GetSWSCPUFlags(IScriptEnvironment *Env) {
int Flags = 0;
long CPUFlags = Env->GetCPUFlags();
if (CPUFlags & CPUF_MMX)
CPUFlags |= SWS_CPU_CAPS_MMX;
if (CPUFlags & CPUF_INTEGER_SSE)
CPUFlags |= SWS_CPU_CAPS_MMX2;
if (CPUFlags & CPUF_3DNOW)
CPUFlags |= SWS_CPU_CAPS_3DNOW;
return Flags;
}
int FFBase::FrameFromDTS(int64_t ADTS) {
	for (int i = 0; i < (int)FrameToDTS.size(); i++)
		if (FrameToDTS[i].DTS == ADTS)
			return i;
	return -1;
}

int FFBase::ClosestFrameFromDTS(int64_t ADTS) {
	int Frame = 0;
	int64_t BestDiff = 0xFFFFFFFFFFFFFF;
	for (int i = 0; i < (int)FrameToDTS.size(); i++) {
		int64_t CurrentDiff = FFABS(FrameToDTS[i].DTS - ADTS);
		if (CurrentDiff < BestDiff) {
			BestDiff = CurrentDiff;
			Frame = i;
		}
	}
	return Frame;
}

int FFBase::FindClosestKeyFrame(int AFrame) {
	for (int i = AFrame; i > 0; i--)
		if (FrameToDTS[i].KeyFrame)
			return i;
	return 0;
}
bool FFBase::LoadFrameInfoFromFile(const char *AVideoCacheFile, const char *ASource, int AVideoTrack) {
	char DefaultCacheFilename[1024];
	sprintf(DefaultCacheFilename, "%s.ffv%dcache", ASource, AVideoTrack);
	if (!strcmp(AVideoCacheFile, ""))
		AVideoCacheFile = DefaultCacheFilename;

	FILE *CacheFile = fopen(AVideoCacheFile, "r");
	if (!CacheFile)
		return false;

	if (fscanf(CacheFile, "%d\r\n", &VI.num_frames) <= 0) {
		fclose(CacheFile);
		return false;
	}

	for (int i = 0; i < VI.num_frames; i++) {
		int64_t DTSTemp;
		int KFTemp;
		fscanf(CacheFile, "%lld %d\r\n", &DTSTemp, &KFTemp);
		FrameToDTS.push_back(FrameInfo(DTSTemp, KFTemp != 0));
	}

	fclose(CacheFile);
	return true;
}
bool FFBase::SaveFrameInfoToFile(const char *AVideoCacheFile, const char *ASource, int AVideoTrack) {
	char DefaultCacheFilename[1024];
	sprintf(DefaultCacheFilename, "%s.ffv%dcache", ASource, AVideoTrack);
	if (!strcmp(AVideoCacheFile, ""))
		AVideoCacheFile = DefaultCacheFilename;

	FILE *CacheFile = fopen(AVideoCacheFile, "wb");
	if (!CacheFile)
		return false;

	fprintf(CacheFile, "%d\r\n", VI.num_frames);
	for (int i = 0; i < VI.num_frames; i++)
		fprintf(CacheFile, "%lld %d\r\n", FrameToDTS[i].DTS, (int)(FrameToDTS[i].KeyFrame ? 1 : 0));

	fclose(CacheFile);
	return true;
}

bool FFBase::SaveTimecodesToFile(const char *ATimecodeFile, int64_t ScaleD, int64_t ScaleN) {
	if (!strcmp(ATimecodeFile, ""))
		return true;

	FILE *TimecodeFile = fopen(ATimecodeFile, "wb");
	if (!TimecodeFile)
		return false;

	for (int i = 0; i < VI.num_frames; i++)
		fprintf(TimecodeFile, "%f\r\n", (FrameToDTS[i].DTS * ScaleD) / (double)ScaleN);

	fclose(TimecodeFile);
	return true;
}
// Returns true if a usable audio cache already existed, false if a new
// (empty) cache file was created and still has to be filled by the caller.
bool FFBase::PrepareAudioCache(const char *AAudioCacheFile, const char *ASource, int AAudioTrack, IScriptEnvironment *Env) {
	char DefaultCacheFilename[1024];
	sprintf(DefaultCacheFilename, "%s.ffa%dcache", ASource, AAudioTrack);
	if (!strcmp(AAudioCacheFile, ""))
		AAudioCacheFile = DefaultCacheFilename;

	AudioCache = fopen(AAudioCacheFile, "rb");
	if (!AudioCache) {
		AudioCache = fopen(AAudioCacheFile, "wb+");
		if (!AudioCache)
			Env->ThrowError("FFmpegSource: Failed to open the audio cache file for writing");
		return false;
	}

	_fseeki64(AudioCache, 0, SEEK_END);
	int64_t CacheSize = _ftelli64(AudioCache);
	if (CacheSize > 0) {
		VI.num_audio_samples = VI.AudioSamplesFromBytes(CacheSize);
		return true;
	}

	// The cache file existed but was empty; reopen it for writing
	AudioCache = freopen(AAudioCacheFile, "wb", AudioCache);
	if (!AudioCache)
		Env->ThrowError("FFmpegSource: Failed to open the audio cache file for writing");
	return false;
}
void FFBase::InitPP(int AWidth, int AHeight, const char *APPString, int AQuality, int APixelFormat, IScriptEnvironment *Env) {
	if (!strcmp(APPString, ""))
		return;

	if (AQuality < 0 || AQuality > PP_QUALITY_MAX)
		Env->ThrowError("FFmpegSource: Quality is out of range");

	PPMode = pp_get_mode_by_name_and_quality((char *)APPString, AQuality);
	if (!PPMode)
		Env->ThrowError("FFmpegSource: Invalid postprocessing settings");

	int Flags = GetPPCPUFlags(Env);

	switch (APixelFormat) {
		case PIX_FMT_YUV420P: Flags |= PP_FORMAT_420; break;
		case PIX_FMT_YUV422P: Flags |= PP_FORMAT_422; break;
		case PIX_FMT_YUV411P: Flags |= PP_FORMAT_411; break;
		case PIX_FMT_YUV444P: Flags |= PP_FORMAT_444; break;
		default:
			Env->ThrowError("FFmpegSource: Input format is not supported for postprocessing");
	}

	PPContext = pp_get_context(VI.width, VI.height, Flags);

	if (avpicture_alloc(&PPPicture, APixelFormat, AWidth, AHeight) < 0)
		Env->ThrowError("FFmpegSource: Failed to allocate picture");
}
void FFBase::SetOutputFormat(int ACurrentFormat, IScriptEnvironment *Env) {
	int Loss;
	int BestFormat = avcodec_find_best_pix_fmt((1 << PIX_FMT_YUV420P) | (1 << PIX_FMT_YUYV422) | (1 << PIX_FMT_RGB32) | (1 << PIX_FMT_BGR24), ACurrentFormat, 1 /* Required to prevent pointless RGB32 => RGB24 conversion */, &Loss);

	switch (BestFormat) {
		case PIX_FMT_YUV420P: VI.pixel_type = VideoInfo::CS_I420; break;
		case PIX_FMT_YUYV422: VI.pixel_type = VideoInfo::CS_YUY2; break;
		case PIX_FMT_RGB32: VI.pixel_type = VideoInfo::CS_BGR32; break;
		case PIX_FMT_BGR24: VI.pixel_type = VideoInfo::CS_BGR24; break;
		default:
			Env->ThrowError("FFmpegSource: No suitable output format found");
	}

	if (BestFormat != ACurrentFormat) {
		ConvertToFormat = BestFormat;
		SWS = sws_getContext(VI.width, VI.height, ACurrentFormat, VI.width, VI.height, ConvertToFormat, GetSWSCPUFlags(Env) | SWS_BICUBIC, NULL, NULL, NULL);
	}
}
PVideoFrame FFBase::OutputFrame(AVFrame *AFrame, IScriptEnvironment *Env) {
	AVPicture *SrcPicture = (AVPicture *)AFrame;

	// Run libpostproc on the decoded frame first if postprocessing is enabled
	if (PPContext) {
		pp_postprocess(AFrame->data, AFrame->linesize, PPPicture.data, PPPicture.linesize, VI.width, VI.height, AFrame->qscale_table, AFrame->qstride, PPMode, PPContext, AFrame->pict_type | (AFrame->qscale_type ? PP_PICT_TYPE_QP2 : 0));
		SrcPicture = &PPPicture;
	}

	PVideoFrame Dst = Env->NewVideoFrame(VI);

	if (ConvertToFormat != PIX_FMT_NONE && VI.pixel_type == VideoInfo::CS_I420) {
		uint8_t *DstData[3] = {Dst->GetWritePtr(PLANAR_Y), Dst->GetWritePtr(PLANAR_U), Dst->GetWritePtr(PLANAR_V)};
		int DstStride[3] = {Dst->GetPitch(PLANAR_Y), Dst->GetPitch(PLANAR_U), Dst->GetPitch(PLANAR_V)};
		sws_scale(SWS, SrcPicture->data, SrcPicture->linesize, 0, VI.height, DstData, DstStride);
	} else if (ConvertToFormat != PIX_FMT_NONE) {
		if (VI.IsRGB()) {
			// AviSynth RGB frames are stored bottom-up, so start at the last row and use a negative stride
			uint8_t *DstData[1] = {Dst->GetWritePtr() + Dst->GetPitch() * (Dst->GetHeight() - 1)};
			int DstStride[1] = {-Dst->GetPitch()};
			sws_scale(SWS, SrcPicture->data, SrcPicture->linesize, 0, VI.height, DstData, DstStride);
		} else {
			uint8_t *DstData[1] = {Dst->GetWritePtr()};
			int DstStride[1] = {Dst->GetPitch()};
			sws_scale(SWS, SrcPicture->data, SrcPicture->linesize, 0, VI.height, DstData, DstStride);
		}
	} else if (VI.pixel_type == VideoInfo::CS_I420) {
		// No conversion needed; copy the three planes directly
		Env->BitBlt(Dst->GetWritePtr(PLANAR_Y), Dst->GetPitch(PLANAR_Y), SrcPicture->data[0], SrcPicture->linesize[0], Dst->GetRowSize(PLANAR_Y), Dst->GetHeight(PLANAR_Y));
		Env->BitBlt(Dst->GetWritePtr(PLANAR_U), Dst->GetPitch(PLANAR_U), SrcPicture->data[1], SrcPicture->linesize[1], Dst->GetRowSize(PLANAR_U), Dst->GetHeight(PLANAR_U));
		Env->BitBlt(Dst->GetWritePtr(PLANAR_V), Dst->GetPitch(PLANAR_V), SrcPicture->data[2], SrcPicture->linesize[2], Dst->GetRowSize(PLANAR_V), Dst->GetHeight(PLANAR_V));
	} else {
		if (VI.IsRGB())
			Env->BitBlt(Dst->GetWritePtr() + Dst->GetPitch() * (Dst->GetHeight() - 1), -Dst->GetPitch(), SrcPicture->data[0], SrcPicture->linesize[0], Dst->GetRowSize(), Dst->GetHeight());
		else
			Env->BitBlt(Dst->GetWritePtr(), Dst->GetPitch(), SrcPicture->data[0], SrcPicture->linesize[0], Dst->GetRowSize(), Dst->GetHeight());
	}

	return Dst;
}
void __stdcall FFBase::GetAudio(void* Buf, __int64 Start, __int64 Count, IScriptEnvironment* Env) {
	_fseeki64(AudioCache, VI.BytesFromAudioSamples(Start), SEEK_SET);
	fread(Buf, 1, VI.BytesFromAudioSamples(Count), AudioCache);
}

FFBase::FFBase() {
	memset(&VI, 0, sizeof(VI));
	AudioCache = NULL;
	PPContext = NULL;
	PPMode = NULL;
	SWS = NULL;
	ConvertToFormat = PIX_FMT_NONE;
	memset(&PPPicture, 0, sizeof(PPPicture));
	DecodeFrame = avcodec_alloc_frame();
}

FFBase::~FFBase() {
	if (SWS)
		sws_freeContext(SWS);
	if (PPMode)
		pp_free_mode(PPMode);
	if (PPContext)
		pp_free_context(PPContext);
	if (AudioCache)
		fclose(AudioCache); // close the audio cache file if one was opened (missing in the original)
	avpicture_free(&PPPicture);
	av_free(DecodeFrame);
}

File diff suppressed because it is too large

FFmpegSource/ffmpegsource.h Normal file

@@ -0,0 +1,77 @@
#include <windows.h>
#include <stdio.h>
#include <vector>
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <fcntl.h>
#include <io.h>

#include "MatroskaParser.h"
#include "avisynth.h"

extern "C" {
#include <ffmpeg\avformat.h>
#include <ffmpeg\avcodec.h>
#include <ffmpeg\swscale.h>
#include <postproc\postprocess.h>
}

int GetPPCPUFlags(IScriptEnvironment *Env);
int GetSWSCPUFlags(IScriptEnvironment *Env);
CodecID MatroskaToFFCodecID(TrackInfo *TI);

class FFPP : public GenericVideoFilter {
private:
	pp_context_t *PPContext;
	pp_mode_t *PPMode;
	SwsContext *SWSTo422P;
	SwsContext *SWSFrom422P;
	AVPicture InputPicture;
	AVPicture OutputPicture;
public:
	FFPP(PClip AChild, const char *APPString, int AQuality, IScriptEnvironment *Env);
	~FFPP();
	PVideoFrame __stdcall GetFrame(int n, IScriptEnvironment* Env);
};

class FFBase : public IClip {
private:
	pp_context_t *PPContext;
	pp_mode_t *PPMode;
	SwsContext *SWS;
	int ConvertToFormat;
	AVPicture PPPicture;
protected:
	VideoInfo VI;
	AVFrame *DecodeFrame;
	FILE *AudioCache;

	struct FrameInfo {
		int64_t DTS;
		bool KeyFrame;
		FrameInfo(int64_t ADTS, bool AKeyFrame) : DTS(ADTS), KeyFrame(AKeyFrame) {}
	};

	std::vector<FrameInfo> FrameToDTS;

	int FindClosestKeyFrame(int AFrame);
	int FrameFromDTS(int64_t ADTS);
	int ClosestFrameFromDTS(int64_t ADTS);
	bool LoadFrameInfoFromFile(const char *AVideoCacheFile, const char *ASource, int AVideoTrack);
	bool SaveFrameInfoToFile(const char *AVideoCacheFile, const char *ASource, int AVideoTrack);
	bool SaveTimecodesToFile(const char *ATimecodeFile, int64_t ScaleD, int64_t ScaleN);
	bool PrepareAudioCache(const char *AAudioCacheFile, const char *ASource, int AAudioTrack, IScriptEnvironment *Env);
	void InitPP(int AWidth, int AHeight, const char *APPString, int AQuality, int APixelFormat, IScriptEnvironment *Env);
	void SetOutputFormat(int ACurrentFormat, IScriptEnvironment *Env);
	PVideoFrame OutputFrame(AVFrame *AFrame, IScriptEnvironment *Env);
public:
	FFBase();
	~FFBase();

	bool __stdcall GetParity(int n) { return false; }
	void __stdcall SetCacheHints(int cachehints, int frame_range) { }
	const VideoInfo& __stdcall GetVideoInfo() { return VI; }
	void __stdcall GetAudio(void* Buf, __int64 Start, __int64 Count, IScriptEnvironment* Env);
};


@@ -10,6 +10,15 @@ FFmpegSource Documentation
<h2>Changes</h2>
<ul>
<li>1.4<ul>
<li>Cache file naming scheme changed to avoid collisions between the default names when a file contains multiple tracks of the same type</li>
<li>Use MMX optimizations in swscaler when possible</li>
<li>Now uses normal Windows linebreaks in all files</li>
<li>Removed FFAudioSource</li>
<li>Merged FFVideoSource and FFAudioRefSource into FFmpegSource</li>
<li>Added postprocessing with libpostproc in FFmpegSource and separately in FFPP</li>
</ul></li>
<li>1.3<ul>
<li>Compiled against ffmpeg rev9620</li>
<li>Added FFAudioRefSource</li>
@@ -36,55 +45,6 @@ FFmpegSource Documentation
</ul>
<h2>Usage</h2>
<p>
<b>FFVideoSource(string source, int track = -1, int seekmode = 1, string timecodes, bool cache = true, string cachefile)</b><br />
A video source that is frame accurate for most popular containers
</p>
<p>
<b>FFAudioSource(string source, int track = -1)</b><br />
Very unreliable attempt at sample accurate seeking in audio files
</p>
<p>
<b>FFAudioRefSource(string source, int track = -1, string cachefile)</b><br />
Decodes all audio to a temporary file which ensures that it will always be sample accurate
</p>
<p>
<b>source:</b>
source file
</p>
<p>
<b>track:</b>
track number as seen by the relevant demuxer, starts from 0, -1 means it will pick the first suitable track
</p>
<p>
<b>seekmode:</b>
force how seeking is handled, has no effect on matroska files which always use the equivalent of seekmode=1<br />
<b>0:</b> linear access, the definition of slow but should make some formats "usable"<br />
<b>1:</b> safe normal, bases seeking decisions on the reported keyframe positions<br />
<b>2:</b> unsafe normal, same as 1 but no error will be thrown if the exact destination has to be guessed<br />
<b>3:</b> aggressive, seek in the forward direction even if no closer keyframe is known to exist, only useful for containers where avformat doesn't report keyframes properly and testing
</p>
<p>
<b>timecodes:</b>
file to output timecodes to, if the file exists it will always be overwritten
</p>
<p>
<b>cache:</b>
write indexing information to a file for later use
</p>
<p>
<b>cachefile:</b>
if specified the file to store the index information or raw audio in, if nothing is specified (source).ffcache is used for video and (source).ffracache for audio
</p>
<h2>Compatibility - Video</h2>
<ul>
@@ -98,9 +58,108 @@ Decodes all audio to a temporary file which ensures that it will always be sample accurate
<h2>Compatibility - Audio</h2>
<ul>
<li>Not sample accurate ever?</li>
<li>Sample accurate in all containers</li>
</ul>
<h2>Usage</h2>
<p>
<b>FFmpegSource(string source, int vtrack = -1, int atrack = -2, string timecodes, bool vcache = true, string vcachefile, string acachefile, string pp, int ppquality = 6, int seekmode = 1)</b><br />
</p>
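<p>
A minimal example call, opening the first suitable video and audio track of a file (the file name here is illustrative):
</p>
<pre>
FFmpegSource("movie.mkv", vtrack = -1, atrack = -1)
</pre>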
<p>
<b>FFPP(clip, string pp, int ppquality = 6)</b><br />
Separate postprocessing which also seems to include a few simple deinterlacers
</p>
<p>
<b>source:</b>
Source file.
</p>
<p>
<b>atrack &amp; vtrack:</b>
Track number as seen by the relevant demuxer, starting from 0; -1 picks the first suitable track, and -2 disables the track.
</p>
<p>
<b>timecodes:</b>
File to write timecodes to; if the file exists, it will be overwritten.
</p>
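<p>
The timecode file is a plain text list with one floating-point timestamp per line, one line per frame; a sketch of what the output looks like (the values here are illustrative):
</p>
<pre>
0.000000
41.708333
83.416667
</pre>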
<p>
<b>vcache:</b>
Write video indexing information to a file for later use.
</p>
<p>
<b>vcachefile &amp; acachefile:</b>
Specifies the file to store the index information or raw audio in; if nothing is specified, (source).ffv(tracknumber)cache is used for video and (source).ffa(tracknumber)cache for audio.
</p>
<p>
<b>pp:</b>
See the table below for a full description; an empty string means no processing. Avoid the autoq option, since its effect on the processing is currently unknown.
</p>
<p>
<b>ppquality:</b>
The quality to use for the specified postprocessing. Valid values are 0-6 where 0 usually means that no actual processing is done.
</p>
<p>
<b>seekmode:</b>
Force how seeking is handled; has no effect on Matroska files, which always use the equivalent of seekmode=1<br />
<b>0:</b> linear access; the definition of slow, but should make some formats "usable"<br />
<b>1:</b> safe normal; bases seeking decisions on the reported keyframe positions<br />
<b>2:</b> unsafe normal; same as 1, but no error will be thrown if the exact destination has to be guessed<br />
<b>3:</b> aggressive; seeks in the forward direction even if no closer keyframe is known to exist, only useful for testing and for containers where avformat doesn't report keyframes properly
</p>
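<p>
A fuller call that disables audio, writes timecodes and applies the default postprocessing preset, with every argument named (values are illustrative):
</p>
<pre>
FFmpegSource("movie.avi", vtrack = 0, atrack = -2, timecodes = "movie.times.txt", pp = "de", ppquality = 6, seekmode = 1)
</pre>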
<h2>PP string format</h2>
<pre>
Available postprocessing filters:
Filters Options
short long name short long option Description
* * a autoq CPU power dependent enabler
c chrom chrominance filtering enabled
y nochrom chrominance filtering disabled
n noluma luma filtering disabled
hb hdeblock (2 threshold) horizontal deblocking filter
1. difference factor: default=32, higher -> more deblocking
2. flatness threshold: default=39, lower -> more deblocking
the h & v deblocking filters share these
so you can't set different thresholds for h / v
vb vdeblock (2 threshold) vertical deblocking filter
ha hadeblock (2 threshold) horizontal deblocking filter
va vadeblock (2 threshold) vertical deblocking filter
h1 x1hdeblock experimental h deblock filter 1
v1 x1vdeblock experimental v deblock filter 1
dr dering deringing filter
al autolevels automatic brightness / contrast
f fullyrange stretch luminance to (0..255)
lb linblenddeint linear blend deinterlacer
li linipoldeint linear interpolating deinterlace
ci cubicipoldeint cubic interpolating deinterlacer
md mediandeint median deinterlacer
fd ffmpegdeint ffmpeg deinterlacer
l5 lowpass5 FIR lowpass deinterlacer
de default hb:a,vb:a,dr:a
fa fast h1:a,v1:a,dr:a
ac ha:a:128:7,va:a,dr:a
tn tmpnoise (3 threshold) temporal noise reducer
1. <= 2. <= 3. larger -> stronger filtering
fq forceQuant <quantizer> force quantizer
Usage:
<filterName>[:<option>[:<option>...]][[,|/][-]<filterName>[:<option>...]]...
long form example:
vdeblock:autoq/hdeblock:autoq/linblenddeint default,-vdeblock
short form example:
vb:a/hb:a/lb de,-vb
more examples:
tn:64:128:256
</pre>
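<p>
The same pp strings are accepted by the standalone FFPP filter; for example, to apply the default deblocking preset to the current clip:
</p>
<pre>
FFPP(last, pp = "de", ppquality = 6)
</pre>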
<h2>Compiling</h2>

FFmpegSource/ffpp.cpp Normal file

@@ -0,0 +1,91 @@
#include "ffmpegsource.h"
int GetPPCPUFlags(IScriptEnvironment *Env) {
int Flags = 0;
long CPUFlags = Env->GetCPUFlags();
if (CPUFlags & CPUF_MMX)
CPUFlags |= PP_CPU_CAPS_MMX;
if (CPUFlags & CPUF_INTEGER_SSE)
CPUFlags |= PP_CPU_CAPS_MMX2;
if (CPUFlags & CPUF_3DNOW)
CPUFlags |= PP_CPU_CAPS_3DNOW;
return Flags;
}
FFPP::FFPP(PClip AChild, const char *APPString, int AQuality, IScriptEnvironment *Env) : GenericVideoFilter(AChild) {
	if (!strcmp(APPString, ""))
		Env->ThrowError("FFPP: PP argument is empty");
	if (AQuality < 0 || AQuality > PP_QUALITY_MAX)
		Env->ThrowError("FFPP: Quality is out of range");

	PPContext = NULL;
	PPMode = NULL;
	SWSTo422P = NULL;
	SWSFrom422P = NULL;
	memset(&InputPicture, 0, sizeof(InputPicture));
	memset(&OutputPicture, 0, sizeof(OutputPicture));

	PPMode = pp_get_mode_by_name_and_quality((char *)APPString, AQuality);
	if (!PPMode)
		Env->ThrowError("FFPP: Invalid postprocessing settings");

	int Flags = GetPPCPUFlags(Env);

	if (vi.IsYV12()) {
		Flags |= PP_FORMAT_420;
	} else if (vi.IsYUY2()) {
		// libpostproc wants planar input, so packed YUY2 is converted to
		// YUV422P before processing and back again afterwards
		Flags |= PP_FORMAT_422;
		SWSTo422P = sws_getContext(vi.width, vi.height, PIX_FMT_YUV422, vi.width, vi.height, PIX_FMT_YUV422P, GetSWSCPUFlags(Env) | SWS_BICUBIC, NULL, NULL, NULL);
		SWSFrom422P = sws_getContext(vi.width, vi.height, PIX_FMT_YUV422P, vi.width, vi.height, PIX_FMT_YUV422, GetSWSCPUFlags(Env) | SWS_BICUBIC, NULL, NULL, NULL);
		avpicture_alloc(&InputPicture, PIX_FMT_YUV422P, vi.width, vi.height);
		avpicture_alloc(&OutputPicture, PIX_FMT_YUV422P, vi.width, vi.height);
	} else {
		Env->ThrowError("FFPP: Only YV12 and YUY2 video supported");
	}

	PPContext = pp_get_context(vi.width, vi.height, Flags);
	if (!PPContext)
		Env->ThrowError("FFPP: Failed to create context");
}
FFPP::~FFPP() {
	if (PPMode)
		pp_free_mode(PPMode);
	if (PPContext)
		pp_free_context(PPContext);
	if (SWSTo422P)
		sws_freeContext(SWSTo422P);
	if (SWSFrom422P)
		sws_freeContext(SWSFrom422P);
	avpicture_free(&InputPicture);
	avpicture_free(&OutputPicture);
}
PVideoFrame __stdcall FFPP::GetFrame(int n, IScriptEnvironment* Env) {
	PVideoFrame Src = child->GetFrame(n, Env);
	PVideoFrame Dst = Env->NewVideoFrame(vi);

	if (vi.IsYV12()) {
		// Planar input can be postprocessed directly
		uint8_t *SrcData[3] = {(uint8_t *)Src->GetReadPtr(PLANAR_Y), (uint8_t *)Src->GetReadPtr(PLANAR_U), (uint8_t *)Src->GetReadPtr(PLANAR_V)};
		int SrcStride[3] = {Src->GetPitch(PLANAR_Y), Src->GetPitch(PLANAR_U), Src->GetPitch(PLANAR_V)};
		uint8_t *DstData[3] = {Dst->GetWritePtr(PLANAR_Y), Dst->GetWritePtr(PLANAR_U), Dst->GetWritePtr(PLANAR_V)};
		int DstStride[3] = {Dst->GetPitch(PLANAR_Y), Dst->GetPitch(PLANAR_U), Dst->GetPitch(PLANAR_V)};
		pp_postprocess(SrcData, SrcStride, DstData, DstStride, vi.width, vi.height, NULL, 0, PPMode, PPContext, 0);
	} else if (vi.IsYUY2()) {
		// Packed YUY2: convert to YUV422P, postprocess, then convert back
		uint8_t *SrcData[1] = {(uint8_t *)Src->GetReadPtr()};
		int SrcStride[1] = {Src->GetPitch()};
		sws_scale(SWSTo422P, SrcData, SrcStride, 0, vi.height, InputPicture.data, InputPicture.linesize);

		pp_postprocess(InputPicture.data, InputPicture.linesize, OutputPicture.data, OutputPicture.linesize, vi.width, vi.height, NULL, 0, PPMode, PPContext, 0);

		uint8_t *DstData[1] = {Dst->GetWritePtr()};
		int DstStride[1] = {Dst->GetPitch()};
		sws_scale(SWSFrom422P, OutputPicture.data, OutputPicture.linesize, 0, vi.height, DstData, DstStride);
	}

	return Dst;
}