本人在ffmpeg movie滤镜添加图片水印博客中用命令行以及代码的方式,利用movie滤镜将jpg图片叠加到视频上。
其实,我们也可以将jpg直接转成yuv420,然后叠加到视频上,此时就不需要movie滤镜。
首先,将jpg转换成yuv420,可以用ffmpeg命令行,命令行如下:
ffmpeg -i flower.jpg -s 480x320 -pix_fmt yuv420p flower.yuv
这里定义了规格,480x320,其实原来的flower.jpg的规格是499x333,我为了linesize的对齐方便,将规格改成了480x320,图片如下所示:
合成效果如下:
main函数如下:
int main()
{
CDrawMyMovie cVideoDrawMovie;
const char *pFileA = "E:\\learn\\ffmpeg\\FfmpegFilterTest\\x64\\Release\\in-computer.mp4";
const char *pFileOut = "E:\\learn\\ffmpeg\\FfmpegFilterTest\\x64\\Release\\in-computer_drawmymovie.mp4";
std::string strMovieFile = "E:\\learn\\ffmpeg\\convert\\flower.yuv";
//std::string strMovieFile = "flower.jpg";
cVideoDrawMovie.StartDrawMyMovie(pFileA, pFileOut, 100, 300, strMovieFile, 480, 320);
cVideoDrawMovie.WaitFinish();
return 0;
}
这里面,100,300代表叠加的位置坐标。
y,u,v分量的读取很简单,因为它们在文件中是依次存储的(先Y平面,再U平面,最后V平面),读取方式如下所示:
// Load a raw planar YUV420P image (the watermark) from pFileMovie into the
// three member plane buffers. The file stores the planes back to back:
// Y (w*h bytes), then U (w*h/4), then V (w*h/4).
// Returns 0 on success, -1 on open/short-read failure.
int CDrawMyMovie::ReadMovieFile(const char *pFileMovie, int iWidth, int iHeight)
{
    m_iMovieWidth = iWidth;
    m_iMovieHeight = iHeight;
    int ret = -1;
    FILE *fp = fopen(pFileMovie, "rb");
    do
    {
        if (NULL == fp)
        {
            break;
        }
        const int iLumaSize = iWidth * iHeight;   // full-resolution Y plane
        const int iChromaSize = iLumaSize / 4;    // quarter-resolution U/V planes (4:2:0)
        m_pMovieY = new uint8_t[iLumaSize];
        m_pMovieU = new uint8_t[iChromaSize];
        m_pMovieV = new uint8_t[iChromaSize];
        // BUG FIX: the original ignored the fread return values, so a truncated
        // file would silently leave uninitialized plane data.
        if (fread(m_pMovieY, 1, iLumaSize, fp) != (size_t)iLumaSize)
        {
            break;
        }
        if (fread(m_pMovieU, 1, iChromaSize, fp) != (size_t)iChromaSize)
        {
            break;
        }
        if (fread(m_pMovieV, 1, iChromaSize, fp) != (size_t)iChromaSize)
        {
            break;
        }
        ret = 0;
    } while (0);
    if (fp != NULL)
    {
        fclose(fp);   // BUG FIX: the original leaked the FILE handle
    }
    return ret;
}
下面我们可以根据宽,高,yuv分量值构建水印图片的AVFrame
// Size of one YUV420P frame for the watermark; align=1 makes linesize == width.
int iYuv420MovieFrameSize = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, m_iMovieWidth, m_iMovieHeight, 1);
AVFrame *pFrameVideoMovie = av_frame_alloc();
uint8_t *videoMovie_buffer_yuv420 = (uint8_t *)av_malloc(iYuv420MovieFrameSize);
// Point data[0..2]/linesize[] into the single contiguous buffer just allocated.
av_image_fill_arrays(pFrameVideoMovie->data, pFrameVideoMovie->linesize, videoMovie_buffer_yuv420, AV_PIX_FMT_YUV420P, m_iMovieWidth, m_iMovieHeight, 1);
pFrameVideoMovie->width = m_iMovieWidth;
pFrameVideoMovie->height = m_iMovieHeight;
pFrameVideoMovie->format = AV_PIX_FMT_YUV420P;
// With align=1 each plane is packed (no row padding), so one memcpy per plane
// copies the file data loaded by ReadMovieFile directly into the frame.
memcpy(pFrameVideoMovie->data[0], m_pMovieY, m_iMovieWidth * m_iMovieHeight);
memcpy(pFrameVideoMovie->data[1], m_pMovieU, m_iMovieWidth * m_iMovieHeight / 4);
memcpy(pFrameVideoMovie->data[2], m_pMovieV, m_iMovieWidth * m_iMovieHeight / 4);
然后根据下面的代码进行两张图片的叠加
// Push the main video frame into its buffer source ("in0").
ret = av_buffersrc_add_frame(m_pFilterCtxSrcVideoA, pFrameVideoA);
if (ret < 0)
{
break;
}
// Push the watermark frame into the second buffer source ("in1").
ret = av_buffersrc_add_frame(m_pFilterCtxSrcVideoMovie, pFrameVideoMovie);
if (ret < 0)
{
break;
}
// Pull the composited (overlayed) frame out of the sink.
ret = av_buffersink_get_frame(m_pFilterCtxSink, pFrame_out);
if (ret < 0)
{
//printf("Mixer: failed to call av_buffersink_get_frame_flags\n");
break;
}
代码结构如下:
其中FfmpegMyMovieTest.cpp的内容如下:
#include <iostream>
#include "DrawMyMovie.h"
#include <vector>
#ifdef __cplusplus
extern "C"
{
#endif
#pragma comment(lib, "avcodec.lib")
#pragma comment(lib, "avformat.lib")
#pragma comment(lib, "avutil.lib")
#pragma comment(lib, "avdevice.lib")
#pragma comment(lib, "avfilter.lib")
#pragma comment(lib, "postproc.lib")
#pragma comment(lib, "swresample.lib")
#pragma comment(lib, "swscale.lib")
#ifdef __cplusplus
};
#endif
// Converts an ANSI (active code page) encoded string to UTF-8 via a wide-char
// round trip. NOTE(review): despite its name, the input is ANSI, not Unicode.
// Returns the UTF-8 string, or an empty string if a conversion step fails.
std::string Unicode_to_Utf8(const std::string & str)
{
    // First call with a NULL buffer asks for the required size (incl. terminator).
    int nwLen = ::MultiByteToWideChar(CP_ACP, 0, str.c_str(), -1, NULL, 0);
    if (nwLen <= 0)
    {
        return std::string();   // BUG FIX: original ignored conversion failure
    }
    // BUG FIX: std::vector replaces raw new/delete — no leak if anything throws,
    // and the buffer is correctly sized for wchar_t regardless of its width.
    std::vector<wchar_t> wideBuf(nwLen, L'\0');
    ::MultiByteToWideChar(CP_ACP, 0, str.c_str(), -1, wideBuf.data(), nwLen);
    int nLen = ::WideCharToMultiByte(CP_UTF8, 0, wideBuf.data(), -1, NULL, 0, NULL, NULL);
    if (nLen <= 0)
    {
        return std::string();
    }
    std::vector<char> utf8Buf(nLen, '\0');
    ::WideCharToMultiByte(CP_UTF8, 0, wideBuf.data(), -1, utf8Buf.data(), nLen, NULL, NULL);
    return std::string(utf8Buf.data());
}
int main()
{
CDrawMyMovie cVideoDrawMovie;
const char *pFileA = "E:\\learn\\ffmpeg\\FfmpegFilterTest\\x64\\Release\\in-computer.mp4";
const char *pFileOut = "E:\\learn\\ffmpeg\\FfmpegFilterTest\\x64\\Release\\in-computer_drawmymovie.mp4";
std::string strMovieFile = "E:\\learn\\ffmpeg\\convert\\flower.yuv";
//std::string strMovieFile = "flower.jpg";
cVideoDrawMovie.StartDrawMyMovie(pFileA, pFileOut, 100, 300, strMovieFile, 480, 320);
cVideoDrawMovie.WaitFinish();
return 0;
}
DrawMyMovie.h的内容如下:
#pragma once
#include <Windows.h>
#include <string>
#ifdef __cplusplus
extern "C"
{
#endif
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#include "libswresample/swresample.h"
#include "libavdevice/avdevice.h"
#include "libavutil/audio_fifo.h"
#include "libavutil/avutil.h"
#include "libavutil/fifo.h"
#include "libavutil/frame.h"
#include "libavutil/imgutils.h"
#include "libavfilter/avfilter.h"
#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"
#ifdef __cplusplus
};
#endif
// Overlays a pre-loaded raw YUV420P image ("movie") onto every frame of an
// input video using an FFmpeg overlay filter graph, encoding the result to
// H.264 in a new container. Work is split across two Win32 threads: one
// decodes input frames into a FIFO, the other drains the FIFO, runs the
// filter graph and encodes/muxes the output.
class CDrawMyMovie
{
public:
CDrawMyMovie();
~CDrawMyMovie();
public:
// Opens input/output, loads the watermark YUV file, builds the filter graph
// and starts both worker threads. (x, y) is the overlay position. Returns 0 on success.
int StartDrawMyMovie(const char *pFileA, const char *pFileOut, int x, int y, std::string strMovieFile, int iMovieWidth, int iMovieHeight);
// Blocks until both worker threads have exited and closes their handles.
int WaitFinish();
private:
int OpenFileA(const char *pFile);
int ReadMovieFile(const char *pFileMovie, int iWidth, int iHeight);
int OpenOutPut(const char *pFileOut);
int InitFilter(const char* filter_desc);
private:
// Static thread entry points (lpParam is the CDrawMyMovie instance).
static DWORD WINAPI VideoAReadProc(LPVOID lpParam);
void VideoARead();
static DWORD WINAPI VideoDrawMyMovieProc(LPVOID lpParam);
void VideoDrawMyMovie();
private:
// Input demuxer/decoder state.
AVFormatContext *m_pFormatCtx_FileA = NULL;
AVCodecContext *m_pReadCodecCtx_VideoA = NULL;
AVCodec *m_pReadCodec_VideoA = NULL;
// Output encoder/muxer state.
AVCodecContext *m_pCodecEncodeCtx_Video = NULL;
AVFormatContext *m_pFormatCtx_Out = NULL;
// FIFO of decoded YUV420P frames handed from the read thread to the draw thread.
AVFifoBuffer *m_pVideoAFifo = NULL;
int m_iVideoWidth = 1920;
int m_iVideoHeight = 1080;
int m_iYuv420FrameSize = 0;
// Watermark image dimensions and raw plane buffers (filled by ReadMovieFile).
int m_iMovieWidth = 0;
int m_iMovieHeight = 0;
uint8_t *m_pMovieY = NULL;
uint8_t *m_pMovieU = NULL;
uint8_t *m_pMovieV = NULL;
private:
// Filter graph: two buffer sources (video + watermark) -> overlay -> buffersink.
AVFilterGraph* m_pFilterGraph = NULL;
AVFilterContext* m_pFilterCtxSrcVideoA = NULL;
AVFilterContext* m_pFilterCtxSrcVideoMovie = NULL;
AVFilterContext* m_pFilterCtxSink = NULL;
private:
// Guards reads/writes of m_pVideoAFifo across the two threads.
CRITICAL_SECTION m_csVideoASection;
HANDLE m_hVideoAReadThread = NULL;
HANDLE m_hVideoDrawMyMovieThread = NULL;
};
DrawMyMovie.cpp的内容如下:
#include "DrawMyMovie.h"
//#include "log/log.h"
// Initializes the critical section that guards the shared video FIFO.
CDrawMyMovie::CDrawMyMovie()
{
InitializeCriticalSection(&m_csVideoASection);
}
// Releases everything the object owns.
// BUG FIX: the original destructor only deleted the critical section and
// leaked the watermark plane buffers, the FIFO, both codec contexts, both
// format contexts and the filter graph. Callers are expected to have called
// WaitFinish() first, so no worker thread is touching these members here.
CDrawMyMovie::~CDrawMyMovie()
{
    delete[] m_pMovieY;
    delete[] m_pMovieU;
    delete[] m_pMovieV;
    if (m_pVideoAFifo != NULL)
    {
        av_fifo_free(m_pVideoAFifo);
        m_pVideoAFifo = NULL;
    }
    if (m_pReadCodecCtx_VideoA != NULL)
    {
        avcodec_free_context(&m_pReadCodecCtx_VideoA);
    }
    if (m_pCodecEncodeCtx_Video != NULL)
    {
        avcodec_free_context(&m_pCodecEncodeCtx_Video);
    }
    if (m_pFormatCtx_FileA != NULL)
    {
        avformat_close_input(&m_pFormatCtx_FileA);
    }
    if (m_pFormatCtx_Out != NULL)
    {
        // The output AVIOContext (pb) is closed by VideoDrawMyMovie.
        avformat_free_context(m_pFormatCtx_Out);
        m_pFormatCtx_Out = NULL;
    }
    if (m_pFilterGraph != NULL)
    {
        avfilter_graph_free(&m_pFilterGraph);
    }
    DeleteCriticalSection(&m_csVideoASection);
}
// Opens the input file, loads the watermark YUV, opens the output file,
// builds the overlay filter graph at position (x, y), then starts the
// decode thread and the overlay/encode thread. Returns 0 on success.
int CDrawMyMovie::StartDrawMyMovie(const char *pFileA, const char *pFileOut, int x, int y, std::string strMovieFile, int iMovieWidth, int iMovieHeight)
{
    int ret = -1;
    do
    {
        ret = OpenFileA(pFileA);
        if (ret != 0)
        {
            break;
        }
        ret = ReadMovieFile(strMovieFile.c_str(), iMovieWidth, iMovieHeight);
        if (ret != 0)
        {
            break;
        }
        ret = OpenOutPut(pFileOut);
        if (ret != 0)
        {
            break;
        }
        // Filter description: overlay watermark ("in1") on top of the main
        // video ("in0") at pixel position (x, y).
        char szFilterDesc[512] = { 0 };
        _snprintf(szFilterDesc, sizeof(szFilterDesc),
            "[in0][in1]overlay=%d:%d[out]", x, y);
        ret = InitFilter(szFilterDesc);
        if (ret != 0)
        {
            break;
        }
        m_iYuv420FrameSize = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, m_pReadCodecCtx_VideoA->width, m_pReadCodecCtx_VideoA->height, 1);
        // Buffer up to 30 decoded frames between the two threads.
        m_pVideoAFifo = av_fifo_alloc(30 * m_iYuv420FrameSize);
        if (NULL == m_pVideoAFifo)
        {
            ret = -1;   // BUG FIX: original ignored FIFO allocation failure
            break;
        }
        m_hVideoAReadThread = CreateThread(NULL, 0, VideoAReadProc, this, 0, NULL);
        m_hVideoDrawMyMovieThread = CreateThread(NULL, 0, VideoDrawMyMovieProc, this, 0, NULL);
        if (NULL == m_hVideoAReadThread || NULL == m_hVideoDrawMyMovieThread)
        {
            ret = -1;   // BUG FIX: original ignored thread creation failure
            break;
        }
        ret = 0;
    } while (0);
    return ret;
}
// Blocks until both worker threads have exited, then releases their handles.
// Always returns 0; a no-op if the threads were never started.
int CDrawMyMovie::WaitFinish()
{
    if (NULL == m_hVideoAReadThread)
    {
        return 0;
    }
    WaitForSingleObject(m_hVideoAReadThread, INFINITE);
    CloseHandle(m_hVideoAReadThread);
    // Nulling this handle is also the stop signal checked by the drawing
    // thread (see VideoDrawMyMovie) once the FIFO runs dry.
    m_hVideoAReadThread = NULL;
    WaitForSingleObject(m_hVideoDrawMyMovieThread, INFINITE);
    CloseHandle(m_hVideoDrawMyMovieThread);
    m_hVideoDrawMyMovieThread = NULL;
    return 0;
}
// Opens the input container, assumes stream 0 is the video stream, and sets
// up its decoder. On success the class video dimensions are taken from the
// decoder. Returns 0 on success, negative/-1 on failure.
int CDrawMyMovie::OpenFileA(const char *pFileA)
{
    int ret = -1;
    do
    {
        if ((ret = avformat_open_input(&m_pFormatCtx_FileA, pFileA, 0, 0)) < 0) {
            printf("Could not open input file.");
            break;
        }
        if ((ret = avformat_find_stream_info(m_pFormatCtx_FileA, 0)) < 0) {
            printf("Failed to retrieve input stream information");
            break;
        }
        // BUG FIX: avformat_find_stream_info returns >= 0 on success, so the
        // original function returned "success" from every break below.
        ret = -1;
        // NOTE(review): stream 0 is assumed to be video; av_find_best_stream
        // would be more robust for arbitrary inputs.
        if (m_pFormatCtx_FileA->streams[0]->codecpar->codec_type != AVMEDIA_TYPE_VIDEO)
        {
            break;
        }
        m_pReadCodec_VideoA = (AVCodec *)avcodec_find_decoder(m_pFormatCtx_FileA->streams[0]->codecpar->codec_id);
        m_pReadCodecCtx_VideoA = avcodec_alloc_context3(m_pReadCodec_VideoA);
        if (m_pReadCodecCtx_VideoA == NULL)
        {
            break;
        }
        avcodec_parameters_to_context(m_pReadCodecCtx_VideoA, m_pFormatCtx_FileA->streams[0]->codecpar);
        m_iVideoWidth = m_pReadCodecCtx_VideoA->width;
        m_iVideoHeight = m_pReadCodecCtx_VideoA->height;
        m_pReadCodecCtx_VideoA->framerate = m_pFormatCtx_FileA->streams[0]->r_frame_rate;
        if (avcodec_open2(m_pReadCodecCtx_VideoA, m_pReadCodec_VideoA, NULL) < 0)
        {
            break;
        }
        ret = 0;
    } while (0);
    return ret;
}
// Load a raw planar YUV420P image (the watermark) from pFileMovie into the
// three member plane buffers. The file stores the planes back to back:
// Y (w*h bytes), then U (w*h/4), then V (w*h/4).
// Returns 0 on success, -1 on open/short-read failure.
int CDrawMyMovie::ReadMovieFile(const char *pFileMovie, int iWidth, int iHeight)
{
    m_iMovieWidth = iWidth;
    m_iMovieHeight = iHeight;
    int ret = -1;
    FILE *fp = fopen(pFileMovie, "rb");
    do
    {
        if (NULL == fp)
        {
            break;
        }
        const int iLumaSize = iWidth * iHeight;   // full-resolution Y plane
        const int iChromaSize = iLumaSize / 4;    // quarter-resolution U/V planes (4:2:0)
        m_pMovieY = new uint8_t[iLumaSize];
        m_pMovieU = new uint8_t[iChromaSize];
        m_pMovieV = new uint8_t[iChromaSize];
        // BUG FIX: the original ignored the fread return values, so a truncated
        // file would silently leave uninitialized plane data.
        if (fread(m_pMovieY, 1, iLumaSize, fp) != (size_t)iLumaSize)
        {
            break;
        }
        if (fread(m_pMovieU, 1, iChromaSize, fp) != (size_t)iChromaSize)
        {
            break;
        }
        if (fread(m_pMovieV, 1, iChromaSize, fp) != (size_t)iChromaSize)
        {
            break;
        }
        ret = 0;
    } while (0);
    if (fp != NULL)
    {
        fclose(fp);   // BUG FIX: the original leaked the FILE handle
    }
    return ret;
}
// Creates the output muxer context and H.264 encoder, opens the output file
// and writes the container header. Returns 0 on success; on failure the
// partially-created encoder/muxer objects are released.
int CDrawMyMovie::OpenOutPut(const char *pFileOut)
{
    int iRet = -1;
    AVStream *pVideoStream = NULL;
    do
    {
        avformat_alloc_output_context2(&m_pFormatCtx_Out, NULL, NULL, pFileOut);
        if (NULL == m_pFormatCtx_Out)
        {
            // BUG FIX: the original dereferenced m_pFormatCtx_Out->oformat
            // without checking the allocation succeeded.
            break;
        }
        {
            AVCodec* pCodecEncode_Video = (AVCodec *)avcodec_find_encoder(m_pFormatCtx_Out->oformat->video_codec);
            m_pCodecEncodeCtx_Video = avcodec_alloc_context3(pCodecEncode_Video);
            if (!m_pCodecEncodeCtx_Video)
            {
                break;
            }
            pVideoStream = avformat_new_stream(m_pFormatCtx_Out, pCodecEncode_Video);
            if (!pVideoStream)
            {
                break;
            }
            // Fixed 10 fps output; this must match the time_base used for the
            // buffer sources in InitFilter.
            int frameRate = 10;
            m_pCodecEncodeCtx_Video->flags |= AV_CODEC_FLAG_QSCALE;
            m_pCodecEncodeCtx_Video->bit_rate = 4000000;
            m_pCodecEncodeCtx_Video->rc_min_rate = 4000000;
            m_pCodecEncodeCtx_Video->rc_max_rate = 4000000;
            m_pCodecEncodeCtx_Video->bit_rate_tolerance = 4000000;
            m_pCodecEncodeCtx_Video->time_base.den = frameRate;
            m_pCodecEncodeCtx_Video->time_base.num = 1;
            m_pCodecEncodeCtx_Video->width = m_iVideoWidth;
            m_pCodecEncodeCtx_Video->height = m_iVideoHeight;
            m_pCodecEncodeCtx_Video->gop_size = 12;
            m_pCodecEncodeCtx_Video->max_b_frames = 0;   // no B-frames: keeps pts == dts
            m_pCodecEncodeCtx_Video->thread_count = 4;
            m_pCodecEncodeCtx_Video->pix_fmt = AV_PIX_FMT_YUV420P;
            m_pCodecEncodeCtx_Video->codec_id = AV_CODEC_ID_H264;
            m_pCodecEncodeCtx_Video->codec_type = AVMEDIA_TYPE_VIDEO;
            // libx264 private options: low-latency, fast encoding.
            av_opt_set(m_pCodecEncodeCtx_Video->priv_data, "b-pyramid", "none", 0);
            av_opt_set(m_pCodecEncodeCtx_Video->priv_data, "preset", "superfast", 0);
            av_opt_set(m_pCodecEncodeCtx_Video->priv_data, "tune", "zerolatency", 0);
            if (m_pFormatCtx_Out->oformat->flags & AVFMT_GLOBALHEADER)
                m_pCodecEncodeCtx_Video->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
            if (avcodec_open2(m_pCodecEncodeCtx_Video, pCodecEncode_Video, 0) < 0)
            {
                // Failed to open the encoder.
                break;
            }
        }
        if (!(m_pFormatCtx_Out->oformat->flags & AVFMT_NOFILE))
        {
            if (avio_open(&m_pFormatCtx_Out->pb, pFileOut, AVIO_FLAG_WRITE) < 0)
            {
                break;
            }
        }
        // Copy the opened encoder parameters into the stream before the header.
        avcodec_parameters_from_context(pVideoStream->codecpar, m_pCodecEncodeCtx_Video);
        if (avformat_write_header(m_pFormatCtx_Out, NULL) < 0)
        {
            break;
        }
        iRet = 0;
    } while (0);
    if (iRet != 0)
    {
        // Failure: tear down anything partially created.
        if (m_pCodecEncodeCtx_Video != NULL)
        {
            avcodec_free_context(&m_pCodecEncodeCtx_Video);
            m_pCodecEncodeCtx_Video = NULL;
        }
        if (m_pFormatCtx_Out != NULL)
        {
            avformat_free_context(m_pFormatCtx_Out);
            m_pFormatCtx_Out = NULL;
        }
    }
    return iRet;
}
// Thread entry point: forwards to the instance's VideoARead().
DWORD WINAPI CDrawMyMovie::VideoAReadProc(LPVOID lpParam)
{
    CDrawMyMovie *pThis = static_cast<CDrawMyMovie *>(lpParam);
    if (pThis == NULL)
    {
        return 0;
    }
    pThis->VideoARead();
    return 0;
}
// Decode thread: reads packets from the input file, decodes them to YUV420P
// frames, de-pads each plane (strips linesize padding) and writes the packed
// planes into the shared FIFO for the drawing thread. Exits at EOF or error.
void CDrawMyMovie::VideoARead()
{
AVFrame *pFrame;
pFrame = av_frame_alloc();
// Scratch buffers holding one de-padded frame (Y full size, U/V quarter size).
int y_size = m_pReadCodecCtx_VideoA->width * m_pReadCodecCtx_VideoA->height;
char *pY = new char[y_size];
char *pU = new char[y_size / 4];
char *pV = new char[y_size / 4];
AVPacket packet = { 0 };
int ret = 0;
while (1)
{
// Release the previous packet's payload before reading the next one.
av_packet_unref(&packet);
ret = av_read_frame(m_pFormatCtx_FileA, &packet);
if (ret == AVERROR(EAGAIN))
{
continue;
}
else if (ret == AVERROR_EOF)
{
break;
}
else if (ret < 0)
{
break;
}
ret = avcodec_send_packet(m_pReadCodecCtx_VideoA, &packet);
if (ret >= 0)
{
// NOTE(review): only one receive per packet; decoders that buffer frames
// (and the end-of-stream flush) are not fully drained here.
ret = avcodec_receive_frame(m_pReadCodecCtx_VideoA, pFrame);
if (ret == AVERROR(EAGAIN))
{
continue;
}
else if (ret == AVERROR_EOF)
{
break;
}
else if (ret < 0) {
break;
}
// Wait (poll) until the FIFO has room for one whole frame.
while (1)
{
if (av_fifo_space(m_pVideoAFifo) >= m_iYuv420FrameSize)
{
///Y: copy row by row to drop the linesize padding.
int contY = 0;
for (int i = 0; i < pFrame->height; i++)
{
memcpy(pY + contY, pFrame->data[0] + i * pFrame->linesize[0], pFrame->width);
contY += pFrame->width;
}
///U: chroma planes are half width / half height (4:2:0).
int contU = 0;
for (int i = 0; i < pFrame->height / 2; i++)
{
memcpy(pU + contU, pFrame->data[1] + i * pFrame->linesize[1], pFrame->width / 2);
contU += pFrame->width / 2;
}
///V
int contV = 0;
for (int i = 0; i < pFrame->height / 2; i++)
{
memcpy(pV + contV, pFrame->data[2] + i * pFrame->linesize[2], pFrame->width / 2);
contV += pFrame->width / 2;
}
// Publish the packed planes to the drawing thread under the lock.
EnterCriticalSection(&m_csVideoASection);
av_fifo_generic_write(m_pVideoAFifo, pY, y_size, NULL);
av_fifo_generic_write(m_pVideoAFifo, pU, y_size / 4, NULL);
av_fifo_generic_write(m_pVideoAFifo, pV, y_size / 4, NULL);
LeaveCriticalSection(&m_csVideoASection);
break;
}
else
{
Sleep(100);
}
}
}
// NOTE(review): dead check — ret was already handled above; the loop
// continues regardless of this branch.
if (ret == AVERROR(EAGAIN))
{
continue;
}
}
// NOTE(review): the final packet is not unref'd here; a trailing
// av_packet_unref(&packet) would avoid a small leak at EOF.
av_frame_free(&pFrame);
delete[] pY;
delete[] pU;
delete[] pV;
}
// Thread entry point: forwards to the instance's VideoDrawMyMovie().
DWORD WINAPI CDrawMyMovie::VideoDrawMyMovieProc(LPVOID lpParam)
{
    CDrawMyMovie *pThis = static_cast<CDrawMyMovie *>(lpParam);
    if (pThis == NULL)
    {
        return 0;
    }
    pThis->VideoDrawMyMovie();
    return 0;
}
// Drawing/encoding thread: pulls packed YUV frames from the FIFO, feeds the
// main frame and the (constant) watermark frame into the overlay filter
// graph, encodes the composited frame to H.264 and muxes it. Exits when the
// FIFO is empty and the read thread handle has been closed by WaitFinish().
void CDrawMyMovie::VideoDrawMyMovie()
{
int ret = 0;
DWORD dwBeginTime = ::GetTickCount();
// Frame wrapping the main video data read from the FIFO (align=1 buffer).
AVFrame *pFrameVideoA = av_frame_alloc();
uint8_t *videoA_buffer_yuv420 = (uint8_t *)av_malloc(m_iYuv420FrameSize);
av_image_fill_arrays(pFrameVideoA->data, pFrameVideoA->linesize, videoA_buffer_yuv420, AV_PIX_FMT_YUV420P, m_pReadCodecCtx_VideoA->width, m_pReadCodecCtx_VideoA->height, 1);
pFrameVideoA->width = m_iVideoWidth;
pFrameVideoA->height = m_iVideoHeight;
pFrameVideoA->format = AV_PIX_FMT_YUV420P;
// Watermark frame, filled once from the planes loaded by ReadMovieFile.
// With align=1 the planes are contiguous, so one memcpy per plane suffices.
int iYuv420MovieFrameSize = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, m_iMovieWidth, m_iMovieHeight, 1);
AVFrame *pFrameVideoMovie = av_frame_alloc();
uint8_t *videoMovie_buffer_yuv420 = (uint8_t *)av_malloc(iYuv420MovieFrameSize);
av_image_fill_arrays(pFrameVideoMovie->data, pFrameVideoMovie->linesize, videoMovie_buffer_yuv420, AV_PIX_FMT_YUV420P, m_iMovieWidth, m_iMovieHeight, 1);
pFrameVideoMovie->width = m_iMovieWidth;
pFrameVideoMovie->height = m_iMovieHeight;
pFrameVideoMovie->format = AV_PIX_FMT_YUV420P;
memcpy(pFrameVideoMovie->data[0], m_pMovieY, m_iMovieWidth * m_iMovieHeight);
memcpy(pFrameVideoMovie->data[1], m_pMovieU, m_iMovieWidth * m_iMovieHeight / 4);
memcpy(pFrameVideoMovie->data[2], m_pMovieV, m_iMovieWidth * m_iMovieHeight / 4);
// Destination frame for the filter graph's composited output.
AVFrame* pFrame_out = av_frame_alloc();
uint8_t *out_buffer_yuv420 = (uint8_t *)av_malloc(m_iYuv420FrameSize);
av_image_fill_arrays(pFrame_out->data, pFrame_out->linesize, out_buffer_yuv420, AV_PIX_FMT_YUV420P, m_iVideoWidth, m_iVideoHeight, 1);
AVPacket packet = { 0 };
int iPicCount = 0;
while (1)
{
if (NULL == m_pVideoAFifo)
{
break;
}
int iVideoASize = av_fifo_size(m_pVideoAFifo);
if (iVideoASize >= m_iYuv420FrameSize)
{
// Pull one whole packed frame from the FIFO under the lock.
EnterCriticalSection(&m_csVideoASection);
av_fifo_generic_read(m_pVideoAFifo, videoA_buffer_yuv420, m_iYuv420FrameSize, NULL);
LeaveCriticalSection(&m_csVideoASection);
// Timestamps: frame index rescaled from encoder time base (1/10 s)
// to the output stream time base. Both inputs get the same pts.
pFrameVideoA->pkt_dts = pFrameVideoA->pts = av_rescale_q_rnd(iPicCount, m_pCodecEncodeCtx_Video->time_base, m_pFormatCtx_Out->streams[0]->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
pFrameVideoA->pkt_duration = 0;
pFrameVideoA->pkt_pos = -1;
pFrameVideoMovie->pkt_dts = pFrameVideoMovie->pts = av_rescale_q_rnd(iPicCount, m_pCodecEncodeCtx_Video->time_base, m_pFormatCtx_Out->streams[0]->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
pFrameVideoMovie->pkt_duration = 0;
pFrameVideoMovie->pkt_pos = -1;
// Push both inputs into the graph, pull the overlayed result.
ret = av_buffersrc_add_frame(m_pFilterCtxSrcVideoA, pFrameVideoA);
if (ret < 0)
{
break;
}
ret = av_buffersrc_add_frame(m_pFilterCtxSrcVideoMovie, pFrameVideoMovie);
if (ret < 0)
{
break;
}
ret = av_buffersink_get_frame(m_pFilterCtxSink, pFrame_out);
if (ret < 0)
{
//printf("Mixer: failed to call av_buffersink_get_frame_flags\n");
break;
}
pFrame_out->pkt_dts = pFrame_out->pts = av_rescale_q_rnd(iPicCount, m_pCodecEncodeCtx_Video->time_base, m_pFormatCtx_Out->streams[0]->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
pFrame_out->pkt_duration = 0;
pFrame_out->pkt_pos = -1;
pFrame_out->width = m_iVideoWidth;
pFrame_out->height = m_iVideoHeight;
pFrame_out->format = AV_PIX_FMT_YUV420P;
// NOTE(review): send/receive return codes are ignored here — if receive
// returns EAGAIN a stale packet would be written; the encoder is also
// never flushed, so trailing frames may be dropped.
ret = avcodec_send_frame(m_pCodecEncodeCtx_Video, pFrame_out);
ret = avcodec_receive_packet(m_pCodecEncodeCtx_Video, &packet);
av_write_frame(m_pFormatCtx_Out, &packet);
iPicCount++;
}
else
{
// FIFO empty: if the reader thread is done (handle nulled by
// WaitFinish), stop; otherwise wait for more data.
if (m_hVideoAReadThread == NULL)
{
break;
}
Sleep(1);
}
}
av_write_trailer(m_pFormatCtx_Out);
avio_close(m_pFormatCtx_Out->pb);
// NOTE(review): pFrameVideoMovie, pFrame_out, the three av_malloc'd buffers
// and the last packet are never released here — known leaks in this sample.
av_frame_free(&pFrameVideoA);
}
// Builds the overlay filter graph described by filter_desc: two "buffer"
// sources ("in0" = main video, "in1" = watermark) feeding the overlay,
// terminated by a "buffersink". Returns 0 on success, negative on failure.
int CDrawMyMovie::InitFilter(const char* filter_desc)
{
    int ret = 0;
    char args_videoA[512];
    char args_videoMovie[512];
    const char* pad_name_videoA = "in0";
    const char* pad_name_videoMovie = "in1";
    AVFilter* filter_src_videoA = (AVFilter *)avfilter_get_by_name("buffer");
    AVFilter* filter_src_videoMovie = (AVFilter *)avfilter_get_by_name("buffer");
    AVFilter* filter_sink = (AVFilter *)avfilter_get_by_name("buffersink");
    AVFilterInOut* filter_output_videoA = avfilter_inout_alloc();
    AVFilterInOut* filter_output_videoMovie = avfilter_inout_alloc();
    AVFilterInOut* filter_input = avfilter_inout_alloc();
    m_pFilterGraph = avfilter_graph_alloc();
    // 1/10 time base matches the 10 fps encoder configured in OpenOutPut.
    AVRational timeBase;
    timeBase.num = 1;
    timeBase.den = 10;
    AVRational timeAspect;
    timeAspect.num = 0;
    timeAspect.den = 1;
    _snprintf(args_videoA, sizeof(args_videoA),
        "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
        m_iVideoWidth, m_iVideoHeight, AV_PIX_FMT_YUV420P,
        timeBase.num, timeBase.den,
        timeAspect.num,
        timeAspect.den);
    _snprintf(args_videoMovie, sizeof(args_videoMovie),
        "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
        m_iMovieWidth, m_iMovieHeight, AV_PIX_FMT_YUV420P,
        timeBase.num, timeBase.den,
        timeAspect.num,
        timeAspect.den);
    // BUG FIX: initialize so avfilter_inout_free below never reads garbage
    // when we break out before the array is populated.
    AVFilterInOut* filter_outputs[2] = { NULL, NULL };
    do
    {
        ret = avfilter_graph_create_filter(&m_pFilterCtxSrcVideoA, filter_src_videoA, pad_name_videoA, args_videoA, NULL, m_pFilterGraph);
        if (ret < 0)
        {
            break;
        }
        ret = avfilter_graph_create_filter(&m_pFilterCtxSrcVideoMovie, filter_src_videoMovie, pad_name_videoMovie, args_videoMovie, NULL, m_pFilterGraph);
        if (ret < 0)
        {
            break;
        }
        ret = avfilter_graph_create_filter(&m_pFilterCtxSink, filter_sink, "out", NULL, NULL, m_pFilterGraph);
        if (ret < 0)
        {
            break;
        }
        // Constrain the sink to the encoder's pixel format.
        ret = av_opt_set_bin(m_pFilterCtxSink, "pix_fmts", (uint8_t*)&m_pCodecEncodeCtx_Video->pix_fmt, sizeof(m_pCodecEncodeCtx_Video->pix_fmt), AV_OPT_SEARCH_CHILDREN);
        // Link the graph's open pads: "in0"/"in1" are outputs of our buffer
        // sources; "out" is the input of our sink (naming is from the graph's
        // perspective, as avfilter_graph_parse_ptr expects).
        filter_output_videoA->name = av_strdup(pad_name_videoA);
        filter_output_videoA->filter_ctx = m_pFilterCtxSrcVideoA;
        filter_output_videoA->pad_idx = 0;
        filter_output_videoA->next = filter_output_videoMovie;
        filter_output_videoMovie->name = av_strdup(pad_name_videoMovie);
        filter_output_videoMovie->filter_ctx = m_pFilterCtxSrcVideoMovie;
        filter_output_videoMovie->pad_idx = 0;
        filter_output_videoMovie->next = NULL;
        filter_input->name = av_strdup("out");
        filter_input->filter_ctx = m_pFilterCtxSink;
        filter_input->pad_idx = 0;
        filter_input->next = NULL;
        filter_outputs[0] = filter_output_videoA;   // head of the chained output list
        filter_outputs[1] = filter_output_videoMovie;
        ret = avfilter_graph_parse_ptr(m_pFilterGraph, filter_desc, &filter_input, filter_outputs, NULL);
        if (ret < 0)
        {
            break;
        }
        ret = avfilter_graph_config(m_pFilterGraph, NULL);
        if (ret < 0)
        {
            break;
        }
        ret = 0;
    } while (0);
    avfilter_inout_free(&filter_input);
    // BUG FIX: removed av_free(filter_src_videoA) — avfilter_get_by_name()
    // returns a pointer to a static filter definition owned by libavfilter;
    // freeing it is undefined behavior.
    avfilter_inout_free(filter_outputs);
    char* temp = avfilter_graph_dump(m_pFilterGraph, NULL);
    av_free(temp);   // BUG FIX: the dump string is heap-allocated and was leaked
    return ret;
}