Merging four videos with FFmpeg filters: one on the left, three on the right

Today we use FFmpeg's filter functionality to merge four videos into one, with one video on the left and three stacked on the right, as shown below:
[figure: the merged layout, one full-size video on the left and three small videos stacked on the right]
Readers should first have some familiarity with filter description strings; see my earlier post:
ffmpeg利用滤镜进行视频混合(命令行) (mixing video with FFmpeg filters on the command line)

All four input files are 1920x1080, one minute long, at a frame rate of 10 fps.
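For reference, the same composition can be done in a single ffmpeg command; the sketch below mirrors the filter graph used by the program (an untested sketch, with placeholder input file names):

ffmpeg -i inA.mp4 -i inB.mp4 -i inC.mp4 -i inD.mp4 -filter_complex "[0:v]split[main][tmp];[main]scale=w=2560:h=1080[base];[base][tmp]overlay=0:0[x0];[1:v]scale=w=640:h=360[s1];[x0][s1]overlay=1920:0[x1];[2:v]scale=w=640:h=360[s2];[x1][s2]overlay=1920:360[x2];[3:v]scale=w=640:h=360[s3];[x2][s3]overlay=1920:720[out]" -map "[out]" out-merge.mp4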
The main routine is shown below:

int main()
{
	CVideoMerge cVideoMerge;
	const char *pFileA = "E:\\learn\\ffmpeg\\FfmpegFilterTest\\x64\\Release\\in-vs.mp4";
	const char *pFileB = "E:\\learn\\ffmpeg\\FfmpegFilterTest\\x64\\Release\\in-e.mp4";
	const char *pFileC = "E:\\learn\\ffmpeg\\FfmpegFilterTest\\x64\\Release\\in-computer.mp4";
	const char *pFileD = "E:\\learn\\ffmpeg\\FfmpegFilterTest\\x64\\Release\\in-zhuomian.mp4";


	const char *pFileOut = "E:\\learn\\ffmpeg\\FfmpegFilterTest\\x64\\Release\\out-merge.mp4";
	cVideoMerge.StartMerge(pFileA, pFileB, pFileC, pFileD, pFileOut);
	cVideoMerge.WaitFinish();
	return 0;
}

Part of the code of StartMerge is shown below:

// Allocate a FIFO with room for 30 frames per input
		m_pVideoAFifo = av_fifo_alloc(30 * m_iYuv420FrameSize);
		m_pVideoBFifo = av_fifo_alloc(30 * m_iYuv420FrameSize);
		m_pVideoCFifo = av_fifo_alloc(30 * m_iYuv420FrameSize);
		m_pVideoDFifo = av_fifo_alloc(30 * m_iYuv420FrameSize);

		m_hVideoAReadThread = CreateThread(NULL, 0, VideoAReadProc, this, 0, NULL);
		m_hVideoBReadThread = CreateThread(NULL, 0, VideoBReadProc, this, 0, NULL);
		m_hVideoCReadThread = CreateThread(NULL, 0, VideoCReadProc, this, 0, NULL);
		m_hVideoDReadThread = CreateThread(NULL, 0, VideoDReadProc, this, 0, NULL);

		m_hVideoMergeThread = CreateThread(NULL, 0, VideoMergeProc, this, 0, NULL);

As you can see, the code above creates four queues and five threads. The threads m_hVideoAReadThread through m_hVideoDReadThread read and decode the four local video files and write the decoded frames into the queues m_pVideoAFifo through m_pVideoDFifo respectively.
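A quick sizing check: for these 1920x1080 YUV420P inputs, av_image_get_buffer_size() returns 1920 x 1080 x 3/2 = 3,110,400 bytes per frame, so each 30-frame FIFO reserves roughly 89 MiB. (Note that AVFifoBuffer and av_fifo_alloc() were deprecated in FFmpeg 5.1 in favor of AVFifo and av_fifo_alloc2(); this code targets the older API.)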

The thread m_hVideoMergeThread then takes one frame from each of the four queues, feeds them into the filter graph to be merged, and finally encodes and muxes the result.

The overall structure of the project is as follows:
[figure: project tree with FfmpegMerge4File.cpp, VideoMerge.h and VideoMerge.cpp]

The contents of these three files are given below.
The content of FfmpegMerge4File.cpp is as follows:

#include <iostream>
#include "VideoMerge.h"

#ifdef	__cplusplus
extern "C"
{
#endif

#pragma comment(lib, "avcodec.lib")
#pragma comment(lib, "avformat.lib")
#pragma comment(lib, "avutil.lib")
#pragma comment(lib, "avdevice.lib")
#pragma comment(lib, "avfilter.lib")
#pragma comment(lib, "postproc.lib")
#pragma comment(lib, "swresample.lib")
#pragma comment(lib, "swscale.lib")


#ifdef __cplusplus
};
#endif





int main()
{
	CVideoMerge cVideoMerge;
	const char *pFileA = "E:\\learn\\ffmpeg\\FfmpegFilterTest\\x64\\Release\\in-vs.mp4";
	const char *pFileB = "E:\\learn\\ffmpeg\\FfmpegFilterTest\\x64\\Release\\in-e.mp4";
	const char *pFileC = "E:\\learn\\ffmpeg\\FfmpegFilterTest\\x64\\Release\\in-computer.mp4";
	const char *pFileD = "E:\\learn\\ffmpeg\\FfmpegFilterTest\\x64\\Release\\in-zhuomian.mp4";


	const char *pFileOut = "E:\\learn\\ffmpeg\\FfmpegFilterTest\\x64\\Release\\out-merge.mp4";
	cVideoMerge.StartMerge(pFileA, pFileB, pFileC, pFileD, pFileOut);
	cVideoMerge.WaitFinish();
	return 0;
}


The content of VideoMerge.h is as follows:

#pragma once

#include <Windows.h>

#ifdef	__cplusplus
extern "C"
{
#endif
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#include "libswresample/swresample.h"
#include "libavdevice/avdevice.h"
#include "libavutil/audio_fifo.h"
#include "libavutil/avutil.h"
#include "libavutil/fifo.h"
#include "libavutil/frame.h"
#include "libavutil/imgutils.h"

#include "libavfilter/avfilter.h"
#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"


#ifdef __cplusplus
};
#endif

class CVideoMerge
{
public:
	CVideoMerge();
	~CVideoMerge();
public:
	int StartMerge(const char *pFileA, const char *pFileB, const char *pFileC, const char *pFileD, const char *pFileOut);
	int WaitFinish();
private:
	int OpenFileA(const char *pFileA);
	int OpenFileB(const char *pFileB);
	int OpenFileC(const char *pFileC);
	int OpenFileD(const char *pFileD);
	int OpenOutPut(const char *pFileOut);
	int InitFilter(const char* filter_desc);
private:
	static DWORD WINAPI VideoAReadProc(LPVOID lpParam);
	void VideoARead();

	static DWORD WINAPI VideoBReadProc(LPVOID lpParam);
	void VideoBRead();

	static DWORD WINAPI VideoCReadProc(LPVOID lpParam);
	void VideoCRead();

	static DWORD WINAPI VideoDReadProc(LPVOID lpParam);
	void VideoDRead();

	static DWORD WINAPI VideoMergeProc(LPVOID lpParam);
	void VideoMerge();
private:
	AVFormatContext *m_pFormatCtx_FileA = NULL;
	AVFormatContext *m_pFormatCtx_FileB = NULL;
	AVFormatContext *m_pFormatCtx_FileC = NULL;
	AVFormatContext *m_pFormatCtx_FileD = NULL;

	AVCodecContext *m_pReadCodecCtx_VideoA = NULL;
	AVCodec *m_pReadCodec_VideoA = NULL;

	AVCodecContext *m_pReadCodecCtx_VideoB = NULL;
	AVCodec *m_pReadCodec_VideoB = NULL;

	AVCodecContext *m_pReadCodecCtx_VideoC = NULL;
	AVCodec *m_pReadCodec_VideoC = NULL;

	AVCodecContext *m_pReadCodecCtx_VideoD = NULL;
	AVCodec *m_pReadCodec_VideoD = NULL;

	AVCodecContext	*m_pCodecEncodeCtx_Video = NULL;
	AVFormatContext *m_pFormatCtx_Out = NULL;

	AVFifoBuffer *m_pVideoAFifo = NULL;
	AVFifoBuffer *m_pVideoBFifo = NULL;
	AVFifoBuffer *m_pVideoCFifo = NULL;
	AVFifoBuffer *m_pVideoDFifo = NULL;

	AVFilterGraph* m_pFilterGraph = NULL;
	AVFilterContext* m_pFilterCtxSrcVideoA = NULL;
	AVFilterContext* m_pFilterCtxSrcVideoB = NULL;
	AVFilterContext* m_pFilterCtxSrcVideoC = NULL;
	AVFilterContext* m_pFilterCtxSrcVideoD = NULL;
	AVFilterContext* m_pFilterCtxSink = NULL;

	int m_iMergeWidth = 2560;
	int m_iMergeHeight = 1080;
	int m_iYuv420FrameSize = 0;
private:
	CRITICAL_SECTION m_csVideoASection;
	CRITICAL_SECTION m_csVideoBSection;
	CRITICAL_SECTION m_csVideoCSection;
	CRITICAL_SECTION m_csVideoDSection;
	HANDLE m_hVideoAReadThread = NULL;
	HANDLE m_hVideoBReadThread = NULL;
	HANDLE m_hVideoCReadThread = NULL;
	HANDLE m_hVideoDReadThread = NULL;
	HANDLE m_hVideoMergeThread = NULL;
};





The content of VideoMerge.cpp is as follows:


#include "VideoMerge.h"
#include "log/log.h"





CVideoMerge::CVideoMerge()
{
	InitializeCriticalSection(&m_csVideoASection);
	InitializeCriticalSection(&m_csVideoBSection);
	InitializeCriticalSection(&m_csVideoCSection);
	InitializeCriticalSection(&m_csVideoDSection);
}

CVideoMerge::~CVideoMerge()
{
	DeleteCriticalSection(&m_csVideoASection);
	DeleteCriticalSection(&m_csVideoBSection);
	DeleteCriticalSection(&m_csVideoCSection);
	DeleteCriticalSection(&m_csVideoDSection);
}

int CVideoMerge::StartMerge(const char *pFileA, const char *pFileB, const char *pFileC, const char *pFileD, const char *pFileOut)
{
	int ret = -1;
	do 
	{
		ret = OpenFileA(pFileA);
		if (ret != 0)
		{
			break;
		}

		ret = OpenFileB(pFileB);
		if (ret != 0)
		{
			break;
		}

		ret = OpenFileC(pFileC);
		if (ret != 0)
		{
			break;
		}

		ret = OpenFileD(pFileD);
		if (ret != 0)
		{
			break;
		}

		ret = OpenOutPut(pFileOut);
		if (ret != 0)
		{
			break;
		}

		/// This (commented-out) filter uses the first video as the template: the first video fills the whole frame, and the second video is overlaid on its right half
		//const char* filter_desc = "[in0]pad=1920:1080[x1];[in1]scale=w=960:h=1080[inn1];[x1][inn1]overlay=960:0[out]";


		const char* filter_desc = "[in0]split[main][tmp];[main]scale=w=2560:h=1080[base];[base][tmp]overlay=0:0[x0];[in1]scale=w=640:h=360[scale1];[x0][scale1]overlay=1920[x1];[in2]scale=w=640:h=360[scale2];[x1][scale2]overlay=1920:360[x2];[in3]scale=w=640:h=360[scale3];[x2][scale3]overlay=1920:720[out]";
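		// Reading filter_desc left to right:
		//   [in0]split[main][tmp]            - duplicate video A into two copies
		//   [main]scale=w=2560:h=1080[base]  - stretch one copy into the full 2560x1080 canvas
		//   [base][tmp]overlay=0:0[x0]       - place the unscaled 1920x1080 copy on the left
		//   [in1] scaled to 640x360, overlay=1920:0   - video B, top right
		//   [in2] scaled to 640x360, overlay=1920:360 - video C, middle right
		//   [in3] scaled to 640x360, overlay=1920:720 - video D, bottom right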

		ret = InitFilter(filter_desc);
		if (ret < 0)
		{
			break;
		}

		m_iYuv420FrameSize = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, m_pReadCodecCtx_VideoA->width, m_pReadCodecCtx_VideoA->height, 1);
		// Allocate a FIFO with room for 30 frames per input
		m_pVideoAFifo = av_fifo_alloc(30 * m_iYuv420FrameSize);
		m_pVideoBFifo = av_fifo_alloc(30 * m_iYuv420FrameSize);
		m_pVideoCFifo = av_fifo_alloc(30 * m_iYuv420FrameSize);
		m_pVideoDFifo = av_fifo_alloc(30 * m_iYuv420FrameSize);

		m_hVideoAReadThread = CreateThread(NULL, 0, VideoAReadProc, this, 0, NULL);
		m_hVideoBReadThread = CreateThread(NULL, 0, VideoBReadProc, this, 0, NULL);
		m_hVideoCReadThread = CreateThread(NULL, 0, VideoCReadProc, this, 0, NULL);
		m_hVideoDReadThread = CreateThread(NULL, 0, VideoDReadProc, this, 0, NULL);

		m_hVideoMergeThread = CreateThread(NULL, 0, VideoMergeProc, this, 0, NULL);

	} while (0);

	return ret;
}

int CVideoMerge::WaitFinish()
{
	int ret = 0;
	do 
	{
		if (NULL == m_hVideoAReadThread || NULL == m_hVideoBReadThread ||
			NULL == m_hVideoCReadThread || NULL == m_hVideoDReadThread)
		{
			break;
		}
		WaitForSingleObject(m_hVideoAReadThread, INFINITE);
		WaitForSingleObject(m_hVideoBReadThread, INFINITE);
		WaitForSingleObject(m_hVideoCReadThread, INFINITE);
		WaitForSingleObject(m_hVideoDReadThread, INFINITE);

		CloseHandle(m_hVideoAReadThread);
		m_hVideoAReadThread = NULL;
		CloseHandle(m_hVideoBReadThread);
		m_hVideoBReadThread = NULL;
		CloseHandle(m_hVideoCReadThread);
		m_hVideoCReadThread = NULL;
		CloseHandle(m_hVideoDReadThread);
		m_hVideoDReadThread = NULL;

		WaitForSingleObject(m_hVideoMergeThread, INFINITE);
		CloseHandle(m_hVideoMergeThread);
		m_hVideoMergeThread = NULL;
	} while (0);

	return ret;
}

int CVideoMerge::OpenFileA(const char *pFileA)
{
	int ret = -1;

	do
	{
		if ((ret = avformat_open_input(&m_pFormatCtx_FileA, pFileA, 0, 0)) < 0) {
			printf("Could not open input file.");
			break;
		}
		if ((ret = avformat_find_stream_info(m_pFormatCtx_FileA, 0)) < 0) {
			printf("Failed to retrieve input stream information");
			break;
		}

		if (m_pFormatCtx_FileA->streams[0]->codecpar->codec_type != AVMEDIA_TYPE_VIDEO)
		{
			break;
		}
		m_pReadCodec_VideoA = (AVCodec *)avcodec_find_decoder(m_pFormatCtx_FileA->streams[0]->codecpar->codec_id);

		m_pReadCodecCtx_VideoA = avcodec_alloc_context3(m_pReadCodec_VideoA);

		if (m_pReadCodecCtx_VideoA == NULL)
		{
			break;
		}
		avcodec_parameters_to_context(m_pReadCodecCtx_VideoA, m_pFormatCtx_FileA->streams[0]->codecpar);

		m_pReadCodecCtx_VideoA->framerate = m_pFormatCtx_FileA->streams[0]->r_frame_rate;

		if (avcodec_open2(m_pReadCodecCtx_VideoA, m_pReadCodec_VideoA, NULL) < 0)
		{
			break;
		}

		ret = 0;
	} while (0);


	return ret;
}

int CVideoMerge::OpenFileB(const char *pFileB)
{
	int ret = -1;

	do 
	{
		if ((ret = avformat_open_input(&m_pFormatCtx_FileB, pFileB, 0, 0)) < 0) {
			printf("Could not open input file.");
			break;
		}
		if ((ret = avformat_find_stream_info(m_pFormatCtx_FileB, 0)) < 0) {
			printf("Failed to retrieve input stream information");
			break;
		}

		if (m_pFormatCtx_FileB->streams[0]->codecpar->codec_type != AVMEDIA_TYPE_VIDEO)
		{
			break;
		}
		m_pReadCodec_VideoB = (AVCodec *)avcodec_find_decoder(m_pFormatCtx_FileB->streams[0]->codecpar->codec_id);

		m_pReadCodecCtx_VideoB = avcodec_alloc_context3(m_pReadCodec_VideoB);

		if (m_pReadCodecCtx_VideoB == NULL)
		{
			break;
		}
		avcodec_parameters_to_context(m_pReadCodecCtx_VideoB, m_pFormatCtx_FileB->streams[0]->codecpar);

		m_pReadCodecCtx_VideoB->framerate = m_pFormatCtx_FileB->streams[0]->r_frame_rate;

		if (avcodec_open2(m_pReadCodecCtx_VideoB, m_pReadCodec_VideoB, NULL) < 0)
		{
			break;
		}

		ret = 0;
	} while (0);
	

	return ret;
}

int CVideoMerge::OpenFileC(const char *pFileC)
{
	int ret = -1;

	do
	{
		if ((ret = avformat_open_input(&m_pFormatCtx_FileC, pFileC, 0, 0)) < 0) {
			printf("Could not open input file.");
			break;
		}
		if ((ret = avformat_find_stream_info(m_pFormatCtx_FileC, 0)) < 0) {
			printf("Failed to retrieve input stream information");
			break;
		}

		if (m_pFormatCtx_FileC->streams[0]->codecpar->codec_type != AVMEDIA_TYPE_VIDEO)
		{
			break;
		}
		m_pReadCodec_VideoC = (AVCodec *)avcodec_find_decoder(m_pFormatCtx_FileC->streams[0]->codecpar->codec_id);

		m_pReadCodecCtx_VideoC = avcodec_alloc_context3(m_pReadCodec_VideoC);

		if (m_pReadCodecCtx_VideoC == NULL)
		{
			break;
		}
		avcodec_parameters_to_context(m_pReadCodecCtx_VideoC, m_pFormatCtx_FileC->streams[0]->codecpar);

		m_pReadCodecCtx_VideoC->framerate = m_pFormatCtx_FileC->streams[0]->r_frame_rate;

		if (avcodec_open2(m_pReadCodecCtx_VideoC, m_pReadCodec_VideoC, NULL) < 0)
		{
			break;
		}

		ret = 0;
	} while (0);


	return ret;
}

int CVideoMerge::OpenFileD(const char *pFileD)
{
	int ret = -1;

	do
	{
		if ((ret = avformat_open_input(&m_pFormatCtx_FileD, pFileD, 0, 0)) < 0) {
			printf("Could not open input file.");
			break;
		}
		if ((ret = avformat_find_stream_info(m_pFormatCtx_FileD, 0)) < 0) {
			printf("Failed to retrieve input stream information");
			break;
		}

		if (m_pFormatCtx_FileD->streams[0]->codecpar->codec_type != AVMEDIA_TYPE_VIDEO)
		{
			break;
		}
		m_pReadCodec_VideoD = (AVCodec *)avcodec_find_decoder(m_pFormatCtx_FileD->streams[0]->codecpar->codec_id);

		m_pReadCodecCtx_VideoD = avcodec_alloc_context3(m_pReadCodec_VideoD);

		if (m_pReadCodecCtx_VideoD == NULL)
		{
			break;
		}
		avcodec_parameters_to_context(m_pReadCodecCtx_VideoD, m_pFormatCtx_FileD->streams[0]->codecpar);

		m_pReadCodecCtx_VideoD->framerate = m_pFormatCtx_FileD->streams[0]->r_frame_rate;

		if (avcodec_open2(m_pReadCodecCtx_VideoD, m_pReadCodec_VideoD, NULL) < 0)
		{
			break;
		}

		ret = 0;
	} while (0);


	return ret;
}
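
// A possible refactor (sketch, not used by the class above): OpenFileA() through
// OpenFileD() differ only in which members they fill in, so one parameterized
// helper could replace all four.
static int OpenInput(const char *pFile, AVFormatContext **ppFmtCtx, AVCodecContext **ppCodecCtx)
{
	int ret = -1;
	do
	{
		if ((ret = avformat_open_input(ppFmtCtx, pFile, 0, 0)) < 0)
			break;
		if ((ret = avformat_find_stream_info(*ppFmtCtx, 0)) < 0)
			break;
		ret = -1;
		if ((*ppFmtCtx)->streams[0]->codecpar->codec_type != AVMEDIA_TYPE_VIDEO)
			break;
		const AVCodec *pCodec = avcodec_find_decoder((*ppFmtCtx)->streams[0]->codecpar->codec_id);
		*ppCodecCtx = avcodec_alloc_context3(pCodec);
		if (*ppCodecCtx == NULL)
			break;
		avcodec_parameters_to_context(*ppCodecCtx, (*ppFmtCtx)->streams[0]->codecpar);
		(*ppCodecCtx)->framerate = (*ppFmtCtx)->streams[0]->r_frame_rate;
		if (avcodec_open2(*ppCodecCtx, pCodec, NULL) < 0)
			break;
		ret = 0;
	} while (0);
	return ret;
}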


int CVideoMerge::OpenOutPut(const char *pFileOut)
{
	int iRet = -1;

	AVStream *pAudioStream = NULL;
	AVStream *pVideoStream = NULL;

	do
	{
		avformat_alloc_output_context2(&m_pFormatCtx_Out, NULL, NULL, pFileOut);

		{
			AVCodec* pCodecEncode_Video = (AVCodec *)avcodec_find_encoder(m_pFormatCtx_Out->oformat->video_codec);

			m_pCodecEncodeCtx_Video = avcodec_alloc_context3(pCodecEncode_Video);
			if (!m_pCodecEncodeCtx_Video)
			{
				break;
			}

			pVideoStream = avformat_new_stream(m_pFormatCtx_Out, pCodecEncode_Video);
			if (!pVideoStream)
			{
				break;
			}

			int frameRate = 10;
			m_pCodecEncodeCtx_Video->flags |= AV_CODEC_FLAG_QSCALE;
			m_pCodecEncodeCtx_Video->bit_rate = 4000000;
			m_pCodecEncodeCtx_Video->rc_min_rate = 4000000;
			m_pCodecEncodeCtx_Video->rc_max_rate = 4000000;
			m_pCodecEncodeCtx_Video->bit_rate_tolerance = 4000000;
			m_pCodecEncodeCtx_Video->time_base.den = frameRate;
			m_pCodecEncodeCtx_Video->time_base.num = 1;

			m_pCodecEncodeCtx_Video->width = m_iMergeWidth;
			m_pCodecEncodeCtx_Video->height = m_iMergeHeight;
			//pH264Encoder->pCodecCtx->frame_number = 1;
			m_pCodecEncodeCtx_Video->gop_size = 12;
			m_pCodecEncodeCtx_Video->max_b_frames = 0;
			m_pCodecEncodeCtx_Video->thread_count = 4;
			m_pCodecEncodeCtx_Video->pix_fmt = AV_PIX_FMT_YUV420P;
			m_pCodecEncodeCtx_Video->codec_id = AV_CODEC_ID_H264;
			m_pCodecEncodeCtx_Video->codec_type = AVMEDIA_TYPE_VIDEO;

			av_opt_set(m_pCodecEncodeCtx_Video->priv_data, "b-pyramid", "none", 0);
			av_opt_set(m_pCodecEncodeCtx_Video->priv_data, "preset", "superfast", 0);
			av_opt_set(m_pCodecEncodeCtx_Video->priv_data, "tune", "zerolatency", 0);

			if (m_pFormatCtx_Out->oformat->flags & AVFMT_GLOBALHEADER)
				m_pCodecEncodeCtx_Video->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

			if (avcodec_open2(m_pCodecEncodeCtx_Video, pCodecEncode_Video, 0) < 0)
			{
				// Failed to open the encoder; bail out
				break;
			}
		}

		if (!(m_pFormatCtx_Out->oformat->flags & AVFMT_NOFILE))
		{
			if (avio_open(&m_pFormatCtx_Out->pb, pFileOut, AVIO_FLAG_WRITE) < 0)
			{
				break;
			}
		}

		avcodec_parameters_from_context(pVideoStream->codecpar, m_pCodecEncodeCtx_Video);

		if (avformat_write_header(m_pFormatCtx_Out, NULL) < 0)
		{
			break;
		}

		iRet = 0;
	} while (0);


	if (iRet != 0)
	{
		if (m_pCodecEncodeCtx_Video != NULL)
		{
			avcodec_free_context(&m_pCodecEncodeCtx_Video);
			m_pCodecEncodeCtx_Video = NULL;
		}

		if (m_pFormatCtx_Out != NULL)
		{
			avformat_free_context(m_pFormatCtx_Out);
			m_pFormatCtx_Out = NULL;
		}
	}

	return iRet;
}


int CVideoMerge::InitFilter(const char* filter_desc)
{
	int ret = 0;

	char args_videoA[512];
	const char* pad_name_videoA = "in0";
	char args_videoB[512];
	const char* pad_name_videoB = "in1";
	char args_videoC[512];
	const char* pad_name_videoC = "in2";
	char args_videoD[512];
	const char* pad_name_videoD = "in3";

	AVFilter* filter_src_videoA = (AVFilter *)avfilter_get_by_name("buffer");
	AVFilter* filter_src_videoB = (AVFilter *)avfilter_get_by_name("buffer");
	AVFilter* filter_src_videoC = (AVFilter *)avfilter_get_by_name("buffer");
	AVFilter* filter_src_videoD = (AVFilter *)avfilter_get_by_name("buffer");
	AVFilter* filter_sink = (AVFilter *)avfilter_get_by_name("buffersink");

	AVFilterInOut* filter_output_videoA = avfilter_inout_alloc();
	AVFilterInOut* filter_output_videoB = avfilter_inout_alloc();
	AVFilterInOut* filter_output_videoC = avfilter_inout_alloc();
	AVFilterInOut* filter_output_videoD = avfilter_inout_alloc();

	AVFilterInOut* filter_input = avfilter_inout_alloc();
	m_pFilterGraph = avfilter_graph_alloc();

	AVRational timeBase;
	timeBase.num = 1;
	timeBase.den = 10;


	AVRational timeAspect;
	timeAspect.num = 0;
	timeAspect.den = 1;

	_snprintf(args_videoA, sizeof(args_videoA),
		"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
		1920, 1080, AV_PIX_FMT_YUV420P,
		timeBase.num, timeBase.den,
		timeAspect.num,
		timeAspect.den);


	_snprintf(args_videoB, sizeof(args_videoB),
		"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
		1920, 1080, AV_PIX_FMT_YUV420P,
		timeBase.num, timeBase.den,
		timeAspect.num,
		timeAspect.den);

	_snprintf(args_videoC, sizeof(args_videoC),
		"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
		1920, 1080, AV_PIX_FMT_YUV420P,
		timeBase.num, timeBase.den,
		timeAspect.num,
		timeAspect.den);

	_snprintf(args_videoD, sizeof(args_videoD),
		"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
		1920, 1080, AV_PIX_FMT_YUV420P,
		timeBase.num, timeBase.den,
		timeAspect.num,
		timeAspect.den);


	AVFilterInOut* filter_outputs[4];
	do
	{
		ret = avfilter_graph_create_filter(&m_pFilterCtxSrcVideoA, filter_src_videoA, pad_name_videoA, args_videoA, NULL, m_pFilterGraph);
		if (ret < 0)
		{
			break;
		}
		ret = avfilter_graph_create_filter(&m_pFilterCtxSrcVideoB, filter_src_videoB, pad_name_videoB, args_videoB, NULL, m_pFilterGraph);
		if (ret < 0)
		{
			break;
		}

		ret = avfilter_graph_create_filter(&m_pFilterCtxSrcVideoC, filter_src_videoC, pad_name_videoC, args_videoC, NULL, m_pFilterGraph);
		if (ret < 0)
		{
			break;
		}
		ret = avfilter_graph_create_filter(&m_pFilterCtxSrcVideoD, filter_src_videoD, pad_name_videoD, args_videoD, NULL, m_pFilterGraph);
		if (ret < 0)
		{
			break;
		}

		ret = avfilter_graph_create_filter(&m_pFilterCtxSink, filter_sink, "out", NULL, NULL, m_pFilterGraph);
		if (ret < 0)
		{
			break;
		}

		ret = av_opt_set_bin(m_pFilterCtxSink, "pix_fmts", (uint8_t*)&m_pCodecEncodeCtx_Video->pix_fmt, sizeof(m_pCodecEncodeCtx_Video->pix_fmt), AV_OPT_SEARCH_CHILDREN);

		/*filter_output_videoPad->name = av_strdup(pad_name_videoPad);
		filter_output_videoPad->filter_ctx = m_pFilterCtxSrcVideoPad;
		filter_output_videoPad->pad_idx = 0;
		filter_output_videoPad->next = filter_output_videoA;*/

		filter_output_videoA->name = av_strdup(pad_name_videoA);
		filter_output_videoA->filter_ctx = m_pFilterCtxSrcVideoA;
		filter_output_videoA->pad_idx = 0;
		filter_output_videoA->next = filter_output_videoB;

		filter_output_videoB->name = av_strdup(pad_name_videoB);
		filter_output_videoB->filter_ctx = m_pFilterCtxSrcVideoB;
		filter_output_videoB->pad_idx = 0;
		filter_output_videoB->next = filter_output_videoC;

		filter_output_videoC->name = av_strdup(pad_name_videoC);
		filter_output_videoC->filter_ctx = m_pFilterCtxSrcVideoC;
		filter_output_videoC->pad_idx = 0;
		filter_output_videoC->next = filter_output_videoD;

		filter_output_videoD->name = av_strdup(pad_name_videoD);
		filter_output_videoD->filter_ctx = m_pFilterCtxSrcVideoD;
		filter_output_videoD->pad_idx = 0;
		filter_output_videoD->next = NULL;

		filter_input->name = av_strdup("out");
		filter_input->filter_ctx = m_pFilterCtxSink;
		filter_input->pad_idx = 0;
		filter_input->next = NULL;

		//filter_outputs[0] = filter_output_videoPad;
		filter_outputs[0] = filter_output_videoA;
		filter_outputs[1] = filter_output_videoB;
		filter_outputs[2] = filter_output_videoC;
		filter_outputs[3] = filter_output_videoD;

		ret = avfilter_graph_parse_ptr(m_pFilterGraph, filter_desc, &filter_input, filter_outputs, NULL);
		if (ret < 0)
		{ 
			break;
		}

		ret = avfilter_graph_config(m_pFilterGraph, NULL);
		if (ret < 0)
		{
			break;
		}

		ret = 0;

	} while (0);


	avfilter_inout_free(&filter_input);
	avfilter_inout_free(filter_outputs);
	// Note: the AVFilter pointers returned by avfilter_get_by_name() are static
	// objects owned by libavfilter and must not be passed to av_free().

	// Dump the final graph; handy for debugging the filter description string.
	char* temp = avfilter_graph_dump(m_pFilterGraph, NULL);
	if (temp != NULL)
	{
		printf("%s\n", temp);
		av_free(temp);
	}

	return ret;
}


DWORD WINAPI CVideoMerge::VideoAReadProc(LPVOID lpParam)
{
	CVideoMerge *pVideoMerge = (CVideoMerge *)lpParam;
	if (pVideoMerge != NULL)
	{
		pVideoMerge->VideoARead();
	}
	return 0;
}

void CVideoMerge::VideoARead()
{
	AVFrame *pFrame;
	pFrame = av_frame_alloc();

	int y_size = m_pReadCodecCtx_VideoA->width * m_pReadCodecCtx_VideoA->height;

	AVPacket packet = { 0 };
	int ret = 0;
	while (1)
	{
		av_packet_unref(&packet);

		ret = av_read_frame(m_pFormatCtx_FileA, &packet);
		if (ret == AVERROR(EAGAIN))
		{
			continue;
		}
		else if (ret == AVERROR_EOF)
		{
			break;
		}
		else if (ret < 0) 
		{
			break;
		}

		ret = avcodec_send_packet(m_pReadCodecCtx_VideoA, &packet);

		if (ret >= 0)
		{
			ret = avcodec_receive_frame(m_pReadCodecCtx_VideoA, pFrame);
			if (ret == AVERROR(EAGAIN))
			{
				continue;
			}
			else if (ret == AVERROR_EOF)
			{
				break;
			}
			else if (ret < 0) {
				break;
			}
			while (1)
			{
				if (av_fifo_space(m_pVideoAFifo) >= m_iYuv420FrameSize)
				{
					EnterCriticalSection(&m_csVideoASection);
					av_fifo_generic_write(m_pVideoAFifo, pFrame->data[0], y_size, NULL);
					av_fifo_generic_write(m_pVideoAFifo, pFrame->data[1], y_size / 4, NULL);
					av_fifo_generic_write(m_pVideoAFifo, pFrame->data[2], y_size / 4, NULL);
					LeaveCriticalSection(&m_csVideoASection);

					break;
				}
				else
				{
					Sleep(100);
				}
			}

		}


		if (ret == AVERROR(EAGAIN))
		{
			continue;
		}
	}

	av_frame_free(&pFrame);
}

DWORD WINAPI CVideoMerge::VideoBReadProc(LPVOID lpParam)
{
	CVideoMerge *pVideoMerge = (CVideoMerge *)lpParam;
	if (pVideoMerge != NULL)
	{
		pVideoMerge->VideoBRead();
	}
	return 0;
}

void CVideoMerge::VideoBRead()
{
	AVFrame *pFrame;
	pFrame = av_frame_alloc();

	int y_size = m_pReadCodecCtx_VideoB->width * m_pReadCodecCtx_VideoB->height;

	AVPacket packet = { 0 };
	int ret = 0;

	while (1)
	{
		av_packet_unref(&packet);

		ret = av_read_frame(m_pFormatCtx_FileB, &packet);
		if (ret == AVERROR(EAGAIN))
		{
			continue;
		}
		else if (ret == AVERROR_EOF)
		{
			break;
		}
		else if (ret < 0)
		{
			break;
		}

		ret = avcodec_send_packet(m_pReadCodecCtx_VideoB, &packet);

		if (ret >= 0)
		{
			ret = avcodec_receive_frame(m_pReadCodecCtx_VideoB, pFrame);
			if (ret == AVERROR(EAGAIN))
			{
				continue;
			}
			else if (ret == AVERROR_EOF)
			{
				break;
			}
			else if (ret < 0) {
				break;
			}

			while (1)
			{
				if (av_fifo_space(m_pVideoBFifo) >= m_iYuv420FrameSize)
				{
					EnterCriticalSection(&m_csVideoBSection);
					av_fifo_generic_write(m_pVideoBFifo, pFrame->data[0], y_size, NULL);
					av_fifo_generic_write(m_pVideoBFifo, pFrame->data[1], y_size / 4, NULL);
					av_fifo_generic_write(m_pVideoBFifo, pFrame->data[2], y_size / 4, NULL);
					LeaveCriticalSection(&m_csVideoBSection);

					break;
				}
				else
				{
					Sleep(100);
				}
			}
		}


		if (ret == AVERROR(EAGAIN))
		{
			continue;
		}
	}

	av_frame_free(&pFrame);
}

DWORD WINAPI CVideoMerge::VideoCReadProc(LPVOID lpParam)
{
	CVideoMerge *pVideoMerge = (CVideoMerge *)lpParam;
	if (pVideoMerge != NULL)
	{
		pVideoMerge->VideoCRead();
	}
	return 0;
}

void CVideoMerge::VideoCRead()
{
	AVFrame *pFrame;
	pFrame = av_frame_alloc();

	int y_size = m_pReadCodecCtx_VideoC->width * m_pReadCodecCtx_VideoC->height;

	AVPacket packet = { 0 };
	int ret = 0;

	while (1)
	{
		av_packet_unref(&packet);

		ret = av_read_frame(m_pFormatCtx_FileC, &packet);
		if (ret == AVERROR(EAGAIN))
		{
			continue;
		}
		else if (ret == AVERROR_EOF)
		{
			break;
		}
		else if (ret < 0)
		{
			break;
		}

		ret = avcodec_send_packet(m_pReadCodecCtx_VideoC, &packet);

		if (ret >= 0)
		{
			ret = avcodec_receive_frame(m_pReadCodecCtx_VideoC, pFrame);
			if (ret == AVERROR(EAGAIN))
			{
				continue;
			}
			else if (ret == AVERROR_EOF)
			{
				break;
			}
			else if (ret < 0) {
				break;
			}

			while (1)
			{
				if (av_fifo_space(m_pVideoCFifo) >= m_iYuv420FrameSize)
				{
					EnterCriticalSection(&m_csVideoCSection);
					av_fifo_generic_write(m_pVideoCFifo, pFrame->data[0], y_size, NULL);
					av_fifo_generic_write(m_pVideoCFifo, pFrame->data[1], y_size / 4, NULL);
					av_fifo_generic_write(m_pVideoCFifo, pFrame->data[2], y_size / 4, NULL);
					LeaveCriticalSection(&m_csVideoCSection);

					break;
				}
				else
				{
					Sleep(100);
				}
			}
		}


		if (ret == AVERROR(EAGAIN))
		{
			continue;
		}
	}

	av_frame_free(&pFrame);
}

DWORD WINAPI CVideoMerge::VideoDReadProc(LPVOID lpParam)
{
	CVideoMerge *pVideoMerge = (CVideoMerge *)lpParam;
	if (pVideoMerge != NULL)
	{
		pVideoMerge->VideoDRead();
	}
	return 0;
}

void CVideoMerge::VideoDRead()
{
	AVFrame *pFrame;
	pFrame = av_frame_alloc();

	int y_size = m_pReadCodecCtx_VideoD->width * m_pReadCodecCtx_VideoD->height;

	AVPacket packet = { 0 };
	int ret = 0;

	while (1)
	{
		av_packet_unref(&packet);

		ret = av_read_frame(m_pFormatCtx_FileD, &packet);
		if (ret == AVERROR(EAGAIN))
		{
			continue;
		}
		else if (ret == AVERROR_EOF)
		{
			break;
		}
		else if (ret < 0)
		{
			break;
		}

		ret = avcodec_send_packet(m_pReadCodecCtx_VideoD, &packet);

		if (ret >= 0)
		{
			ret = avcodec_receive_frame(m_pReadCodecCtx_VideoD, pFrame);
			if (ret == AVERROR(EAGAIN))
			{
				continue;
			}
			else if (ret == AVERROR_EOF)
			{
				break;
			}
			else if (ret < 0) {
				break;
			}

			while (1)
			{
				if (av_fifo_space(m_pVideoDFifo) >= m_iYuv420FrameSize)
				{
					EnterCriticalSection(&m_csVideoDSection);
					av_fifo_generic_write(m_pVideoDFifo, pFrame->data[0], y_size, NULL);
					av_fifo_generic_write(m_pVideoDFifo, pFrame->data[1], y_size / 4, NULL);
					av_fifo_generic_write(m_pVideoDFifo, pFrame->data[2], y_size / 4, NULL);
					LeaveCriticalSection(&m_csVideoDSection);

					break;
				}
				else
				{
					Sleep(100);
				}
			}
		}


		if (ret == AVERROR(EAGAIN))
		{
			continue;
		}
	}

	av_frame_free(&pFrame);
}
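
// A possible consolidation (sketch, not declared in VideoMerge.h): the four
// Video*Read() bodies are identical except for which demuxer, decoder, FIFO and
// lock they use, so a single parameterized reader could serve all four threads.
void CVideoMerge::VideoRead(AVFormatContext *pFmtCtx, AVCodecContext *pDecCtx, AVFifoBuffer *pFifo, CRITICAL_SECTION *pSection)
{
	AVFrame *pFrame = av_frame_alloc();
	int y_size = pDecCtx->width * pDecCtx->height;
	AVPacket packet = { 0 };
	int ret = 0;
	while (1)
	{
		av_packet_unref(&packet);
		ret = av_read_frame(pFmtCtx, &packet);
		if (ret < 0)
		{
			break;	// EOF or read error: stop this reader
		}
		if (avcodec_send_packet(pDecCtx, &packet) < 0)
		{
			break;
		}
		ret = avcodec_receive_frame(pDecCtx, pFrame);
		if (ret == AVERROR(EAGAIN))
		{
			continue;	// decoder needs more input before it can emit a frame
		}
		if (ret < 0)
		{
			break;
		}
		// Wait until the FIFO has room for one full frame, then copy the three
		// YUV420P planes, exactly as the per-file loops above do.
		while (av_fifo_space(pFifo) < m_iYuv420FrameSize)
		{
			Sleep(100);
		}
		EnterCriticalSection(pSection);
		av_fifo_generic_write(pFifo, pFrame->data[0], y_size, NULL);
		av_fifo_generic_write(pFifo, pFrame->data[1], y_size / 4, NULL);
		av_fifo_generic_write(pFifo, pFrame->data[2], y_size / 4, NULL);
		LeaveCriticalSection(pSection);
	}
	av_frame_free(&pFrame);
}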



DWORD WINAPI CVideoMerge::VideoMergeProc(LPVOID lpParam)
{
	CVideoMerge *pVideoMerge = (CVideoMerge *)lpParam;
	if (pVideoMerge != NULL)
	{
		pVideoMerge->VideoMerge();
	}
	return 0;
}


void CVideoMerge::VideoMerge()
{
	int ret = 0;

	AVFrame *pFrameVideoA = av_frame_alloc();
	uint8_t *videoA_buffer_yuv420 = (uint8_t *)av_malloc(m_iYuv420FrameSize);
	av_image_fill_arrays(pFrameVideoA->data, pFrameVideoA->linesize, videoA_buffer_yuv420, AV_PIX_FMT_YUV420P, m_pReadCodecCtx_VideoA->width, m_pReadCodecCtx_VideoA->height, 1);


	AVFrame *pFrameVideoB = av_frame_alloc();
	uint8_t *videoB_buffer_yuv420 = (uint8_t *)av_malloc(m_iYuv420FrameSize);
	av_image_fill_arrays(pFrameVideoB->data, pFrameVideoB->linesize, videoB_buffer_yuv420, AV_PIX_FMT_YUV420P, m_pReadCodecCtx_VideoB->width, m_pReadCodecCtx_VideoB->height, 1);

	AVFrame *pFrameVideoC = av_frame_alloc();
	uint8_t *videoC_buffer_yuv420 = (uint8_t *)av_malloc(m_iYuv420FrameSize);
	av_image_fill_arrays(pFrameVideoC->data, pFrameVideoC->linesize, videoC_buffer_yuv420, AV_PIX_FMT_YUV420P, m_pReadCodecCtx_VideoC->width, m_pReadCodecCtx_VideoC->height, 1);

	AVFrame *pFrameVideoD = av_frame_alloc();
	uint8_t *videoD_buffer_yuv420 = (uint8_t *)av_malloc(m_iYuv420FrameSize);
	av_image_fill_arrays(pFrameVideoD->data, pFrameVideoD->linesize, videoD_buffer_yuv420, AV_PIX_FMT_YUV420P, m_pReadCodecCtx_VideoD->width, m_pReadCodecCtx_VideoD->height, 1);


	int iOutVideoWidth = m_pReadCodecCtx_VideoB->width;
	int iOutVideoHeight = m_pReadCodecCtx_VideoB->height;

	AVPacket packet = { 0 };
	int iPicCount = 0;

	AVFrame* pFrame_out = av_frame_alloc();
	uint8_t *out_buffer_yuv420 = (uint8_t *)av_malloc(m_iYuv420FrameSize);
	av_image_fill_arrays(pFrame_out->data, pFrame_out->linesize, out_buffer_yuv420, AV_PIX_FMT_YUV420P, m_pReadCodecCtx_VideoA->width, m_pReadCodecCtx_VideoA->height, 1);

	while (1)
	{
		if (NULL == m_pVideoAFifo)
		{
			break;
		}
		if (NULL == m_pVideoBFifo)
		{
			break;
		}
		if (NULL == m_pVideoCFifo)
		{
			break;
		}
		if (NULL == m_pVideoDFifo)
		{
			break;
		}

		int iVideoASize = av_fifo_size(m_pVideoAFifo);
		int iVideoBSize = av_fifo_size(m_pVideoBFifo);
		int iVideoCSize = av_fifo_size(m_pVideoCFifo);
		int iVideoDSize = av_fifo_size(m_pVideoDFifo);

		if (iVideoASize >= m_iYuv420FrameSize && iVideoBSize >= m_iYuv420FrameSize && iVideoCSize >= m_iYuv420FrameSize && iVideoDSize >= m_iYuv420FrameSize)
		{
			EnterCriticalSection(&m_csVideoASection);
			av_fifo_generic_read(m_pVideoAFifo, videoA_buffer_yuv420, m_iYuv420FrameSize, NULL);
			LeaveCriticalSection(&m_csVideoASection);


			EnterCriticalSection(&m_csVideoBSection);
			av_fifo_generic_read(m_pVideoBFifo, videoB_buffer_yuv420, m_iYuv420FrameSize, NULL);
			LeaveCriticalSection(&m_csVideoBSection);

			EnterCriticalSection(&m_csVideoCSection);
			av_fifo_generic_read(m_pVideoCFifo, videoC_buffer_yuv420, m_iYuv420FrameSize, NULL);
			LeaveCriticalSection(&m_csVideoCSection);

			EnterCriticalSection(&m_csVideoDSection);
			av_fifo_generic_read(m_pVideoDFifo, videoD_buffer_yuv420, m_iYuv420FrameSize, NULL);
			LeaveCriticalSection(&m_csVideoDSection);


			pFrameVideoA->pkt_dts = pFrameVideoA->pts = av_rescale_q_rnd(iPicCount, m_pCodecEncodeCtx_Video->time_base, m_pFormatCtx_Out->streams[0]->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
			pFrameVideoA->pkt_duration = 0;
			pFrameVideoA->pkt_pos = -1;

			pFrameVideoA->width = iOutVideoWidth;
			pFrameVideoA->height = iOutVideoHeight;
			pFrameVideoA->format = AV_PIX_FMT_YUV420P;



			pFrameVideoB->pkt_dts = pFrameVideoB->pts = av_rescale_q_rnd(iPicCount, m_pCodecEncodeCtx_Video->time_base, m_pFormatCtx_Out->streams[0]->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
			pFrameVideoB->pkt_duration = 0;
			pFrameVideoB->pkt_pos = -1;

			pFrameVideoB->width = iOutVideoWidth;
			pFrameVideoB->height = iOutVideoHeight;
			pFrameVideoB->format = AV_PIX_FMT_YUV420P;

			pFrameVideoC->pkt_dts = pFrameVideoC->pts = av_rescale_q_rnd(iPicCount, m_pCodecEncodeCtx_Video->time_base, m_pFormatCtx_Out->streams[0]->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
			pFrameVideoC->pkt_duration = 0;
			pFrameVideoC->pkt_pos = -1;

			pFrameVideoC->width = iOutVideoWidth;
			pFrameVideoC->height = iOutVideoHeight;
			pFrameVideoC->format = AV_PIX_FMT_YUV420P;


			pFrameVideoD->pkt_dts = pFrameVideoD->pts = av_rescale_q_rnd(iPicCount, m_pCodecEncodeCtx_Video->time_base, m_pFormatCtx_Out->streams[0]->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
			pFrameVideoD->pkt_duration = 0;
			pFrameVideoD->pkt_pos = -1;

			pFrameVideoD->width = iOutVideoWidth;
			pFrameVideoD->height = iOutVideoHeight;
			pFrameVideoD->format = AV_PIX_FMT_YUV420P;

			//if (iPicCount == 0)
			{
				LOG_INFO("begin av_buffersrc_add_frame");
				//ret = av_buffersrc_add_frame_flags(m_pFilterCtxSrcVideoA, pFrameVideoA, 0);
				ret = av_buffersrc_add_frame(m_pFilterCtxSrcVideoA, pFrameVideoA);
				if (ret < 0)
				{
					break;
				}

				//ret = av_buffersrc_add_frame_flags(m_pFilterCtxSrcVideoB, pFrameVideoB, 0);
				ret = av_buffersrc_add_frame(m_pFilterCtxSrcVideoB, pFrameVideoB);
				if (ret < 0)
				{
					break;
				}

				ret = av_buffersrc_add_frame(m_pFilterCtxSrcVideoC, pFrameVideoC);
				if (ret < 0)
				{
					break;
				}

				ret = av_buffersrc_add_frame(m_pFilterCtxSrcVideoD, pFrameVideoD);
				if (ret < 0)
				{
					break;
				}
			}
			
			do
			{
				//while (1)
				{
					ret = av_buffersink_get_frame(m_pFilterCtxSink, pFrame_out);
					if (ret < 0)
					{
						//printf("Mixer: failed to call av_buffersink_get_frame_flags\n");
						break;
					}

					LOG_INFO("end av_buffersink_get_frame_flags");

					pFrame_out->pkt_dts = pFrame_out->pts = av_rescale_q_rnd(iPicCount, m_pCodecEncodeCtx_Video->time_base, m_pFormatCtx_Out->streams[0]->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
					pFrame_out->pkt_duration = 0;
					pFrame_out->pkt_pos = -1;

					pFrame_out->width = m_iMergeWidth;
					pFrame_out->height = m_iMergeHeight;
					pFrame_out->format = AV_PIX_FMT_YUV420P;

					ret = avcodec_send_frame(m_pCodecEncodeCtx_Video, pFrame_out);
					if (ret >= 0)
					{
						ret = avcodec_receive_packet(m_pCodecEncodeCtx_Video, &packet);
						// Only write the packet if the encoder actually produced one;
						// on EAGAIN it needs more input and packet holds no data.
						if (ret == 0)
						{
							av_write_frame(m_pFormatCtx_Out, &packet);
							av_packet_unref(&packet);
						}
					}

					iPicCount++;

					av_frame_unref(pFrame_out);
				}

			} while (0);
		}
		else
		{
			if (m_hVideoAReadThread == NULL && m_hVideoBReadThread == NULL &&
				m_hVideoCReadThread == NULL && m_hVideoDReadThread == NULL)
			{
				break;
			}
			Sleep(1);
		}
	}

	av_write_trailer(m_pFormatCtx_Out);
	avio_close(m_pFormatCtx_Out->pb);

	av_frame_free(&pFrame_out);

	av_frame_free(&pFrameVideoA);
	av_frame_free(&pFrameVideoB);
	av_frame_free(&pFrameVideoC);
	av_frame_free(&pFrameVideoD);
}
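
One caveat: VideoMerge() writes the trailer without draining the encoder. With max_b_frames set to 0 and tune=zerolatency the encoder buffers very little, but a flush step like the sketch below (not in the original code), called just before av_write_trailer(), would be safer:

// Hypothetical flush helper: send a NULL frame to put the encoder into
// draining mode, then write out any packets it still has buffered.
static void FlushEncoder(AVCodecContext *pEncCtx, AVFormatContext *pFmtCtx)
{
	AVPacket packet = { 0 };
	avcodec_send_frame(pEncCtx, NULL);
	while (avcodec_receive_packet(pEncCtx, &packet) == 0)
	{
		av_write_frame(pFmtCtx, &packet);
		av_packet_unref(&packet);
	}
}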




