ffmpeg scale滤镜的大致实现

这里弄过简单的例子,来说明下尺寸变换是如何实现的。
比如,现在有个1920x1080的图片,要将其修改为960x1080的,怎么办呢?
我们先看第一行,一共1920个像素点,此时需要转换为960个像素点,每个像素点有r,g,b三元素,则我们可以将原图中的第一个像素点和第二个像素点的r,g,b分别相加,然后除以2,就近似得到输出图中的第一个像素点的r,g,b。
同理将原图中的第三个像素点和第四个像素点的r,g,b分别相加,然后除以2,就近似得到输出图中的第二个像素点的r,g,b。

ffmpeg解码后的视频帧常用yuv格式存储(本例为yuv420p),可以仿照r,g,b的方式对y,u,v各分量分别做同样的处理。

下面列举下例子,本地有个1920x1080的视频文件,现在需要读取该视频文件,并且修改尺寸为960x1080。
工程结构如下所示,注意,这里的命名不严谨,Crop应该为Scale。
(此处原文为工程结构截图,文字版未能保留该图片。)

下面的代码展示了核心逻辑,其中pFrameVideoA为原视频文件对应的帧,pFrame_out为输出视频对应的帧。

/// Scale step: halve the width by averaging each horizontal pair of samples.
/// pFrameVideoA is the decoded input frame, pFrame_out the output frame.
uint8_t *dataY = pFrameVideoA->data[0];
uint8_t *dataU = pFrameVideoA->data[1];
uint8_t *dataV = pFrameVideoA->data[2];

// Y plane: the plane is treated as one flat array (assumes tightly packed
// data, i.e. linesize == width — TODO confirm at the call site).
for (int i = 0; i < iOriginalYSize; i += 2)
{
	dataCropY[i / 2] = (dataY[i] + dataY[i + 1]) / 2;
}

// U plane (quarter size in YUV420P), same pairwise averaging.
for (int i = 0; i < iOriginalUSize; i += 2)
{
	dataCropU[i / 2] = (dataU[i] + dataU[i + 1]) / 2;
}

// V plane, same treatment.
for (int i = 0; i < iOriginalVSize; i += 2)
{
	dataCropV[i / 2] = (dataV[i] + dataV[i + 1]) / 2;
}

// Copy the half-width planes into the output frame's buffers.
memcpy(pFrame_out->data[0], dataCropY, iOriginalYSize / 2);
memcpy(pFrame_out->data[1], dataCropU, iOriginalUSize / 2);
memcpy(pFrame_out->data[2], dataCropV, iOriginalVSize / 2);

FfmpegMyCropTest.cpp的内容如下:

#include <iostream>
#include "CropFile.h"

#ifdef	__cplusplus
extern "C"
{
#endif

#pragma comment(lib, "avcodec.lib")
#pragma comment(lib, "avformat.lib")
#pragma comment(lib, "avutil.lib")
#pragma comment(lib, "avdevice.lib")
#pragma comment(lib, "avfilter.lib")
#pragma comment(lib, "postproc.lib")
#pragma comment(lib, "swresample.lib")
#pragma comment(lib, "swscale.lib")


#ifdef __cplusplus
};
#endif





int main()
{
	CCropFile cVideoCopy;

	const char *pFileA = "E:\\learn\\ffmpeg\\FfmpegFilterTest\\x64\\Release\\in-vs.mp4";

	const char *pFileOut = "E:\\learn\\ffmpeg\\FfmpegFilterTest\\x64\\Release\\out-mycrop.mp4";

	cVideoCopy.StartMyCrop(pFileA, pFileOut);
	cVideoCopy.WaitFinish();
	return 0;
}



CropFile.h的内容如下:

#pragma once

#include <Windows.h>

#ifdef	__cplusplus
extern "C"
{
#endif
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#include "libswresample/swresample.h"
#include "libavdevice/avdevice.h"
#include "libavutil/audio_fifo.h"
#include "libavutil/avutil.h"
#include "libavutil/fifo.h"
#include "libavutil/frame.h"
#include "libavutil/imgutils.h"

#include "libavfilter/avfilter.h"
#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"


#ifdef __cplusplus
};
#endif

// Reads a video file on one thread, hands decoded YUV420P frames through a
// FIFO to a second thread that halves the horizontal resolution and encodes
// the result into a new file.
class CCropFile
{
public:
	CCropFile();
	~CCropFile();
public:
	// Opens input/output and starts the read and scale/encode worker threads.
	// Returns 0 on success.
	int StartMyCrop(const char *pFileA, const char *pFileOut);
	// Blocks until both worker threads have finished.
	int WaitFinish();
private:
	int OpenFileA(const char *pFileA);        // open input file + video decoder
	int OpenOutPut(const char *pFileOut);     // open output file + H.264 encoder, write header
	int InitFilter(const char* filter_desc);  // declared but not defined in the visible code
private:
	// Thread trampolines forwarding to the member implementations below.
	static DWORD WINAPI VideoAReadProc(LPVOID lpParam);
	void VideoARead();


	static DWORD WINAPI VideoCropProc(LPVOID lpParam);
	void VideoCrop();
private:
	AVFormatContext *m_pFormatCtx_FileA = NULL;   // input demuxer context

	AVCodecContext *m_pReadCodecCtx_VideoA = NULL;  // input video decoder
	AVCodec *m_pReadCodec_VideoA = NULL;


	AVCodecContext	*m_pCodecEncodeCtx_Video = NULL;  // output video encoder
	AVFormatContext *m_pFormatCtx_Out = NULL;         // output muxer context

	// Raw YUV420P frame queue between the read and crop threads,
	// guarded by m_csVideoASection.
	AVFifoBuffer *m_pVideoAFifo = NULL;

	int m_iOutWidth = 960;           // output width (input width / 2)
	int m_iOutHeight = 1080;         // output height (unchanged)
	int m_iYuv420FrameSize = 0;      // byte size of one packed YUV420P input frame
private:
	// Filter-graph members are declared but unused in the visible code.
	AVFilterGraph* m_pFilterGraph = NULL;
	AVFilterContext* m_pFilterCtxSrcVideoA = NULL;
	AVFilterContext* m_pFilterCtxSink = NULL;
private:
	CRITICAL_SECTION m_csVideoASection;   // protects m_pVideoAFifo
	HANDLE m_hVideoAReadThread = NULL;    // decode/read thread handle
	HANDLE m_hVideoCrophread = NULL;      // scale/encode thread handle
};






CropFile.cpp的内容如下:


#include "CropFile.h"
//#include "log/log.h"




// Debug helper: appends one YUV420P frame to e:\my.yuv, honoring each
// plane's linesize (stride) so padded frames dump correctly.
void SaveAvFrame(AVFrame *avFrame)
{
	FILE *fDump = fopen("e:\\my.yuv", "ab");
	if (NULL == fDump)
	{
		// fix: the original dereferenced a possibly-NULL FILE* when the
		// file could not be opened.
		return;
	}

	uint32_t pitchY = avFrame->linesize[0];
	uint32_t pitchU = avFrame->linesize[1];
	uint32_t pitchV = avFrame->linesize[2];

	uint8_t *avY = avFrame->data[0];
	uint8_t *avU = avFrame->data[1];
	uint8_t *avV = avFrame->data[2];

	// fix: loop counters are int to match AVFrame::width/height (both int),
	// avoiding signed/unsigned comparison.
	for (int i = 0; i < avFrame->height; i++) {
		fwrite(avY, avFrame->width, 1, fDump);
		avY += pitchY;
	}

	// Chroma planes are half size in both dimensions for YUV420P.
	for (int i = 0; i < avFrame->height / 2; i++) {
		fwrite(avU, avFrame->width / 2, 1, fDump);
		avU += pitchU;
	}

	for (int i = 0; i < avFrame->height / 2; i++) {
		fwrite(avV, avFrame->width / 2, 1, fDump);
		avV += pitchV;
	}

	fclose(fDump);
}



// Initializes the critical section that guards the shared YUV frame FIFO.
CCropFile::CCropFile()
{
	InitializeCriticalSection(&m_csVideoASection);
}

// Releases the FIFO critical section. NOTE(review): FFmpeg contexts and the
// FIFO itself are not freed here — confirm whether cleanup happens elsewhere.
CCropFile::~CCropFile()
{
	DeleteCriticalSection(&m_csVideoASection);
}

// Opens input and output, allocates the inter-thread FIFO and launches the
// read and scale/encode threads. Returns 0 on success, non-zero on failure.
int CCropFile::StartMyCrop(const char *pFileA, const char *pFileOut)
{
	int ret = -1;
	do
	{
		m_iOutWidth = 1920 / 2;
		m_iOutHeight = 1080;

		ret = OpenFileA(pFileA);
		if (ret != 0)
		{
			break;
		}

		ret = OpenOutPut(pFileOut);
		if (ret != 0)
		{
			break;
		}

		m_iYuv420FrameSize = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, m_pReadCodecCtx_VideoA->width, m_pReadCodecCtx_VideoA->height, 1);
		// Buffer up to 30 frames between the two threads.
		m_pVideoAFifo = av_fifo_alloc(30 * m_iYuv420FrameSize);
		if (NULL == m_pVideoAFifo)
		{
			// fix: allocation failure was silently ignored and 0 returned.
			ret = -1;
			break;
		}

		m_hVideoAReadThread = CreateThread(NULL, 0, VideoAReadProc, this, 0, NULL);
		if (NULL == m_hVideoAReadThread)
		{
			// fix: thread-creation failure was silently ignored.
			ret = -1;
			break;
		}

		m_hVideoCrophread = CreateThread(NULL, 0, VideoCropProc, this, 0, NULL);
		if (NULL == m_hVideoCrophread)
		{
			ret = -1;
			break;
		}

	} while (0);

	return ret;
}

// Joins the read thread, then the scale/encode thread, closing both handles.
// Always returns 0; a never-started pipeline is treated as already finished.
int CCropFile::WaitFinish()
{
	int ret = 0;
	do
	{
		if (NULL == m_hVideoAReadThread)
		{
			break;
		}
		WaitForSingleObject(m_hVideoAReadThread, INFINITE);

		CloseHandle(m_hVideoAReadThread);
		// VideoCrop() polls this member: NULL tells it the producer is done.
		m_hVideoAReadThread = NULL;

		// fix: guard against a NULL crop-thread handle (StartMyCrop may have
		// failed after creating only the read thread).
		if (NULL != m_hVideoCrophread)
		{
			WaitForSingleObject(m_hVideoCrophread, INFINITE);
			CloseHandle(m_hVideoCrophread);
			m_hVideoCrophread = NULL;
		}
	} while (0);

	return ret;
}

// Opens the input file and creates an opened decoder context for its video
// stream. Returns 0 on success, non-zero on failure.
// NOTE(review): the code assumes streams[0] is the video stream; inputs whose
// first stream is audio or subtitles are rejected.
int CCropFile::OpenFileA(const char *pFileA)
{
	int ret = -1;

	do
	{
		if ((ret = avformat_open_input(&m_pFormatCtx_FileA, pFileA, 0, 0)) < 0) {
			printf("Could not open input file.");
			break;
		}
		if ((ret = avformat_find_stream_info(m_pFormatCtx_FileA, 0)) < 0) {
			printf("Failed to retrieve input stream information");
			break;
		}

		if (m_pFormatCtx_FileA->streams[0]->codecpar->codec_type != AVMEDIA_TYPE_VIDEO)
		{
			// fix: without this, ret still held the >=0 value returned by
			// avformat_find_stream_info and the failure looked like success.
			ret = -1;
			break;
		}

		m_pReadCodec_VideoA = (AVCodec *)avcodec_find_decoder(m_pFormatCtx_FileA->streams[0]->codecpar->codec_id);
		if (m_pReadCodec_VideoA == NULL)
		{
			// fix: decoder lookup can fail; the original passed NULL onward.
			ret = -1;
			break;
		}

		m_pReadCodecCtx_VideoA = avcodec_alloc_context3(m_pReadCodec_VideoA);
		if (m_pReadCodecCtx_VideoA == NULL)
		{
			ret = -1;
			break;
		}

		// fix: return value was ignored in the original.
		if (avcodec_parameters_to_context(m_pReadCodecCtx_VideoA, m_pFormatCtx_FileA->streams[0]->codecpar) < 0)
		{
			ret = -1;
			break;
		}

		m_pReadCodecCtx_VideoA->framerate = m_pFormatCtx_FileA->streams[0]->r_frame_rate;

		if (avcodec_open2(m_pReadCodecCtx_VideoA, m_pReadCodec_VideoA, NULL) < 0)
		{
			ret = -1;
			break;
		}

		ret = 0;
	} while (0);


	return ret;
}


int CCropFile::OpenOutPut(const char *pFileOut)
{
	int iRet = -1;

	AVStream *pAudioStream = NULL;
	AVStream *pVideoStream = NULL;

	do
	{
		avformat_alloc_output_context2(&m_pFormatCtx_Out, NULL, NULL, pFileOut);

		{
			AVCodec* pCodecEncode_Video = (AVCodec *)avcodec_find_encoder(m_pFormatCtx_Out->oformat->video_codec);

			m_pCodecEncodeCtx_Video = avcodec_alloc_context3(pCodecEncode_Video);
			if (!m_pCodecEncodeCtx_Video)
			{
				break;
			}

			pVideoStream = avformat_new_stream(m_pFormatCtx_Out, pCodecEncode_Video);
			if (!pVideoStream)
			{
				break;
			}

			int frameRate = 10;
			m_pCodecEncodeCtx_Video->flags |= AV_CODEC_FLAG_QSCALE;
			m_pCodecEncodeCtx_Video->bit_rate = 4000000;
			m_pCodecEncodeCtx_Video->rc_min_rate = 4000000;
			m_pCodecEncodeCtx_Video->rc_max_rate = 4000000;
			m_pCodecEncodeCtx_Video->bit_rate_tolerance = 4000000;
			m_pCodecEncodeCtx_Video->time_base.den = frameRate;
			m_pCodecEncodeCtx_Video->time_base.num = 1;

			m_pCodecEncodeCtx_Video->width = m_iOutWidth;
			m_pCodecEncodeCtx_Video->height = m_iOutHeight;
			//pH264Encoder->pCodecCtx->frame_number = 1;
			m_pCodecEncodeCtx_Video->gop_size = 12;
			m_pCodecEncodeCtx_Video->max_b_frames = 0;
			m_pCodecEncodeCtx_Video->thread_count = 4;
			m_pCodecEncodeCtx_Video->pix_fmt = AV_PIX_FMT_YUV420P;
			m_pCodecEncodeCtx_Video->codec_id = AV_CODEC_ID_H264;
			m_pCodecEncodeCtx_Video->codec_type = AVMEDIA_TYPE_VIDEO;

			av_opt_set(m_pCodecEncodeCtx_Video->priv_data, "b-pyramid", "none", 0);
			av_opt_set(m_pCodecEncodeCtx_Video->priv_data, "preset", "superfast", 0);
			av_opt_set(m_pCodecEncodeCtx_Video->priv_data, "tune", "zerolatency", 0);

			if (m_pFormatCtx_Out->oformat->flags & AVFMT_GLOBALHEADER)
				m_pCodecEncodeCtx_Video->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

			if (avcodec_open2(m_pCodecEncodeCtx_Video, pCodecEncode_Video, 0) < 0)
			{
				//编码器打开失败,退出程序
				break;
			}
		}

		if (!(m_pFormatCtx_Out->oformat->flags & AVFMT_NOFILE))
		{
			if (avio_open(&m_pFormatCtx_Out->pb, pFileOut, AVIO_FLAG_WRITE) < 0)
			{
				break;
			}
		}

		avcodec_parameters_from_context(pVideoStream->codecpar, m_pCodecEncodeCtx_Video);

		if (avformat_write_header(m_pFormatCtx_Out, NULL) < 0)
		{
			break;
		}

		iRet = 0;
	} while (0);


	if (iRet != 0)
	{
		if (m_pCodecEncodeCtx_Video != NULL)
		{
			avcodec_free_context(&m_pCodecEncodeCtx_Video);
			m_pCodecEncodeCtx_Video = NULL;
		}

		if (m_pFormatCtx_Out != NULL)
		{
			avformat_free_context(m_pFormatCtx_Out);
			m_pFormatCtx_Out = NULL;
		}
	}

	return iRet;
}


// Win32 thread entry point: forwards to the instance's VideoARead().
DWORD WINAPI CCropFile::VideoAReadProc(LPVOID lpParam)
{
	CCropFile *pSelf = static_cast<CCropFile *>(lpParam);
	if (NULL == pSelf)
	{
		return 0;
	}
	pSelf->VideoARead();
	return 0;
}

// Producer thread body: demuxes and decodes the input, repacks each decoded
// frame's planes into tightly packed Y/U/V buffers (stripping linesize
// padding) and pushes them into the shared FIFO for the crop thread.
// NOTE(review): only one avcodec_receive_frame() is called per packet, so
// frames may be dropped when the decoder buffers several — confirm intended.
// NOTE(review): no flush (send_packet(NULL)) at EOF; trailing frames inside
// the decoder are discarded.
void CCropFile::VideoARead()
{
	AVFrame *pFrame;
	pFrame = av_frame_alloc();

	// One luma sample per pixel; chroma planes are a quarter of this (YUV420P).
	int y_size = m_pReadCodecCtx_VideoA->width * m_pReadCodecCtx_VideoA->height;

	char *pY = new char[y_size];
	char *pU = new char[y_size / 4];
	char *pV = new char[y_size / 4];

	AVPacket packet = { 0 };
	int ret = 0;
	while (1)
	{
		// Drop the previous packet's payload before reading the next one.
		av_packet_unref(&packet);

		ret = av_read_frame(m_pFormatCtx_FileA, &packet);
		if (ret == AVERROR(EAGAIN))
		{
			continue;
		}
		else if (ret == AVERROR_EOF)
		{
			break;
		}
		else if (ret < 0)
		{
			break;
		}

		ret = avcodec_send_packet(m_pReadCodecCtx_VideoA, &packet);

		if (ret >= 0)
		{
			ret = avcodec_receive_frame(m_pReadCodecCtx_VideoA, pFrame);
			if (ret == AVERROR(EAGAIN))
			{
				// Decoder needs more input before producing a frame.
				continue;
			}
			else if (ret == AVERROR_EOF)
			{
				break;
			}
			else if (ret < 0) {
				break;
			}
			// Busy-wait until the FIFO has room for one whole frame.
			while (1)
			{
				if (av_fifo_space(m_pVideoAFifo) >= m_iYuv420FrameSize)
				{
					/// Y plane: copy row by row to strip linesize padding.
					int contY = 0;
					for (int i = 0; i < pFrame->height; i++)
					{
						memcpy(pY + contY, pFrame->data[0] + i * pFrame->linesize[0], pFrame->width);
						contY += pFrame->width;
					}


					/// U plane (half width, half height).
					int contU = 0;
					for (int i = 0; i < pFrame->height / 2; i++)
					{
						memcpy(pU + contU, pFrame->data[1] + i * pFrame->linesize[1], pFrame->width / 2);
						contU += pFrame->width / 2;
					}


					/// V plane (half width, half height).
					int contV = 0;
					for (int i = 0; i < pFrame->height / 2; i++)
					{
						memcpy(pV + contV, pFrame->data[2] + i * pFrame->linesize[2], pFrame->width / 2);
						contV += pFrame->width / 2;
					}


					// Publish the packed frame; the lock pairs with VideoCrop's reads.
					EnterCriticalSection(&m_csVideoASection);
					av_fifo_generic_write(m_pVideoAFifo, pY, y_size, NULL);
					av_fifo_generic_write(m_pVideoAFifo, pU, y_size / 4, NULL);
					av_fifo_generic_write(m_pVideoAFifo, pV, y_size / 4, NULL);
					LeaveCriticalSection(&m_csVideoASection);

					break;
				}
				else
				{
					Sleep(100);
				}
			}

		}


		if (ret == AVERROR(EAGAIN))
		{
			continue;
		}
	}

	av_frame_free(&pFrame);
	delete[] pY;
	delete[] pU;
	delete[] pV;
}

// Win32 thread entry point: forwards to the instance's VideoCrop().
DWORD WINAPI CCropFile::VideoCropProc(LPVOID lpParam)
{
	CCropFile *pSelf = static_cast<CCropFile *>(lpParam);
	if (NULL == pSelf)
	{
		return 0;
	}
	pSelf->VideoCrop();
	return 0;
}


// Consumer thread body: pulls packed YUV420P frames from the FIFO, halves the
// horizontal resolution by averaging adjacent sample pairs in each plane,
// encodes the result and writes it to the output file. Exits when the read
// thread is gone and the FIFO is drained, then finalizes the container.
void CCropFile::VideoCrop()
{
	int ret = 0;

	AVFrame *pFrameVideoA = av_frame_alloc();
	uint8_t *videoA_buffer_yuv420 = (uint8_t *)av_malloc(m_iYuv420FrameSize);
	// align=1 => planes are tightly packed (linesize == width), so each plane
	// can safely be treated as a flat array in the averaging loops below.
	av_image_fill_arrays(pFrameVideoA->data, pFrameVideoA->linesize, videoA_buffer_yuv420, AV_PIX_FMT_YUV420P, m_pReadCodecCtx_VideoA->width, m_pReadCodecCtx_VideoA->height, 1);


	int iYuv420FrameoutSize = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, m_iOutWidth, m_iOutHeight, 1);
	AVFrame* pFrame_out = av_frame_alloc();
	uint8_t *out_buffer_yuv420 = (uint8_t *)av_malloc(iYuv420FrameoutSize);
	av_image_fill_arrays(pFrame_out->data, pFrame_out->linesize, out_buffer_yuv420, AV_PIX_FMT_YUV420P, m_iOutWidth, m_iOutHeight, 1);

	AVPacket packet = { 0 };
	int iPicCount = 0;


	// Per-plane byte counts of one input frame (YUV420P layout).
	int iOriginalYSize = m_pReadCodecCtx_VideoA->width * m_pReadCodecCtx_VideoA->height;
	int iOriginalUSize = m_pReadCodecCtx_VideoA->width * m_pReadCodecCtx_VideoA->height / 4;
	int iOriginalVSize = m_pReadCodecCtx_VideoA->width * m_pReadCodecCtx_VideoA->height / 4;

	uint8_t *dataCropY = new uint8_t[iOriginalYSize / 2];
	uint8_t *dataCropU = new uint8_t[iOriginalUSize / 2];
	uint8_t *dataCropV = new uint8_t[iOriginalVSize / 2];

	while (1)
	{
		if (NULL == m_pVideoAFifo)
		{
			break;
		}

		int iVideoASize = av_fifo_size(m_pVideoAFifo);

		if (iVideoASize >= m_iYuv420FrameSize)
		{
			EnterCriticalSection(&m_csVideoASection);
			av_fifo_generic_read(m_pVideoAFifo, videoA_buffer_yuv420, m_iYuv420FrameSize, NULL);
			LeaveCriticalSection(&m_csVideoASection);


			pFrameVideoA->pkt_dts = pFrameVideoA->pts = av_rescale_q_rnd(iPicCount, m_pCodecEncodeCtx_Video->time_base, m_pFormatCtx_Out->streams[0]->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
			pFrameVideoA->pkt_duration = 0;
			pFrameVideoA->pkt_pos = -1;

			pFrameVideoA->width = m_pReadCodecCtx_VideoA->width;
			pFrameVideoA->height = m_pReadCodecCtx_VideoA->height;
			pFrameVideoA->format = AV_PIX_FMT_YUV420P;


			/// Scale step: average each horizontal pair of samples per plane.
			uint8_t *dataY = pFrameVideoA->data[0];
			uint8_t *dataU = pFrameVideoA->data[1];
			uint8_t *dataV = pFrameVideoA->data[2];

			for (int i = 0; i < iOriginalYSize; i += 2)
			{
				dataCropY[i / 2] = (dataY[i] + dataY[i + 1]) / 2;
			}

			for (int i = 0; i < iOriginalUSize; i += 2)
			{
				dataCropU[i / 2] = (dataU[i] + dataU[i + 1]) / 2;
			}

			for (int i = 0; i < iOriginalVSize; i += 2)
			{
				dataCropV[i / 2] = (dataV[i] + dataV[i + 1]) / 2;
			}

			memcpy(pFrame_out->data[0], dataCropY, iOriginalYSize / 2);
			memcpy(pFrame_out->data[1], dataCropU, iOriginalUSize / 2);
			memcpy(pFrame_out->data[2], dataCropV, iOriginalVSize / 2);

			pFrame_out->pkt_dts = pFrame_out->pts = av_rescale_q_rnd(iPicCount, m_pCodecEncodeCtx_Video->time_base, m_pFormatCtx_Out->streams[0]->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
			pFrame_out->pkt_duration = 0;
			pFrame_out->pkt_pos = -1;

			pFrame_out->width = m_iOutWidth;
			pFrame_out->height = m_iOutHeight;
			pFrame_out->format = AV_PIX_FMT_YUV420P;

			//SaveAvFrame(pFrame_out);

			ret = avcodec_send_frame(m_pCodecEncodeCtx_Video, pFrame_out);
			if (ret >= 0)
			{
				ret = avcodec_receive_packet(m_pCodecEncodeCtx_Video, &packet);
				// fix: only mux when the encoder actually produced a packet;
				// the original wrote an empty packet on EAGAIN and never
				// unreffed the encoded data.
				if (ret >= 0)
				{
					av_write_frame(m_pFormatCtx_Out, &packet);
					av_packet_unref(&packet);
				}
			}

			iPicCount++;
		}
		else
		{
			// FIFO is empty; stop once the producer thread has finished.
			if (m_hVideoAReadThread == NULL)
			{
				break;
			}
			Sleep(1);
		}
	}

	delete[] dataCropY;
	delete[] dataCropU;
	delete[] dataCropV;
	av_write_trailer(m_pFormatCtx_Out);
	avio_close(m_pFormatCtx_Out->pb);

	av_frame_free(&pFrameVideoA);
	// fix: the original leaked the output frame and both image buffers.
	av_frame_free(&pFrame_out);
	av_freep(&videoA_buffer_yuv420);
	av_freep(&out_buffer_yuv420);
}





  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值