FFmpeg过滤器框架分析-视频video

大概框架:首先创建filter,通过avfilter_link,将各个创建好的filter按⾃⼰想要的次序链接到⼀起,然后avfilter_graph_config之后,就可以正常使⽤。⽐较常⽤的滤镜有:scale、trim、overlay、rotate、movie、yadif。scale 滤镜⽤于缩放,trim 滤镜⽤于帧级剪切,overlay 滤镜⽤于视频叠加,rotate 滤镜实现旋转,movie 滤镜可以加载第三⽅的视频,yadif 滤镜可以去隔⾏。

一、主要结构体和API介绍

AVFilterGraph-对filters系统的整体管理

重点
struct AVFilterGraph{
AVFilterContext **filters;
unsigned nb_filters;
}

AVFilter-定义filter本身的能⼒

const char *name; 
const AVFilterPad *inputs;
const AVFilterPad *outputs;

AVFilterContext-filter实例,管理filter与外部的联系

// filter实例,管理filter与外部的联系
重点
struct AVFilterContext
{
const AVFilter *filter;
char *name;
AVFilterPad *input_pads;
AVFilterLink **inputs;
unsigned nb_inputs;
AVFilterPad *output_pads;
AVFilterLink **outputs;
unsigned nb_outputs;
struct AVFilterGraph *graph; // 从属于哪个AVFilterGraph
}

AVFilterLink-定义两个filters之间的联接

struct AVFilterLink
{
AVFilterContext *src;
AVFilterPad *srcpad;
AVFilterContext *dst;
AVFilterPad *dstpad;
struct AVFilterGraph *graph;

}

 AVFilterPad-定义filter的输⼊/输出接⼝

// 定义filter的输⼊/输出接⼝
重点
struct AVFilterPad
{
const char *name;
AVFrame *(*get_video_buffer)(AVFilterLink *link, int w, int h);
AVFrame *(*get_audio_buffer)(AVFilterLink *link, int nb_samples);
int (*filter_frame)(AVFilterLink *link, AVFrame *frame);

int (*request_frame)(AVFilterLink *link);
}

在AVFilter模块中定义了AVFilter结构,每个AVFilter都是具有独⽴功能的节点,如scale filter的作⽤就是进⾏图像尺⼨变换,overlay filter的作⽤就是进⾏图像的叠加。

这⾥需要重点提的是两个特别的filter,⼀个是buffer,⼀个是buffersink,滤波器buffer代表filter graph中的源头,原始数据就往这个filter节点输⼊的;⽽滤波器buffersink代表filter graph中的输出节点,处理完成的数据从这个filter节点输出。

二、 函数使⽤

// 获取FFmpeg中定义的filter,调⽤该⽅法前需要先调⽤avfilter_register_all();进⾏滤波器注册
const AVFilter *avfilter_get_by_name(const char *name);
// 往源滤波器buffer中输⼊待处理的数据
int av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame);
// 从⽬的滤波器buffersink中获取处理完的数据
int av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *frame);
// 创建⼀个滤波器图filter graph
AVFilterGraph *avfilter_graph_alloc(void);
// 创建⼀个滤波器实例AVFilterContext,并添加到AVFilterGraph中
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt,
const char *name, const char *args, void *opaque,
AVFilterGraph *graph_ctx);

// 连接两个滤波器节点
int avfilter_link(AVFilterContext *src, unsigned srcpad,
AVFilterContext *dst, unsigned dstpad);

三、AVFilter主体框架流程

基本结构

我们把⼀整个滤波的流程称为滤波过程。下⾯是⼀个滤波过程的结构

图中简要指示出了滤波所⽤到的各个结构体,各个结构体有如下作⽤:

在利⽤AVFilter进⾏⾳视频数据处理前先将在进⾏的处理流程绘制出来,现在以FFmpeg filter官⽅⽂档中的⼀个例⼦为例进⾏说明。

这个例⼦的处理流程如上所示,⾸先使⽤split滤波器将input流分成两路流(main和tmp),然后分别对两路流进⾏处理。对于tmp流,先经过crop滤波器进⾏裁剪处理,再经过flip滤波器进⾏垂直⽅向上的翻转操作,输出的结果命名为flip流。再将main流和flip流输⼊到overlay滤波器进⾏合成操作。上图的input就是上⾯提过的buffer源滤波器,output就是上⾯的提过的buffersink滤波器。上图中每个节点都是⼀个AVFilterContext,每个连线就是AVFliterLink。所有这些信息都统一由AVFilterGraph来管理。

流程可视化就是将视频下半部分翻转

原视频

处理后

附上c++代码

#include<iostream>
#include<fstream>
#include<string>
#include<sstream>
extern "C" {
#include<stdio.h>
#include<libavformat/avformat.h>
#include<libavfilter/avfilter.h>
#include<libavfilter/buffersink.h>
#include<libavfilter/buffersrc.h>
#include<libavutil/opt.h>
#include<libavutil/imgutils.h>
}
using namespace std;

// Example 1: build the split/crop/vflip/overlay graph by hand with
// avfilter_graph_create_filter() + avfilter_link(), then push raw YUV420P
// frames through it and write the filtered frames back to a .yuv file.
int main() {
	int ret = 0;
	ifstream inFile;
	const string inFileName = "768x320.yuv";
	inFile.open(inFileName, ios::binary);
	if (!inFile.is_open()) {
		cout << "open file " << inFileName << " failed" << endl;
		return -1;
	}
	int in_width = 768;
	int in_height = 320;

	ofstream outFile;
	const string outFileName = "out_crop_vfilter.yuv";
	outFile.open(outFileName, ios::binary);
	if (!outFile.is_open()) {
		cout << "create file " << outFileName << " failed" << endl;
		return -1;
	}
	// The graph owns every filter instance created below and frees them
	// in avfilter_graph_free().
	AVFilterGraph* filter_graph = avfilter_graph_alloc();
	if (!filter_graph) {
		cout << "Fail to create filter graph!" << endl;
		return -1;
	}
	// Init string for the "buffer" source: frame geometry, pixel format,
	// time base and pixel aspect ratio of the frames we will feed in.
	stringstream args;
	args << "video_size=" << in_width << "x" << in_height
		<< ":pix_fmt=" << AV_PIX_FMT_YUV420P
		<< ":time_base=" << 1 << "/" << 25
		<< ":pixel_aspect=" << 1 << "/" << 1;
	cout << args.str() << endl;
	const AVFilter* bufferSrc = avfilter_get_by_name("buffer");
	AVFilterContext* bufferSrc_ctx;
	// Instantiate the "buffer" source inside the graph.
	ret = avfilter_graph_create_filter(&bufferSrc_ctx, bufferSrc, "in",
		args.str().c_str(), nullptr, filter_graph);
	if (ret < 0) {
		cout << "Failed to create filter bufferSrc" << endl;
		return -1;
	}

	// Create the "buffersink" output filter.
	// NOTE(review): AVBufferSinkParams / av_buffersink_params_alloc() are
	// deprecated and removed in FFmpeg >= 5.0; newer code should pass the
	// accepted "pix_fmts" via av_opt_set_int_list() instead.
	AVBufferSinkParams* bufferSink_params;
	AVFilterContext* bufferSink_ctx;
	const AVFilter* bufferSink = avfilter_get_by_name("buffersink");
	enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P,AV_PIX_FMT_NONE };
	bufferSink_params = av_buffersink_params_alloc();
	bufferSink_params->pixel_fmts = pix_fmts;
	ret = avfilter_graph_create_filter(&bufferSink_ctx, bufferSink, "out",
		nullptr, bufferSink_params, filter_graph);
	// The params struct is only consumed at creation time; it was leaked
	// in the original code.
	av_free(bufferSink_params);
	if (ret < 0) {
		cout << "Fail to create filter sink filter" << endl;
		return -1;
	}
	// "split" duplicates the input into two identical streams.
	const AVFilter* splitFilter = avfilter_get_by_name("split");
	AVFilterContext* splitFilter_ctx;
	ret = avfilter_graph_create_filter(&splitFilter_ctx, splitFilter, "split",
		"outputs=2", nullptr, filter_graph);
	if (ret < 0) {
		cout << "Fail to create split filter" << endl;
		return -1;
	}
	// "crop" keeps the top half of the picture (full width, half height, y=0).
	const AVFilter* cropFilter = avfilter_get_by_name("crop");
	AVFilterContext* cropFilter_ctx;
	ret = avfilter_graph_create_filter(&cropFilter_ctx, cropFilter, "crop",
		"out_w=iw:out_h=ih/2:x=0:y=0", nullptr, filter_graph);
	if (ret < 0) {
		cout << "Fail to create crop filter" << endl;
		return -1;
	}
	// "vflip" mirrors the cropped half vertically.
	const AVFilter* vflipFilter = avfilter_get_by_name("vflip");
	AVFilterContext* vflipFilter_ctx;
	ret = avfilter_graph_create_filter(&vflipFilter_ctx, vflipFilter, "vflip",
		nullptr, nullptr, filter_graph);
	if (ret < 0) {
		cout << "Fail to create vflip filter" << endl;
		return -1;
	}
	// "overlay" composites the flipped half onto the bottom of the main
	// stream. BUGFIX: the original args "y=0:H/2" mixed a named option with
	// a trailing positional value; the intent (matching the parse2 example
	// "overlay=0:H/2") is x=0, y=H/2.
	const AVFilter* overlayFilter = avfilter_get_by_name("overlay");
	AVFilterContext* overlayFilter_ctx;
	ret = avfilter_graph_create_filter(&overlayFilter_ctx, overlayFilter, "overlay",
		"x=0:y=H/2", nullptr, filter_graph);
	if (ret < 0) {
		cout << "Fail to create overlay filter" << endl;
		return -1;
	}
	// Wire the graph: buffer -> split
	ret = avfilter_link(bufferSrc_ctx, 0, splitFilter_ctx, 0);
	if (ret != 0) {
		cout << "Fail to link src filter and split filter" << endl;
		return -1;
	}
	// split[0] -> overlay (main input)
	ret = avfilter_link(splitFilter_ctx, 0, overlayFilter_ctx, 0);
	if (ret != 0) {
		cout << "Fail to link split filter and overlay filter" << endl;
		return -1;
	}
	// split[1] -> crop
	ret = avfilter_link(splitFilter_ctx, 1, cropFilter_ctx, 0);
	if (ret != 0) {
		cout << "Fail to link split filter and crop filter" << endl;
		return -1;
	}
	// crop -> vflip
	ret = avfilter_link(cropFilter_ctx, 0, vflipFilter_ctx, 0);
	if (ret != 0) {
		cout << "Fail to link crop filter and vflip filter" << endl;
		return -1;
	}
	// vflip -> overlay (overlaid input)
	ret = avfilter_link(vflipFilter_ctx, 0, overlayFilter_ctx, 1);
	if (ret != 0) {
		cout << "Fail to link vfilp filter and overlay filter" << endl;
		return -1;
	}
	// overlay -> buffersink
	ret = avfilter_link(overlayFilter_ctx, 0, bufferSink_ctx, 0);
	if (ret != 0) {
		cout << "Fail to link overlay filter and buffersink filter" << endl;
		return -1;
	}
	// Validate and configure the whole graph once all filters are linked.
	ret = avfilter_graph_config(filter_graph, nullptr);
	if (ret < 0) {
		cout << "fail in filter graph" << endl;
		return -1;
	}
	// Dump a human-readable description of the graph to a file.
	// avfilter_graph_dump() returns a malloc'ed string the caller must
	// release with av_free() (leaked in the original code).
	char* graph_str = avfilter_graph_dump(filter_graph, nullptr);
	ofstream graphFile("graphFile.txt");
	if (graph_str) {
		graphFile << graph_str;
		av_free(graph_str);
	}

	AVFrame* frame_in = av_frame_alloc();
	int in_buffer_size = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, in_width, in_height, 1);
	unsigned char* frame_buffer_in = (unsigned char*)av_malloc(in_buffer_size);
	// Point frame_in->data/linesize at the raw buffer (no data copied yet).
	av_image_fill_arrays(frame_in->data, frame_in->linesize, frame_buffer_in,
		AV_PIX_FMT_YUV420P, in_width, in_height, 1);

	AVFrame* frame_out = av_frame_alloc();
	int out_buffer_size = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, in_width, in_height, 1);
	unsigned char* frame_buffer_out = (unsigned char*)av_malloc(out_buffer_size);
	av_image_fill_arrays(frame_out->data, frame_out->linesize, frame_buffer_out,
		AV_PIX_FMT_YUV420P, in_width, in_height, 1);

	frame_in->width = in_width;
	frame_in->height = in_height;
	frame_in->format = AV_PIX_FMT_YUV420P;
	uint32_t frame_count = 0;
	// Read the input file one raw frame at a time.
	while (1)
	{
		// One YUV420P frame occupies in_width * in_height * 3 / 2 bytes.
		inFile.read((char*)frame_buffer_in, in_width * in_height * 3 / 2);
		if (inFile.gcount() != in_width * in_height * 3 / 2) {
			cout << "read finish" << endl;
			break;
		}

		// Y, U, V plane pointers inside the packed buffer.
		frame_in->data[0] = frame_buffer_in;
		frame_in->data[1] = frame_buffer_in + in_width * in_height;
		frame_in->data[2] = frame_buffer_in + in_width * in_height * 5 / 4;
		// Push the frame into the "buffer" source.
		if (av_buffersrc_add_frame(bufferSrc_ctx, frame_in) < 0) {
			cout << "Error while add frame" << endl;
			break;
		}
		// Pull the filtered frame from the "buffersink".
		ret = av_buffersink_get_frame(bufferSink_ctx, frame_out);
		if (ret < 0) {
			break;
		}
		// Write the three planes row by row (linesize may exceed width).
		if (frame_out->format == AV_PIX_FMT_YUV420P) {
			for (int i = 0; i < frame_out->height; i++) {
				outFile.write(reinterpret_cast<char*>(frame_out->data[0] + frame_out->linesize[0] * i), frame_out->width);
			}
			for (int i = 0; i < frame_out->height / 2; i++) {
				outFile.write(reinterpret_cast<char*>(frame_out->data[1] + frame_out->linesize[1] * i), frame_out->width / 2);
			}
			for (int i = 0; i < frame_out->height / 2; i++) {
				outFile.write(reinterpret_cast<char*>(frame_out->data[2] + frame_out->linesize[2] * i), frame_out->width / 2);
			}
		}
		++frame_count;
		if (frame_count % 25 == 0) {
			cout << "Process " << frame_count << " frame" << endl;
		}
		// Drop the sink's reference so frame_out can be reused next round.
		av_frame_unref(frame_out);
	}


	inFile.close();
	outFile.close();
	graphFile.close();
	av_frame_free(&frame_in);
	av_frame_free(&frame_out);
	// The av_malloc'ed plane buffers are not owned by the frames and were
	// leaked in the original code.
	av_free(frame_buffer_in);
	av_free(frame_buffer_out);
	avfilter_graph_free(&filter_graph);
	system("pause");
	return 0;
}

当滤波过程复杂到⼀定程度时,即需要多个滤波器进⾏复杂的连接来实现整个滤波过程,这时候对于调⽤者来说,继续采⽤上述⽅法来构建滤波图就显得不够效率。对于复杂的滤波过程,ffmpeg提供了⼀个更为⽅便的滤波过程创建⽅式。这种复杂的滤波器过程创建⽅式要求⽤户以字符串的⽅式描述各个滤波器之间的关系。如下是⼀个描述复杂滤波过程的字符串的例⼦:

[0]trim=start_frame=10:end_frame=20[v0];\
[0]trim=start_frame=30:end_frame=40[v1];\
[v0][v1]concat=n=2[v2];\
[1]hflip[v3];\
[v2][v3]overlay=eof_action=repeat[v4];\
[v4]drawbox=50:50:120:120:red:t=5[v5]

以上是⼀个连续的字符串,为了⽅便分析我们把该字符串进⾏了划分,每⼀⾏都是⼀个滤波器实例,对于每⼀⾏:
1. 开头是⼀对中括号,中括号内的是输⼊的标识名0。
2. 中括号后⾯接着的是滤波器名称trim。
3. 名称后的第⼀个等号后⾯是滤波器参数start_frame=10:end_frame=20,这⾥有两组参数,两组参数⽤冒号分开。
4. 第⼀组参数名称为start_frame,参数值为10,中间⽤等号分开。
5. 第⼆组参数名称为end_frame,参数值为20,中间⽤等号分开。
6. 最后也有⼀对中括号,中括号内的是输出的标识名v0。
7. 如果⼀个滤波实例的输⼊标识名与另⼀个滤波实例的输出标识名相同,则表示这两个滤波实例构成滤波链。
8. 如果⼀个滤波实例的输⼊标识名或者输出标识名⼀直没有与其它滤波实例的输出标识名或者输⼊标识名相同,则表明这些为外部的输⼊输出,通常我们会为其接上buffersrc以及buffersink。
按照这种规则,上⾯的滤波过程可以被描绘成以下滤波图:

ffmpeg提供⼀个函数⽤于解析这种字符串:avfilter_graph_parse2。这个函数会把输⼊的字符串⽣成如上⾯的滤波图,不过我们需要⾃⾏⽣成buffersrc以及buffersink的实例,并通过该函数提供的输⼊以及输出接⼝把buffersrc、buffersink与该滤波图连接起来。

例如,对于上面给出的视频分割翻转的例子,我们只需要

//编写字符串描述整个流程
snprintf(filter_args, sizeof(filter_args),
		"buffer=video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d[v0];" //Parsed_buffer_0
		"[v0]split[main][tmp];"        // Parsed_split_1
		"[tmp]crop=iw:ih/2:0:0,vflip[flip];"   // Parsed_crop_2 Parsed_vflip_3
		"[main][flip]overlay=0:H/2[result];" // Parsed_overlay_4
		"[result]buffersink", // Parsed_buffersink_5
		width, height, format, 1, 25, 1, 1);
//调用avfilter_graph_parse2即可完成整个过滤流程配置
ret = avfilter_graph_parse2(filter_graph, filter_args, &inputs, &outputs);

就能够取得同样的效果。

附上完整c++代码

#include<iostream>
#include<fstream>
using namespace std;
extern "C" {
#include<stdio.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavfilter/avfilter.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/opt.h>
#include <libavutil/imgutils.h>
}

// Filter contexts resolved by Init_filters() and used by main().
AVFilterContext* mainsrc_ctx = nullptr;    // "buffer" source (graph input, Parsed_buffer_0)
AVFilterContext* resultsink_ctx = nullptr; // "buffersink" (graph output, Parsed_buffersink_5)
AVFilterGraph* filter_graph = nullptr;     // owns all filter instances in the graph

int Init_filters(const int width, const int height, const int format) {
	int ret = 0;
	AVFilterInOut* inputs = nullptr;
	AVFilterInOut* outputs = nullptr;
	char filter_args[1024] = { 0 };

	filter_graph = avfilter_graph_alloc();
	if (!filter_graph) {
		cout << "Error: avfilter_graph_alloc failed." << endl;
		return -1;
	}

	snprintf(filter_args, sizeof(filter_args),
		"buffer=video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d[v0];" //Parsed_buffer_0
		"[v0]split[main][tmp];"        // Parsed_split_1
		"[tmp]crop=iw:ih/2:0:0,vflip[flip];"   // Parsed_crop_2 Parsed_vflip_3
		"[main][flip]overlay=0:H/2[result];" // Parsed_overlay_4
		"[result]buffersink", // Parsed_buffersink_5
		width, height, format, 1, 25, 1, 1);
	cout << filter_args << endl;

	ret = avfilter_graph_parse2(filter_graph, filter_args, &inputs, &outputs);
	if (ret < 0) {
		cout << "Cannot prase graph" << endl;
		return -1;
	}

	ret = avfilter_graph_config(filter_graph, nullptr);
	if (ret < 0) {
		cout << "Cannot configure graph" << endl;
		return -1;
	}

	mainsrc_ctx = avfilter_graph_get_filter(filter_graph, "Parsed_buffer_0");
	if (!mainsrc_ctx) {
		cout << "avfilter_graph_get_filter Parsed_buffer_0 failed." << endl;
		return -1;
	}

	resultsink_ctx = avfilter_graph_get_filter(filter_graph, "Parsed_buffersink_5");
	if (!resultsink_ctx) {
		cout << "avfilter_graph_get_filter Parsed_buffersink_5 failed." << endl;
		return -1;
	}
	cout << "sink_width: " << av_buffersink_get_w(resultsink_ctx)
		<< ", sink_height: " << av_buffersink_get_h(resultsink_ctx) << endl;

	return 0;
}

int main() {
	int ret = 0;
	int in_width = 768;
	int in_height = 320;

	if (Init_filters(in_width, in_height, AV_PIX_FMT_YUV420P) < 0) {
		cout << "Init_filters failed." << endl;
		return -1;
	}

	ifstream inFile("768x320.yuv", ios::binary);
	if (!inFile.is_open()) {
		cout << "open infile 768x320.yuv failed." << endl;
		return -1;
	}
	ofstream outFile("out_crop_vfliper_2.yuv", ios::binary);
	if (!outFile.is_open()) {
		cout << "open outfile out_crop_vfliper.yuv failed." << endl;
		return -1;
	}

	string graph_str = avfilter_graph_dump(filter_graph, nullptr);
	ofstream graph_File("graphFile.txt");
	graph_File.write(graph_str.c_str(), strlen(graph_str.c_str()));
	graph_File.close();

	AVFrame* frame_in = av_frame_alloc();
	unsigned char* frame_buffer_in = (unsigned char*)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, 
		in_width, in_height, 1));

	av_image_fill_arrays(frame_in->data, frame_in->linesize, frame_buffer_in, AV_PIX_FMT_YUV420P,
		in_width, in_height, 1);

	AVFrame* frame_out = av_frame_alloc();
	unsigned char* frame_buffer_out = (unsigned char*)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P,
		in_width, in_height, 1));

	av_image_fill_arrays(frame_out->data, frame_out->linesize, frame_buffer_out, AV_PIX_FMT_YUV420P,
		in_width, in_height, 1);

	frame_in->width = in_width;
	frame_in->height = in_height;
	frame_in->format = AV_PIX_FMT_YUV420P;
	uint32_t frame_count = 0;
	while (1)
	{
		inFile.read((char*)frame_buffer_in, in_width * in_height * 3 / 2);
		if (inFile.gcount() != in_width * in_height * 3 / 2) {
			break;
		}
		frame_in->data[0] = frame_buffer_in;
		frame_in->data[1] = frame_buffer_in + in_width * in_height;
		frame_in->data[2] = frame_buffer_in + in_width * in_height * 5 / 4;

		if (av_buffersrc_add_frame(mainsrc_ctx, frame_in) < 0) {
			cout << "Error while add frame." << endl;
			break;
		}
		ret = av_buffersink_get_frame(resultsink_ctx, frame_out);
		if (ret < 0) {
			break;
		}

		if (frame_out->format == AV_PIX_FMT_YUV420P) {
			for (int i = 0; i < frame_out->height; i++) {
				outFile.write((char*)(frame_out->data[0] + frame_out->linesize[0] * i), frame_out->width);
			}
			for (int i = 0; i < frame_out->height/2; i++) {
				outFile.write((char*)(frame_out->data[1] + frame_out->linesize[1] * i), frame_out->width/2);
			}
			for (int i = 0; i < frame_out->height/2; i++) {
				outFile.write((char*)(frame_out->data[2] + frame_out->linesize[2] * i), frame_out->width/2);
			}
		}
		++frame_count;
		if (frame_count % 25 == 0)
			cout << "Process " << frame_count << " frame!" << endl;
		av_frame_unref(frame_out);
	}
	inFile.close();
	outFile.close();
	av_frame_free(&frame_in);
	av_frame_free(&frame_out);
	avfilter_graph_free(&filter_graph);
	cout << "finish." << endl;

	system("pause");
	return 0;
}

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值