ffmpeg avfilter 具有强大的处理功能,如视频水印、Overlay、视频补帧、抠图、音频流重采样等功能,是一个值得深入研究的模块。
其中最简单的版本为雷霄骅提供的水印示例(https://blog.csdn.net/leixiaohua1020/article/details/29368911)。因为原始的YUV数据本身不携带时间戳信息,所以在使用 movie 滤镜添加水印时需要手动为每一帧设置时间戳,这里对此进行补充记录;完整过程请参考上面雷大神的原始链接。
/*
 * Minimal FFmpeg libavfilter demo: read raw YUV420P frames from test.yuv,
 * run them through a watermark filter graph (movie source + overlay) and
 * dump the filtered frames to output.yuv.
 *
 * Based on Lei Xiaohua's libavfilter sample. Fixes applied:
 *  - frame pts was never initialized (av_frame_alloc leaves it at
 *    AV_NOPTS_VALUE) and "pts++; pts = pts * 40" compounded the value
 *    multiplicatively each iteration; now a linear per-frame counter.
 *  - FILE handles, AVFilterInOut lists and frame buffers leaked on the
 *    error paths; unified goto-based cleanup added.
 *  - av_buffersink_get_frame() returning AVERROR(EAGAIN) (graph needs
 *    more input) ended the loop instead of feeding the next frame.
 *  - the pre-allocated output buffer was unused (the sink supplies its
 *    own frame references) and leaked; removed.
 */
int main(int argc, char *argv[])
{
    int ret = -1;
    int64_t frame_count = 0;            /* running pts, in time_base (1/25 s) units */
    AVFrame *frame_in = NULL;
    AVFrame *frame_out = NULL;
    unsigned char *frame_buffer_in = NULL;
    AVFilterContext *buffersink_ctx = NULL;
    AVFilterContext *buffersrc_ctx = NULL;
    AVFilterGraph *filter_graph = NULL;
    AVFilterInOut *outputs = NULL;
    AVFilterInOut *inputs = NULL;
    AVBufferSinkParams *buffersink_params = NULL;
    FILE *fp_in = NULL;
    FILE *fp_out = NULL;

    const int in_width = 832;
    const int in_height = 480;
    /* One YUV420P frame: full-res Y plane plus quarter-res U and V planes. */
    const size_t frame_size = (size_t)in_width * in_height * 3 / 2;

    /* Input: raw YUV420P stream (binary mode — matters on Windows). */
    fp_in = fopen("test.yuv", "rb");
    if (fp_in == NULL) {
        printf("Error open input file.\n");
        goto end;
    }
    /* Output: filtered raw YUV420P stream. */
    fp_out = fopen("output.yuv", "wb");
    if (fp_out == NULL) {
        printf("Error open output file.\n");
        goto end;
    }

    /*
     * Stage 1: build the filter graph.
     * Since FFmpeg 4.0 avfilter_register_all() is no longer required.
     *
     * The description overlays my_logo.png at (5,5) on every input frame;
     * "[in]"/"[out]" label the buffer source and sink endpoints.
     */
    const char *filter_descr = "movie=my_logo.png[wm];[in][wm]overlay=5:5[out]";
    char args[512];
    /* "buffer"    : special source filter — raw frames are pushed in here.
     * "buffersink": special sink filter — filtered frames are pulled out here. */
    const AVFilter *buffersrc = avfilter_get_by_name("buffer");
    const AVFilter *buffersink = avfilter_get_by_name("buffersink");
    enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };

    outputs = avfilter_inout_alloc();
    inputs = avfilter_inout_alloc();
    filter_graph = avfilter_graph_alloc();
    if (!outputs || !inputs || !filter_graph) {
        printf("Error allocating filter graph.\n");
        goto end;
    }

    /* Buffer video source: describes the raw frames we will insert
     * (size, pixel format, 1/25 time base, square pixels). */
    snprintf(args, sizeof(args),
             "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
             in_width, in_height, AV_PIX_FMT_YUV420P, 1, 25, 1, 1);
    ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in", args, NULL, filter_graph);
    if (ret < 0) {
        printf("Cannot create buffer source\n");
        goto end;
    }

    /* Buffer video sink: terminates the chain.  NOTE: the pixel_fmts field
     * of AVBufferSinkParams is effectively ignored by the implementation. */
    buffersink_params = av_buffersink_params_alloc();
    buffersink_params->pixel_fmts = pix_fmts;
    ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out", NULL, buffersink_params, filter_graph);
    av_freep(&buffersink_params);
    if (ret < 0) {
        printf("Cannot create buffer sink\n");
        goto end;
    }

    /* Endpoints for the textual graph description: "outputs" wires our
     * source into the parsed graph, "inputs" wires its result to our sink. */
    outputs->name = av_strdup("in");
    outputs->filter_ctx = buffersrc_ctx;
    outputs->pad_idx = 0;
    outputs->next = NULL;
    inputs->name = av_strdup("out");
    inputs->filter_ctx = buffersink_ctx;
    inputs->pad_idx = 0;
    inputs->next = NULL;

    /* Parse the string description into the graph, then validate links. */
    if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_descr, &inputs, &outputs, NULL)) < 0) {
        printf("Cannot parse filter graph\n");
        goto end;
    }
    if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0) {
        printf("Cannot configure filter graph\n");
        goto end;
    }

    /* Stage 2: push raw frames through the graph. */
    frame_in = av_frame_alloc();
    frame_out = av_frame_alloc();
    frame_buffer_in = (unsigned char *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, in_width, in_height, 1));
    if (!frame_in || !frame_out || !frame_buffer_in) {
        printf("Error allocating frames.\n");
        ret = -1;
        goto end;
    }
    av_image_fill_arrays(frame_in->data, frame_in->linesize, frame_buffer_in, AV_PIX_FMT_YUV420P, in_width, in_height, 1);
    frame_in->width = in_width;
    frame_in->height = in_height;
    frame_in->format = AV_PIX_FMT_YUV420P;

    while (fread(frame_buffer_in, 1, frame_size, fp_in) == frame_size) {
        /* Y, U, V plane pointers into the packed input buffer. */
        frame_in->data[0] = frame_buffer_in;
        frame_in->data[1] = frame_buffer_in + in_width * in_height;
        frame_in->data[2] = frame_buffer_in + in_width * in_height * 5 / 4;
        /* The movie/overlay filters need monotonically increasing
         * timestamps on raw frames: one tick per frame in the 1/25
         * time_base declared for the buffer source above. */
        frame_in->pts = frame_count++;

        /* Push the frame into the graph's source filter. */
        if (av_buffersrc_add_frame(buffersrc_ctx, frame_in) < 0) {
            printf("Error while add frame.\n");
            break;
        }

        /* Pull a filtered frame; EAGAIN means the graph simply needs
         * more input before it can emit one — keep feeding. */
        ret = av_buffersink_get_frame(buffersink_ctx, frame_out);
        if (ret == AVERROR(EAGAIN))
            continue;
        if (ret < 0)
            break;

        if (frame_out->format == AV_PIX_FMT_YUV420P) {
            /* Write plane by plane, honouring the sink's linesize padding. */
            for (int i = 0; i < frame_out->height; i++)
                fwrite(frame_out->data[0] + frame_out->linesize[0] * i, 1, frame_out->width, fp_out);
            for (int i = 0; i < frame_out->height / 2; i++)
                fwrite(frame_out->data[1] + frame_out->linesize[1] * i, 1, frame_out->width / 2, fp_out);
            for (int i = 0; i < frame_out->height / 2; i++)
                fwrite(frame_out->data[2] + frame_out->linesize[2] * i, 1, frame_out->width / 2, fp_out);
        }
        printf("Process 1 frame!\n");
        av_frame_unref(frame_out);
    }
    ret = 0;

end:
    /* Single cleanup path: every resource below is NULL-safe to release. */
    if (fp_in)
        fclose(fp_in);
    if (fp_out)
        fclose(fp_out);
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);
    av_freep(&frame_buffer_in);
    av_frame_free(&frame_in);
    av_frame_free(&frame_out);
    avfilter_graph_free(&filter_graph);
    return ret;
}