#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavfilter/avfiltergraph.h>
#include <libavfilter/avcodec.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/opt.h>
constchar*filter_descr ="overlay=5:5";
#define ENABLE_YUV_FILE 1
AVFormatContext *input_fmt_ctx;
AVCodecContext *input_dec_ctx;
AVFormatContext *overlay_fmt_ctx;
AVCodecContext *overlay_dec_ctx;
intinput_video_stream_idx, overlay_video_stream_idx;
AVFilterGraph *filter_graph;
AVFilterInOut *inputs;
AVFilterInOut *outputs;
AVFilterContext *buffersrc_ctx;
AVFilterContext *bufferoverlay_ctx;
AVFilterContext *buffersink_ctx;
intret;
intgot_frame;
intvideo_eof_reached = 0;
intoverlay_eof_reached = 0;
intactive_stream_index = -1;
FILE* fp_yuv;
voidyuv420p_save(AVFrame *pFrame);
intvideo_transcode_step(AVFrame* mVideoFrame);
intoverlay_transcode_step(AVFrame* mOverlayFrame);
intvideo_output_eof_packet(constchar* tag,
AVStream* ist, AVFilterContext* ifilter);
staticintopen_input_file(constchar*filename)
{
intret;
AVCodec *dec;
if((ret = avformat_open_input(&input_fmt_ctx, filename, NULL, NULL))
av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
returnret;
}
if((ret = avformat_find_stream_info(input_fmt_ctx, NULL))
av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
returnret;
}
/* select the video stream */
ret = av_find_best_stream(input_fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0);
if(ret
av_log(NULL, AV_LOG_ERROR, "Cannot find a video stream in the input file\n");
returnret;
}
input_video_stream_idx = ret;
input_dec_ctx = input_fmt_ctx->streams[input_video_stream_idx]->codec;
/* init the video decoder */
if((ret = avcodec_open2(input_dec_ctx, dec, NULL))
av_log(NULL, AV_LOG_ERROR, "Cannot open video decoder\n");
returnret;
}
return0;
}
staticintopen_overlay_file(constchar*filename)
{
intret;
AVCodec *dec;
if((ret = avformat_open_input(&overlay_fmt_ctx, filename, NULL, NULL))
av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
returnret;
}
if((ret = avformat_find_stream_info(overlay_fmt_ctx, NULL))
av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
returnret;
}
/* select the video stream */
ret = av_find_best_stream(overlay_fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0);
if(ret
av_log(NULL, AV_LOG_ERROR, "Cannot find a video stream in the input file\n");
returnret;
}
overlay_video_stream_idx = ret;
overlay_dec_ctx = overlay_fmt_ctx->streams[overlay_video_stream_idx]->codec;
/* init the video decoder */
if((ret = avcodec_open2(overlay_dec_ctx, dec, NULL))
av_log(NULL, AV_LOG_ERROR, "Cannot open video decoder\n");
returnret;
}
printf("overlay format = %s\n", overlay_fmt_ctx->iformat->name);
return0;
}
staticintvideo_config_input_filter(AVFilterInOut* inputs, AVFilterContext** input_filter_ctx)
{
charargs[512];
memset(args, 0, sizeof(args));
AVFilterContext *first_filter = inputs->filter_ctx;
intpad_idx = inputs->pad_idx;
AVFilter *filter = avfilter_get_by_name("buffer");
// AVRational time_base = input_dec_ctx->time_base;
AVStream* video_st = input_fmt_ctx->streams[input_video_stream_idx];
AVRational time_base = video_st->time_base;
snprintf(args, sizeof(args),
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d:sws_param=flags=%d:frame_rate=%d/%d",
input_dec_ctx->width, input_dec_ctx->height, input_dec_ctx->pix_fmt,
input_dec_ctx->time_base.num, input_dec_ctx->time_base.den,
input_dec_ctx->sample_aspect_ratio.num, input_dec_ctx->sample_aspect_ratio.den,
SWS_BILINEAR + ((video_st->codec->flags&CODEC_FLAG_BITEXACT) ? SWS_BITEXACT:0),
video_st->r_frame_rate.num, video_st->r_frame_rate.den);
printf("input args = %s\n", args);
ret = avfilter_graph_create_filter(input_filter_ctx, filter, "src_in", args, NULL, filter_graph);
if(ret
printf("video config input filter fail.\n");
return-1;
}
ret = avfilter_link(*input_filter_ctx, 0, first_filter, pad_idx);
assert(ret >= 0);
printf("video_config_input_filter avfilter_link ret = %d\n", ret);
returnret;
}
staticintvideo_config_overlay_filter(AVFilterInOut* inputs, AVFilterContext** overlay_filter_ctx )
{
charargs[512];
memset(args, 0, sizeof(args));
AVFilterContext *first_filter = inputs->filter_ctx;
intpad_idx = inputs->pad_idx;
AVFilter *filter = avfilter_get_by_name("buffer");
//AVRational time_base = overlay_dec_ctx->time_base;
AVStream* overlay_st = overlay_fmt_ctx->streams[overlay_video_stream_idx];
AVRational time_base = overlay_st->time_base;
snprintf(args, sizeof(args),
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d:sws_param=flags=%d:frame_rate=%d/%d",
overlay_dec_ctx->width, overlay_dec_ctx->height, overlay_dec_ctx->pix_fmt,
time_base.num, time_base.den,
overlay_dec_ctx->sample_aspect_ratio.num, overlay_dec_ctx->sample_aspect_ratio.den,
SWS_BILINEAR + ((overlay_st->codec->flags&CODEC_FLAG_BITEXACT) ? SWS_BITEXACT:0),
overlay_st->r_frame_rate.num, overlay_st->r_frame_rate.den);
printf("overlay args = %s\n", args);
ret = avfilter_graph_create_filter(overlay_filter_ctx, filter, "overlay_in", args, NULL, filter_graph);
if(ret
printf("video config overlay filter fail.\n");
return-1;
}
ret = avfilter_link(*overlay_filter_ctx, 0, first_filter, pad_idx);
assert(ret >= 0);
printf("video_config_overlay_filter ret = %d\n", ret);
avfilter_inout_free(&inputs);
returnret;
}
staticintvideo_config_output_filter(AVFilterInOut* outputs, AVFilterContext** out_filter_ctx)
{
charargs[512];
AVFilterContext *last_filter = outputs->filter_ctx;
intpad_idx = outputs->pad_idx;
AVFilter *buffersink = avfilter_get_by_name("ffbuffersink");
intret = avfilter_graph_create_filter(out_filter_ctx, buffersink,"video_out", NULL, NULL, filter_graph);
assert(ret >= 0);
if(ret
returnret;
ret = avfilter_link(last_filter, pad_idx, *out_filter_ctx, 0);
assert(ret >= 0);
if(ret
returnret;
avfilter_inout_free(&outputs);
return0;
}
staticintinit_input_filters()
{
filter_graph->scale_sws_opts = av_strdup("flags=0x4");
av_opt_set(filter_graph, "aresample_swr_opts","", 0);
ret = avfilter_graph_parse2(filter_graph, filter_descr, &inputs, &outputs);
assert(inputs && inputs->next && !inputs->next->next);
ret = video_config_input_filter(inputs, &buffersrc_ctx);
ret = video_config_overlay_filter(inputs->next, &bufferoverlay_ctx);
returnret;
}
staticintinit_output_filters()
{
returnvideo_config_output_filter(outputs, &buffersink_ctx);
}
intreap_filters() {
AVFilterBufferRef *picref;
while(1) {
ret = av_buffersink_get_buffer_ref(buffersink_ctx, &picref, AV_BUFFERSINK_FLAG_NO_REQUEST);
if(ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
//printf("reap_filters fail ret = %d\n", ret);
return0;// no frame filtered.
}
printf("samplesref -------------------\n");
AVFrame* filtered_frame = avcodec_alloc_frame();
avcodec_get_frame_defaults(filtered_frame);
avfilter_copy_buf_props(filtered_frame, picref);
yuv420p_save(filtered_frame);
avfilter_unref_bufferp(&picref);
}
}
inttranscode_from_filter(AVFilterContext** ifilters,int* eof_reached_arr,int* active_stream_indext) {
intret = 0;
ret = avfilter_graph_request_oldest(filter_graph);
if(ret >= 0) {
returnret;
}
if(ret == AVERROR_EOF) {
returnret;
}
if(ret != AVERROR(EAGAIN)) {
returnret;
}
intnb_requests_max = 0;
inti;
for(i = 0; i
inteof_reached = eof_reached_arr[i];
if(eof_reached) {
continue;
}
AVFilterContext* ifilter = ifilters[i];
intnb_requests = av_buffersrc_get_nb_failed_requests(ifilter);
if(nb_requests > nb_requests_max) {
nb_requests_max = nb_requests;
*active_stream_indext = i;
}
}
returnret;
}
intmain()
{
avcodec_register_all();
av_register_all();
avfilter_register_all();
avformat_network_init();
char* video_file ="outFileSrc.mp4";
char* overlay_video_file ="my_logo.png";// light1.mp4
#if ENABLE_YUV_FILE
constchar* yuvFile ="outWater.yuv";
fp_yuv = fopen(yuvFile, "wb");
#endif
open_input_file(video_file);
open_overlay_file(overlay_video_file);
filter_graph = avfilter_graph_alloc();
if(!filter_graph) {
printf("filter graph alloc fail.\n");
return-1;
}
init_input_filters();
init_output_filters();
if((ret = avfilter_graph_config(filter_graph, NULL))
returnret;
AVFrame* mVideoFrame = avcodec_alloc_frame();
AVFrame* mOverlayFrame = avcodec_alloc_frame();
while(1) {
if(video_eof_reached && overlay_eof_reached) {
printf("stream EOF.\n");
break;
}
AVFilterContext* ifilters[] = {buffersrc_ctx, bufferoverlay_ctx};
inteof_reacheds[] = {video_eof_reached, overlay_eof_reached};
ret = transcode_from_filter(ifilters, eof_reacheds, &active_stream_index);
if(ret >= 0) {
ret = reap_filters();
assert(ret >= 0);
continue;
}
if(ret == AVERROR_EOF) {
ret = reap_filters();
assert(ret >= 0);
continue;
}
if(ret == AVERROR(EAGAIN) && active_stream_index
continue;
}
assert(active_stream_index >= 0);
printf("active_stream_index = %d\n", active_stream_index);
if(active_stream_index == 0) {
video_transcode_step(mVideoFrame);
continue;
}
overlay_transcode_step(mOverlayFrame);
}
if(input_dec_ctx)
avcodec_close(input_dec_ctx);
avformat_close_input(&input_fmt_ctx);
if(overlay_dec_ctx)
avcodec_close(overlay_dec_ctx);
avformat_close_input(&overlay_fmt_ctx);
printf("my_filtering_video3 end -------\n");
return0;
}
intvideo_transcode_step(AVFrame* mVideoFrame) {
intret = 0;
AVPacket pkt;
ret = av_read_frame(input_fmt_ctx, &pkt);
if(ret == AVERROR(EAGAIN)) {
return0;
}
if(ret
video_eof_reached = 1;
assert(ret == AVERROR_EOF);
ret = video_output_eof_packet("video_eof", input_fmt_ctx->streams[input_video_stream_idx], buffersrc_ctx);
assert (ret >= 0);
returnret;
}
if(pkt.stream_index != input_video_stream_idx) {
// av_free(&pkt);
returnret;
}
ret = avcodec_decode_video2(input_dec_ctx, mVideoFrame, &got_frame, &pkt);
if(ret
printf("Error decoding input video\n");
}
if(got_frame) {
int64_t best_effort_timestamp = av_frame_get_best_effort_timestamp(mVideoFrame);
mVideoFrame->pts = best_effort_timestamp;
if(av_buffersrc_add_frame(buffersrc_ctx, mVideoFrame, AV_BUFFERSRC_FLAG_PUSH)
av_log(NULL, AV_LOG_ERROR, "Error while feeding the video filtergraph\n");
return-1;
}
reap_filters();
}
return0;
}
intoverlay_transcode_step(AVFrame* mOverlayFrame) {
intret = 0;
AVPacket pkt;
ret = av_read_frame(overlay_fmt_ctx, &pkt);
if(ret == AVERROR(EAGAIN)) {
return0;
}
if(ret
overlay_eof_reached = 1;
ret = video_output_eof_packet("overlay_eof", overlay_fmt_ctx->streams[input_video_stream_idx], bufferoverlay_ctx);
assert(ret >=0 );
returnret;
}
if(pkt.stream_index != overlay_video_stream_idx) {
av_free_packet(&pkt);
returnret;
}
ret = avcodec_decode_video2(overlay_dec_ctx, mOverlayFrame, &got_frame, &pkt);
if(ret
printf("Error decoding overlay video\n");
}
if(got_frame) {
int64_t best_effort_timestamp = av_frame_get_best_effort_timestamp(mOverlayFrame);
mOverlayFrame->pts = best_effort_timestamp;
if(av_buffersrc_add_frame(bufferoverlay_ctx, mOverlayFrame, AV_BUFFERSRC_FLAG_PUSH)
av_log(NULL, AV_LOG_ERROR, "Error while feeding the audio filtergraph\n");
return-1;
}
}
return0;
}
/**
* output EOF packet to filter to flush
*/
intvideo_output_eof_packet(constchar* tag,
AVStream* ist, AVFilterContext* ifilter)
{
intret = 0;
// alloc frame if NULL
AVFrame* decoded_frame = avcodec_alloc_frame();
AVPacket pkt;
av_init_packet(&pkt);
pkt.data = NULL;
pkt.size = 0;
intgot_frame = 0;
ret = avcodec_decode_video2(ist->codec, decoded_frame, &got_frame, &pkt);
// EOF, assert got nothing and ret is 0.
// TODO: here we still got frame, different to ffmpeg.
assert(ret >= 0);
// flush filter
av_buffersrc_add_ref(ifilter, NULL, 0);
printf("[%s] filter -> eof packet.\n", tag);
returnret;
}
/**
* save yuv420p frame [YUV]
*/
voidyuv420p_save(AVFrame *pFrame)
{
inti = 0;
intwidth = pFrame->width, height = pFrame->height;
intheight_half = height / 2, width_half = width / 2;
inty_wrap = pFrame->linesize[0];
intu_wrap = pFrame->linesize[1];
intv_wrap = pFrame->linesize[2];
unsigned char*y_buf = pFrame->data[0];
unsigned char*u_buf = pFrame->data[1];
unsigned char*v_buf = pFrame->data[2];
//save y
for(i = 0; i
fwrite(y_buf + i * y_wrap, 1, width, fp_yuv);
//save u
for(i = 0; i
fwrite(u_buf + i * u_wrap, 1, width_half, fp_yuv);
//save v
for(i = 0; i
fwrite(v_buf + i * v_wrap, 1, width_half, fp_yuv);
fflush(fp_yuv);
}