ffmpeg视频添加filter-flv
具体问题请看代码及代码注释。
//.h
/* ffmpeg.h -- declarations for the FLV decode + watermark-filter demo.
 * NOTE: renamed the include guard from __FFMPEG_H__ -- identifiers with a
 * leading double underscore are reserved for the implementation. */
#ifndef FFMPEG_FILTER_DEMO_H
#define FFMPEG_FILTER_DEMO_H
#include "info.h"
/* FFmpeg headers are plain C: wrap them so the C++ compiler uses C linkage. */
extern "C"
{
#include "libavformat/avformat.h"
#include "libavformat/avio.h"
#include "libavcodec/avcodec.h"
#include "libswscale/swscale.h"
#include "libavutil/avutil.h"
#include "libavutil/mathematics.h"
#include "libswresample/swresample.h"
#include "libavutil/opt.h"
#include "libavutil/channel_layout.h"
#include "libavutil/samplefmt.h"
#include "libavdevice/avdevice.h"
#include "libavfilter/avfilter.h"
#include "libavutil/error.h"
#include "libavutil/time.h"
#include "libavfilter/avfiltergraph.h"
#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"
#include "inttypes.h"
#include "stdint.h"
} /* removed the stray ';' after the brace and the duplicate mathematics.h */
/* MSVC auto-link directives for the FFmpeg import libraries. */
#pragma comment(lib,"avformat.lib")
#pragma comment(lib,"avcodec.lib")
#pragma comment(lib,"avdevice.lib")
#pragma comment(lib,"avfilter.lib")
#pragma comment(lib,"avutil.lib")
#pragma comment(lib,"postproc.lib")
#pragma comment(lib,"swresample.lib")
#pragma comment(lib,"swscale.lib")
/* Demo file paths: input FLV, watermark PNG, raw YUV420P output. */
#define INPUTURL "../flv/test.flv"
#define WATERMARKURL "../png/1.png"
#define OUTPUTURL "../yuv/test.yuv"
/* Shared demo state (defined in the .cpp). */
extern AVFormatContext * m_in_format_ctx;   /* demuxer context for INPUTURL   */
extern int video_stream_idx;                /* index of the video stream, -1 if unset */
extern AVFilterContext * buffersink_ctx;    /* filter-graph output endpoint   */
extern AVFilterContext * buffersrc_ctx;     /* filter-graph input endpoint    */
extern AVFilterGraph * filter_graph;        /* the configured filter graph    */
void initialize_all();
int init_input(char * filename,AVFormatContext ** in_format);
int open_codec_context(int *stream_idx, AVFormatContext ** in_format_ctx, enum AVMediaType type);
void uinit_input(AVFormatContext * in_format_ctx);
int read_frame(AVFormatContext * in_format,char * filename);
int init_filters(const char *filters_descr);
void uinit_filters();
#endif /* FFMPEG_FILTER_DEMO_H */
#include "ffmpeg.h"
/* Definitions of the shared demo state declared `extern` in ffmpeg.h. */
AVFormatContext * m_in_format_ctx = NULL;   /* demuxer context, opened by init_input() */
int video_stream_idx = -1;                  /* set by open_codec_context(); -1 = not found */
AVFilterContext * buffersrc_ctx = NULL;     /* "in" endpoint created by init_filters() */
AVFilterContext * buffersink_ctx = NULL;    /* "out" endpoint created by init_filters() */
AVFilterGraph * filter_graph = NULL;        /* owned by init_filters()/uinit_filters() */
/* One-time global FFmpeg initialization; must run before any other call.
 * (These registration calls are required by the FFmpeg version this demo
 * targets; they were removed in FFmpeg 4.x.) */
void initialize_all()
{
av_register_all();        /* register all muxers/demuxers */
avcodec_register_all();   /* register all codecs */
avfilter_register_all();  /* register all filters (needed for "movie"/"overlay") */
}
int init_input(char * filename,AVFormatContext ** in_format_ctx)
{
int ret = 0;
ret = avformat_open_input(in_format_ctx, filename,NULL, NULL);
if (ret != 0)
{
printf("Call avformat_open_input function failed!\n");
return 0;
}
if (av_find_stream_info(*in_format_ctx) < 0)
{
printf("Call av_find_stream_info function failed!\n");
return 0;
}
ret = open_codec_context(&video_stream_idx, in_format_ctx, AVMEDIA_TYPE_VIDEO);
if (ret != 1)
{
printf("Call open_codec_context function failed!\n");
return 0;
}
//The output of input information
av_dump_format(*in_format_ctx, -1, filename, 0);
return 1;
}
/* Locate the best stream of `type` in the input, find its decoder and open
 * it. On success *stream_idx receives the stream index and 1 is returned;
 * on any failure 0 is returned (the original inconsistently returned the
 * negative AVERROR(EINVAL) for the missing-decoder case, and did so
 * silently). */
int open_codec_context(int *stream_idx, AVFormatContext ** in_format_ctx, enum AVMediaType type)
{
int ret = 0;
AVStream * st = NULL;
AVCodecContext * dec_ctx = NULL;
AVCodec * dec = NULL;
ret = av_find_best_stream((*in_format_ctx), type, -1, -1, NULL, 0);
if (ret < 0)
{
printf("Call av_find_best_stream function failed!\n");
return 0;
}
*stream_idx = ret;
st = (*in_format_ctx)->streams[*stream_idx];
/* find decoder for the stream */
dec_ctx = st->codec;
dec = avcodec_find_decoder(dec_ctx->codec_id);
if (!dec)
{
printf("Call avcodec_find_decoder function failed!\n");
return 0;
}
if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0)
{
printf("Call avcodec_open2 function failed!\n");
return 0;
}
return 1;
}
/* Close the decoder and the demuxer context.
 * Tolerates a partially-initialized state: a NULL context (open failed)
 * and an unset video_stream_idx (the original indexed streams[-1] when
 * called from main's error path). avformat_close_input() already frees
 * the context and NULLs the pointer, so the original trailing av_free()
 * was redundant and has been removed. */
void uinit_input(AVFormatContext * in_format_ctx)
{
if (!in_format_ctx)
return;
if (video_stream_idx >= 0 && video_stream_idx < (int)in_format_ctx->nb_streams)
avcodec_close(in_format_ctx->streams[video_stream_idx]->codec);
avformat_close_input(&in_format_ctx);
}
/* Demux `in_format`, decode the video stream, push each decoded frame
 * through the configured filter graph (watermark overlay), and append the
 * filtered frames as planar YUV420P to `filename`.
 * Returns 1 (the original had no failure return; kept for compatibility).
 *
 * Fixes over the original:
 *  - removed `picture = avcodec_alloc_frame()` which leaked the frame
 *    already allocated by av_frame_alloc();
 *  - `continue` on EAGAIN/EOF no longer leaks the packet;
 *  - fopen/calloc/frame-alloc results are checked before use;
 *  - uses the `in_format` parameter consistently instead of the global. */
int read_frame(AVFormatContext * in_format,char * filename)
{
int ret = 0;
int got_picture_ptr = 0;
int i = 0, j = 0, k = 0;
AVPacket pkt_in;
AVCodecContext * pCodecCtx = in_format->streams[video_stream_idx]->codec;
AVFrame * picture = av_frame_alloc();
AVFrame * filt_frame = av_frame_alloc();
/* One full frame of YUV420P is w*h*3/2 bytes; w*h*3 leaves headroom. */
uint8_t * video_decode_buf = (uint8_t *)calloc(1, pCodecCtx->width * pCodecCtx->height * 3 * sizeof(char));
FILE * fp_yuv = fopen(filename,"wb+");
if (!picture || !filt_frame || !video_decode_buf || !fp_yuv)
{
printf("read_frame: allocation or output-file open failed!\n");
goto cleanup;
}
av_init_packet(&pkt_in);
/* av_read_frame(): 0 if OK, < 0 on error or end of file */
while (av_read_frame(in_format, &pkt_in) == 0)
{
if (pkt_in.stream_index == video_stream_idx)
{
ret = avcodec_decode_video2(in_format->streams[video_stream_idx]->codec, picture, &got_picture_ptr, &pkt_in);
if (ret >= 0 && got_picture_ptr)
{
/* push the decoded frame into the filtergraph */
if (av_buffersrc_add_frame_flags(buffersrc_ctx, picture, AV_BUFFERSRC_FLAG_KEEP_REF) < 0)
{
printf("Error while feeding the filtergraph\n");
av_free_packet(&pkt_in);
break;
}
/* pull the filtered frame from the filtergraph */
ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
{
av_frame_unref(picture);
av_free_packet(&pkt_in);  /* original `continue` leaked the packet */
continue;
}
else if (ret < 0)
{
av_free_packet(&pkt_in);
break;
}
/* Copy Y, then U, then V plane row by row: linesize may exceed
 * width because of alignment padding, so a flat memcpy is wrong. */
for (i = 0; i < pCodecCtx->height; i++)
{
memcpy(video_decode_buf + pCodecCtx->width * i,
filt_frame->data[0] + filt_frame->linesize[0] * i,
pCodecCtx->width);
}
for (j = 0; j < pCodecCtx->height / 2; j++)
{
memcpy(video_decode_buf + pCodecCtx->width * i + pCodecCtx->width / 2 * j,
filt_frame->data[1] + filt_frame->linesize[1] * j,
pCodecCtx->width / 2);
}
for (k = 0; k < pCodecCtx->height / 2; k++)
{
memcpy(video_decode_buf + pCodecCtx->width * i + pCodecCtx->width / 2 * j + pCodecCtx->width / 2 * k,
filt_frame->data[2] + filt_frame->linesize[2] * k,
pCodecCtx->width / 2);
}
fwrite((const char *)video_decode_buf, 1, pCodecCtx->width * pCodecCtx->height * 3 / 2, fp_yuv);
av_frame_unref(filt_frame);
av_frame_unref(picture);
}
}
av_free_packet(&pkt_in);
}
cleanup:
av_frame_free(&picture);
av_frame_free(&filt_frame);
free(video_decode_buf);  /* free(NULL) is a no-op */
if (fp_yuv)
fclose(fp_yuv);
return 1;
}
int init_filters(const char *filters_descr)
{
char args[512] = {0};
int ret = 0;
AVFilter *buffersrc = avfilter_get_by_name("buffer");
AVFilter *buffersink = avfilter_get_by_name("buffersink");
AVFilterInOut *outputs = avfilter_inout_alloc();
AVFilterInOut *inputs = avfilter_inout_alloc();
AVCodecContext * pCodecCtx = m_in_format_ctx->streams[video_stream_idx]->codec;
filter_graph = avfilter_graph_alloc();
/* buffer video source: the decoded frames from the decoder will be inserted here. */
_snprintf(args, sizeof(args),
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
pCodecCtx->time_base.num, pCodecCtx->time_base.den,
pCodecCtx->sample_aspect_ratio.num, pCodecCtx->sample_aspect_ratio.den);
ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",args, NULL, filter_graph);
if (ret < 0)
{
printf("Cannot create buffer source\n");
return ret;
}
/* buffer video sink: to terminate the filter chain. */
ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",NULL, NULL, filter_graph);
if (ret < 0)
{
printf("Cannot create buffer sink\n");
return ret;
}
/* Endpoints for the filter graph. */
outputs->name = av_strdup("in");
outputs->filter_ctx = buffersrc_ctx;
outputs->pad_idx = 0;
outputs->next = NULL;
inputs->name = av_strdup("out");
inputs->filter_ctx = buffersink_ctx;
inputs->pad_idx = 0;
inputs->next = NULL;
if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr,&inputs, &outputs, NULL)) < 0)
{
return ret;
}
if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
{
return ret;
}
avfilter_inout_free(&inputs);
avfilter_inout_free(&outputs);
return 1;
}
/* Free the filter graph (and, with it, buffersrc_ctx/buffersink_ctx,
 * which are owned by the graph). Safe to call when filter_graph is NULL. */
void uinit_filters()
{
avfilter_graph_free(&filter_graph);
}
//main.cpp
#include "ffmpeg.h"
/* Demo driver: open the input FLV, build the watermark-overlay filter
 * graph, run the decode/filter/dump loop, then tear everything down.
 * filter_descr is declared BEFORE the first `goto end` -- the original
 * declared it after, and in C++ a goto may not jump over an initialized
 * variable that is still in scope at the label. */
int main(int argc, char** argv)
{
int ret = 0;
/* NOTE(review): the movie source path "1.png" differs from the
 * WATERMARKURL macro ("../png/1.png") defined in the header -- confirm
 * which path is intended. Kept byte-identical to preserve behavior. */
const char * filter_descr = "movie=1.png[wm];[in][wm]overlay=5:5[out]";
initialize_all();
ret = init_input(INPUTURL, &m_in_format_ctx);
if (ret != 1)
{
goto end;
}
ret = init_filters(filter_descr);
if (ret != 1)
{
goto end;
}
printf("--------程序运行开始----------\n");
read_frame(m_in_format_ctx, OUTPUTURL);
end:
uinit_filters();
if (m_in_format_ctx)  /* init_input may have failed before opening it */
uinit_input(m_in_format_ctx);
printf("--------程序运行结束----------\n");
printf("-------请按任意键退出---------\n");
return getchar();
}
程序运行效果见运行后生成的 ../yuv/test.yuv 文件(原文此处为效果截图,略)。
如有错误请指正:
交流请加QQ群:62054820
QQ:379969650.