Filter5----Using filters

Steps for using a filter

  1. Obtain the decoded raw data (YUV/PCM)
  2. Add the data to the buffer source filter
  3. Take the filtered data out of the buffer sink (steps 2 and 3 are sketched right after this list)
  4. Release the resources once all data has been processed
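
A minimal sketch of steps 2 and 3, assuming the filter graph has already been built and configured, and that src_ctx / sink_ctx (hypothetical names) are the "buffer" and "buffersink" filter contexts created during setup:

#include <libavutil/frame.h>
#include <libavfilter/buffersrc.h>
#include <libavfilter/buffersink.h>

static int push_and_drain(AVFilterContext *src_ctx, AVFilterContext *sink_ctx,
                          AVFrame *frame, AVFrame *filtered)
{
    // Step 2: hand the decoded frame to the buffer source (it takes ownership)
    int ret = av_buffersrc_add_frame(src_ctx, frame);
    if (ret < 0)
        return ret;

    // Step 3: drain every frame the graph can produce right now
    while ((ret = av_buffersink_get_frame(sink_ctx, filtered)) >= 0)
    {
        // ... consume "filtered" here (write to file, encode, render) ...
        av_frame_unref(filtered);
    }

    // EAGAIN/EOF only mean "no more output at the moment", not a failure
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        return 0;
    return ret;
}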

Adding data to the buffer source filter

/**
 * Add a frame to the buffer source.
 *
 * @param ctx   an instance of the buffersrc filter
 * @param frame frame to be added. If the frame is reference counted, this
 * function will take ownership of the reference(s) and reset the frame.
 * Otherwise the frame data will be copied. If this function returns an error,
 * the input frame is not touched.
 *
 * @return 0 on success, a negative AVERROR on error.
 *
 * @note the difference between this function and av_buffersrc_write_frame() is
 * that av_buffersrc_write_frame() creates a new reference to the input frame,
 * while this function takes ownership of the reference passed to it.
 *
 * This function is equivalent to av_buffersrc_add_frame_flags() without the
 * AV_BUFFERSRC_FLAG_KEEP_REF flag.
 */
av_warn_unused_result
int av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame);
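
As the note above points out, av_buffersrc_add_frame() takes ownership of the frame's references and resets the frame. If the caller wants to keep its own reference, the two equivalent alternatives below can be used instead (a sketch, using the same hypothetical src_ctx and frame as above):

// Keeps the caller's reference: "frame" stays valid and must still be
// unreferenced/freed by the caller later.
ret = av_buffersrc_write_frame(src_ctx, frame);

// Same effect expressed through the flags-based API.
ret = av_buffersrc_add_frame_flags(src_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF);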

Taking the filtered data out of the buffer sink

/**
 * Get a frame with filtered data from sink and put it in frame.
 *
 * @param ctx pointer to a context of a buffersink or abuffersink AVFilter.
 * @param frame pointer to an allocated frame that will be filled with data.
 *              The data must be freed using av_frame_unref() / av_frame_free()
 *
 * @return
 *         - >= 0 if a frame was successfully returned.
 *         - AVERROR(EAGAIN) if no frames are available at this point; more
 *           input frames must be added to the filtergraph to get more output.
 *         - AVERROR_EOF if there will be no more output frames on this sink.
 *         - A different negative AVERROR code in other failure cases.
 */
int av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *frame);
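
When the input stream ends, the filter graph itself also needs to be flushed: passing NULL to av_buffersrc_add_frame() signals EOF to the buffer source, after which the sink is drained until it returns AVERROR_EOF. A rough sketch using the same hypothetical src_ctx / sink_ctx / filtered names (note that the complete program below only flushes the decoder, not the graph):

// Signal EOF so the graph can push out any frames it is still buffering.
ret = av_buffersrc_add_frame(src_ctx, NULL);

// Drain the sink until it reports end of stream (AVERROR_EOF).
while ((ret = av_buffersink_get_frame(sink_ctx, filtered)) >= 0)
{
    // ... handle the remaining filtered frames ...
    av_frame_unref(filtered);
}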

Complete code

#include "filter1.h"

static AVFormatContext *fmt_ctx = NULL;
static AVCodec *video_codec = NULL;
static AVCodecParameters *video_codec_par = NULL;
static AVCodecContext *video_codec_ctx = NULL;
static int video_stream_index = -1;
static AVFilterGraph *graph = NULL;
static AVFilterInOut *input = NULL;
static AVFilterInOut *output = NULL;
static AVFilterContext *buffer_filter_ctx = NULL;
static AVFilterContext *buffer_shink_filter_ctx = NULL;
static int open_input_file(const char *path)
{

    int ret = 0;
    ret = avformat_open_input(&fmt_ctx, path, NULL, NULL);
    if (ret < 0)
    {
        av_log(NULL, AV_LOG_ERROR, "avformat_open_input faile path=%s", path);
        return ret;
    }

    ret = avformat_find_stream_info(fmt_ctx, NULL);
    if (ret < 0)
    {
        av_log(NULL, AV_LOG_ERROR, "avformat_find_stream_info faile ");
        return ret;
    }

    av_dump_format(fmt_ctx, 0, path, 0);
    video_stream_index = av_find_best_stream(fmt_ctx,
                                             AVMEDIA_TYPE_VIDEO,
                                             -1,
                                             -1,
                                             &video_codec,
                                             0);

    if (video_stream_index < 0)
    {
        av_log(NULL, AV_LOG_ERROR, "av_find_best_stream failed\n");
        ret = video_stream_index;
        return ret;
    }

    video_codec_par = fmt_ctx->streams[video_stream_index]->codecpar;

    video_codec_ctx = avcodec_alloc_context3(video_codec);
    if (!video_codec_ctx)
    {
        av_log(NULL, AV_LOG_ERROR, "avcodec_alloc_context3 failed\n");
        return AVERROR(ENOMEM);
    }
    ret = avcodec_parameters_to_context(video_codec_ctx, video_codec_par);
    if (ret < 0)
    {
        av_log(NULL, AV_LOG_ERROR, "avcodec_parameters_to_context failed\n");
        return ret;
    }

    ret = avcodec_open2(video_codec_ctx, video_codec, NULL);
    if (ret < 0)
    {
        av_log(NULL, AV_LOG_ERROR, "avcodec_open2 faile ");
        return ret;
    }

    return ret;
}

static int init_filters(const char *filter_desc)
{
    int ret = -1;

    char args[512] = {0};
    // "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d"
    AVRational time_base = fmt_ctx->streams[video_stream_index]->time_base;

    // Step 1: create the filter graph
    graph = avfilter_graph_alloc();
    if (!graph)
    {
        av_log(NULL, AV_LOG_ERROR, "avfilter_graph_alloc fail");
        goto __INIT_FILTER_ERROR;
    }

    // Step 2: create the buffer source filter context
    const AVFilter *bufsrc = avfilter_get_by_name("buffer");
    if (!bufsrc)
    {
        av_log(NULL, AV_LOG_ERROR, "avfilter_get_by_name buffer fail");
        goto __INIT_FILTER_ERROR;
    }

    // The option names used in args come from: ffmpeg -h filter=buffer
    snprintf(args,
             512,
             "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
             video_codec_par->width,
             video_codec_par->height,
             video_codec_par->format,
             time_base.num,
             time_base.den,
             video_codec_ctx->sample_aspect_ratio.num,
             video_codec_ctx->sample_aspect_ratio.den);

    ret = avfilter_graph_create_filter(&buffer_filter_ctx, bufsrc, "in", args, NULL, graph);
    if (ret < 0)
    {
        av_log(NULL, AV_LOG_ERROR, "avfilter_graph_create_filter buffer_filter_ctx %s", av_err2str(ret));
        goto __INIT_FILTER_ERROR;
    }

    // Step 3: create the buffersink filter context
    const AVFilter *bufsink = avfilter_get_by_name("buffersink");
    if (!bufsink)
    {
        av_log(NULL, AV_LOG_ERROR, "creater bufsink fail");
        goto __INIT_FILTER_ERROR;
    }
    ret = avfilter_graph_create_filter(&buffer_shink_filter_ctx, bufsink, "out", NULL, NULL, graph);
    if (ret < 0)
    {
        av_log(NULL, AV_LOG_ERROR, "avfilter_graph_create_filter buffer_shink_filter_ctx %s", av_err2str(ret));
        goto __INIT_FILTER_ERROR;
    }
    // Restrict the formats the sink accepts (options: ffmpeg -h filter=buffersink)
    enum AVPixelFormat pix_fmts[] = {AV_PIX_FMT_YUV420P,
                                     AV_PIX_FMT_GRAY8,
                                     AV_PIX_FMT_NONE};
    ret = av_opt_set_int_list(buffer_shink_filter_ctx, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
    if (ret < 0)
    {
        av_log(NULL, AV_LOG_ERROR, "av_opt_set_int_list pix_fmts %s", av_err2str(ret));
        goto __INIT_FILTER_ERROR;
    }

    // Step 4: create the AVFilterInOut lists

    input = avfilter_inout_alloc();
    output = avfilter_inout_alloc();
    if (!input || !output)
    {
        av_log(NULL, AV_LOG_ERROR, "avfilter_inout_alloc fail");
        goto __INIT_FILTER_ERROR;
    }
    input->name = av_strdup("out");
    input->filter_ctx = buffer_shink_filter_ctx;
    input->pad_idx = 0;
    input->next = NULL;
    output->name = av_strdup("in");
    output->filter_ctx = buffer_filter_ctx;
    output->pad_idx = 0;
    output->next = NULL;

    // Step 5: parse the filter description and link it into the graph
    ret = avfilter_graph_parse_ptr(graph, filter_desc, &input, &output, NULL);
    if (ret < 0)
    {
        av_log(NULL, AV_LOG_ERROR, "avfilter_graph_parse_ptr %s", av_err2str(ret));
        goto __INIT_FILTER_ERROR;
    }

    // Step 6: check validity and configure the graph
    ret = avfilter_graph_config(graph, NULL);
    if (ret < 0)
    {
        av_log(NULL, AV_LOG_ERROR, "avfilter_graph_config %s", av_err2str(ret));
        goto __INIT_FILTER_ERROR;
    }
    // The AVFilterInOut lists are no longer needed once the graph is configured
    avfilter_inout_free(&input);
    avfilter_inout_free(&output);
    return 0;

__INIT_FILTER_ERROR:
    // Filter contexts created inside the graph are owned by it, so freeing the
    // graph also frees buffer_filter_ctx and buffer_shink_filter_ctx; freeing
    // them again here would be a double free.
    if (graph)
    {
        avfilter_graph_free(&graph);
    }
    if (input)
    {
        avfilter_inout_free(&input);
    }
    if (output)
    {
        avfilter_inout_free(&output);
    }
    return ret;
}

static int do_frame(AVFrame *filter_frame, FILE *out_fb)
{
    // Note: this writes each plane as one block, which assumes linesize == width
    // (a linesize-aware variant is sketched after the full listing).
    int y_size = filter_frame->width * filter_frame->height;
    fwrite(filter_frame->data[0], 1, y_size, out_fb);
    fwrite(filter_frame->data[1], 1, y_size / 4, out_fb);
    fwrite(filter_frame->data[2], 1, y_size / 4, out_fb);
    fflush(out_fb);
    return 0;
}

static int filter_video(AVFrame *frame, AVFrame *filter_frame, FILE *out_fb)
{
    int ret = 0;
    ret = av_buffersrc_add_frame(buffer_filter_ctx, frame);
    if (ret < 0)
    {
        av_log(NULL, AV_LOG_ERROR, "av_buffersrc_add_frame fail\n");
        return ret;
    }
    while (1)
    {
        ret = av_buffersink_get_frame(buffer_shink_filter_ctx, filter_frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        {
            break;
        }
        if (ret < 0)
        {
            av_log(NULL, AV_LOG_ERROR, "av_buffersink_get_frame fail\n");
            return ret;
        }
        do_frame(filter_frame, out_fb);
        av_frame_unref(filter_frame);
    }
    av_frame_unref(frame);

    return 0;
}

int decode_frame_and_filter(AVFrame *frame, AVFrame *filter_frame, FILE *out_fb)
{
    int ret = 0;

    while (1)
    {
        ret = avcodec_receive_frame(video_codec_ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        {
            break;
        }

        if (ret < 0)
        {
            //解码出错
            av_log(NULL, AV_LOG_ERROR, "解码出错\n");
            return ret;
        }
        ret = filter_video(frame, filter_frame, out_fb);
        if (ret < 0)
        {
            av_log(NULL, AV_LOG_ERROR, "滤镜出错\n");
            return ret;
        }
    }

    return 0;
}

static int flush_decode(AVFrame *frame, AVFrame *filter_frame, FILE* out_fb)
{
    int ret = avcodec_send_packet(video_codec_ctx, NULL);
    if (ret < 0)
    {
        av_log(NULL, AV_LOG_ERROR, "flust_decode avcodec_send_packet fail");
        return ret;
    }
    ret = decode_frame_and_filter(frame, filter_frame, out_fb);
    return ret;
}

int main(int argc, char *argv[])
{
    int ret = 0;
    // const char *filter_desc = "drawbox=30:10:64:64:red";
    const char *filter_desc = "drawbox=x=30:y=10:w=64:h=64:c=red";

    const char *path = "/Users/yuanxuzhen/study/ffmpeg_study/ffmpeg/resource/in_audio.mp4";
    const char *out_path = "/Users/yuanxuzhen/study/ffmpeg_study/ffmpeg/resource/filter.yuv";

    av_log_set_level(AV_LOG_INFO);
    AVFrame *frame = NULL;
    AVPacket pkt;
    AVFrame *filter_frame = NULL;
    FILE *out_fb = NULL;

    ret = open_input_file(path);
    if (ret < 0)
    {
        av_log(NULL, AV_LOG_ERROR, "open_input_file fail");
        goto __ERROR;
    }

    ret = init_filters(filter_desc);
    if (ret < 0)
    {
        av_log(NULL, AV_LOG_ERROR, "init_filters fail");
        goto __ERROR;
    }
    frame = av_frame_alloc();
    filter_frame = av_frame_alloc();
    if (!frame || !filter_frame)
    {
        av_log(NULL, AV_LOG_ERROR, "av_frame_alloc fail\n");
        goto __ERROR;
    }
    out_fb = fopen(out_path, "wb+");
    if (!out_fb)
    {
        av_log(NULL, AV_LOG_ERROR, "fopen %s fail\n", out_path);
        goto __ERROR;
    }
    while (1)
    {
        ret = av_read_frame(fmt_ctx, &pkt);

        if (ret < 0)
        {
            av_log(NULL, AV_LOG_ERROR, "av_read_frame fail\n");
            break;
        }
        if (pkt.stream_index != video_stream_index)
        {
            av_packet_unref(&pkt);
            continue;
        }
        av_log(NULL, AV_LOG_ERROR, "av_read_frame pkt.pts=%lld\n", pkt.pts);

        ret = avcodec_send_packet(video_codec_ctx, &pkt);
        if (ret < 0)
        {
            av_log(NULL, AV_LOG_ERROR, "avcodec_send_packet fail\n");
            break;
        }
        ret = decode_frame_and_filter(frame, filter_frame, out_fb);
        // Unref the packet unconditionally so it is not leaked when we break out
        av_packet_unref(&pkt);
        if (ret < 0)
        {
            break;
        }
    }

    // Flush the decoder
    ret = flush_decode(frame, filter_frame, out_fb);
__ERROR:
    if (frame)
    {
        av_frame_free(&frame);
    }
    if (filter_frame)
    {
        av_frame_free(&filter_frame);
    }
    if (video_codec_ctx)
    {
        avcodec_free_context(&video_codec_ctx);
    }

    if (fmt_ctx)
    {
        // avformat_close_input() also frees the context and resets fmt_ctx to NULL
        avformat_close_input(&fmt_ctx);
    }

    if (out_fb)
    {
        fclose(out_fb);
    }

    return ret;
}
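
One caveat in do_frame() above: it writes data[0..2] as contiguous blocks, which only works when linesize equals the plane width; if the decoder or filter adds row padding the output file is corrupted. A linesize-aware variant (a sketch with a hypothetical helper name, assuming YUV420P output as requested from the buffersink) writes each plane row by row:

#include <stdio.h>
#include <libavutil/frame.h>

static void write_yuv420p(const AVFrame *f, FILE *out)
{
    int i;
    for (i = 0; i < f->height; i++)       // Y plane
        fwrite(f->data[0] + i * f->linesize[0], 1, f->width, out);
    for (i = 0; i < f->height / 2; i++)   // U plane (quarter size)
        fwrite(f->data[1] + i * f->linesize[1], 1, f->width / 2, out);
    for (i = 0; i < f->height / 2; i++)   // V plane (quarter size)
        fwrite(f->data[2] + i * f->linesize[2], 1, f->width / 2, out);
}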

Gitee address

https://gitee.com/creat151/ffmpeg.git
