------------------------------------------------------------
author: hjjdebug
date: 2024年 08月 09日 星期五 14:24:27 CST
description: av_buffersink_get_frame() 函数分析
------------------------------------------------------------
添加一个scale 过滤器后的调用栈, 我们来看看av_buffersink_get_frame 是怎样工作的.
关于构建过滤图及发frame的分析请参考以前的博客.本篇只着重于 av_buffersink_get_frame()
0 in scale_frame of libavfilter/vf_scale.c:651
1 in filter_frame of libavfilter/vf_scale.c:820
2 in ff_filter_frame_framed of libavfilter/avfilter.c:1085
3 in ff_filter_frame_to_filter of libavfilter/avfilter.c:1233
4 in ff_filter_activate_default of libavfilter/avfilter.c:1282
5 in ff_filter_activate of libavfilter/avfilter.c:1441
6 in ff_filter_graph_run_once of libavfilter/avfiltergraph.c:1403
7 in get_frame_internal of libavfilter/buffersink.c:131
8 in av_buffersink_get_frame_flags of libavfilter/buffersink.c:142
9 in av_buffersink_get_frame of libavfilter/buffersink.c:90
10 in main of filtering_video.c:245
何以有这么长的调用,我大概浏览一下,感觉这一次从上而下比较容易分析清楚.
就是从大号调用开始.
第10层: 调用函数
ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
第9层: 只是一个简单的包装函数, 以 flags=0 调用下一层
/* Thin public wrapper: fetch one frame from the sink with no special flags. */
int attribute_align_arg av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *frame)
{
    const int flags = 0; /* neither PEEK nor NO_REQUEST behaviour requested */
    return av_buffersink_get_frame_flags(ctx, frame, flags);
}
第8层. 还是一个简单的调用, 只是多传了一个参数: 最少采样数(从 ctx 的输入 link 中取出)
/* Forward to the internal getter, supplying the input link's minimum sample
 * count (relevant for audio sinks; 0 for video). */
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
{
    AVFilterLink *const in = ctx->inputs[0];
    return get_frame_internal(ctx, frame, flags, in->min_samples);
}
第7层,核心控制单元
/* Core control loop of the sink: keep driving the graph one step at a time
 * until the input link can deliver a frame, a terminal status (EOF/error)
 * is acknowledged, or an error occurs. */
static int get_frame_internal(AVFilterContext *ctx, AVFrame *frame, int flags, int samples)
{
    BufferSinkContext *buf = ctx->priv;
    /* A previously peeked frame takes priority over consuming a new one. */
    if (buf->peeked_frame)
        return return_or_keep_frame(buf, frame, buf->peeked_frame, flags);
    AVFilterLink *inlink = ctx->inputs[0];
    int status, ret;
    AVFrame *cur_frame;
    int64_t pts;
    while (1) {
        /* Audio sinks consume a fixed sample count; video consumes whole frames. */
        ret = samples ? ff_inlink_consume_samples(inlink, samples, samples, &cur_frame) :
        ff_inlink_consume_frame(inlink, &cur_frame);
        if (ret < 0) { return ret; }
        else if (ret) { // ret > 0: a frame was successfully taken off the input link
            /* TODO return the frame instead of copying it */
            return return_or_keep_frame(buf, frame, cur_frame, flags);
        }
        // ret == 0: no frame is available on the link yet
        else if (ff_inlink_acknowledge_status(inlink, &status, &pts)) { return status; } // propagate EOF/error
        else if ((flags & AV_BUFFERSINK_FLAG_NO_REQUEST)) { return AVERROR(EAGAIN); } // caller asked us not to drive the graph
        else if (inlink->frame_wanted_out) { // a frame was already requested: run the graph
            ret = ff_filter_graph_run_once(ctx->graph); // performs exactly one filter activation
            if (ret < 0) return ret;
        }
        else
        {
            /* Nothing pending: ask upstream for a frame, then loop again. */
            ff_inlink_request_frame(inlink);
        }
    }
}
第6层.只有一个输入参数,就是graph,从中找到需要工作的filter
/* Run exactly one filter: pick the filter in the graph with the highest
 * "ready" priority and activate it.
 * Returns AVERROR(EAGAIN) when no filter is ready to run at all. */
int ff_filter_graph_run_once(AVFilterGraph *graph)
{
    AVFilterContext *best;
    unsigned idx;

    av_assert0(graph->nb_filters);
    best = graph->filters[0];
    for (idx = 1; idx < graph->nb_filters; idx++) {
        AVFilterContext *cand = graph->filters[idx];
        if (cand->ready > best->ready)
            best = cand;
    }
    if (!best->ready)
        return AVERROR(EAGAIN);
    return ff_filter_activate(best);
}
第5层.只有一个输入参数filter, 功能: 确定向下的调用函数 activate
/* Clear the filter's ready flag and dispatch to its activate callback,
 * falling back to the default activation when none is provided.
 * FFERROR_NOT_READY is absorbed and reported as success (0). */
int ff_filter_activate(AVFilterContext *filter)
{
    int ret;

    filter->ready = 0; /* mark as executed for this round */
    if (filter->filter->activate)
        ret = filter->filter->activate(filter);
    else
        ret = ff_filter_activate_default(filter); /* the scale filter takes this path */
    return ret == FFERROR_NOT_READY ? 0 : ret;
}
第4层.只有一个输入参数,filter,判定是有数据要进filter,还是有数据要出filter
/* Default activation strategy, tried in strict priority order:
 *   1. feed a pending input frame into the filter,
 *   2. propagate an input-side status (EOF/error) change downstream,
 *   3. forward an output-side frame request upstream.
 * Returns FFERROR_NOT_READY when none of the three applies. */
static int ff_filter_activate_default(AVFilterContext *filter)
{
    unsigned n;

    /* Phase 1: an input link has enough queued data -> push it through. */
    for (n = 0; n < filter->nb_inputs; n++) {
        AVFilterLink *l = filter->inputs[n];
        if (samples_ready(l, l->min_samples))
            return ff_filter_frame_to_filter(l);
    }
    /* Phase 2: an input reached a terminal status not yet seen on its output side. */
    for (n = 0; n < filter->nb_inputs; n++) {
        AVFilterLink *l = filter->inputs[n];
        if (l->status_in && !l->status_out) {
            av_assert1(!ff_framequeue_queued_frames(&l->fifo));
            return forward_status_change(filter, l);
        }
    }
    /* Phase 3: downstream wants a frame and the output is not blocked. */
    for (n = 0; n < filter->nb_outputs; n++) {
        AVFilterLink *l = filter->outputs[n];
        if (l->frame_wanted_out && !l->frame_blocked_in)
            return ff_request_frame_to_filter(l);
    }
    return FFERROR_NOT_READY;
}
第3层.只有一个输入参数,link, 功能:把数据分离开,准备过filter
/* Take one frame (or block of samples) off the link's FIFO and run it
 * through the destination filter. */
static int ff_filter_frame_to_filter(AVFilterLink *link)
{
    AVFrame *frame = NULL;
    AVFilterContext *dst = link->dst;
    int ret;
    av_assert1(ff_framequeue_queued_frames(&link->fifo)); /* the FIFO must be non-empty here */
    ret = link->min_samples ?
    ff_inlink_consume_samples(link, link->min_samples, link->max_samples, &frame) :
    ff_inlink_consume_frame(link, &frame); /* detach one frame from the FIFO */
    /* NOTE(review): ret from the consume call is used but not checked here;
     * confirm against the full upstream source before relying on this excerpt. */
    /* The filter will soon have received a new frame, that may allow it to
    produce one or more: unblock its outputs. */
    filter_unblock(dst);
    /* Pre-decrement: ff_filter_frame_framed() increments frame_count_out again,
     * so the frame is not counted twice on this path. */
    link->frame_count_out--;
    ret = ff_filter_frame_framed(link, frame);
    if (ret < 0 && ret != link->status_out) {
        /* Filtering failed with a new error: record it as the link's out status. */
        ff_avfilter_link_set_out_status(link, ret, AV_NOPTS_VALUE);
    } else {
        ff_filter_set_ready(dst, 300); /* mark dst ready so the graph will run it again */
    }
    return ret;
}
/* Detach the next frame from the link's FIFO and hand it to the caller.
 * Returns 1 on success; on return *rframe owns the frame. */
int ff_inlink_consume_frame(AVFilterLink *link, AVFrame **rframe)
{
    AVFrame *frame; /* FIX: must be a pointer — ff_framequeue_take() returns AVFrame*,
                     * and *rframe is an AVFrame*; the original `AVFrame frame = ...`
                     * does not compile */

    *rframe = NULL;
    frame = ff_framequeue_take(&link->fifo);
    consume_update(link, frame);
    *rframe = frame;
    return 1;
}
第2层.两个输入参数,一个link, 一个待处理的frame,功能:确定实际执行的过滤器函数
/* Resolve the destination pad's per-frame callback and invoke it on `frame`.
 * For the scale filter, the callback is filter_frame() in vf_scale.c; all the
 * layers above this point are framework plumbing. */
static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame)
{
    int ret;
    int (*filter_frame)(AVFilterLink *, AVFrame *);
    AVFilterPad *dst = link->dstpad;
    AVFilterContext *dstctx = link->dst;

    if (!(filter_frame = dst->filter_frame))
        filter_frame = default_filter_frame;

    if (dst->needs_writable) {
        ret = ff_inlink_make_frame_writable(link, &frame);
        if (ret < 0) { /* FIX: previously ignored — a failed writable-copy must
                        * not be passed on to the filter */
            av_frame_free(&frame);
            return ret;
        }
    }

    ff_inlink_process_commands(link, frame);
    /* Timeline support: when the filter is disabled at this frame's time,
     * fall back to the pass-through callback. */
    dstctx->is_disabled = !ff_inlink_evaluate_timeline_at_frame(link, frame);
    if (dstctx->is_disabled && (dstctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC))
        filter_frame = default_filter_frame;

    ret = filter_frame(link, frame); /* the actual downward call into the filter */
    link->frame_count_out++;
    return ret;
}
第1层: scale.c 中的下层实现函数, 完成真正的frame 过滤
/* vf_scale.c entry point: scale `in` into a new frame and pass the result
 * down the output link. */
static int filter_frame(AVFilterLink *link, AVFrame *in)
{
    AVFilterContext *ctx = link->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out = NULL; /* FIX: scale_frame() as excerpted leaves *frame_out
                          * unset on its ENOMEM path, so `out` must not be
                          * read uninitialized below */
    int ret = scale_frame(link, in, &out);
    /* On success the scaled frame is handed back to the framework, which
     * queues it on outlink for the rest of the chain; control then unwinds
     * all the way back up to get_frame_internal(). */
    if (out)
        return ff_filter_frame(outlink, out);
    return ret;
}
第0层: 具体的scale 过程,代码较长也不用细看,就是它完成了具体的filter操作,
/* Layer 0: the concrete scaling work for one frame.
 * Allocates the output frame, configures colourspace/range conversion on the
 * SwsContext(s) when required, then scales the input (interlaced, sliced, or
 * whole-frame). Frees `in` on every path; on success *frame_out owns the
 * result and 0 is returned. */
static int scale_frame(AVFilterLink *link, AVFrame *in, AVFrame **frame_out)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format); /* input pixel-format descriptor */
    AVFilterContext *ctx = link->dst;
    ScaleContext *scale = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;
    int in_range;

    *frame_out = NULL; /* FIX: guarantee the caller never sees an indeterminate
                        * pointer when we fail below */

    /* Cache the chroma subsampling factors on the scale context. */
    scale->hsub = desc->log2_chroma_w;
    scale->vsub = desc->log2_chroma_h;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h); /* buffer for the scaled frame */
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    *frame_out = out;

    av_frame_copy_props(out, in);
    out->width  = outlink->w;
    out->height = outlink->h;

    if (scale->output_is_pal)
        avpriv_set_systematic_pal2((uint32_t*)out->data[1], outlink->format == AV_PIX_FMT_PAL8 ? AV_PIX_FMT_BGR8 : outlink->format);

    in_range = in->color_range; /* colour range signalled by the input frame */

    /* (Re)configure colourspace conversion only when a matrix or range is
     * forced by options or specified by the frame itself. */
    if (   scale->in_color_matrix
        || scale->out_color_matrix
        || scale->in_range  != AVCOL_RANGE_UNSPECIFIED
        || in_range         != AVCOL_RANGE_UNSPECIFIED
        || scale->out_range != AVCOL_RANGE_UNSPECIFIED) {
        int in_full, out_full, brightness, contrast, saturation;
        const int *inv_table, *table;

        /* Read back the current settings so unspecified fields keep their values. */
        sws_getColorspaceDetails(scale->sws, (int **)&inv_table, &in_full,
                                 (int **)&table, &out_full,
                                 &brightness, &contrast, &saturation);

        if (scale->in_color_matrix)
            inv_table = parse_yuv_type(scale->in_color_matrix, in->colorspace);
        if (scale->out_color_matrix)
            table = parse_yuv_type(scale->out_color_matrix, AVCOL_SPC_UNSPECIFIED);
        else if (scale->in_color_matrix)
            table = inv_table;

        /* An explicit range option wins over the range carried by the frame. */
        if (scale->in_range != AVCOL_RANGE_UNSPECIFIED)
            in_full = (scale->in_range == AVCOL_RANGE_JPEG);
        else if (in_range != AVCOL_RANGE_UNSPECIFIED)
            in_full = (in_range == AVCOL_RANGE_JPEG);
        if (scale->out_range != AVCOL_RANGE_UNSPECIFIED)
            out_full = (scale->out_range == AVCOL_RANGE_JPEG);

        /* Apply to the main context and to the per-field contexts, if any. */
        sws_setColorspaceDetails(scale->sws, inv_table, in_full,
                                 table, out_full,
                                 brightness, contrast, saturation);
        if (scale->isws[0])
            sws_setColorspaceDetails(scale->isws[0], inv_table, in_full,
                                     table, out_full,
                                     brightness, contrast, saturation);
        if (scale->isws[1])
            sws_setColorspaceDetails(scale->isws[1], inv_table, in_full,
                                     table, out_full,
                                     brightness, contrast, saturation);

        out->color_range = out_full ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
    }

    /* Rescale the sample aspect ratio so the display aspect ratio is preserved. */
    av_reduce(&out->sample_aspect_ratio.num, &out->sample_aspect_ratio.den,
              (int64_t)in->sample_aspect_ratio.num * outlink->h * link->w,
              (int64_t)in->sample_aspect_ratio.den * outlink->w * link->h,
              INT_MAX);

    if (scale->interlaced > 0 || (scale->interlaced < 0 && in->interlaced_frame)) {
        /* Interlaced: scale the two fields separately with dedicated contexts. */
        scale_slice(link, out, in, scale->isws[0], 0, (link->h+1)/2, 2, 0);
        scale_slice(link, out, in, scale->isws[1], 0,  link->h   /2, 2, 1);
    } else if (scale->nb_slices) {
        /* User-requested slicing: split the frame into horizontal bands. */
        int i, slice_h, slice_start, slice_end = 0;
        const int nb_slices = FFMIN(scale->nb_slices, link->h);
        for (i = 0; i < nb_slices; i++) {
            slice_start = slice_end;
            slice_end   = (link->h * (i+1)) / nb_slices;
            slice_h     = slice_end - slice_start;
            scale_slice(link, out, in, scale->sws, slice_start, slice_h, 1, 0);
        }
    } else {
        scale_slice(link, out, in, scale->sws, 0, link->h, 1, 0); /* scale the whole frame in one pass */
    }

    av_frame_free(&in); /* the input frame is consumed */
    return 0;
}