FFmpeg Android multi-threaded decoding: ijkplayer C-layer analysis of the prepare process and read thread (Part 3: the core video decoding thread)

Tracing step by step through read_thread, the key reading thread, the call chain stream_component_open -> decoder_start -> video_thread -> ffplay_video_thread becomes clear. Once decoding has started, the asynchronous decoding thread runs ffplay_video_thread (see Part 1 for the earlier steps). This function is the core of video decode handling:

static int ffplay_video_thread(void *arg)
{
    FFPlayer *ffp = arg;
    VideoState *is = ffp->is;
    AVFrame *frame = av_frame_alloc();
    double pts;
    double duration;
    int ret;
    AVRational tb = is->video_st->time_base;
    AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);

#if CONFIG_AVFILTER
    AVFilterGraph *graph = avfilter_graph_alloc();
    AVFilterContext *filt_out = NULL, *filt_in = NULL;
    int last_w = 0;
    int last_h = 0;
    enum AVPixelFormat last_format = -2;
    int last_serial = -1;
    int last_vfilter_idx = 0;
    if (!graph) {
        av_frame_free(&frame);
        return AVERROR(ENOMEM);
    }
#else
    ffp_notify_msg2(ffp, FFP_MSG_VIDEO_ROTATION_CHANGED, ffp_get_video_rotate_degrees(ffp));
#endif

    if (!frame) {
#if CONFIG_AVFILTER
        avfilter_graph_free(&graph);
#endif
        return AVERROR(ENOMEM);
    }

    for (;;) {
        ret = get_video_frame(ffp, frame);    /* 1. decode one video frame */
        if (ret < 0)
            goto the_end;
        if (!ret)
            continue;

#if CONFIG_AVFILTER
        if (   last_w != frame->width
            || last_h != frame->height
            || last_format != frame->format
            || last_serial != is->viddec.pkt_serial
            || ffp->vf_changed
            || last_vfilter_idx != is->vfilter_idx) {
            SDL_LockMutex(ffp->vf_mutex);
            ffp->vf_changed = 0;
            av_log(NULL, AV_LOG_DEBUG,
                   "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
                   last_w, last_h,
                   (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
                   frame->width, frame->height,
                   (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), is->viddec.pkt_serial);
            /* geometry/format/serial changed: rebuild the filter graph */
            avfilter_graph_free(&graph);
            graph = avfilter_graph_alloc();
            if ((ret = configure_video_filters(ffp, graph, is, ffp->vfilters_list ? ffp->vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
                // FIXME: post error
                SDL_UnlockMutex(ffp->vf_mutex);
                goto the_end;
            }
            filt_in  = is->in_video_filter;
            filt_out = is->out_video_filter;
            last_w = frame->width;
            last_h = frame->height;
            last_format = frame->format;
            last_serial = is->viddec.pkt_serial;
            last_vfilter_idx = is->vfilter_idx;
            frame_rate = filt_out->inputs[0]->frame_rate;
            SDL_UnlockMutex(ffp->vf_mutex);
        }

        ret = av_buffersrc_add_frame(filt_in, frame);   /* 2. push frame into the filter graph */
        if (ret < 0)
            goto the_end;

        while (ret >= 0) {
            is->frame_last_returned_time = av_gettime_relative() / 1000000.0;

            ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
            if (ret < 0) {
                if (ret == AVERROR_EOF)
                    is->viddec.finished = is->viddec.pkt_serial;
                ret = 0;
                break;
            }

            is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
            if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
                is->frame_last_filter_delay = 0;
            tb = filt_out->inputs[0]->time_base;
#endif
            duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
            pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
            ret = queue_picture(ffp, frame, pts, duration, av_frame_get_pkt_pos(frame), is->viddec.pkt_serial);   /* 3. queue for display */
            av_frame_unref(frame);
#if CONFIG_AVFILTER
        }
#endif

        if (ret < 0)
            goto the_end;
    }
 the_end:
#if CONFIG_AVFILTER
    avfilter_graph_free(&graph);
#endif
    av_frame_free(&frame);
    return 0;
}

Skipping the initialization code for now, go straight to the for(;;) loop. It has three main steps: 1. get_video_frame decodes one frame; 2. av_buffersrc_add_frame feeds that frame into the filter graph (when CONFIG_AVFILTER is enabled, with av_buffersink_get_frame_flags pulling the filtered result back out); 3. queue_picture pushes the decoded YUV frame into the picture queue, from which the SDL-based renderer later displays it.
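A quick worked example of the timestamp math in step 3 (numbers chosen purely for illustration): if the time base tb is 1/90000, a typical MPEG-TS value, and frame->pts is 450000, then pts = 450000 * (1/90000) = 5.0 seconds. For a 25 fps stream, frame_rate is 25/1, so duration = av_q2d((AVRational){1, 25}) = 0.04 seconds per frame.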

get_video_frame was touched on in an earlier part, but only briefly; this time let's step inside it:

static int get_video_frame(FFPlayer *ffp, AVFrame *frame)
{
    VideoState *is = ffp->is;
    int got_picture;

    ffp_video_statistic_l(ffp);

    if ((got_picture = decoder_decode_frame(ffp, &is->viddec, frame, NULL)) < 0)
        return -1;

    if (got_picture) {
        double dpts = NAN;

        if (frame->pts != AV_NOPTS_VALUE)
            dpts = av_q2d(is->video_st->time_base) * frame->pts;

        frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);

        if (ffp->framedrop > 0 || (ffp->framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
            if (frame->pts != AV_NOPTS_VALUE) {
                double diff = dpts - get_master_clock(is);
                if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
                    diff - is->frame_last_filter_delay < 0 &&
                    is->viddec.pkt_serial == is->vidclk.serial &&
                    is->videoq.nb_packets) {
                    /* frame is already late relative to the master clock: drop it early */
                    is->frame_drops_early++;
                    is->continuous_frame_drops_early++;
                    if (is->continuous_frame_drops_early > ffp->framedrop) {
                        is->continuous_frame_drops_early = 0;
                    } else {
                        av_frame_unref(frame);
                        got_picture = 0;
                    }
                }
            }
        }
    }

    return got_picture;
}
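The frame-drop gate in the middle of this function is easier to read with numbers (illustrative values only): suppose dpts is 10.00 s while the master clock reads 10.05 s, so diff = -0.05 s. That passes fabs(diff) < AV_NOSYNC_THRESHOLD, and with frame_last_filter_delay near 0, diff - delay < 0 means the frame is already late. Provided the packet serial matches the video clock's serial and the packet queue is non-empty, the frame is unreffed and dropped early (frame_drops_early), unless more than ffp->framedrop frames have already been dropped in a row, in which case one frame is let through so video does not stall entirely.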

decoder_decode_frame is without question the key routine here; it decodes one frame:

static int decoder_decode_frame(FFPlayer *ffp, Decoder *d, AVFrame *frame, AVSubtitle *sub) {
    int got_frame = 0;

    do {
        int ret = -1;

        if (d->queue->abort_request)
            return -1;

        if (!d->packet_pending || d->queue->serial != d->pkt_serial) {
            AVPacket pkt;
            do {
                if (d->queue->nb_packets == 0)
                    SDL_CondSignal(d->empty_queue_cond);
                if (packet_queue_get_or_buffering(ffp, d->queue, &pkt, &d->pkt_serial, &d->finished) < 0)
                    return -1;
                if (pkt.data == flush_pkt.data) {
                    /* sentinel packet: reset decoder state before continuing */
                    avcodec_flush_buffers(d->avctx);
                    d->finished = 0;
                    d->next_pts = d->start_pts;
                    d->next_pts_tb = d->start_pts_tb;
                }
            } while (pkt.data == flush_pkt.data || d->queue->serial != d->pkt_serial);
            av_packet_unref(&d->pkt);
            d->pkt_temp = d->pkt = pkt;
            d->packet_pending = 1;
        }

        switch (d->avctx->codec_type) {
            case AVMEDIA_TYPE_VIDEO: {
                ret = avcodec_decode_video2(d->avctx, frame, &got_frame, &d->pkt_temp);
                if (got_frame) {
                    ffp->stat.vdps = SDL_SpeedSamplerAdd(&ffp->vdps_sampler, FFP_SHOW_VDPS_AVCODEC, "vdps[avcodec]");
                    if (ffp->decoder_reorder_pts == -1) {
                        frame->pts = av_frame_get_best_effort_timestamp(frame);
                    } else if (!ffp->decoder_reorder_pts) {
                        frame->pts = frame->pkt_dts;
                    }
                }
            }
            break;
            case AVMEDIA_TYPE_AUDIO:
                ret = avcodec_decode_audio4(d->avctx, frame, &got_frame, &d->pkt_temp);
                if (got_frame) {
                    AVRational tb = (AVRational){1, frame->sample_rate};
                    if (frame->pts != AV_NOPTS_VALUE)
                        frame->pts = av_rescale_q(frame->pts, av_codec_get_pkt_timebase(d->avctx), tb);
                    else if (d->next_pts != AV_NOPTS_VALUE)
                        frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
                    if (frame->pts != AV_NOPTS_VALUE) {
                        d->next_pts = frame->pts + frame->nb_samples;
                        d->next_pts_tb = tb;
                    }
                }
                break;
            case AVMEDIA_TYPE_SUBTITLE:
                ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, &d->pkt_temp);
                break;
            default:
                break;
        }

        if (ret < 0) {
            d->packet_pending = 0;
        } else {
            d->pkt_temp.dts =
            d->pkt_temp.pts = AV_NOPTS_VALUE;
            if (d->pkt_temp.data) {
                if (d->avctx->codec_type != AVMEDIA_TYPE_AUDIO)
                    ret = d->pkt_temp.size;
                d->pkt_temp.data += ret;
                d->pkt_temp.size -= ret;
                if (d->pkt_temp.size <= 0)
                    d->packet_pending = 0;
            } else {
                if (!got_frame) {
                    d->packet_pending = 0;
                    d->finished = d->pkt_serial;
                }
            }
        }
    } while (!got_frame && !d->finished);

    return got_frame;
}

It is a big loop (running until a frame is produced or the decoder is finished) wrapping a small loop, a switch on the codec type, and some state updates at the end. First, the small loop:

AVPacket pkt;
do {
    if (d->queue->nb_packets == 0)
        SDL_CondSignal(d->empty_queue_cond);
    if (packet_queue_get_or_buffering(ffp, d->queue, &pkt, &d->pkt_serial, &d->finished) < 0)
        return -1;
    if (pkt.data == flush_pkt.data) {
        avcodec_flush_buffers(d->avctx);
        d->finished = 0;
        d->next_pts = d->start_pts;
        d->next_pts_tb = d->start_pts_tb;
    }
} while (pkt.data == flush_pkt.data || d->queue->serial != d->pkt_serial);

av_packet_unref(&d->pkt);
d->pkt_temp = d->pkt = pkt;
d->packet_pending = 1;

Here we meet a key data structure, AVPacket, which describes one demuxed audio/video data packet:

typedef struct AVPacket {
    /**
     * A reference to the reference-counted buffer where the packet data is
     * stored.
     * May be NULL, then the packet data is not reference-counted.
     */
    AVBufferRef *buf;
    /**
     * Presentation timestamp in AVStream->time_base units; the time at which
     * the decompressed packet will be presented to the user.
     * Can be AV_NOPTS_VALUE if it is not stored in the file.
     * pts MUST be larger or equal to dts as presentation cannot happen before
     * decompression, unless one wants to view hex dumps. Some formats misuse
     * the terms dts and pts/cts to mean something different. Such timestamps
     * must be converted to true pts/dts before they are stored in AVPacket.
     */
    int64_t pts;
    /**
     * Decompression timestamp in AVStream->time_base units; the time at which
     * the packet is decompressed.
     * Can be AV_NOPTS_VALUE if it is not stored in the file.
     */
    int64_t dts;
    uint8_t *data;
    int size;
    int stream_index;
    /**
     * A combination of AV_PKT_FLAG values
     */
    int flags;
    /**
     * Additional packet data that can be provided by the container.
     * Packet can contain several types of side information.
     */
    AVPacketSideData *side_data;
    int side_data_elems;
    /**
     * Duration of this packet in AVStream->time_base units, 0 if unknown.
     * Equals next_pts - this_pts in presentation order.
     */
    int64_t duration;
    int64_t pos;                        ///< byte position in stream, -1 if unknown
#if FF_API_CONVERGENCE_DURATION
    /**
     * @deprecated Same as the duration field, but as int64_t. This was required
     * for Matroska subtitles, whose duration values could overflow when the
     * duration field was still an int.
     */
    attribute_deprecated
    int64_t convergence_duration;
#endif
} AVPacket;

It carries the presentation and decoding timestamps pts and dts, the byte position pos within the stream, the actual data pointer data with its size, and the index stream_index of the stream it belongs to. It is essentially the description of one data frame; as I understand it, one still-compressed, undecoded frame.
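To make these fields concrete, here is a minimal standalone probe (a sketch, assuming an FFmpeg release contemporary with this code, i.e. one that still ships av_register_all and the old stack-AVPacket usage; input.mp4 is a placeholder path) that demuxes a file and prints each packet's fields:

/* build: gcc probe_packets.c -lavformat -lavcodec -lavutil */
#include <stdio.h>
#include <inttypes.h>
#include <libavformat/avformat.h>

int main(int argc, char **argv)
{
    const char *path = argc > 1 ? argv[1] : "input.mp4";   /* placeholder path */
    AVFormatContext *ic = NULL;
    AVPacket pkt;

    av_register_all();                       /* required on FFmpeg 3.x and earlier */
    if (avformat_open_input(&ic, path, NULL, NULL) < 0)
        return 1;
    if (avformat_find_stream_info(ic, NULL) < 0)
        return 1;

    for (int i = 0; i < 20 && av_read_frame(ic, &pkt) >= 0; i++) {
        /* pts/dts/duration are in the owning stream's time_base units */
        AVRational tb = ic->streams[pkt.stream_index]->time_base;
        printf("stream=%d pts=%" PRId64 " dts=%" PRId64 " size=%d pos=%" PRId64 " (tb=%d/%d)\n",
               pkt.stream_index, pkt.pts, pkt.dts, pkt.size, pkt.pos, tb.num, tb.den);
        av_packet_unref(&pkt);               /* release the packet's buffer reference */
    }

    avformat_close_input(&ic);
    return 0;
}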

Back in the small loop, packet_queue_get_or_buffering reads one compressed packet:

static int packet_queue_get_or_buffering(FFPlayer *ffp, PacketQueue *q, AVPacket *pkt, int *serial, int *finished)
{
    assert(finished);
    if (!ffp->packet_buffering)
        return packet_queue_get(q, pkt, 1, serial);

    while (1) {
        int new_packet = packet_queue_get(q, pkt, 0, serial);
        if (new_packet < 0)
            return -1;
        else if (new_packet == 0) {
            if (q->is_buffer_indicator && !*finished)
                ffp_toggle_buffering(ffp, 1);
            new_packet = packet_queue_get(q, pkt, 1, serial);
            if (new_packet < 0)
                return -1;
        }

        if (*finished == *serial) {
            av_packet_unref(pkt);
            continue;
        }
        else
            break;
    }

    return 1;
}

packet_queue_get fetches one pkt from the queue, but the meaning of the call differs with its arguments:

/* return < 0 if aborted, 0 if no packet and > 0 if packet. */

static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)

{

MyAVPacketList *pkt1;

int ret;

SDL_LockMutex(q->mutex);

for (;;) {

if (q->abort_request) {

ret = -1;

break;

}

pkt1 = q->first_pkt;

if (pkt1) {

q->first_pkt = pkt1->next;

if (!q->first_pkt)

q->last_pkt = NULL;

q->nb_packets--;

q->size -= pkt1->pkt.size + sizeof(*pkt1);

q->duration -= pkt1->pkt.duration;

*pkt = pkt1->pkt;

if (serial)

*serial = pkt1->serial;

#ifdef FFP_MERGE

av_free(pkt1);

#else

pkt1->next = q->recycle_pkt;

q->recycle_pkt = pkt1;

#endif

ret = 1;

break;

} else if (!block) {

ret = 0;

break;

} else {

SDL_CondWait(q->cond, q->mutex);

}

}

SDL_UnlockMutex(q->mutex);

return ret;

}

Another loop. If the queue has been aborted, it returns -1 immediately. Otherwise it takes the first pkt from the queue (really a linked list) and unlinks it, so the next node becomes the head. If nothing could be read there are two cases, governed by the block argument: a non-blocking call returns 0 right away, while a blocking call waits on the condition variable and, once signalled, goes back to the top of the loop and tries the read again.
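Stripped of the ijkplayer specifics, the same block/non-block pattern looks like this with POSIX threads (a minimal sketch for illustration only; ijkplayer itself uses SDL's mutex and condition wrappers, and the Node type here is hypothetical):

#include <pthread.h>
#include <stddef.h>

typedef struct Node { struct Node *next; /* payload omitted */ } Node;

typedef struct Queue {
    Node *head, *tail;
    int abort_request;
    pthread_mutex_t mutex;   /* assumed initialized, e.g. PTHREAD_MUTEX_INITIALIZER */
    pthread_cond_t  cond;    /* assumed initialized, e.g. PTHREAD_COND_INITIALIZER  */
} Queue;

static void queue_put(Queue *q, Node *n)
{
    pthread_mutex_lock(&q->mutex);
    n->next = NULL;
    if (q->tail) q->tail->next = n; else q->head = n;
    q->tail = n;
    pthread_cond_signal(&q->cond);            /* wake one blocked consumer */
    pthread_mutex_unlock(&q->mutex);
}

/* return < 0 if aborted, 0 if empty and non-blocking, > 0 if a node was taken */
static int queue_get(Queue *q, Node **out, int block)
{
    int ret;
    pthread_mutex_lock(&q->mutex);
    for (;;) {
        if (q->abort_request) { ret = -1; break; }
        if (q->head) {
            *out = q->head;                   /* unlink the head node */
            q->head = q->head->next;
            if (!q->head) q->tail = NULL;
            ret = 1;
            break;
        } else if (!block) {
            ret = 0;                          /* empty, caller asked not to wait */
            break;
        } else {
            pthread_cond_wait(&q->cond, &q->mutex);   /* sleep until a put/abort */
        }
    }
    pthread_mutex_unlock(&q->mutex);
    return ret;
}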

Now back to packet_queue_get_or_buffering. It opens with a check: if packet buffering is disabled (ffp->packet_buffering is 0), it simply does a blocking read of a pkt and returns; the thread sleeps until a packet arrives (for a network stream, until data has come in) and then proceeds. The while(1) below handles the buffering-enabled case. It first tries a non-blocking read; if the queue was aborted it returns -1, and if there is no pkt yet, ffp_toggle_buffering switches buffering on and the read is retried in blocking mode. So what does ffp_toggle_buffering actually do? Two layers down it is ffp_toggle_buffering_l:

void ffp_toggle_buffering_l(FFPlayer *ffp, int buffering_on)
{
    if (!ffp->packet_buffering)
        return;

    VideoState *is = ffp->is;
    if (buffering_on && !is->buffering_on) {
        av_log(ffp, AV_LOG_DEBUG, "ffp_toggle_buffering_l: start\n");
        is->buffering_on = 1;
        stream_update_pause_l(ffp);
        ffp_notify_msg1(ffp, FFP_MSG_BUFFERING_START);
    } else if (!buffering_on && is->buffering_on){
        av_log(ffp, AV_LOG_DEBUG, "ffp_toggle_buffering_l: end\n");
        is->buffering_on = 0;
        stream_update_pause_l(ffp);
        ffp_notify_msg1(ffp, FFP_MSG_BUFFERING_END);
    }
}

Either way it goes through stream_update_pause_l and then posts a notification message. Following stream_update_pause_l two layers down lands in stream_toggle_pause_l:

static void stream_toggle_pause_l(FFPlayer *ffp, int pause_on)
{
    VideoState *is = ffp->is;
    if (is->paused && !pause_on) {
        is->frame_timer += av_gettime_relative() / 1000000.0 - is->vidclk.last_updated;
#ifdef FFP_MERGE
        if (is->read_pause_return != AVERROR(ENOSYS)) {
            is->vidclk.paused = 0;
        }
#endif
        set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
    } else {
    }
    set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
    is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = pause_on;
    SDL_AoutPauseAudio(ffp->aout, pause_on);
}

This is the pause/resume path. To recap: when reading a pkt, the cached queue is consulted first; if a packet is there it is returned directly, but if the read comes back empty (0), the thread blocks here and playback is paused at the same time. This is exactly the buffering wait we experience while watching a video.

Back to the small loop in decoder_decode_frame. The small loop keeps reading pkts until it gets one that is not the global flush_pkt and whose serial matches the queue's current serial. My understanding is that flush_pkt acts as a marker: it flags the point at which the decoder must be reset before the packets that follow are decoded, and until then the loop keeps draining cached pkts (corrections welcome).

Continuing past the small loop to the switch, take the video case as the example:

case AVMEDIA_TYPE_VIDEO: {
    ret = avcodec_decode_video2(d->avctx, frame, &got_frame, &d->pkt_temp);
    if (got_frame) {
        ffp->stat.vdps = SDL_SpeedSamplerAdd(&ffp->vdps_sampler, FFP_SHOW_VDPS_AVCODEC, "vdps[avcodec]");
        if (ffp->decoder_reorder_pts == -1) {
            frame->pts = av_frame_get_best_effort_timestamp(frame);
        } else if (!ffp->decoder_reorder_pts) {
            frame->pts = frame->pkt_dts;
        }
    }
}
break;

Here avcodec_decode_video2 is called with the pkt we just dequeued. If got_frame reports a decoded picture, the decode-speed statistic is sampled via SDL_SpeedSamplerAdd and the frame's pts is fixed up: with the default decoder_reorder_pts of -1 it takes the frame's best-effort timestamp, and with 0 it falls back to pkt_dts.
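A side note beyond ijkplayer's code: FFmpeg 3.1 deprecated avcodec_decode_video2/avcodec_decode_audio4 in favor of the send/receive API, which decouples feeding packets from draining frames. A minimal sketch of the replacement pattern (the helper name decode_packet is mine, not FFmpeg's):

#include <libavcodec/avcodec.h>

/* Decode one packet with the newer API; returns the number of frames
 * produced or a negative AVERROR. Passing pkt == NULL enters drain mode. */
static int decode_packet(AVCodecContext *avctx, const AVPacket *pkt, AVFrame *frame)
{
    int nframes = 0;
    int ret = avcodec_send_packet(avctx, pkt);
    if (ret < 0)
        return ret;
    for (;;) {
        ret = avcodec_receive_frame(avctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return nframes;                   /* needs more input / fully drained */
        if (ret < 0)
            return ret;
        nframes++;
        /* ...consume the frame here (queue_picture, in ffplay terms)... */
        av_frame_unref(frame);
    }
}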

The decoding internals deserve their own analysis later. One question remains: what exactly is flush_pkt? Is the guess above right? Let's look for clues. In ffp_global_init:

av_init_packet(&flush_pkt);
flush_pkt.data = (uint8_t *)&flush_pkt;

It is initialized empty and its data pointer is set to the address of the packet itself, which looks odd at first. Keep searching:

static void packet_queue_start(PacketQueue *q)
{
    SDL_LockMutex(q->mutex);
    q->abort_request = 0;
    packet_queue_put_private(q, &flush_pkt);
    SDL_UnlockMutex(q->mutex);
}

So this otherwise-empty pkt is put into the queue the moment the queue is started. That answers the earlier question of whether the small loop simply "reads pkts until the cache queue is empty": no. Pointing data at the packet's own address gives it a value that no real packet's data can ever equal, so pkt.data == flush_pkt.data is an unambiguous sentinel test. The flush_pkt marks a flush point (queue start, and in ffplay-derived code it is also queued again after a seek), which is why the small loop reacts to it with avcodec_flush_buffers: stale decoder state must be discarded before the packets behind the marker are decoded.
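The self-pointing sentinel is a neat idiom worth a tiny standalone illustration (a sketch independent of ijkplayer): since no real packet's data buffer can ever sit at the sentinel struct's own address, the pointer comparison is a cheap, collision-free in-band marker:

#include <stdio.h>
#include <stdint.h>

typedef struct Packet { uint8_t *data; int size; } Packet;

static Packet flush_pkt;                      /* sentinel: data points at itself */

static void handle(const Packet *p)
{
    if (p->data == (uint8_t *)&flush_pkt)
        printf("flush marker: reset decoder state here\n");
    else
        printf("real packet, %d bytes\n", p->size);
}

int main(void)
{
    flush_pkt.data = (uint8_t *)&flush_pkt;

    uint8_t payload[4] = {0};
    Packet real = { payload, sizeof(payload) };

    handle(&flush_pkt);                       /* takes the sentinel branch */
    handle(&real);                            /* ordinary packet */
    return 0;
}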
