ffplay的解码线程独立于读线程,并且每种类型的流(AVStream)都有其各自的解码线程
0、Clock用于同步音视频。
1、read_thread读线程将pkt从文件读取并放入PacketQueue
2、解码线程负责将PacketQueue数据解码为AVFrame,并存入FrameQueue。
三个解码线程大同小异,这里以video_thread为例:
忽略滤镜并简化后代码:
static int video_thread(void *arg)
{
VideoState *is = arg;
AVRational tb = is->video_st->time_base;
AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
for (;;) {
1、解码得到一帧frame
ret = get_video_frame(is, frame); //解码获取一帧视频画面
if (ret < 0)//解码结束
goto the_end;
if (!ret)//没有解码得到画面
continue;
2、计算持续时间duration和pts
double duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){
frame_rate.den, frame_rate.num}) : 0); 用帧率估计帧时长
double pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb); 将pts转化为秒为单位
3、将frame放入Framequene
int ret = queue_picture(is, frame, pts, duration, frame->pkt_pos, is->viddec.pkt_serial);//将解码后的帧存入FrameQueue
av_frame_unref(frame);
if (ret < 0)
goto the_end;
}
the_end:
av_frame_free(&frame);
return 0;
}
1、总的解码函数get_video_frame = 解码decoder_decode_frame + 丢帧
static int get_video_frame(VideoState *is, AVFrame *frame)
{
int got_picture;
1、解码音视频
if ((got_picture = decoder_decode_frame(&is->viddec, frame, NULL)) < 0)
return -1;
if (got_picture) {
double dpts = NAN;
if (frame->pts != AV_NOPTS_VALUE)
dpts = av_q2d(is->video_st->time_base) * frame->pts; // 由frame->pts转换成秒dpts
frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame); // 推测帧的长宽比
// 判断是否需要舍弃该帧
//控制是否丢帧的开关变量是framedrop, =1判断是否丢帧;
//=0始终不丢帧
//=-1(默认值),则在主时钟不是video的时候,判断是否丢帧。
if (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
if (frame->pts != AV_NOPTS_VALUE) {
double diff = dpts