Before we start
Timestamp, time base, frame, packet, I/B/P frames, SPS, PPS, field, container format, codec format, audio, video
If any of the keywords above are unfamiliar, look them up first; a tiny timestamp and time-base refresher follows below.
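As a quick refresher on the first two keywords: a timestamp is only meaningful together with its time base. A minimal sketch, where the value 90000 and the 1/90000 tick are purely illustrative (they happen to match the MPEG-TS clock):

#include <stdio.h>
#include <inttypes.h>
#include <libavutil/rational.h>
#include <libavutil/mathematics.h>

int main(void)
{
    /* Illustrative values: a pts of 90000 counted in a 1/90000 time base. */
    int64_t    pts = 90000;
    AVRational tb  = (AVRational){ 1, 90000 };

    /* seconds = pts * time_base */
    double seconds = pts * av_q2d(tb);

    /* The same instant rescaled into a 1/1000 (millisecond) time base. */
    int64_t ms = av_rescale_q(pts, tb, (AVRational){ 1, 1000 });

    printf("pts=%" PRId64 " in 1/90000 -> %.3f s, %" PRId64 " ms\n", pts, seconds, ms);
    return 0;
}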
Transcoding one frame with ffmpeg
The transcoding process
In this document I will mainly focus on how the timestamps are handled at each step; that is the reason for writing this article.
Reading a frame
Registration, opening the input, scanning the stream information (av_register_all(), avformat_open_input(), avformat_find_stream_info()), scanning for and opening the decoders, and opening the encoders and the output container based on the decoding and encoding parameters: once this preparation work is done, we read the audio/video data frame by frame and decode and encode it. So how do we read one frame of data?
av_read_frame(): call this function! Inside the ffmpeg program the call flow for reading one frame looks roughly like this, in pseudo-code:
main()
{
    transcode() {
        while (no stop signal received) {
            transcode_step() {
                process_input() {
                    get_input_packet() {
                        av_read_frame()
                    }
                }
            }
        }
    }
}
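Before digging into the internals, here is what the caller side of that flow looks like in a standalone program. A minimal sketch with error handling trimmed; the file path is simply whatever is passed on the command line:

#include <stdio.h>
#include <inttypes.h>
#include <libavformat/avformat.h>

int main(int argc, char **argv)
{
    AVFormatContext *fmt_ctx = NULL;
    AVPacket pkt;
    int ret;

    if (argc < 2)
        return 1;

    /* av_register_all() is needed before FFmpeg 4.0 and is a no-op afterwards */
    if ((ret = avformat_open_input(&fmt_ctx, argv[1], NULL, NULL)) < 0)
        return 1;
    if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0)
        goto end;

    /* Read demuxed packets one by one; this is the av_read_frame() path
     * discussed above. A real transcoder would feed each packet to a decoder. */
    while (av_read_frame(fmt_ctx, &pkt) >= 0) {
        printf("stream=%d pts=%" PRId64 " dts=%" PRId64 " size=%d\n",
               pkt.stream_index, pkt.pts, pkt.dts, pkt.size);
        av_packet_unref(&pkt);
    }

end:
    avformat_close_input(&fmt_ctx);
    return 0;
}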
Below is an excerpt of the av_read_frame() code, showing how a frame is read:
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
{
    // genpts is 1 if the caller asked for missing presentation timestamps
    // to be generated automatically, 0 otherwise
    const int genpts = s->flags & AVFMT_FLAG_GENPTS;
    int eof = 0;
    int ret;
    AVStream *st;

    // No automatic timestamp generation; this is the path normally taken
    if (!genpts) {
        // Internally ffmpeg puts the frames it has parsed into a queue, so
        // first check whether that queue (packet_buffer) already holds data;
        // if it does, take a packet from it directly, otherwise call the real
        // reading function.
        // Why a buffer queue instead of reading directly? Because one parse
        // step may produce several frames, e.g. in mpegts a single PES packet
        // can contain 2 frames, so parsed frames go straight into the queue.
        ret = s->internal->packet_buffer
              ? ff_packet_list_get(&s->internal->packet_buffer,
                                   &s->internal->packet_buffer_end, pkt)
              : read_frame_internal(s, pkt);
        if (ret < 0)
            return ret;
        goto return_packet;
    } else {
        // ...
    }
}
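As an aside, the genpts branch above is only taken when the caller asks for it. A small sketch of how that flag could be set before reading; the function name and the "input.ts" path are just placeholders:

#include <libavformat/avformat.h>

/* Sketch only: open an input with AVFMT_FLAG_GENPTS set, so av_read_frame()
 * takes the else branch shown above and generates missing pts values. */
static int open_with_genpts(AVFormatContext **fmt_ctx)
{
    int ret = avformat_open_input(fmt_ctx, "input.ts", NULL, NULL);
    if (ret < 0)
        return ret;
    (*fmt_ctx)->flags |= AVFMT_FLAG_GENPTS;  /* ask libavformat to fill in missing pts */
    return avformat_find_stream_info(*fmt_ctx, NULL);
}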
Next, let's look at the function that does the real reading, read_frame_internal().
A quick digression
In the code above we saw a packet_buffer, a list used to cache the frame data that has been read. ffmpeg actually defines several such buffers, so let's first get to know these structures.
They are defined in AVFormatInternal, reached through AVFormatContext->internal:
/**
 * This buffer is only needed when packets were already buffered but
 * not decoded, for example to get the codec parameters in MPEG
 * streams.
 * My understanding: a stretch of data is cached here while the codec
 * parameters are being probed; to be confirmed.
 */
struct AVPacketList *packet_buffer;
struct AVPacketList *packet_buffer_end;

/* av_seek_frame() support */
int64_t data_offset; /**< offset of the first packet */

/**
 * Raw packets from the demuxer, prior to parsing and decoding.
 * This buffer is used for buffering packets until the codec can
 * be identified, as parsing cannot be done without knowing the
 * codec.
 * This buffer holds data read from the demuxer that has not been parsed
 * or decoded yet; for an mpegts file these are the PES packets.
 */
struct AVPacketList *raw_packet_buffer;
struct AVPacketList *raw_packet_buffer_end;

/**
 * Packets split by the parser get queued here.
 * What lands here is frame data after parsing is complete; for H264,
 * one packet is one frame.
 */
struct AVPacketList *parse_queue;
struct AVPacketList *parse_queue_end;
From these buffers we can also see the order in which data flows: demuxer packets -> parsed packets -> packets handed to the decoder. Taking mpegts as an example again:
PES packet -> H264 I / B / P frames
raw_packet_buffer -> parse_queue
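To make the head/tail bookkeeping of these queues concrete, here is a conceptual sketch of a FIFO built on the public AVPacketList struct. It only mirrors what ff_packet_list_put()/ff_packet_list_get() do; it is not the actual FFmpeg implementation:

#include <libavformat/avformat.h>
#include <libavutil/mem.h>

/* Conceptual sketch, not the internal ff_packet_list_* code. */
static int pkt_list_put(AVPacketList **head, AVPacketList **tail, AVPacket *pkt)
{
    AVPacketList *node = av_mallocz(sizeof(*node));
    if (!node)
        return AVERROR(ENOMEM);
    av_packet_move_ref(&node->pkt, pkt);   /* take ownership of the packet */
    if (*tail)
        (*tail)->next = node;              /* append at the tail */
    else
        *head = node;                      /* queue was empty */
    *tail = node;
    return 0;
}

static int pkt_list_get(AVPacketList **head, AVPacketList **tail, AVPacket *pkt)
{
    AVPacketList *node = *head;
    if (!node)
        return AVERROR(EAGAIN);            /* nothing buffered */
    *pkt  = node->pkt;                     /* hand the packet over, references included */
    *head = node->next;
    if (!*head)
        *tail = NULL;
    av_freep(&node);
    return 0;
}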
read_frame_internal
First, the main job of this function: read a packet from the format (the demuxer), parse it, and add the resulting data to the buffers.
static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
{
    int ret = 0, i, got_packet = 0;
    AVDictionary *metadata = NULL;

    av_init_packet(pkt);

    while (!got_packet && !s->internal->parse_queue) {
        AVStream *st;
        AVPacket cur_pkt;

        /* read next packet */
        // ff_read_packet() first returns a packet from raw_packet_buffer if one
        // is queued there; otherwise it calls the demuxer's read_packet(),
        // e.g. mpegts_read_packet() for mpegts
        ret = ff_read_packet(s, &cur_pkt);
        if (ret < 0) {
            if (ret == AVERROR(EAGAIN))
                return ret;
            /* flush the parsers */
            for (i = 0; i < s->nb_streams; i++) {
                st = s->streams[i];
                if (st->parser && st->need_parsing)
                    parse_packet(s, NULL, st->index);
            }
            /* all remaining packets are now in parse_queue =>
             * really terminate parsing */
            break;
        }
        ret = 0;
        st  = s->streams[cur_pkt.stream_index];

        /* update context if required */
        if (st->internal->need_context_update) {
            if (avcodec_is_open(st->internal->avctx)) {
                av_log(s, AV_LOG_DEBUG, "Demuxer context update while decoder is open, closing and trying to re-open\n");
                avcodec_close(st->internal->avctx);
                st->info->found_decoder = 0;
            }

            /* close parser, because it depends on the codec */
            if (st->parser && st->internal->avctx->codec_id != st->codecpar->codec_id) {
                av_parser_close(st->parser);
                st->parser = NULL;
            }

            ret = avcodec_parameters_to_context(st->internal->avctx, st->codecpar);
            if (ret < 0)
                return ret;

#if FF_API_LAVF_AVCTX
FF_DISABLE_DEPRECATION_WARNINGS
            /* update deprecated public codec context */
            ret = avcodec_parameters_to_context(st->codec, st->codecpar);
            if (ret < 0)
                return ret;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

            st->internal->need_context_update = 0;
        }
        if (cur_pkt.pts != AV_NOPTS_VALUE &&
            cur_pkt.dts != AV_NOPTS_VALUE &&
            cur_pkt.pts < cur_pkt.dts) {
            av_log(s, AV_LOG_WARNING,
                   "Invalid timestamps stream=%d, pts=%s, dts=%s, size=%d\n",
                   cur_pkt.stream_index,
                   av_ts2str(cur_pkt.pts),
                   av_ts2str(cur_pkt.dts),
                   cur_pkt.size);
        }
        if (s->debug & FF_FDEBUG_TS)
            av_log(s, AV_LOG_DEBUG,
                   "ff_read_packet stream=%d, pts=%s, dts=%s, size=%d, duration=%"PRId64", flags=%d\n",
                   cur_pkt.stream_index,
                   av_ts2str(cur_pkt.pts),
                   av_ts2str(cur_pkt.dts),
                   cur_pkt.size, cur_pkt.duration, cur_pkt.flags);

        if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
            st->parser = av_parser_init(st->codecpar->codec_id);
            if (!st->parser) {
                av_log(s, AV_LOG_VERBOSE, "parser not found for codec "
                       "%s, packets or times may be invalid.\n",
                       avcodec_get_name(st->codecpar->codec_id));
                /* no parser available: just output the raw packets */
                st->need_parsing = AVSTREAM_PARSE_NONE;
            } else if (st->need_parsing == AVSTREAM_PARSE_HEADERS)
                st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
            else if (st->need_parsing == AVSTREAM_PARSE_FULL_ONCE)
                st->parser->flags |= PARSER_FLAG_ONCE;
            else if (st->need_parsing == AVSTREAM_PARSE_FULL_RAW)
                st->parser->flags |= PARSER_FLAG_USE_CODEC_TS;
        }

        if (!st->need_parsing || !st->parser) {
            /* no parsing needed: we just output the packet as is */
            *pkt = cur_pkt;
            compute_pkt_fields(s, st, NULL, pkt, AV_NOPTS_VALUE, AV_NOPTS_VALUE);
            if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
                (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
                ff_reduce_index(s, st->index);
                av_add_index_entry(st, pkt->pos, pkt->dts,
                                   0, 0, AVINDEX_KEYFRAME);
            }
            got_packet = 1;
        } else if (st->discard < AVDISCARD_ALL) {
            if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
                return ret;
            st->codecpar->sample_rate    = st->internal->avctx->sample_rate;
            st->codecpar->bit_rate       = st->internal->avctx->bit_rate;
            st->codecpar->channels       = st->internal->avctx->channels;
            st->codecpar->channel_layout = st->internal->avctx->channel_layout;
            st->codecpar->codec_id       = st->internal->avctx->codec_id;
        } else {
            /* free packet */
            av_packet_unref(&cur_pkt);
        }
        if (pkt->flags & AV_PKT_FLAG_KEY)
            st->skip_to_keyframe = 0;
        if (st->skip_to_keyframe) {
            av_packet_unref(&cur_pkt);
            if (got_packet) {
                *pkt = cur_pkt;
            }
            got_packet = 0;
        }
    }
    if (!got_packet && s->internal->parse_queue)
        ret = ff_packet_list_get(&s->internal->parse_queue, &s->internal->parse_queue_end, pkt);

    if (ret >= 0) {
        AVStream *st = s->streams[pkt->stream_index];
        int discard_padding = 0;
        if (st->first_discard_sample && pkt->pts != AV_NOPTS_VALUE) {
            int64_t pts = pkt->pts - (is_relative(pkt->pts) ? RELATIVE_TS_BASE : 0);
            int64_t sample = ts_to_samples(st, pts);
            int duration = ts_to_samples(st, pkt->duration);
            int64_t end_sample = sample + duration;
            if (duration > 0 && end_sample >= st->first_discard_sample &&
                sample < st->last_discard_sample)
                discard_padding = FFMIN(end_sample - st->first_discard_sample, duration);
        }
        if (st->start_skip_samples && (pkt->pts == 0 || pkt->pts == RELATIVE_TS_BASE))
            st->skip_samples = st->start_skip_samples;
        if (st->skip_samples || discard_padding) {
            uint8_t *p = av_packet_new_side_data(pkt, AV_PKT_DATA_SKIP_SAMPLES, 10);
            if (p) {
                AV_WL32(p, st->skip_samples);
                AV_WL32(p + 4, discard_padding);
                av_log(s, AV_LOG_DEBUG, "demuxer injecting skip %d / discard %d\n", st->skip_samples, discard_padding);
            }
            st->skip_samples = 0;
        }

        if (st->inject_global_side_data) {
            for (i = 0; i < st->nb_side_data; i++) {
                AVPacketSideData *src_sd = &st->side_data[i];
                uint8_t *dst_data;

                if (av_packet_get_side_data(pkt, src_sd->type, NULL))
                    continue;

                dst_data = av_packet_new_side_data(pkt, src_sd->type, src_sd->size);
                if (!dst_data) {
                    av_log(s, AV_LOG_WARNING, "Could not inject global side data\n");
                    continue;
                }

                memcpy(dst_data, src_sd->data, src_sd->size);
            }
            st->inject_global_side_data = 0;
        }
    }

    av_opt_get_dict_val(s, "metadata", AV_OPT_SEARCH_CHILDREN, &metadata);
    if (metadata) {
        s->event_flags |= AVFMT_EVENT_FLAG_METADATA_UPDATED;
        av_dict_copy(&s->metadata, metadata, 0);
        av_dict_free(&metadata);
        av_opt_set_dict_val(s, "metadata", NULL, AV_OPT_SEARCH_CHILDREN);
    }

#if FF_API_LAVF_AVCTX
    update_stream_avctx(s);
#endif

    if (s->debug & FF_FDEBUG_TS)
        av_log(s, AV_LOG_DEBUG,
               "read_frame_internal stream=%d, pts=%s, dts=%s, "
               "size=%d, duration=%"PRId64", flags=%d\n",
               pkt->stream_index,
               av_ts2str(pkt->pts),
               av_ts2str(pkt->dts),
               pkt->size, pkt->duration, pkt->flags);

    return ret;
}
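One practical takeaway from the FF_FDEBUG_TS checks above: you can watch the pts/dts of every packet at this stage without modifying the code. A small sketch, assuming fmt_ctx is an already opened AVFormatContext:

#include <libavformat/avformat.h>
#include <libavutil/log.h>

/* Sketch: make the FF_FDEBUG_TS log lines in ff_read_packet() and
 * read_frame_internal() visible; roughly what "ffmpeg -fdebug ts" does. */
static void enable_ts_debug(AVFormatContext *fmt_ctx)
{
    av_log_set_level(AV_LOG_DEBUG);  /* the messages are emitted at DEBUG level */
    fmt_ctx->debug |= FF_FDEBUG_TS;
}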
To be continued.