Demuxing and Decoding to YUV with the FFmpeg API

Step 1: Initialize the relevant structures

This step breaks down into three parts (a condensed sketch of the corresponding API calls follows the list):

1. Open the input file to obtain an AVFormatContext (this demuxes the input container format).

2. From the AVFormatContext of the opened file, obtain the audio and video AVStream.

3. Obtain the audio and video decoder contexts and decoder instances.
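Condensed to its essentials, the sequence looks like this. This is only a minimal sketch using the same libavformat/libavcodec calls as the full code below; "input.mp4" is a placeholder file name and all error checking is omitted:

    AVFormatContext *fmt_ctx = NULL;

    /* 1. open the file and read the container header */
    avformat_open_input(&fmt_ctx, "input.mp4", NULL, NULL);
    avformat_find_stream_info(fmt_ctx, NULL);

    /* 2. locate the video stream (the same call works with AVMEDIA_TYPE_AUDIO) */
    int stream_idx = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
    AVStream *st = fmt_ctx->streams[stream_idx];

    /* 3. find and open the decoder for that stream */
    AVCodec *dec = avcodec_find_decoder(st->codec->codec_id);
    avcodec_open2(st->codec, dec, NULL);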

avcodec_find_decoder and avcodec_open2 open the decoders in preparation for decoding the audio and video streams. This part is implemented as follows:

static int open_codec_context(IOFileName &files, DemuxingVideoAudioContex &va_ctx, enum AVMediaType type)
{
    int ret, stream_index;
    AVStream *st;
    AVCodecContext *dec_ctx = NULL;
    AVCodec *dec = NULL;
    AVDictionary *opts = NULL;

    ret = av_find_best_stream(va_ctx.fmt_ctx, type, -1, -1, NULL, 0);
    if (ret < 0)
    {
        fprintf(stderr, "Could not find %s stream in input file '%s'\n", av_get_media_type_string(type), files.src_filename);
        return ret;
    }
    else
    {
        stream_index = ret;
        st = va_ctx.fmt_ctx->streams[stream_index];

        /* find decoder for the stream */
        dec_ctx = st->codec;
        dec = avcodec_find_decoder(dec_ctx->codec_id);
        if (!dec)
        {
            fprintf(stderr, "Failed to find %s codec\n", av_get_media_type_string(type));
            return AVERROR(EINVAL);
        }

        /* Init the decoders, with or without reference counting */
        av_dict_set(&opts, "refcounted_frames", files.refcount ? "1" : "0", 0);
        if ((ret = avcodec_open2(dec_ctx, dec, &opts)) < 0)
        {
            fprintf(stderr, "Failed to open %s codec\n", av_get_media_type_string(type));
            return ret;
        }

        switch (type)
        {
        case AVMEDIA_TYPE_VIDEO:
            va_ctx.video_stream_idx = stream_index;
            va_ctx.video_stream = va_ctx.fmt_ctx->streams[stream_index];
            va_ctx.video_dec_ctx = va_ctx.video_stream->codec;
            break;
        case AVMEDIA_TYPE_AUDIO:
            va_ctx.audio_stream_idx = stream_index;
            va_ctx.audio_stream = va_ctx.fmt_ctx->streams[stream_index];
            va_ctx.audio_dec_ctx = va_ctx.audio_stream->codec;
            break;
        default:
            fprintf(stderr, "Error: unsupported MediaType: %s\n", av_get_media_type_string(type));
            return -1;
        }
    }

    return 0;
}
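Note that this code targets the older libavcodec API: st->codec and the "refcounted_frames" option are deprecated in FFmpeg 3.1 and later. For reference only (the rest of this article keeps the older API), the decoder setup inside the else branch would look roughly like the following sketch on a newer FFmpeg:

    /* newer-API sketch (FFmpeg >= 3.1): build a separate codec context from the
     * stream's codec parameters instead of using the deprecated st->codec */
    dec = avcodec_find_decoder(st->codecpar->codec_id);
    dec_ctx = avcodec_alloc_context3(dec);
    avcodec_parameters_to_context(dec_ctx, st->codecpar);
    ret = avcodec_open2(dec_ctx, dec, NULL);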

The overall initialization function is:

int InitDemuxContext(IOFileName &files, DemuxingVideoAudioContex &va_ctx)
{
    int ret = 0, width, height;

    /* register all formats and codecs */
    av_register_all();

    /* open input file, and allocate format context */
    if (avformat_open_input(&(va_ctx.fmt_ctx), files.src_filename, NULL, NULL) < 0)
    {
        fprintf(stderr, "Could not open source file %s\n", files.src_filename);
        return -1;
    }

    /* retrieve stream information */
    if (avformat_find_stream_info(va_ctx.fmt_ctx, NULL) < 0)
    {
        fprintf(stderr, "Could not find stream information\n");
        return -1;
    }

    if (open_codec_context(files, va_ctx, AVMEDIA_TYPE_VIDEO) >= 0)
    {
        files.video_dst_file = fopen(files.video_dst_filename, "wb");
        if (!files.video_dst_file)
        {
            fprintf(stderr, "Could not open destination file %s\n", files.video_dst_filename);
            return -1;
        }

        /* allocate image where the decoded image will be put */
        va_ctx.width = va_ctx.video_dec_ctx->width;
        va_ctx.height = va_ctx.video_dec_ctx->height;
        va_ctx.pix_fmt = va_ctx.video_dec_ctx->pix_fmt;
        ret = av_image_alloc(va_ctx.video_dst_data, va_ctx.video_dst_linesize, va_ctx.width, va_ctx.height, va_ctx.pix_fmt, 1);
        if (ret < 0)
        {
            fprintf(stderr, "Could not allocate raw video buffer\n");
            return -1;
        }
        va_ctx.video_dst_bufsize = ret;
    }

    if (open_codec_context(files, va_ctx, AVMEDIA_TYPE_AUDIO) >= 0)
    {
        files.audio_dst_file = fopen(files.audio_dst_filename, "wb");
        if (!files.audio_dst_file)
        {
            fprintf(stderr, "Could not open destination file %s\n", files.audio_dst_filename);
            return -1;
        }
    }

    if (va_ctx.video_stream)
    {
        printf("Demuxing video from file '%s' into '%s'\n", files.src_filename, files.video_dst_filename);
    }

    if (va_ctx.audio_stream)
    {
        printf("Demuxing audio from file '%s' into '%s'\n", files.src_filename, files.audio_dst_filename);
    }

    /* dump input information to stderr */
    av_dump_format(va_ctx.fmt_ctx, 0, files.src_filename, 0);

    if (!va_ctx.audio_stream && !va_ctx.video_stream)
    {
        fprintf(stderr, "Could not find audio or video stream in the input, aborting\n");
        return -1;
    }

    return 0;
}
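IOFileName and DemuxingVideoAudioContex are structures defined elsewhere in the project; judging from the code above, the former holds the file names and output file handles while the latter holds the demuxing and decoding state. A hedged sketch of how InitDemuxContext might be driven from main follows; the field names are inferred from the code above, not taken from the actual header:

    int main(int argc, char **argv)
    {
        IOFileName files = {};
        DemuxingVideoAudioContex va_ctx = {};

        files.src_filename       = argv[1];   // input container, e.g. an MP4 file
        files.video_dst_filename = argv[2];   // output raw YUV file
        files.audio_dst_filename = argv[3];   // output raw PCM file
        files.refcount           = 0;         // decode without frame reference counting

        if (InitDemuxContext(files, va_ctx) < 0)
            return -1;

        /* ... allocate the frame, init the packet, and run the read loop shown below ... */
        return 0;
    }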

Allocate the AVFrame and initialize the AVPacket object:

va_ctx.frame = av_frame_alloc(); // allocate the AVFrame structure
if (!va_ctx.frame)
{
    fprintf(stderr, "Could not allocate frame\n");
    ret = AVERROR(ENOMEM);
    goto end;
}

/* initialize packet, set data to NULL, let the demuxer fill it */
av_init_packet(&va_ctx.pkt); // initialize the AVPacket object
va_ctx.pkt.data = NULL;
va_ctx.pkt.size = 0;
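The snippet above jumps to an end label on failure. The article does not show that label, but it is where the resources acquired during initialization should be released. A sketch of that cleanup, assuming the same fields used above:

    end:
        avcodec_close(va_ctx.video_dec_ctx);      // close the decoders opened by avcodec_open2
        avcodec_close(va_ctx.audio_dec_ctx);
        avformat_close_input(&va_ctx.fmt_ctx);    // free the demuxer context
        if (files.video_dst_file)
            fclose(files.video_dst_file);
        if (files.audio_dst_file)
            fclose(files.audio_dst_file);
        av_frame_free(&va_ctx.frame);             // free the AVFrame allocated above
        av_free(va_ctx.video_dst_data[0]);        // free the buffer from av_image_alloc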

Step 2: Loop over the packets of the input file

The loop that parses the input file is:

/* read frames from the file */
while (av_read_frame(va_ctx.fmt_ctx, &va_ctx.pkt) >= 0) // read one packet from the input
{
    AVPacket orig_pkt = va_ctx.pkt;
    do
    {
        ret = Decode_packet(files, va_ctx, &got_frame, 0); // decode this packet
        if (ret < 0)
            break;
        va_ctx.pkt.data += ret;
        va_ctx.pkt.size -= ret;
    } while (va_ctx.pkt.size > 0);
    av_packet_unref(&orig_pkt);
}

The logic of this code is very simple: av_read_frame reads one packet of data from the file, and a Decode_packet function decodes that packet. Because a single packet may contain more than one frame (notably for some audio codecs), the inner do/while loop keeps calling Decode_packet and advances pkt.data by the number of bytes consumed until the packet is exhausted. Decode_packet is implemented as follows:

int Decode_packet(IOFileName &files, DemuxingVideoAudioContex &va_ctx, int *got_frame, int cached)
{
    int ret = 0;
    int decoded = va_ctx.pkt.size;
    static int video_frame_count = 0;
    static int audio_frame_count = 0;

    *got_frame = 0;

    if (va_ctx.pkt.stream_index == va_ctx.video_stream_idx)
    {
        /* decode video frame */
        ret = avcodec_decode_video2(va_ctx.video_dec_ctx, va_ctx.frame, got_frame, &va_ctx.pkt);
        if (ret < 0)
        {
            printf("Error decoding video frame (%d)\n", ret);
            return ret;
        }

        if (*got_frame)
        {
            if (va_ctx.frame->width != va_ctx.width || va_ctx.frame->height != va_ctx.height ||
                va_ctx.frame->format != va_ctx.pix_fmt)
            {
                /* To handle this change, one could call av_image_alloc again and
                 * decode the following frames into another rawvideo file. */
                printf("Error: Width, height and pixel format have to be "
                       "constant in a rawvideo file, but the width, height or "
                       "pixel format of the input video changed:\n"
                       "old: width = %d, height = %d, format = %s\n"
                       "new: width = %d, height = %d, format = %s\n",
                       va_ctx.width, va_ctx.height, av_get_pix_fmt_name((AVPixelFormat)(va_ctx.pix_fmt)),
                       va_ctx.frame->width, va_ctx.frame->height,
                       av_get_pix_fmt_name((AVPixelFormat)va_ctx.frame->format));
                return -1;
            }

            printf("video_frame%s n:%d coded_n:%d pts:%lld\n", cached ? "(cached)" : "",
                   video_frame_count++, va_ctx.frame->coded_picture_number,
                   (long long)va_ctx.frame->pts);

            /* copy decoded frame to destination buffer:
             * this is required since rawvideo expects non aligned data */
            av_image_copy(va_ctx.video_dst_data, va_ctx.video_dst_linesize,
                          (const uint8_t **)(va_ctx.frame->data), va_ctx.frame->linesize,
                          va_ctx.pix_fmt, va_ctx.width, va_ctx.height);

            /* write to rawvideo file */
            fwrite(va_ctx.video_dst_data[0], 1, va_ctx.video_dst_bufsize, files.video_dst_file);
        }
    }
    else if (va_ctx.pkt.stream_index == va_ctx.audio_stream_idx)
    {
        /* decode audio frame */
        ret = avcodec_decode_audio4(va_ctx.audio_dec_ctx, va_ctx.frame, got_frame, &va_ctx.pkt);
        if (ret < 0)
        {
            printf("Error decoding audio frame (%d)\n", ret);
            return ret;
        }
        /* Some audio decoders decode only part of the packet, and have to be
         * called again with the remainder of the packet data.
         * Sample: fate-suite/lossless-audio/luckynight-partial.shn
         * Also, some decoders might over-read the packet. */
        decoded = FFMIN(ret, va_ctx.pkt.size);

        if (*got_frame)
        {
            size_t unpadded_linesize = va_ctx.frame->nb_samples * av_get_bytes_per_sample((AVSampleFormat)va_ctx.frame->format);
            printf("audio_frame%s n:%d nb_samples:%d pts:%lld\n",
                   cached ? "(cached)" : "",
                   audio_frame_count++, va_ctx.frame->nb_samples,
                   (long long)va_ctx.frame->pts);

            /* Write the raw audio data samples of the first plane. This works
             * fine for packed formats (e.g. AV_SAMPLE_FMT_S16). However,
             * most audio decoders output planar audio, which uses a separate
             * plane of audio samples for each channel (e.g. AV_SAMPLE_FMT_S16P).
             * In other words, this code will write only the first audio channel
             * in these cases.
             * You should use libswresample or libavfilter to convert the frame
             * to packed data. */
            fwrite(va_ctx.frame->extended_data[0], 1, unpadded_linesize, files.audio_dst_file);
        }
    }

    /* If we use frame reference counting, we own the data and need
     * to de-reference it when we don't use it anymore */
    if (*got_frame && files.refcount)
        av_frame_unref(va_ctx.frame);

    return decoded;
}

In this function, the stream_index of the packet that was read is first compared against the audio and video stream indices obtained earlier to decide whether the packet belongs to the audio or the video stream, and the corresponding decode function is then called. Taking the video stream as an example: once the packet is identified as video, avcodec_decode_video2 decodes the stream data into pixel data, and once a complete frame has been obtained it is written out to the output file.
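One detail worth noting: the cached parameter of Decode_packet exists because some decoders buffer frames internally. After av_read_frame reaches end of file, those buffered frames are drained by feeding the decoder an empty packet until got_frame stays 0. A sketch of that flush step, assuming the same Decode_packet signature (it mirrors the flush loop in FFmpeg's demuxing_decoding example, which this code closely follows):

    /* flush cached frames left inside the decoders after EOF */
    va_ctx.pkt.data = NULL;
    va_ctx.pkt.size = 0;
    do
    {
        Decode_packet(files, va_ctx, &got_frame, 1);   // cached = 1
    } while (got_frame);

The output files are raw data with no header, so a player must be told their format explicitly; for example, the video can be checked with ffplay -f rawvideo -pixel_format <pix_fmt> -video_size <width>x<height> output.yuv, substituting the values reported by av_dump_format.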
