前言
本文的代码参考 ffmpeg 官方提供的 demo,结合 decode_video 和 demuxing_decoding 这两个例子,将一个本地的视频文件解码成非压缩的视频文件(官方提供的例子是将每一帧存为一个文件),并可以使用 ffplay 进行播放(参考代码末尾打印的命令进行播放)。
代码
/**
* @file
* video decoding with libavcodec API example
*
* @example decode_video.c
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <libavutil/imgutils.h>
#include <libavutil/samplefmt.h>
#include <libavutil/timestamp.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#define INBUF_SIZE 4096
/* Shared decoding state, initialized by decode()/decode_video(). */
static FILE* outfile = NULL;                  /* rawvideo output file, opened by decode_video() */
static size_t width = 0, height = 0;          /* frame dimensions captured from the decoder context */
static enum AVPixelFormat pix_fmt;            /* pixel format captured from the decoder context */
static uint8_t* video_dst_data[4] = { NULL }; /* per-plane pointers of the packed destination image */
static int video_dst_linesize[4];             /* per-plane line sizes of the destination image */
static int video_dst_bufsize;                 /* total byte size of one packed destination frame */
static int set_dec_flag = 0;                  /* 1 once the destination buffer has been allocated */
/**
 * Copy one decoded frame into the packed destination buffer and append it
 * to the rawvideo output file.
 *
 * @param frame decoded frame; must match the stream parameters captured
 *              when the destination buffer was allocated
 * @return 0 on success, -1 on parameter change or write error
 */
static int output_video_frame(AVFrame* frame)
{
    if (frame->width != (int)width || frame->height != (int)height ||
        frame->format != pix_fmt)
    {
        /* A rawvideo file has no per-frame headers, so width, height and
         * pixel format must stay constant. To handle a change, one could
         * call av_image_alloc again and decode the following frames into
         * another rawvideo file. */
        fprintf(stderr, "Error: Width, height and pixel format have to be "
                "constant in a rawvideo file, but the width, height or "
                "pixel format of the input video changed:\n"
                "old: width = %zu, height = %zu, format = %s\n"
                "new: width = %d, height = %d, format = %s\n",
                width, height, av_get_pix_fmt_name(pix_fmt),
                frame->width, frame->height,
                av_get_pix_fmt_name(frame->format));
        return -1;
    }
    /* Copy the (possibly padded) decoded picture into the tightly packed
     * destination buffer so it can be written as one contiguous chunk. */
    av_image_copy(video_dst_data, video_dst_linesize,
                  (const uint8_t**)(frame->data), frame->linesize,
                  pix_fmt, width, height);
    /* Write the packed frame to the rawvideo file; detect short writes. */
    if (fwrite(video_dst_data[0], 1, video_dst_bufsize, outfile)
            != (size_t)video_dst_bufsize) {
        fprintf(stderr, "Error writing decoded frame to output file\n");
        return -1;
    }
    return 0;
}
/**
 * Send one packet (or NULL to flush) to the decoder and write every frame
 * it produces to the output file.
 *
 * On the first call, captures the stream's width/height/pixel format from
 * the decoder context and allocates the packed destination image.
 *
 * @param dec_ctx  opened decoder context
 * @param frame    reusable frame for avcodec_receive_frame()
 * @param pkt      packet to decode, or NULL to flush the decoder
 * @param filename unused; kept for interface compatibility
 * @return 0 on success (or when more input is needed), -1 on error
 */
static int decode(AVCodecContext* dec_ctx, AVFrame* frame, AVPacket* pkt,
                  const char* filename)
{
    int ret;

    (void)filename; /* not used; retained so callers need not change */

    ret = avcodec_send_packet(dec_ctx, pkt);
    if (ret < 0) {
        fprintf(stderr, "Error sending a packet for decoding\n");
        return -1;
    }

    if (set_dec_flag == 0) {
        /* Check the decoder's int fields directly: the old check compared
         * the unsigned size_t copies against <= 0, which could never catch
         * a negative value. */
        if (dec_ctx->width <= 0 || dec_ctx->height <= 0)
            return -1; /* stream parameters not known yet -> give up */
        width = dec_ctx->width;
        height = dec_ctx->height;
        pix_fmt = dec_ctx->pix_fmt;
        ret = av_image_alloc(video_dst_data, video_dst_linesize,
                             width, height, pix_fmt, 1);
        if (ret < 0) {
            fprintf(stderr, "Could not allocate raw video buffer\n");
            return -1;
        }
        video_dst_bufsize = ret;
        set_dec_flag = 1;
    }

    /* Drain ALL frames this packet produced. The previous version returned
     * after the first frame, silently dropping any further frames (notably
     * the buffered frames emitted while flushing with pkt == NULL). */
    for (;;) {
        ret = avcodec_receive_frame(dec_ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0; /* decoder needs more input, or is fully flushed */
        if (ret < 0) {
            fprintf(stderr, "Error during decoding\n");
            return -1;
        }
        fflush(stdout);
        if (output_video_frame(frame) < 0)
            return -1;
    }
}
/// <summary>
/// 将视频文件解码成非压缩的视频文件,filename文件的类型需要为参数codec_id类型的视频文件,否则无法进行解码(如果是复合文件,请参考demuxing_decoding的例子)
/// </summary>
/// <param name="codec_id[in]">要解码的视频类型</param>
/// <param name="filename[in]">要解码的文件</param>
/// <param name="outfilename[in]">输出文件</param>
/// <returns></returns>
int decode_video(enum AVCodecID codec_id, char const* filename, char const* outfilename)
{
const AVCodec* codec = NULL;
AVCodecParserContext* parser = NULL;
AVCodecContext* c = NULL;
FILE* f = NULL;
AVFrame* frame = NULL;
uint8_t inbuf[INBUF_SIZE + AV_INPUT_BUFFER_PADDING_SIZE];
uint8_t* data;
size_t data_size;
int ret;
AVPacket* pkt = NULL;
pkt = av_packet_alloc();
if (!pkt)exit(1);
/* set end of buffer to 0 (this ensures that no overreading happens for damaged MPEG streams)
* 设置缓冲区的末尾为0(这确保了损坏的MPEG流不会发生过度读取)
*/
memset(inbuf + INBUF_SIZE, 0, AV_INPUT_BUFFER_PADDING_SIZE);
/* find the codec decoder */
codec = avcodec_find_decoder(codec_id);
if (!codec) {
fprintf(stderr, "Codec not found\n");
goto end;
}
parser = av_parser_init(codec->id);
if (!parser) {
fprintf(stderr, "parser not found\n");
goto end;
}
c = avcodec_alloc_context3(codec);
if (!c) {
fprintf(stderr, "Could not allocate video codec context\n");
goto end;
}
/* For some codecs, such as msmpeg4 and mpeg4, width and height
MUST be initialized there because this information is not
available in the bitstream. */
/* open it */
if (avcodec_open2(c, codec, NULL) < 0) {
fprintf(stderr, "Could not open codec\n");
goto end;
}
f = fopen(filename, "rb");
if (!f) {
fprintf(stderr, "Could not open %s\n", filename);
goto end;
}
outfile = fopen(outfilename, "wb+");
if (!outfile) {
fprintf(stderr, "could not open %s\n", outfilename);
goto end;
}
frame = av_frame_alloc();
if (!frame) {
fprintf(stderr, "Could not allocate video frame\n");
exit(1);
}
while (!feof(f)) {
/* read raw data from the input file */
data_size = fread(inbuf, 1, INBUF_SIZE, f);
if (!data_size)
break;
/* use the parser to split the data into frames */
data = inbuf;
while (data_size > 0) {
ret = av_parser_parse2(parser, c, &pkt->data, &pkt->size,
data, data_size, AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
if (ret < 0) {
fprintf(stderr, "Error while parsing\n");
exit(1);
}
data += ret;
data_size -= ret;
if (pkt->size)//要以形成一个包过后,才能去解析,可能要经过多次解析才可以构成一个包
{
if (decode(c, frame, pkt, outfilename) == -1)//解析失败,直接返回
goto end;
}
}
}
/* flush the decoder */
decode(c, frame, NULL, outfilename);
printf("Play the output video file with the command\nffplay -f rawvideo -pix_fmt %s -video_size %zux%zu %s\n", av_get_pix_fmt_name(pix_fmt), width, height, outfilename);
end:
if(pkt != NULL)av_packet_free(&pkt);
if(f != NULL) fclose(f);
if(outfile != NULL) fclose(outfile);
if(parser != NULL) av_parser_close(parser);
if(c != NULL) avcodec_free_context(&c);
if(frame) av_frame_free(&frame);
if(video_dst_data[0]!= NULL) av_freep(&video_dst_data[0]);
return 0;
}
调用示例
decode_video(AV_CODEC_ID_H264, "test.h264", "test_h264_data2");