介绍
本例程主要介绍按帧长从裸码流(h264)文件中读取每帧码流,通过avpacket传给解码器进行解码。
创建解码器
//Codecs must be registered before any decoder lookup/open can succeed
avcodec_register_all();
//Look up the H.264 decoder by codec ID
enum AVCodecID emCodecId = AV_CODEC_ID_H264;
AVCodec* ptCodec = avcodec_find_decoder(emCodecId);
ASSERT(ptCodec != NULL);
AVCodecContext* ptCodecCtx = avcodec_alloc_context3(ptCodec);
ASSERT(ptCodecCtx != NULL);
if (avcodec_open2(ptCodecCtx, ptCodec, NULL) < 0) {
printf("Could not open codec\n");
return -1;
}
注意上述代码只是根据解码器类型创建解码器,这对于h264格式是可以的,码流的宽高、pixfmt等信息此时并没有,必须得等到解码器解码第一帧之后才会赋值给AVCodecContext。Mpeg4码流验证也是一样的效果
初始化解码前后帧数据结构
AVFrame–解码后帧结构
AVPacket–解码前帧结构
//Allocate ptFrame, which will receive the decoded picture
AVFrame* ptFrame = av_frame_alloc();
ASSERT(ptFrame != NULL);
//Initialize tPacket, which carries one compressed frame into the decoder
AVPacket tPacket;
av_init_packet(&tPacket);
解码
每帧的数据和长度分别赋给AVPacket,然后传给解码器解码
tPacket.data = pbyInputbuf; //address of one complete compressed frame
tPacket.size = pbyPacketLen[0]; //byte length of that frame
dwGotPicture为非0,表示完全解码一帧
ptFrame是解码后的图像
//set non-zero by the decoder once a full picture is available
int dwGotPicture = 0;
int dwRet = avcodec_decode_video2(ptCodecCtx, ptFrame, &dwGotPicture, &tPacket);
if (dwRet < 0)
{
printf("decoder error:%d, FrameNum:%d\n", dwRet, dwIdex);
goto NEXT;
}
if(dwGotPicture)
{
//ptFrame now holds the decoded picture; width/height/pix_fmt are valid here
}
例程
#include <stdio.h>
#include <string.h>
extern "C"
{
#include <libavcodec/avcodec.h>
};
/* Non-fatal assert: prints the location and failed expression but keeps running.
 * Wrapped in do{}while(0) so "if (c) ASSERT(x); else ..." parses correctly
 * (the original bare-if form had a dangling-else hazard). */
#define ASSERT(X) do { if(!(X)){printf("####[%s:%d]assert failed:%s\n", __FUNCTION__, __LINE__, #X);} } while (0)
#define _FILE_NAME_LEN 64 //max file-name buffer length
#define _READ_BUF_SIZE (10<<20) //input bitstream buffer size (10 MiB)
#define _FRAME_NUM 20 //max number of frames to read
#define _VID_WIDTH 1920
#define _VID_HEIGHT 1080
/*
 * Decode an H.264 elementary stream frame-by-frame.
 * Frame boundaries come from a sidecar "<stream>.len" text file that lists
 * each frame's byte length; each frame is fed to the decoder via one AVPacket
 * and the decoded planes are written out as planar YUV420.
 */
int main()
{
    AVCodec* ptCodec = NULL;           //decoder
    AVCodecContext* ptCodecCtx = NULL; //decoder context
    AVFrame* ptFrame = NULL;           //decoded picture
    AVPacket tPacket;                  //input packet; must hold exactly one complete frame

    //Codecs must be registered before find/open can be used
    avcodec_register_all();
    //Look up the H.264 decoder
    enum AVCodecID emCodecId = AV_CODEC_ID_H264;
    ptCodec = avcodec_find_decoder(emCodecId);
    ASSERT(ptCodec != NULL);
    ptCodecCtx = avcodec_alloc_context3(ptCodec);
    ASSERT(ptCodecCtx != NULL);
    if (avcodec_open2(ptCodecCtx, ptCodec, NULL) < 0)
    {
        printf("Could not open codec\n");
        return -1;
    }
    //NOTE: width/height/pix_fmt stay unset until the first frame is decoded
    av_log(NULL, AV_LOG_ERROR,"cur log level:%d\n", av_log_get_level());
    av_log(NULL, AV_LOG_ERROR,"codec:%p, %p codectype:%d, pixfmt:%d\n",
           ptCodec, ptCodecCtx->codec, ptCodec->type, ptCodecCtx->pix_fmt);
    av_log(NULL, AV_LOG_ERROR,"coded_width:%d, coded_height:%d, (%d,%d)\n",
           ptCodecCtx->coded_width,
           ptCodecCtx->coded_height, ptCodecCtx->width, ptCodecCtx->height);

    //Allocate the decoded-frame holder
    ptFrame = av_frame_alloc();
    ASSERT(ptFrame != NULL);
    //Initialize the input packet
    av_init_packet(&tPacket);

    const char* pszInputFileName = "1080P.h264";//"704x576.mpeg4"; input bitstream file
    const char* pszOutPutFileName = "1080P.yuv";//"704x576.yuv"; decoded-picture output file
    char arbyInputLenFileName[_FILE_NAME_LEN]; //sidecar length file: one frame size per entry
    memset(arbyInputLenFileName, 0, _FILE_NAME_LEN);
    //BUGFIX: bounded snprintf instead of sprintf
    snprintf(arbyInputLenFileName, sizeof(arbyInputLenFileName), "%s.len", pszInputFileName);
    printf("InputLenFileName:%s\n", arbyInputLenFileName);

    //Open the input bitstream file; ASSERT is non-fatal, so bail explicitly on failure
    FILE* pfInputFile = fopen(pszInputFileName, "rb");
    ASSERT(pfInputFile != NULL);
    if (pfInputFile == NULL)
    {
        return -1;
    }
    //Open the per-frame length file
    FILE* pfInputLenFile = fopen(arbyInputLenFileName, "rb");
    ASSERT(pfInputLenFile != NULL);
    if (pfInputLenFile == NULL)
    {
        fclose(pfInputFile);
        return -1;
    }
    //Open the YUV output file
    FILE* pfOutputFile = fopen(pszOutPutFileName, "wb");
    ASSERT(pfOutputFile != NULL);
    if (pfOutputFile == NULL)
    {
        fclose(pfInputFile);
        fclose(pfInputLenFile);
        return -1;
    }

    uint8_t* pbyInputbuf = new uint8_t[_READ_BUF_SIZE]; //input bitstream buffer
    memset(pbyInputbuf, 0, _READ_BUF_SIZE * sizeof(uint8_t));
    int* pbyPacketLen = new int[_FRAME_NUM]; //per-frame byte lengths
    memset(pbyPacketLen, 0, _FRAME_NUM * sizeof(int));

    //Slurp the whole bitstream into memory
    int dwRealBufSize = fread(pbyInputbuf, sizeof(uint8_t), _READ_BUF_SIZE, pfInputFile);
    printf("Read Buf Size:%d\n", dwRealBufSize);
    fclose(pfInputFile);
    pfInputFile = NULL;

    //Read the per-frame lengths; stop early if the file holds fewer than _FRAME_NUM
    int dwRealFrameNum = 0;
    for (; dwRealFrameNum < _FRAME_NUM; dwRealFrameNum++)
    {
        //BUGFIX: test fscanf's return value instead of feof(), which could
        //drop the final entry when EOF follows it immediately
        if (fscanf(pfInputLenFile, "%d", &pbyPacketLen[dwRealFrameNum]) != 1)
        {
            break;
        }
    }
    //BUGFIX: always close the length file (the original leaked it when the loop
    //ran to _FRAME_NUM, and did delete[] on a stack array on the break path)
    fclose(pfInputLenFile);
    pfInputLenFile = NULL;

    tPacket.data = pbyInputbuf;
    tPacket.size = pbyPacketLen[0];
    for (int dwIdex = 0; dwIdex < dwRealFrameNum; dwIdex++)
    {
        printf("Idex:%d, FrameLen:%d\n", dwIdex, pbyPacketLen[dwIdex]);
        av_log(NULL, AV_LOG_ERROR, "pbyInputbuf:%p,%p tPacket.data:%p, %p\n",
               pbyInputbuf, pbyInputbuf + dwRealBufSize,
               tPacket.data, tPacket.data + pbyPacketLen[dwIdex]);
        int dwGotPicture = 0;
        int dwRet = avcodec_decode_video2(ptCodecCtx, ptFrame, &dwGotPicture, &tPacket);
        if (dwRet < 0)
        {
            printf("decoder error:%d, FrameNum:%d\n", dwRet, dwIdex);
            goto NEXT;
        }
        if (dwGotPicture)
        {
            //BUGFIX: the original printf here was truncated (no arguments);
            //reconstructed from its format string
            printf("num:%d pixfmt:%d, w:%d, h:%d, line:%d,%d,%d\n",
                   dwIdex, ptFrame->format, ptFrame->width, ptFrame->height,
                   ptFrame->linesize[0], ptFrame->linesize[1], ptFrame->linesize[2]);
            //Decoded picture is planar YUV420: Y, U, V live in separate planes.
            //Note width/height/pix_fmt are only populated after the first decode.
            uint8_t* byTempPtr = NULL;
            //Save Y plane
            byTempPtr = ptFrame->data[0];
            for (int dwHIdex = 0; dwHIdex < ptFrame->height; dwHIdex++)
            {
                //linesize (stride) may exceed width; only width bytes per row are valid
                fwrite(byTempPtr, 1, ptFrame->width, pfOutputFile);
                byTempPtr += ptFrame->linesize[0];
            }
            //Save U plane (half width, half height)
            byTempPtr = ptFrame->data[1];
            for (int dwHIdex = 0; dwHIdex < ptFrame->height/2; dwHIdex++)
            {
                fwrite(byTempPtr, 1, ptFrame->width/2, pfOutputFile);
                byTempPtr += ptFrame->linesize[1];
            }
            //Save V plane
            byTempPtr = ptFrame->data[2];
            for (int dwHIdex = 0; dwHIdex < ptFrame->height/2; dwHIdex++)
            {
                fwrite(byTempPtr, 1, ptFrame->width/2, pfOutputFile);
                byTempPtr += ptFrame->linesize[2];
            }
        }
        else
        {
            printf("decoder failed:%d, FrameNum:%d\n", dwRet, dwIdex);
        }
NEXT:
        tPacket.data += pbyPacketLen[dwIdex];
        //BUGFIX: guard the lookahead so the last iteration does not read
        //one element past the end of pbyPacketLen
        tPacket.size = (dwIdex + 1 < dwRealFrameNum) ? pbyPacketLen[dwIdex + 1] : 0;
    }

    fclose(pfOutputFile);
    //BUGFIX: the original leaked both heap buffers
    delete [] pbyInputbuf;
    delete [] pbyPacketLen;
    av_frame_free(&ptFrame);
    avcodec_close(ptCodecCtx);
    av_free(ptCodecCtx);
    return 0;
}
另外需要说明的是解码后图像颜色空间为yuv420p,需要注意的是这里的y/u/v是分别保存在三个空间的。如果需要缩放解码后图像大小和改变其颜色空间,可以使用sws_scale实现