概要
由于CPU对齐及优化等原因,frame的linesize[0]的实际宽度可能大于理论宽度,所以需要使用sws_scale函数进行转换。函数作用如下:
1.图像色彩空间转换,如:YUV420P->YUV422/RGB24
2.分辨率缩放/切换
3.前后图像滤波
整体架构流程
代码
av_register_all();
AVFormatContext *inpFormatCtx = NULL;
const char* inFilename = "/home/16THDD/xieyingbo/xieyingbo/big_buck_bunny.mp4";
const char* outFilename = "/home/16THDD/xieyingbo/xieyingbo/out.yuv";
int destwidth = 0, heightwidth = 0;
//1.打开流媒体文件
int ret = avformat_open_input(&inpFormatCtx, inFilename, NULL, NULL);
if(ret != 0)
{
printf("%s\n", "open input format error");
return;
}
//2.过去输入流信息
ret = avformat_find_stream_info(inpFormatCtx, NULL);
if(ret < 0)
{
printf("%s\n", "find stream error");
return;
}
//3.获取视频流文件
int videoIndex = av_find_best_stream(inpFormatCtx, AVMEDIA_TYPE_VIDEO, -1,-1, NULL, 0);
if(videoIndex < 0)
{
printf("%s\n", "find best stream error");
return;
}
//4.创建解码器上下文
AVCodeContext *codecCtx = avcodec_alloc_context3(NULL);
if(codecCtx == NULL)
{
printf("%s\n", "avcoec alloc codecCtx error");
return;
}
//5.获取输入的解码器参数
ret = avcodec_parameters_to_context(codecCtx, inpFormatCtx->streams[videoIndex]->codecpar);
if(ret < 0)
{
printf("%s\n", "avcoec parameter to codecCtx error");
return;
}
//6.找到当前解码器
AVCodec *avcodec = avcodec_find_decoder(codecCtx->codec_id);
if(avcodec == NULL)
{
printf("%s\n", "avcoec find decoder error");
return;
}
//7.打开当前解码器
ret = avcodec_open2(codecCtx, avcodec, NULL);
if(ret != 0)
{
printf("%s\n", "avcoec open2 error");
return;
}
//8.获取输入文件分辨率
destwidth = 640, destheight = 360;
//9.初始化视频格式转换上下文
#if 1
//修改分辨率为640 X 360的yuv420格式
enum AVPixelFormat destFormat = codecCtx->pix_fmt; //输出的颜色空间格式
SwsContext *swsCtx = sws_getContext(destwidth, destheight, destFormat, 640, 360, destFormat, SWS_FAST_BILINEAR, NULL, NULL, NULL);
//输入宽,输入高,输入类型,输出宽,输出高,输出类型,使用图像拉伸缩放算法,输入滤波算法,输出滤波算法
#else
//修改分辨率为640 X 360的RGB24格式
enum AVPixelFormat destFormat = AV_PIX_FMT_RGB24;
SwsContext *swsCtx = sws_getContext(destwidth, destheight, destFormat, 1920, 1080, destFormat, SWS_FAST_BILINEAR, NULL, NULL, NULL);
#endif
//10.创建出一块内存空间,用来保存转换后的数据
AVFrame *destframe = av_frame_alloc();
//11.获取转换后的空间大小
int size = av_image_get_buffer_size(codecCtx->pix_fmt, destwideth, destheight, 1);
//12.申请内存空间
uint8_t *outBuffer[4];
int linesize[4];
ret = av_image_alloc(outBuffer, linesize, destwidth, destheight, codecCtx->pix_fmt, 1);
if(ret < 0)
{
printf("%s\n", "av_image_alloc error");
return;
}
//13.按照avframe的格式重新分配到创建的内存空间destframe中(格式化)
ret = av_image_copy_to_buffer(outBUffer[0], linesize[0], destframe->data, destframe->linesize,codecCtx->pix_fmt, destwidth, destheight, 1);
if(ret < 0)
{
printf("%s\n", "av_image_copy_to_buffer error");
return;
}
FILE *dst_fp = fopen(outFIlename, "wb+");
if(dst_fp == NULL)
{
printf("%s\n", "open file outFIlename error");
return;
}
//14.循环读取源文件的帧数据
AVPacket packet;
av_init_packet(&packet);
AVFrame *frame = av_frame_alloc();
while(av_read_frame(inpFormatCtx >= 0))
{
if(packet.stream_index == videoIndex)
{
//15.发送packet到解码器
ret = avcodec_send_packet(codecCtx, &packet);
if(ret != 0)
{
printf("%s\n", "avcodec_send_packet error");
av_packet_unref(&packet);
return;
}
av_packet_unref(&packet);
//16.解码器回收解码后的数据
while(avcodec_receive_frame(codecCtx, frame) == 0)
{
sws_scale(swsCtx, frame->data, frame->linesize, 0, codecCtx->height. destframe->data, destframe->linesize);
#if 1
//17.存储数据(YUV420P)
fwirte(destframe->data[0], 1, destwidth *destheight, dst_fp); //存储Y
fwirte(destframe->data[1], 1, destwidth *destheight /4, dst_fp); //存储U
fwirte(destframe->data[2], 1, destwidth *destheight /4, dst_fp); //存储V
#else
//rgb24以打包的方式存在data[0]里面
fwrite(destframe->data[0], 1, destwidth * destheight * 3, dst_fp);
//Qt上画出
QImage image(destframe->data[0], destwidth, destheight, Format_RGB888);
emit GetImage(image,head.type); //发出信号
#endif
}
av_packet_unref(&packet);
}
}
小结
在转为RGB24后,可以在Qt中接收到信号,并绘制在UI界面中。