/*
 * FFmpeg decoding workflow:
 *  1. Register all container formats and codecs: avcodec_register_all();
 *  2. Find the matching decoder: avcodec_find_decoder(AV_CODEC_ID_MJPEG);
 *  3. Allocate the codec context: avcodec_alloc_context3(AVCodec *codec);
 *  4. For some codecs the allocated context must be initialized by hand,
 *     because certain parameters are not carried in the video stream;
 *  5. Open the codec: avcodec_open2(AVCodecContext *ctx, AVCodec *codec, NULL);
 *  6. Allocate memory for the decoded frame: AVFrame *frame = av_frame_alloc();
 *  7. Initialize the input packet structure: av_init_packet(AVPacket *packet);
 *  8. Put the compressed video data into the AVPacket:
 *       packet.data = inbuf      (buffer holding the compressed data);
 *       packet.size = inbufsize  (length of the compressed data);
 *  9. Decode: avcodec_decode_video2(AVCodecContext *ctx, AVFrame *frame,
 *     int *got, AVPacket *packet);
 * 10. Copy out the decoded frame. The output pixel format depends on the
 *     input: e.g. MJPEG from a USB camera decodes to YUV422P, H.264 decodes
 *     to YUV420P. Decoded data lives in the AVFrame buffers:
 *     frame->data[0] points to the Y plane, frame->data[1] to U,
 *     frame->data[2] to V; frame->linesize[0..2] are the per-row byte
 *     counts of the Y, U and V planes respectively.
 */
/* C standard library */
#include <inttypes.h>
#include <limits.h>
#include <math.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h> /* added: memcpy/memset used by the BMP helpers */
/* POSIX / Linux */
#include <fcntl.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <unistd.h>
#include <linux/fb.h>
#include <linux/fb.h>
/* FFmpeg */
#include <libavcodec/avfft.h>
#include <libavdevice/avdevice.h>
#include <libavformat/avformat.h>
#include <libavutil/avassert.h>
#include <libavutil/avstring.h>
#include <libavutil/dict.h>
#include <libavutil/imgutils.h>
#include <libavutil/mathematics.h>
#include <libavutil/opt.h>
#include <libavutil/parseutils.h>
#include <libavutil/pixdesc.h>
#include <libavutil/samplefmt.h>
#include <libavutil/time.h>
#include <libswresample/swresample.h>
#include <libswscale/swscale.h>
/* Command-line / runtime options.
 * NOTE(review): in the visible code only finput is referenced (by
 * av_dump_format in main), and the struct is never initialized before that
 * read — confirm intended usage against the rest of the file. */
struct options
{
int frames;        /* presumably a frame-count limit — not used in this chunk; verify */
int nodec;         /* presumably a "no decode" flag — not used in this chunk; verify */
int bplay;         /* presumably a playback flag — not used in this chunk; verify */
int thread_count;  /* presumably decoder thread count — not used in this chunk; verify */
int64_t lstart;    /* presumably a start timestamp — not used in this chunk; verify */
char finput[256];  /* input path; read (uninitialized) by av_dump_format in main */
};
/* Return the index of the first video stream in pCtx, or -1 if the
 * container holds no video stream. */
static int find_video_stream(AVFormatContext* pCtx)
{
    unsigned idx;

    for (idx = 0; idx < pCtx->nb_streams; idx++)
    {
        if (pCtx->streams[idx]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
            return (int)idx;
    }
    return -1;
}
//save the BMP ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#pragma pack(1) /* BMP headers are byte-packed on disk.
                 * NOTE(review): packing is never restored with #pragma pack()
                 * afterwards, so it stays in effect for the rest of the file. */
/* BMP magic number: the two bytes 'B','M'. */
typedef struct bmp_magic
{
unsigned char magic[2];
}magic_t;
/* BMP file header (follows the magic): 4 + 2 + 2 + 4 = 12 bytes. */
typedef struct bmp_header
{
unsigned int file_size; /* total file size in bytes: w * h * bytepp + 54 */
unsigned short creater1; /* reserved, must be 0 */
unsigned short creater2; /* reserved, must be 0 */
unsigned int offset; /* byte offset to pixel data: 54 (0x36) */
}header_t ;
/* BITMAPINFOHEADER: 40 bytes. */
typedef struct bmp_info
{
unsigned int header_size; /* size of this header in bytes: 40 (0x28) */
unsigned int width; /* image width in pixels */
unsigned int height; /* image height in pixels (positive = bottom-up rows) */
unsigned short nplanes; /* number of color planes, must be 1 */
unsigned short bitspp; /* bits per pixel: 24 (0x18) for BGR24 */
unsigned int compress_type; /* compression type, 0 = uncompressed (BI_RGB) */
unsigned int image_size; /* pixel data size in bytes: w * h * bytepp */
unsigned int hres; /* horizontal resolution, pixels per meter (0 = unset) */
unsigned int vres; /* vertical resolution, pixels per meter (0 = unset) */
unsigned int ncolors; /* number of palette colors, 0 = default */
unsigned int nimpcolors; /* number of important colors, 0 = all */
}info_t;
/* Store a 16-bit value at p in little-endian byte order. */
static void put_le16(unsigned char *p, unsigned v)
{
    p[0] = (unsigned char)(v & 0xff);
    p[1] = (unsigned char)((v >> 8) & 0xff);
}

/* Store a 32-bit value at p in little-endian byte order. */
static void put_le32(unsigned char *p, unsigned v)
{
    p[0] = (unsigned char)(v & 0xff);
    p[1] = (unsigned char)((v >> 8) & 0xff);
    p[2] = (unsigned char)((v >> 16) & 0xff);
    p[3] = (unsigned char)((v >> 24) & 0xff);
}

/*
 * Fill a 54-byte BMP header (BITMAPFILEHEADER + BITMAPINFOHEADER) for an
 * uncompressed image of w x h pixels with bytepp bytes per pixel.
 *
 * head must point to at least 54 writable bytes (caller owns the buffer).
 * Returns 0 on success, -1 if head is NULL.
 *
 * Fields are written byte-by-byte in little-endian order as the BMP format
 * requires, so the output no longer depends on host endianness or on the
 * #pragma pack(1) struct layout (the previous memcpy-of-structs approach
 * did). Height is written as a positive value, i.e. bottom-up row order;
 * pixel rows written top-down will appear vertically flipped.
 */
int gen_bmp_header(unsigned char *head, unsigned w, unsigned h, unsigned bytepp)
{
    if (head == NULL)
        return -1;

    unsigned image_size = w * h * bytepp;

    /* BITMAPFILEHEADER (14 bytes) */
    head[0] = 'B';
    head[1] = 'M';
    put_le32(head + 2, image_size + 54); /* total file size */
    put_le16(head + 6, 0);               /* reserved */
    put_le16(head + 8, 0);               /* reserved */
    put_le32(head + 10, 54);             /* offset to pixel data */

    /* BITMAPINFOHEADER (40 bytes) */
    put_le32(head + 14, 40);             /* info-header size */
    put_le32(head + 18, w);              /* width in pixels */
    put_le32(head + 22, h);              /* height (positive = bottom-up) */
    put_le16(head + 26, 1);              /* color planes, must be 1 */
    put_le16(head + 28, bytepp * 8);     /* bits per pixel */
    put_le32(head + 30, 0);              /* BI_RGB, uncompressed */
    put_le32(head + 34, image_size);     /* pixel data size */
    put_le32(head + 38, 0);              /* horizontal resolution */
    put_le32(head + 42, 0);              /* vertical resolution */
    put_le32(head + 46, 0);              /* palette colors */
    put_le32(head + 50, 0);              /* important colors */
    return 0;
}
/*
 * Write the packed pixel data in pFrameRGB->data[0] as an uncompressed BMP
 * named "./<index>.bmp". width/height are the image dimensions in pixels,
 * bpp the bits per pixel (24 for BGR24).
 *
 * Fixes vs. the original: the output name now honors the index parameter
 * (it was hardcoded to "./10.bmp"; the only visible caller passes 10, so
 * behavior for the existing call is unchanged), the file is opened in
 * binary mode ("wb" instead of "w+"), fwrite failures are reported instead
 * of silently ignored, and the unused local was removed.
 *
 * NOTE(review): BMP rows must be padded to 4-byte multiples; this writes
 * raw rows, which is only correct when width*bpp/8 is already a multiple
 * of 4 (true for the 640-pixel-wide frames used here) — confirm for other
 * capture sizes.
 */
static void saveBitMap(AVFrame *pFrameRGB, int width, int height, int index, int bpp)
{
    unsigned char head[54];
    char path[64];
    FILE *fp;

    memset(head, 0, sizeof head);
    if (gen_bmp_header(head, (unsigned)width, (unsigned)height,
                       (unsigned)(bpp / 8)) < 0)
        return;

    snprintf(path, sizeof path, "./%d.bmp", index);
    fp = fopen(path, "wb");
    if (fp == NULL)
    {
        av_log(NULL, AV_LOG_INFO, "fopen BMP error\n");
        return;
    }
    if (fwrite(head, sizeof head, 1, fp) != 1 ||
        fwrite(pFrameRGB->data[0], (size_t)width * height * (bpp / 8), 1, fp) != 1)
    {
        av_log(NULL, AV_LOG_INFO, "fwrite BMP error\n");
    }
    fclose(fp);
}
//save the BMP ---------------------------------------------------
int main(int argc, char **argv)
{
AVCodecContext *pCodecCtxVideo = 0;//video
AVCodec *pCodecVideo = 0;//video
AVPacket packet;
AVFrame *pFrameVideo = 0;//video
AVFrame *pFrameRGB = 0;//save the RGB
AVDictionary *options = NULL;
FILE *fpo1 = NULL;
FILE *fpo2 = NULL;
int nframe;
int err;
struct options opt;
int got_picture;
int picwidth, picheight, linesize;
unsigned char *pBuf;
int i;
int64_t timestamp;
int usefo = 0;
int dusecs;
float usecs1 = 0;
float usecs2 = 0;
struct timeval elapsed1, elapsed2;
int decoded = 0;
int ret = -1;
int videoStream = -1;
av_register_all();
avdevice_register_all();
AVInputFormat *inputFormat = av_find_input_format("v4l2");
AVFormatContext *pCtx=avformat_alloc_context();
av_dict_set(&options,"video_size","640*480",0);
err = avformat_open_input(&pCtx, "/dev/video0", NULL, &options);
if (err < 0)
{
printf("\n->(avformat_open_input)\tERROR:\t%d\n", err);
return -1;
}
printf("=========================\n");
err = avformat_find_stream_info(pCtx, 0);
if (err < 0)
{
printf("\n->(avformat_find_stream_info)\tERROR:\t%d\n", err);
return -1;
}
av_dump_format(pCtx, 0, opt.finput, 0);
#if 1
// ************************VIDEO**********************************//
// check the video stream
videoStream = find_video_stream(pCtx);
if (videoStream < 0)
{
printf("there is not video stream !!!!!!! \n");
return -1;
}
pCodecCtxVideo= pCtx->streams[videoStream]->codec;
pCodecVideo = avcodec_find_decoder(pCodecCtxVideo->codec_id);
if (!pCodecVideo)
{
printf("\ncan't find the video decoder!\n");
return -1;
}
//open videoDecoder
ret = avcodec_open2(pCodecCtxVideo, pCodecVideo, 0);
if (ret < 0)
{
printf("avcodec_open2 error(video) \n");
return -1;
}
pFrameVideo = av_frame_alloc();
pFrameRGB = av_frame_alloc();
//------------------------------------------------------------------------//
#endif
int tmp = 10;
//fb0 init
openframebuffer();
init_fbmmap();
while(av_read_frame(pCtx, &packet) >= 0)
{
#if 1
//found the video frame !!!
if (packet.stream_index == videoStream)
{
int got;
int i;
#if 1
avcodec_decode_video2(pCodecCtxVideo, pFrameVideo,&got_picture,&packet);
printf("pFrameVideo->width = %d\n", pFrameVideo->width);
printf("pFrameVideo->height = %d\n", pFrameVideo->height);
printf("pFrameVideo->linesize[0] = %d\n", pFrameVideo->linesize[0]);
printf("pFrameVideo->linesize[1] = %d\n", pFrameVideo->linesize[1]);
printf("pFrameVideo->linesize[2] = %d\n", pFrameVideo->linesize[2]);
#endif
//yuv420p -> rgb24
int PictureSize = avpicture_get_size (AV_PIX_FMT_BGR24, pCodecCtxVideo->width, pCodecCtxVideo->height);
void *buf = (uint8_t*)av_malloc(PictureSize);
avpicture_fill ( (AVPicture *)pFrameRGB, buf,AV_PIX_FMT_BGR24, pCodecCtxVideo->width, pCodecCtxVideo->height);
struct SwsContext* pSwsCxt = sws_getContext(pFrameVideo->width,pFrameVideo->height,pCodecCtxVideo->pix_fmt,
pFrameVideo->width, pFrameVideo->height,AV_PIX_FMT_BGR24, SWS_BILINEAR,NULL,NULL,NULL);
sws_scale(pSwsCxt,pFrameVideo->data,pFrameVideo->linesize,0,pFrameVideo->height,pFrameRGB->data, pFrameRGB->linesize);
av_log(NULL, AV_LOG_INFO, "pFrameRGB->linesize[0] = %d\n", pFrameRGB->linesize[0]);
//save the ARGB into BMP file
saveBitMap (pFrameRGB, pCodecCtxVideo->width, pCodecCtxVideo->height, 10, 24);
fb_drawbmp(height, width, pFrame->data[0]);
}
#endif
}
closefb();
return 0;