前面大概地介绍了图像格式和不同格式之间的转换、h264的一些知识,以及FFmpeg编码和解码的基本流程。现在分别为编码和解码给出一个例子。例子参考了FFmpeg自带的output_example.cpp,以及前面介绍中提到的国外作者写的一个示例。不说那么多了,上代码。
编码:
#ifdef __cplusplus
#define __STDC_CONSTANT_MACROS
#ifdef _STDINT_H
#undef _STDINT_H
#endif
# include <stdint.h>
#endif
extern "C"
{
#include<libavcodec/avcodec.h>
#include<libavformat/avformat.h>
#include<libswscale/swscale.h>
}
#include <iostream>
#include<stdio.h>
#include<string.h>
#include<stdlib.h>
using namespace std;
// Encoding parameters shared by the helper functions below.
int g_width = 352;                          // frame width  (CIF)
int g_height = 288;                         // frame height (CIF)
int g_video_outbuff_size;                   // size of g_video_outbuff in bytes; set elsewhere before encoding
uint8_t* g_video_outbuff = NULL;            // scratch buffer receiving encoded packet data
AVPixelFormat g_pix_fmt = AV_PIX_FMT_YUV420P;  // pixel format fed to the encoder
// Create a new video stream on fmt_ctx and configure its codec context:
// CIF resolution, 400 kbit/s, 25 fps, YUV420P, GOP of 3.
// Exits the process on allocation failure. The returned stream is owned
// by fmt_ctx (freed with avformat_free_context).
AVStream* getVideoStream(AVFormatContext* fmt_ctx)
{
    AVStream* stream = avformat_new_stream(fmt_ctx, NULL);
    if (stream == NULL) {
        fprintf(stderr, "new stream fail\n");
        exit(1);
    }

    AVCodecContext* codec_ctx = stream->codec;
    codec_ctx->codec_id   = fmt_ctx->oformat->video_codec;  // default encoder of the muxer
    codec_ctx->codec_type = AVMEDIA_TYPE_VIDEO;
    codec_ctx->bit_rate   = 400000;
    codec_ctx->gop_size   = 3;            // at most 3 frames between key frames
    codec_ctx->pix_fmt    = g_pix_fmt;
    codec_ctx->width      = g_width;
    codec_ctx->height     = g_height;
    codec_ctx->time_base.num = 1;         // 25 fps
    codec_ctx->time_base.den = 25;
    codec_ctx->me_range   = 16;           // motion-estimation search range
    codec_ctx->max_qdiff  = 4;            // max quantizer delta between frames
    codec_ctx->qmin       = 10;           // quantizer bounds
    codec_ctx->qmax       = 51;
    codec_ctx->qcompress  = 0.6;

    if (codec_ctx->codec_id == CODEC_ID_MPEG2VIDEO)
        codec_ctx->max_b_frames = 2;      // MPEG-2 benefits from B frames
    if (codec_ctx->codec_id == CODEC_ID_MPEG1VIDEO)
        codec_ctx->mb_decision = 2;       // avoid macroblocks that overflow

    /* Some formats want stream headers to be separate. Checking the muxer's
     * AVFMT_GLOBALHEADER flag covers every such container (mp4, mov, 3gp,
     * mkv, flv, ...) instead of hard-coding a few names. */
    if (fmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
        codec_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;

    return stream;
}
// Look up the encoder matching the stream's codec id and open the codec
// context with it. Terminates the program if the encoder is unavailable
// or fails to open.
void initEncoder(AVStream* stream)
{
    AVCodecContext* ctx = stream->codec;

    AVCodec* codec = avcodec_find_encoder(ctx->codec_id);
    if (!codec) {
        fprintf(stderr, "cann't find the encoder\n");
        exit(1);
    }

    if (avcodec_open2(ctx, codec, NULL) < 0) {
        fprintf(stderr, "could not open video codec\n");
        exit(1);
    }
}
// Allocate an AVFrame backed by a buffer large enough for one
// g_width x g_height picture in g_pix_fmt. Exits the process on any
// allocation failure. Caller owns both the frame and its data buffer.
AVFrame* getAVFrame()
{
    // Allocate the frame first so the pixel buffer is not allocated
    // pointlessly when the frame allocation fails.
    AVFrame* frame = av_frame_alloc();
    if (frame == NULL) {
        fprintf(stderr, "alloc frame fail\n");
        exit(1);
    }

    int size = avpicture_get_size(g_pix_fmt, g_width, g_height);
    uint8_t* buff = (uint8_t*)av_malloc(size);
    if (buff == NULL) {
        fprintf(stderr, "av malloc fail\n");
        exit(1);
    }

    avpicture_fill((AVPicture*)frame, buff, g_pix_fmt, g_width, g_height);

    /* avpicture_fill only sets data/linesize; newer encode APIs
     * (avcodec_encode_video2 and later) require these fields too. */
    frame->format = g_pix_fmt;
    frame->width  = g_width;
    frame->height = g_height;

    return frame;
}
void writeFrame(AVFormatContext* fmt_ctx, AVStream* stream, AVFrame* frame)
{
int ret, out_size;
AVPacket packet;
AVCodecContext* codec_ctx = stream->codec;
if (fmt_ctx->oformat->flags & AVFMT_RAWPICTURE) {
/* raw video case. The API will change slightly in the near
futur for that