FFmpeg 264编码保存mp4文件示例

这里使用FFmpeg 3版本的新接口来做编码工作。其中需要注意的是编码是有缓存延迟的,因此在最后需要向编码器发送空帧进入冲刷(drain)模式,并不断读取编码后的数据包来完成最后的缓存输出。
参考:
https://blog.csdn.net/hb707934728/article/details/81476756
https://blog.csdn.net/dangxw_/article/details/50974677

示例代码:

encoder_work::encoder_work()
{
	// Zero every member so uninit()/process() can safely test pointers
	// before init() has run.  Real setup happens in init().
	mWidth = 0;
	mHeight = 0;
	mFPS = 0;
	mYSize = 0;
	mUVSize = 0;
	mPTS = 0;
	mFmtCtx = NULL;
	mEncCtx = NULL;
	mYUVFrm = NULL;
	mEncoder = NULL;
	// Bug fix: mOutStm was previously left uninitialized, so any use of
	// the object before a successful init() read a garbage pointer.
	mOutStm = NULL;
}


int encoder_work::init(int w, int h, int fps, int bit_rate, char *outfile_name)
{
	// Set up the H.264 encoder, the MP4 muxer context, the output video
	// stream and the reusable YUV frame, then write the container header.
	// Returns 0 on success, -1 on any failure.
	printf("encoder work init========>\n");
	uninit();

	mWidth = w;
	mHeight = h;
	mFPS = fps;
	mYSize = w*h;
	mUVSize = mYSize / 4;   // 4:2:0 -> each chroma plane is a quarter of luma
	
	//register file container formats (mp4 and others)
	av_register_all();
	//register all codecs
	avcodec_register_all();
	
	//1 create encoder
	mEncoder = avcodec_find_encoder(AV_CODEC_ID_H264);
	if (!mEncoder)
	{
		cout << " avcodec_find_encoder AV_CODEC_ID_H264 failed!" << endl;
		return -1;
	}
	//get encoder context
	mEncCtx = avcodec_alloc_context3(mEncoder);
	if (!mEncCtx)
	{
		cout << " avcodec_alloc_context3 for encoder contx failed!" << endl;
		return -1;
	}
	//set encoder params
	//bit rate
	mEncCtx->bit_rate = bit_rate;
	
	mEncCtx->width = mWidth;
	mEncCtx->height = mHeight;
	mEncCtx->time_base = { 1,mFPS };
	
	//set gop size, i.e. the I-frame interval
	mEncCtx->gop_size = 50;
	
	mEncCtx->max_b_frames = 0;           // no B-frames: keeps latency low and pts==dts
	mEncCtx->pix_fmt = AV_PIX_FMT_YUV420P;
	mEncCtx->codec_id = AV_CODEC_ID_H264;
	mEncCtx->thread_count = 4;
	mEncCtx->qmin = 10;                  // x264 quantizer range
	mEncCtx->qmax = 51;
	mEncCtx->qcompress  = 0.6;

	//av_opt_set(mEncCtx->priv_data, "preset", "ultrafast", 0);
	av_opt_set(mEncCtx->priv_data, "tune", "zerolatency", 0);
	
	// MP4 requires extradata (SPS/PPS) in the global header rather than
	// inlined in the bitstream.
	mEncCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
	
	//open encoder
	int ret = avcodec_open2(mEncCtx,mEncoder,NULL);
	if (ret < 0)
	{
		cout << " avcodec_open2 encoder failed!" << endl;
		return -1;
	}
	cout << "avcodec_open2 encoder success!" << endl;
	
	//2 create output context (format guessed from the file extension)
	ret = avformat_alloc_output_context2(&mFmtCtx, 0, 0, outfile_name);
	if (ret < 0 || !mFmtCtx)
	{
		cout << " avformat_alloc_output_context2 failed!" << endl;
		return -1;
	}
	
	//3 add video stream
	mOutStm = avformat_new_stream(mFmtCtx,NULL);
	if (!mOutStm)
	{
		cout << " avformat_new_stream failed!" << endl;
		return -1;
	}
	mOutStm->id = 0;
	mOutStm->codecpar->codec_tag = 0;
	avcodec_parameters_from_context(mOutStm->codecpar,mEncCtx);
	
	cout << "===============================================" << endl;
	av_dump_format(mFmtCtx, 0, outfile_name, 1);
	cout << "===============================================" << endl;
	
	//4 alloc the reusable output YUV frame
	mYUVFrm = av_frame_alloc();
	mYUVFrm->format = AV_PIX_FMT_YUV420P;
	mYUVFrm->width = mWidth;
	mYUVFrm->height = mHeight;
	//alloc frame buffer (32-byte aligned rows; linesize may exceed width)
	ret = av_frame_get_buffer(mYUVFrm,32);
	
	if (ret < 0)
	{
		av_frame_free(&mYUVFrm);
		mYUVFrm = NULL;
		cout << " av_frame_get_buffer  failed!" << endl;
		return -1;
	}
	
	//5 open the output file and write the mp4 header
	ret = avio_open(&mFmtCtx->pb,outfile_name,AVIO_FLAG_WRITE);
	if (ret < 0)
	{
		cout << " avio_open  failed!" << endl;
		return -1;
	}
	ret = avformat_write_header(mFmtCtx, NULL);
	if (ret < 0)
	{
		cout << " avformat_write_header  failed!" << endl;
		return -1;
	}

	// Bug fix: the original fell off the end without returning a value
	// (undefined behavior for a non-void function).
	return 0;
}

int encoder_work::uninit()
{
	printf("encoder_work uninit-----\n");
	if(mFmtCtx)
	{
		//wirte file trailer
		av_write_trailer(mFmtCtx);
		
		//close file IO
		avio_close(mFmtCtx->pb);

		//clean context
		avformat_free_context(mFmtCtx);

		mFmtCtx = NULL;
	}
	
	if(mEncCtx)
	{
		//close encoder
		avcodec_close(mEncCtx);
		
		//clean encoder contex
		avcodec_free_context(&mEncCtx);

		mEncCtx = NULL;
	}

	if(mYUVFrm)
	{
		av_frame_free(&mYUVFrm);
		mYUVFrm = NULL;
	}

	mWidth = 0;
	mHeight = 0;
	mFPS = 0;
	mYSize = 0;
	mUVSize = 0;
	mPTS = 0;

	return 0;
}

int encoder_work::process(cv::Mat &rgb)
{
	// Convert the packed BGR image to planar I420 and hand the raw bytes
	// to the YUV overload, which does the actual encoding.
	cv::Mat i420;
	cv::cvtColor(rgb, i420, cv::COLOR_BGR2YUV_I420);
	return process(i420.data);
}

int encoder_work::process(unsigned char *yuvData)
{
	if(!mYUVFrm)
	{
		printf("not ready env\n");
		return -1;
	}

	memcpy(mYUVFrm->data[0], yuvData, mYSize);
	memcpy(mYUVFrm->data[1], yuvData+mYSize, mUVSize);
	memcpy(mYUVFrm->data[2], yuvData+mYSize+mUVSize, mUVSize);
	mYUVFrm->pts = mPTS;
	mPTS = mPTS + 4000;
	//mPTS = mPTS + 1000000;
	//send to encoder
	int ret = avcodec_send_frame(mEncCtx, mYUVFrm);
	if (ret != 0)
	{
		char err[64] = {0};
		av_strerror(ret, err, 64);
		printf("encoder send frame %d err:%s\n",mPTS-4000, err);
		return -1;
	}
	AVPacket pkt;
	av_init_packet(&pkt);
	//receive from encoder
	//note that frame will not be received immediately
	ret = avcodec_receive_packet(mEncCtx,&pkt);
	if (ret != 0)
	{
		printf("encoder recieve frame %d err\n",mPTS-4000);
		return -1;
	}
	//write encoded frame to file
	av_interleaved_write_frame(mFmtCtx,&pkt);
	av_free_packet(&pkt);
	return 0;
}

int encoder_work::flushOut()
{
	//as frames will be hysteresis, we need flush all remained frames
	AVPacket pkt;
	av_init_packet(&pkt);
	int ret = 0;
	while(ret >= 0)
	{
		//flush out rest frames, send NULL frame data
		avcodec_send_frame(mEncCtx, NULL);
		//receive frame from encoder
		ret = avcodec_receive_packet(mEncCtx,&pkt);
		if (ret != 0)
		{
			printf("encoder recieve frame %d err\n",mPTS-4000);
			break;
		}
		//wirte encoded frame to file
		av_interleaved_write_frame(mFmtCtx,&pkt);
		av_free_packet(&pkt);
	}
	return 0;
}

头文件定义

// Encodes raw I420 (or OpenCV BGR) frames to H.264 and muxes them into an
// MP4 file.  Typical usage: init() -> process() per frame -> flushOut() ->
// uninit() (also run by the destructor).
class encoder_work
{
public:
	encoder_work();
	~encoder_work() { uninit(); }

	// Open the encoder and the output file; returns 0 on success, -1 on error.
	int init(int w, int h, int fps, int bit_rate, char *outfile_name);
	// Release all FFmpeg resources and reset state; safe to call repeatedly.
	int uninit();
	// Encode one tightly-packed I420 frame (Y then U then V planes).
	int process(unsigned char *yuvData);
	// Convert a BGR cv::Mat to I420 and encode it via the overload above.
	int process(cv::Mat &rgb);
	// Drain the encoder's buffered frames into the file before closing.
	int flushOut();

private:
	int mWidth;              // frame width in pixels
	int mHeight;             // frame height in pixels
	int mFPS;                // frames per second (encoder time_base = 1/mFPS)
	int mYSize;              // luma plane size in bytes (w*h)
	int mUVSize;             // each chroma plane size in bytes (w*h/4)
	int mPTS;                // running presentation timestamp of the next frame
	AVCodec *mEncoder;       // H.264 encoder descriptor (not owned)
	AVCodecContext *mEncCtx; // encoder context (owned, freed in uninit)
	AVFormatContext *mFmtCtx;// MP4 muxer context (owned, freed in uninit)
	AVStream *mOutStm;       // output video stream (owned by mFmtCtx)
	AVFrame *mYUVFrm;        // reusable frame buffer handed to the encoder
};

涉及到的一些头文件:

#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libavutil/opt.h"
要将 YUV 数据编码为视频文件,可以使用 ffmpeg 库提供的 API。以下是一个简单的示例代码: ```c #include <stdio.h> #include <stdlib.h> #include <string.h> #include <libavutil/imgutils.h> #include <libavutil/opt.h> #include <libavcodec/avcodec.h> #include <libavformat/avformat.h> int main(int argc, char *argv[]) { int ret = 0; AVFormatContext *format_ctx = NULL; AVOutputFormat *output_fmt = NULL; AVStream *video_stream = NULL; AVCodec *codec = NULL; AVCodecContext *codec_ctx = NULL; AVPacket pkt = { 0 }; int video_frame_count = 0; int video_width = 640; int video_height = 480; int video_fps = 25; const char *output_filename = "output.mp4"; // 初始化 FFmpeg 库 av_register_all(); // 打开输出文件 ret = avformat_alloc_output_context2(&format_ctx, NULL, NULL, output_filename); if (ret < 0) { fprintf(stderr, "Failed to allocate output format context: %s\n", av_err2str(ret)); return ret; } output_fmt = format_ctx->oformat; // 添加视频流 codec = avcodec_find_encoder(output_fmt->video_codec); if (!codec) { fprintf(stderr, "Failed to find video encoder\n"); ret = AVERROR(EINVAL); goto end; } video_stream = avformat_new_stream(format_ctx, codec); if (!video_stream) { fprintf(stderr, "Failed to create video stream\n"); ret = AVERROR(EINVAL); goto end; } codec_ctx = avcodec_alloc_context3(codec); if (!codec_ctx) { fprintf(stderr, "Failed to allocate codec context\n"); ret = AVERROR(ENOMEM); goto end; } codec_ctx->width = video_width; codec_ctx->height = video_height; codec_ctx->pix_fmt = AV_PIX_FMT_YUV420P; codec_ctx->time_base = (AVRational) { 1, video_fps }; codec_ctx->framerate = (AVRational) { video_fps, 1 }; codec_ctx->gop_size = 10; codec_ctx->max_b_frames = 1; codec_ctx->bit_rate = 400000; codec_ctx->rc_min_rate = codec_ctx->rc_max_rate = codec_ctx->bit_rate; codec_ctx->sample_aspect_ratio = (AVRational) { 1, 1 }; if (format_ctx->oformat->flags & AVFMT_GLOBALHEADER) codec_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; ret = avcodec_open2(codec_ctx, codec, NULL); if (ret < 0) { fprintf(stderr, "Failed to open video encoder: 
%s\n", av_err2str(ret)); goto end; } ret = avcodec_parameters_from_context(video_stream->codecpar, codec_ctx); if (ret < 0) { fprintf(stderr, "Failed to copy codec parameters: %s\n", av_err2str(ret)); goto end; } av_dump_format(format_ctx, 0, output_filename, 1); // 打开输出文件文件头 ret = avio_open(&format_ctx->pb, output_filename, AVIO_FLAG_WRITE); if (ret < 0) { fprintf(stderr, "Failed to open output file: %s\n", av_err2str(ret)); goto end; } ret = avformat_write_header(format_ctx, NULL); if (ret < 0) { fprintf(stderr, "Failed to write file header: %s\n", av_err2str(ret)); goto end; } // 编码 YUV 数据并文件 int y_size = video_width * video_height; int uv_size = y_size / 4; uint8_t *y_data = (uint8_t *) malloc(y_size); uint8_t *u_data = (uint8_t *) malloc(uv_size); uint8_t *v_data = (uint8_t *) malloc(uv_size); for (int i = 0; i < 300; i++) { // 生成测试数据 for (int j = 0; j < y_size; j++) { y_data[j] = i % 255; } for (int j = 0; j < uv_size; j++) { u_data[j] = (i + j) % 255; v_data[j] = (i + j + uv_size) % 255; } // 将 YUV 数据填充到 AVFrame 中 AVFrame *frame = av_frame_alloc(); frame->width = video_width; frame->height = video_height; frame->format = AV_PIX_FMT_YUV420P; av_image_fill_arrays(frame->data, frame->linesize, y_data, video_width, u_data, video_width / 2, v_data, video_width / 2, AV_PIX_FMT_YUV420P, 1); // 编码 AVFrame 并文件 ret = avcodec_send_frame(codec_ctx, frame); if (ret < 0) { fprintf(stderr, "Error sending frame: %s\n", av_err2str(ret)); av_frame_free(&frame); goto end; } while (ret >= 0) { ret = avcodec_receive_packet(codec_ctx, &pkt); if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) { break; } else if (ret < 0) { fprintf(stderr, "Error receiving packet: %s\n", av_err2str(ret)); goto end; } pkt.stream_index = video_stream->index; av_packet_rescale_ts(&pkt, codec_ctx->time_base, video_stream->time_base); pkt.pts = video_frame_count * video_stream->time_base.den / (video_stream->time_base.num * video_fps); pkt.dts = pkt.pts; pkt.duration = video_stream->time_base.den / 
(video_stream->time_base.num * video_fps); pkt.pos = -1; av_interleaved_write_frame(format_ctx, &pkt); av_packet_unref(&pkt); } av_frame_free(&frame); video_frame_count++; } // 文件尾并释放资源 av_write_trailer(format_ctx); end: if (codec_ctx) { avcodec_free_context(&codec_ctx); } if (format_ctx) { if (format_ctx->pb) { avio_closep(&format_ctx->pb); } avformat_free_context(format_ctx); } if (ret < 0) { fprintf(stderr, "Error occurred: %s\n", av_err2str(ret)); return 1; } return 0; } ``` 在这个示例代码中,我们首先使用 avformat_alloc_output_context2 函数创建一个 AVFormatContext 对象,然后使用 avformat_new_stream 函数为该对象添加一个视频流。接着,我们使用 AVCodecContext 结构体设置了视频流的一些参数,例如分辨率、帧率、编码格式等。然后,我们使用 avcodec_open2 函数打开了该编码器,并使用 avcodec_parameters_from_context 函数将编码器参数复制到视频流的 codecpar 结构体中。最后,我们打开输出文件文件头,然后循环编码 YUV 数据并将编码后的数据文件中,最后文件尾并释放资源。 需要注意的是,在编码每一帧数据时,我们需要将 AVFrame 转换成编码器所需的格式,并使用 avcodec_send_frame 函数发送该 AVFrame,然后使用 avcodec_receive_packet 函数接收编码编码后的数据。在将编码后的数据文件前,需要将编码后的时间戳 PTS 和 DTS 进行适当的转换和设置。
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值