使用FFMPEG库将YUV编码为H264

准备

ffmpeg 4.4

准备一段yuv420p的格式的视频原始数据

这里我们使用命令直接提取

 ffmpeg -i .\beautlWorld.mp4 -pix_fmt yuv420p -s 1280x720 yuv420p_1280x720.yuv

编码流程

大致可以分为以下几步:

1.初始化编码器并设置参数

2.初始化AVPacket和AVFrame,设置参数

3.读取视频文件,进行编码

4.释放内存,结束

编码

以下是完整代码

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

extern "C"
{
#include <libavcodec/avcodec.h>
#include <libavutil/time.h>
#include <libavutil/opt.h>
#include <libavutil/imgutils.h>
}

using namespace std;

// Input: raw YUV420p frames, 1280x720 (produced by the ffmpeg command above).
const char* inFileName = "./yuv420p_1280x720.yuv";
// Output: raw H.264 Annex-B elementary stream.
const char* outFileName = "./encode_yuv420p_1280x720.h264";


/**
 * Encode one frame (or flush the encoder when frame == nullptr) and append
 * every resulting H.264 packet to outFile.
 *
 * @param codecContent opened encoder context
 * @param packet       reusable packet for receiving encoded data
 * @param frame        frame to encode, or nullptr to enter draining mode
 * @param outFile      destination file, opened in binary write mode
 * @return 0 on success / end of stream, -1 on error
 */
int encode(AVCodecContext* codecContent, AVPacket* packet, AVFrame* frame, FILE* outFile)
{
	// Submit the frame to the encoder (nullptr starts draining).
	int ret = avcodec_send_frame(codecContent, frame);
	if (ret < 0)
	{
		fprintf(stderr, "Error sending a frame for encoding\n");
		return -1;
	}

	// Drain every packet the encoder has ready.
	while (ret >= 0)
	{
		ret = avcodec_receive_packet(codecContent, packet);

		if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
			// Encoder needs more input, or is fully flushed — not an error.
			return 0;
		}
		else if (ret < 0) {
			fprintf(stderr, "Error encoding video frame\n");
			return -1;
		}

		fwrite(packet->data, 1, packet->size, outFile);
		// BUG FIX: release the packet's buffer before reusing it; the
		// original leaked one reference per received packet.
		av_packet_unref(packet);
	}

	// BUG FIX: the original could fall off the end of a non-void function (UB).
	return 0;
}



int main(int argc, char* argv[])
{

	int ret = 0;

	AVCodec* codec = nullptr;
	AVCodecContext* codecContent = nullptr;
	AVPacket* packet = nullptr;
	AVFrame* frame = nullptr;

	FILE* inFile = nullptr;
	FILE* outFile = nullptr;



	//查找指定编码器
	codec = avcodec_find_encoder(AV_CODEC_ID_H264);
	if (codec == nullptr)
	{
		printf("could not find h264 encoder!");
		return -1;
	}

	//申请编码器上下文
	codecContent = avcodec_alloc_context3(codec);
	if (codecContent == nullptr)
	{
		printf("could not alloc h264 content!");
		return -1;
	}

	//必设参数
	codecContent->width = 1280;
	codecContent->height = 720;
	codecContent->time_base = AVRational{ 1, 25 };


	codecContent->pix_fmt = AV_PIX_FMT_YUV420P;
	codecContent->gop_size = 60; //关键帧间隔,默认250
	codecContent->framerate = AVRational{ 25, 1 };

	//初始化编码器上下文
	ret = avcodec_open2(codecContent, codec, NULL);
	if (ret < 0) {
		fprintf(stderr, "Could not open codec: %d\n", ret);
		exit(1);
	}


	packet = av_packet_alloc();
	if (packet == nullptr)
	{
		printf("alloc packet error");
		return -1;
	}

	frame = av_frame_alloc();
	if (packet == nullptr)
	{
		printf("alloc frame error");
		return -1;
	}

	//必设参数
	frame->width = codecContent->width;
	frame->height = codecContent->height;
	frame->format = codecContent->pix_fmt;

	//设置该参数将导致视频全是I帧,忽略gop_size
	//frame->pict_type = AV_PICTURE_TYPE_I;


	//申请视频数据存储空间
	ret = av_frame_get_buffer(frame, 0);
	if (ret)
	{
		printf("alloc frame buffer error!");
		return -1;
	}

	inFile = fopen(inFileName, "rb");
	if (inFile == nullptr)
	{
		printf("error to open file: %s\n", inFileName);
		return -1;
	}

	outFile = fopen(outFileName, "wb");
	if (inFile == nullptr)
	{
		printf("error to open file: %s\n", outFileName);
		return -1;
	}

	//帧数记录
	int framecount = 0;
	
	frame->pts = 0;

	int start_time = av_gettime() / 1000; //毫秒级


	while (!feof(inFile))
	{
		ret = av_frame_is_writable(frame);
		if (ret < 0)
		{
			ret = av_frame_make_writable(frame);
		}


		fread(frame->data[0], 1, frame->width * frame->height, inFile); //y
		fread(frame->data[1], 1, frame->width * frame->height / 4, inFile); //u
		fread(frame->data[2], 1, frame->width * frame->height / 4, inFile);  //v

		printf("encode frame num: %d\n", ++framecount);


		frame->pts += 1000 / (codecContent->time_base.den / codecContent->time_base.num);
		encode(codecContent, packet, frame, outFile);

	}

	encode(codecContent, packet, nullptr, outFile);
	printf("encode time cost: %d ms\n ", av_gettime() / 1000 - start_time);

	av_packet_free(&packet);
	av_frame_free(&frame);
	avcodec_free_context(&codecContent);
	fclose(inFile);
	fclose(outFile);


	return 0;
}

小结

AVCodecContext

对于视频编码器而言必须设置以下参数:

AVRational time_base;

int width, height;

以下由用户设置,可选

gop_size: 默认250,也就是每250帧一个关键帧,可选设置

max_b_frames: 默认3, b帧最大连续个数

bit_rate: 平均比特率

avcodec_alloc_context3

AVCodecContext *avcodec_alloc_context3(const AVCodec *codec);

为AVCodecContext结构申请空间,并使用默认值初始化!最后使用avcodec_free_context进行释放。

avcodec_open2

int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options);

调用该函数前必须设置:time_base(也就是每帧显示的时间)、width(帧宽)、height(帧高)。

av_frame_get_buffer

int av_frame_get_buffer(AVFrame *frame, int align);

为音视频分配数据存储空间。调用该函数前需要设置AVFRAME几个初始值。

音频:1.sample format  2.nb_samples 3.channel_layout

视频:1.pixel format 2.width 3.height  

align:内存对齐方式,默认给0,由FFmpeg根据当前CPU自动选择合适的对齐值

CSDN下载地址:使用FFMPEG编码YUV420P成H264资源-CSDN文库

github地址:yunxiaobaobei/VideoEncode (github.com)

以下是将Mat YUV编码H264流的C++代码示例,使用FFmpeg: ```c++ #include <iostream> #include <fstream> #include <opencv2/opencv.hpp> #include <libavcodec/avcodec.h> #include <libavformat/avformat.h> #include <libavutil/imgutils.h> #include <libswscale/swscale.h> int main(int argc, char* argv[]) { // Check input arguments if (argc < 4) { std::cerr << "Usage: " << argv[0] << " input_file width height output_file" << std::endl; return 1; } // Initialize FFmpeg av_register_all(); // Open input file const char* input_file = argv[1]; int width = std::stoi(argv[2]); int height = std::stoi(argv[3]); cv::Mat yuv_image(height * 3 / 2, width, CV_8UC1); std::ifstream input_stream(input_file, std::ios::binary); if (!input_stream.is_open()) { std::cerr << "Failed to open input file" << std::endl; return 1; } // Open output file const char* output_file = argv[4]; AVFormatContext* format_context = nullptr; if (avformat_alloc_output_context2(&format_context, nullptr, nullptr, output_file) < 0) { std::cerr << "Failed to allocate output context" << std::endl; return 1; } // Open output stream AVOutputFormat* output_format = format_context->oformat; AVStream* stream = avformat_new_stream(format_context, nullptr); if (!stream) { std::cerr << "Failed to create output stream" << std::endl; return 1; } stream->id = format_context->nb_streams - 1; AVCodecContext* codec_context = stream->codec; codec_context->codec_id = output_format->video_codec; codec_context->codec_type = AVMEDIA_TYPE_VIDEO; codec_context->width = width; codec_context->height = height; codec_context->pix_fmt = AV_PIX_FMT_YUV420P; codec_context->time_base = { 1, 25 }; if (format_context->oformat->flags & AVFMT_GLOBALHEADER) { codec_context->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; } AVCodec* codec = avcodec_find_encoder(codec_context->codec_id); if (!codec) { std::cerr << "Failed to find encoder" << std::endl; return 1; } if (avcodec_open2(codec_context, codec, nullptr) < 0) { std::cerr << "Failed to open codec" << std::endl; return 1; } 
av_dump_format(format_context, 0, output_file, 1); if (!(output_format->flags & AVFMT_NOFILE)) { if (avio_open(&format_context->pb, output_file, AVIO_FLAG_WRITE) < 0) { std::cerr << "Failed to open output file" << std::endl; return 1; } } if (avformat_write_header(format_context, nullptr) < 0) { std::cerr << "Failed to write header" << std::endl; return 1; } // Initialize video converter struct SwsContext* converter = sws_getContext(width, height, AV_PIX_FMT_GRAY8, width, height, AV_PIX_FMT_YUV420P, 0, nullptr, nullptr, nullptr); if (!converter) { std::cerr << "Failed to create video converter" << std::endl; return 1; } // Encode frames AVFrame* frame = av_frame_alloc(); frame->width = width; frame->height = height; frame->format = codec_context->pix_fmt; if (av_frame_get_buffer(frame, 0) < 0) { std::cerr << "Failed to allocate frame buffer" << std::endl; return 1; } AVPacket packet = { 0 }; int frame_count = 0; while (input_stream.read(reinterpret_cast<char*>(yuv_image.data), yuv_image.total())) { // Convert YUV image to AVFrame sws_scale(converter, &yuv_image.data, &yuv_image.step, 0, height, frame->data, frame->linesize); // Encode frame frame->pts = frame_count++; int result = avcodec_send_frame(codec_context, frame); if (result < 0) { std::cerr << "Failed to send frame" << std::endl; return 1; } while (result >= 0) { result = avcodec_receive_packet(codec_context, &packet); if (result == AVERROR(EAGAIN) || result == AVERROR_EOF) { break; } if (result < 0) { std::cerr << "Failed to receive packet" << std::endl; return 1; } av_packet_rescale_ts(&packet, codec_context->time_base, stream->time_base); packet.stream_index = stream->index; if (av_interleaved_write_frame(format_context, &packet) < 0) { std::cerr << "Failed to write packet" << std::endl; return 1; } av_packet_unref(&packet); } } // Flush encoder int result = avcodec_send_frame(codec_context, nullptr); if (result < 0) { std::cerr << "Failed to send frame" << std::endl; return 1; } while (result >= 0) { 
result = avcodec_receive_packet(codec_context, &packet); if (result == AVERROR(EAGAIN) || result == AVERROR_EOF) { break; } if (result < 0) { std::cerr << "Failed to receive packet" << std::endl; return 1; } av_packet_rescale_ts(&packet, codec_context->time_base, stream->time_base); packet.stream_index = stream->index; if (av_interleaved_write_frame(format_context, &packet) < 0) { std::cerr << "Failed to write packet" << std::endl; return 1; } av_packet_unref(&packet); } // Close output file av_write_trailer(format_context); if (format_context && !(output_format->flags & AVFMT_NOFILE)) { avio_close(format_context->pb); } avcodec_close(codec_context); avformat_free_context(format_context); av_frame_free(&frame); sws_freeContext(converter); input_stream.close(); return 0; } ``` 请注意,此示例假设输入文件为YUV 4:2:0格式的二进制文件,其中Y,U和V平面以交替方式存储。如果您的输入格式不同,请根据需要进行修改。此外,此示例假设您已安装OpenCV和FFmpeg
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

破浪征程

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值