Encoding RGB Images to H.264 with FFmpeg

The code below is adapted from: https://stackoverflow.com/questions/2940671/how-does-one-encode-a-series-of-images-into-h264-using-the-x264-c-api

extern "C"
{
#include <libavcodec/avcodec.h>
#include <libavutil/imgutils.h>
#include <libavutil/opt.h>
#include <libswscale/swscale.h>
}


/* Global encoder state shared by the helper functions below. */
static AVCodecContext *c = NULL;
static AVFrame *frame;
static AVPacket pkt;
static FILE *file;
struct SwsContext *sws_context = NULL;

/* Convert one RGB24 input buffer into the YUV420P planes of the global frame. */
static void ffmpeg_encoder_set_frame_yuv_from_rgb(uint8_t *rgb) {
	const int in_linesize[1] = { 3 * c->width };
	sws_context = sws_getCachedContext(sws_context,
		c->width, c->height, AV_PIX_FMT_RGB24,
		c->width, c->height, AV_PIX_FMT_YUV420P,
		0, 0, 0, 0);
	sws_scale(sws_context, (const uint8_t * const *)&rgb, in_linesize, 0,
		c->height, frame->data, frame->linesize);
}

/* Generate a four-quadrant RGB test pattern that alternates every 25 frames. */
uint8_t* generate_rgb(int width, int height, int pts, uint8_t *rgb) {
	int x, y, cur;
	rgb = (uint8_t *)realloc(rgb, 3 * sizeof(uint8_t) * height * width);
	for (y = 0; y < height; y++) {
		for (x = 0; x < width; x++) {
			cur = 3 * (y * width + x);
			rgb[cur + 0] = 0;
			rgb[cur + 1] = 0;
			rgb[cur + 2] = 0;
			if ((pts / 25) % 2 == 0) {
				if (y < height / 2) {
					if (x < width / 2) {
						/* Black. */
					}
					else {
						rgb[cur + 0] = 255;
					}
				}
				else {
					if (x < width / 2) {
						rgb[cur + 1] = 255;
					}
					else {
						rgb[cur + 2] = 255;
					}
				}
			}
			else {
				if (y < height / 2) {
					rgb[cur + 0] = 255;
					if (x < width / 2) {
						rgb[cur + 1] = 255;
					}
					else {
						rgb[cur + 2] = 255;
					}
				}
				else {
					if (x < width / 2) {
						rgb[cur + 1] = 255;
						rgb[cur + 2] = 255;
					}
					else {
						rgb[cur + 0] = 255;
						rgb[cur + 1] = 255;
						rgb[cur + 2] = 255;
					}
				}
			}
		}
	}
	return rgb;
}

/* Allocate resources and write header data to the output file. */
void ffmpeg_encoder_start(const char *filename, int codec_id, int fps, int width, int height) {
	AVCodec *codec;
	int ret;

	codec = avcodec_find_encoder((AVCodecID)codec_id);
	if (!codec) {
		fprintf(stderr, "Codec not found\n");
		exit(1);
	}
	c = avcodec_alloc_context3(codec);
	if (!c) {
		fprintf(stderr, "Could not allocate video codec context\n");
		exit(1);
	}
	c->bit_rate = 400000;
	c->width = width;
	c->height = height;
	c->time_base.num = 1;
	c->time_base.den = fps;
	c->keyint_min = 600; /* minimum GOP size, i.e. minimum distance between keyframes */
	c->pix_fmt = AV_PIX_FMT_YUV420P;
	if (codec_id == AV_CODEC_ID_H264)
		av_opt_set(c->priv_data, "preset", "slow", 0);
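	/* Other libx264 private options can be set the same way, e.g.
	   av_opt_set(c->priv_data, "crf", "23", 0) for quality-based rate
	   control (the value 23 here is only illustrative). */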
	if (avcodec_open2(c, codec, NULL) < 0) {
		fprintf(stderr, "Could not open codec\n");
		exit(1);
	}
	file = fopen(filename, "wb");
	if (!file) {
		fprintf(stderr, "Could not open %s\n", filename);
		exit(1);
	}
	frame = av_frame_alloc();
	if (!frame) {
		fprintf(stderr, "Could not allocate video frame\n");
		exit(1);
	}
	frame->format = c->pix_fmt;
	frame->width = c->width;
	frame->height = c->height;
	ret = av_image_alloc(frame->data, frame->linesize, c->width, c->height, c->pix_fmt, 32);
	if (ret < 0) {
		fprintf(stderr, "Could not allocate raw picture buffer\n");
		exit(1);
	}
}

/*
Write trailing data to the output file
and free resources allocated by ffmpeg_encoder_start.
*/
void ffmpeg_encoder_finish(void) {
	/* MPEG sequence end code, as written by FFmpeg's official encoding example. */
	uint8_t endcode[] = { 0, 0, 1, 0xb7 };
	int got_output, ret;
	/* Encoding with frame == NULL drains the encoder's delayed frames. */
	do {
		fflush(stdout);
		ret = avcodec_encode_video2(c, &pkt, NULL, &got_output);
		if (ret < 0) {
			fprintf(stderr, "Error encoding frame\n");
			exit(1);
		}
		if (got_output) {
			fwrite(pkt.data, 1, pkt.size, file);
			av_packet_unref(&pkt);
		}
	} while (got_output);
	fwrite(endcode, 1, sizeof(endcode), file);
	fclose(file);
	avcodec_close(c);
	av_free(c);
	av_freep(&frame->data[0]);
	av_frame_free(&frame);
}

/*
Encode one frame from an RGB24 input and save it to the output file.
Must be called after ffmpeg_encoder_start, and ffmpeg_encoder_finish
must be called after the last call to this function.
*/
void ffmpeg_encoder_encode_frame(uint8_t *rgb) {
	int ret, got_output;
	ffmpeg_encoder_set_frame_yuv_from_rgb(rgb);
	av_init_packet(&pkt);
	pkt.data = NULL;
	pkt.size = 0;
	/* Force an early keyframe; all other frames are suggested as P-frames. */
	if (frame->pts == 1) {
		frame->key_frame = 1;
		frame->pict_type = AV_PICTURE_TYPE_I;
	}
	else {
		frame->key_frame = 0;
		frame->pict_type = AV_PICTURE_TYPE_P;
	}
	ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
	if (ret < 0) {
		fprintf(stderr, "Error encoding frame\n");
		exit(1);
	}
	if (got_output) {
		fwrite(pkt.data, 1, pkt.size, file);
		av_packet_unref(&pkt);
	}
}

/* Represents the main loop of an application which generates one frame per loop. */
static void encode_example(const char *filename, int codec_id) {
	int pts;
	int width = 320;
	int height = 240;
	uint8_t *rgb = NULL;
	ffmpeg_encoder_start(filename, codec_id, 25, width, height);
	for (pts = 0; pts < 100; pts++) {
		frame->pts = pts;
		rgb = generate_rgb(width, height, pts, rgb);
		ffmpeg_encoder_encode_frame(rgb);
	}
	ffmpeg_encoder_finish();
}

int main(void) {
	avcodec_register_all();
	encode_example("tmp.h264", AV_CODEC_ID_H264);
	encode_example("tmp.mpg", AV_CODEC_ID_MPEG1VIDEO);
	return 0;
}
```
Compiled with Visual Studio, the code above runs as-is. The outputs tmp.h264 and tmp.mpg are raw elementary streams (no container) and can be played with ffplay.
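Note that `avcodec_encode_video2` and `avcodec_register_all` are deprecated in FFmpeg 4.x and removed in FFmpeg 5, and `av_init_packet` is deprecated as well; the listing above targets the older releases shipped in the prebuilt Windows packages of that era. For newer FFmpeg versions, `ffmpeg_encoder_encode_frame` would use the send/receive API instead. A minimal sketch, assuming the same globals `c`, `frame`, and `file` as above:

```cpp
/* Sketch for FFmpeg 4+: replaces avcodec_encode_video2 with the
   send/receive API. Assumes the globals c, frame and file above. */
void ffmpeg_encoder_encode_frame(uint8_t *rgb) {
	ffmpeg_encoder_set_frame_yuv_from_rgb(rgb);
	AVPacket *pkt = av_packet_alloc();
	if (!pkt)
		exit(1);
	if (avcodec_send_frame(c, frame) < 0) {
		fprintf(stderr, "Error sending frame\n");
		exit(1);
	}
	/* One input frame may produce zero or more output packets. */
	for (;;) {
		int ret = avcodec_receive_packet(c, pkt);
		if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
			break;
		if (ret < 0) {
			fprintf(stderr, "Error receiving packet\n");
			exit(1);
		}
		fwrite(pkt->data, 1, pkt->size, file);
		av_packet_unref(pkt);
	}
	av_packet_free(&pkt);
}
```

`ffmpeg_encoder_finish` would then flush by calling `avcodec_send_frame(c, NULL)` once and draining packets the same way until `avcodec_receive_packet` returns `AVERROR_EOF`.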

Download the prebuilt Windows binaries from the FFmpeg website: the FFmpeg headers and .lib import libraries are in the Windows 64/32-bit Dev package, and the .dll files are in the Windows 64/32-bit Shared package.
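To build under Visual Studio, add the Dev package's `include` directory to the compiler's include paths and its `lib` directory to the linker's library paths. The import libraries can then be pulled in from the project settings, or directly in code; a sketch for the three libraries this example uses:

```cpp
/* MSVC-specific: link the FFmpeg import libraries from the Dev package.
   Assumes the package's lib/ directory is on the linker's search path. */
#pragma comment(lib, "avcodec.lib")
#pragma comment(lib, "avutil.lib")
#pragma comment(lib, "swscale.lib")
```

At runtime, the matching .dll files from the Shared package must be found by the loader, e.g. by copying them next to the generated .exe.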
