A sample program that decodes video to YUV420P with FFmpeg

I have recently been debugging ON2 RMVB hardware decoding. Since the demux code comes from FFmpeg, I wrote a simple FFmpeg application that can print a chosen number of bytes of every frame and dump the decoded YUV pictures. It is very basic, but I am posting it here as a record.

/**
 * decode video with ffmpeg-0.8.14 for RV (RealVideo) testing
 *
 * 2013-04-25
 *	juguofeng
 */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>


FILE *pfout = NULL;
char ffrvout[128] = { 0 };

/* how many YUV pictures to save */
#define FRAME_NUM 50
/* save the demuxed video data to a file */
//#define ENABLE_DEMUX_SAVE
/* save the decoded YUV pictures to a file */
#define ENABLE_YUV_SAVE
/* print the leading bytes of each video packet */
#define ENABLE_PRINT_FRAME_BYTES
/* how many bytes to print per packet */
#define PRINT_BYTES 30

/**
 * save yuv frame
 */
void yuv420p_save(AVFrame *pFrame, AVCodecContext *pCodecCtx)
{
	int i = 0;

	int width = pCodecCtx->width, height = pCodecCtx->height;
	int height_half = height / 2, width_half = width / 2;
	int y_wrap = pFrame->linesize[0];
	int u_wrap = pFrame->linesize[1];
	int v_wrap = pFrame->linesize[2];

	unsigned char *y_buf = pFrame->data[0];
	unsigned char *u_buf = pFrame->data[1];
	unsigned char *v_buf = pFrame->data[2];

	//save y
	for (i = 0; i < height; i++)
		fwrite(y_buf + i * y_wrap, 1, width, pfout);
	//save u
	for (i = 0; i < height_half; i++)
		fwrite(u_buf + i * u_wrap, 1, width_half, pfout);
	//save v
	for (i = 0; i < height_half; i++)
		fwrite(v_buf + i * v_wrap, 1, width_half, pfout);
	fflush(pfout);
}

/**
 * main thread
 */
int main(int argc, char *argv[])
{
	int i;
	char szFileName[128] = {0};
	int decLen = 0;
	int frame = 0;

	AVCodecContext *pCodecCtx = NULL;
	AVFrame *pFrame = NULL;
	AVCodec *pCodec = NULL;
	AVFormatContext *pFormatCtx = NULL;

	if(argc != 3)
	{
		fprintf(stderr, "Usage: %s <input file> <output yuv file>\n", argv[0]);
		exit(-1);
	}
	
	sprintf(szFileName, "%s", argv[1]);

#ifdef ENABLE_DEMUX_SAVE
	FILE* frvdemux = fopen("rvdemuxout.rm","wb+");
	if (NULL == frvdemux)
	{
		fprintf(stderr, "create rvdemuxout file failed\n");
		exit(1);
	}    
#endif

	/* output yuv file name */
	sprintf(ffrvout, "%s", argv[2]);

	pfout = fopen(ffrvout, "wb+");
	if (NULL == pfout)
	{
		printf("create output file failed\n");
		exit(1);
	}
	printf("==========> Begin test ffmpeg call ffmpeg rv decoder\n");
	av_register_all();

	/* Open input video file */
	//printf("before avformat_open_input [%s]\n", szFileName);
	if(avformat_open_input(&pFormatCtx, szFileName, NULL, NULL)!= 0)
	{
		fprintf(stderr, "Couldn't open input file\n");
		return -1;
	}
	//printf("after avformat_open_input\n");

	/* Retrieve stream information */
	if(av_find_stream_info(pFormatCtx) < 0)
	{
		printf("av_find_stream_info ERROR\n");
		return -1;
	}
	//printf("after av_find_stream_info, \n");


	/* Find the first video stream */
	int videoStream = -1;
	printf("==========> pFormatCtx->nb_streams = %d\n", pFormatCtx->nb_streams);

	for(i = 0; i < pFormatCtx->nb_streams; i++) {
		if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
			videoStream = i;
			printf("the first video stream index: videoStream = %d\n",videoStream);
			break;
		}
	}

	if(videoStream == -1)
		return -1;		// Didn't find a video stream

	/* Get a pointer to the codec context for the video stream */
	pCodecCtx = pFormatCtx->streams[videoStream]->codec;
	printf("pCodecCtx->codec_id = %d\n", pCodecCtx->codec_id);

	pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
	if(pCodec == NULL) {
		fprintf(stderr, "can not find decoder!\n");
		return -1;
	}

	/* Open codec */
	if(avcodec_open(pCodecCtx, pCodec)<0)
	{
		printf("cannot open software codec\n");
		return -1; // Could not open codec
	}
	printf("==========> Open software codec success\n");

	pFrame = avcodec_alloc_frame();
	if(pFrame == NULL)
	{
		fprintf(stderr, "avcodec_alloc_frame() ERROR\n");
		return -1;
	}
	
	/* flag whether we get a decoded yuv frame */
	int frameFinished;
	int packetno = 0;

	AVPacket packet;
	av_init_packet(&packet);

	while(av_read_frame(pFormatCtx, &packet) >= 0) {
		//printf("[main]avpkt->slice_count=%d\n", packet.sliceNum);

		/* Is this a packet from the video stream? */
		if(packet.stream_index == videoStream) {
			packetno++;
#ifdef ENABLE_PRINT_FRAME_BYTES
			{
				/* dump the first PRINT_BYTES bytes of this video packet */
				int i;
				int size = packet.size < PRINT_BYTES ? packet.size : PRINT_BYTES;
				unsigned char *data = packet.data;
				printf("===>[%5d] [", packet.size);
				for (i = 0; i < size; i++)
					printf("%02x ", data[i]);
				printf("]\n");
			}
#endif
#ifdef ENABLE_DEMUX_SAVE
			fwrite(packet.data, 1, packet.size, frvdemux);
#endif
			//printf("[the %d packet]packet.size = %d\n", packetno++, packet.size);

			while (packet.size > 0) {
				decLen = avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
				//printf("[video_decode_example]after avcodec_decode_video2,decoded=%d\n",decLen);

				if (decLen < 0)	{
					fprintf(stderr, "[video_decode_example]Error while decoding frame %d\n", frame);
					//exit(1);
					/* FIXME: on a decode error, skip the rest of this packet */
					decLen = packet.size;
				}

				if (frameFinished) {
					//printf("got a yuv frame\n");
					//printf(stderr, "[video_decode_example]saving frame %3d\n", frame);

					/* the picture is allocated by the decoder. no need to free it */
					if (frame == 1) 
						printf("[video_decode_example]picture->linesize[0]=%d, c->width=%d,c->height=%d\n", 
								pFrame->linesize[0], pCodecCtx->width, pCodecCtx->height);
#ifdef ENABLE_YUV_SAVE
					/* save yuv pic */
					if (frame < FRAME_NUM) {
						yuv420p_save(pFrame, pCodecCtx);
					}
#endif
					/* advance the frame index */
					frame++;
				}
				//printf("===========> %d\n", decLen);
				/* data left in the packet, keep decoding */
				packet.data += decLen;
				packet.size -= decLen;
			}
			if (frame == FRAME_NUM) {
				printf("==========> decoded [%d pkt frames] ---> save [%d YUV frames], enough to stop!\n", packetno, FRAME_NUM);
				break;
			}
		}

		/* FIXME: the packet is deliberately not freed here. packet.data was advanced
		 * in the decode loop above, so av_free_packet() would no longer see the
		 * original buffer; the resulting leak is acceptable for this test program. */
		//av_free_packet(&packet);
	}

	printf("decoding job down! begin to free\n");
	/* Free the YUV frame */
	av_free(pFrame);

	/* Close the codec */
	avcodec_close(pCodecCtx);

	/* Close the video file */
	av_close_input_file(pFormatCtx);
	fclose(pfout);

	printf("==========> END-OK\n");

	return 0;
}

Here is the Makefile:

# use pkg-config to get CFLAGS and LDFLAGS
FFMPEG_LIBS=libavdevice libavformat libavfilter libavcodec libswscale libavutil
CFLAGS+=$(shell pkg-config  --cflags $(FFMPEG_LIBS))
LDFLAGS+=$(shell pkg-config --libs $(FFMPEG_LIBS))

EXAMPLES=ffmpeg-rm-test

OBJS=$(addsuffix .o,$(EXAMPLES))

%: %.o
	$(CC) $< $(LDFLAGS) -o $@

%.o: %.c
	$(CC) $< $(CFLAGS) -c -o $@

.PHONY: all clean

all: $(OBJS) $(EXAMPLES)

clean:
	rm -rf $(EXAMPLES) $(OBJS)

To build this program, the machine needs the FFmpeg libraries installed. It is best to download the ffmpeg-0.8 branch (or a later release) yourself, build it, and install it under /usr/local/. If necessary, also add PKG_CONFIG_PATH and LD_LIBRARY_PATH to your environment; otherwise the FFmpeg libraries may not be found at build or run time.
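A minimal sketch of that environment setup and a sample run, assuming FFmpeg was installed under /usr/local, the source file is saved as ffmpeg-rm-test.c so the Makefile's pattern rules pick it up, and the binary is the ffmpeg-rm-test target from the Makefile above (the input/output file names are only placeholders):

```sh
# let pkg-config find the FFmpeg .pc files installed under /usr/local
export PKG_CONFIG_PATH=/usr/local/lib/pkgconfig:$PKG_CONFIG_PATH
# let the runtime linker find the shared FFmpeg libraries
export LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH

make
# argv[1] = input video, argv[2] = output raw YUV420P file
./ffmpeg-rm-test test.rmvb out.yuv
```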


MediaCodec is the multimedia codec API on the Android platform, used to encode and decode video and audio. Since MediaCodec is an Android-only API, calling it directly from C++ to decode video is not very convenient. If you still want to write a C++ program that decodes an MP4 file to YUV, you can follow these steps:

1. Use FFmpeg or another open-source library to decode the MP4 file. These libraries provide APIs that make it easy to decode an MP4 file into YUV video data; for example, FFmpeg's avcodec_decode_video2() function can do this (the sample below uses the newer avcodec_send_packet()/avcodec_receive_frame() API).
2. During decoding, the decoded video data has to be kept in memory. You can allocate it with C++'s dynamic allocation, e.g. new or malloc; note that the required size depends on the video's resolution, frame rate, and pixel format.
3. Write the decoded YUV data to a file, using C++ file functions such as fopen and fwrite.

Code example:

```c++
#include <iostream>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

// FFmpeg is a C library; wrap the headers for C++ linkage
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
}

using namespace std;

// input file name
const char* input_filename = "input.mp4";
// output file name
const char* output_filename = "output.yuv";

int main(int argc, char* argv[])
{
    int ret;
    AVFormatContext* fmt_ctx = NULL;
    AVCodecContext* codec_ctx = NULL;
    AVCodec* codec = NULL;
    AVPacket pkt;
    AVFrame* frame = NULL;
    int video_stream_idx = -1;
    FILE* fp_out = NULL;
    int frame_count = 0;

    // 1. open the input file
    ret = avformat_open_input(&fmt_ctx, input_filename, NULL, NULL);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Could not open input file '%s'", input_filename);
        return ret;
    }

    // 2. find the video stream
    ret = avformat_find_stream_info(fmt_ctx, NULL);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Could not find stream information");
        goto end;
    }
    for (int i = 0; i < fmt_ctx->nb_streams; i++) {
        AVStream* stream = fmt_ctx->streams[i];
        if (stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            video_stream_idx = i;
            break;
        }
    }
    if (video_stream_idx == -1) {
        av_log(NULL, AV_LOG_ERROR, "Could not find video stream");
        ret = -1;
        goto end;
    }

    // 3. open the video decoder
    codec_ctx = avcodec_alloc_context3(NULL);
    if (!codec_ctx) {
        av_log(NULL, AV_LOG_ERROR, "Could not allocate video codec context");
        ret = AVERROR(ENOMEM);
        goto end;
    }
    ret = avcodec_parameters_to_context(codec_ctx, fmt_ctx->streams[video_stream_idx]->codecpar);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Could not copy codec parameters to codec context");
        goto end;
    }
    codec = avcodec_find_decoder(codec_ctx->codec_id);
    if (!codec) {
        av_log(NULL, AV_LOG_ERROR, "Could not find decoder for codec ID %d", codec_ctx->codec_id);
        ret = -1;
        goto end;
    }
    ret = avcodec_open2(codec_ctx, codec, NULL);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Could not open codec");
        goto end;
    }

    // 4. allocate the decoded frame
    frame = av_frame_alloc();
    if (!frame) {
        av_log(NULL, AV_LOG_ERROR, "Could not allocate frame");
        ret = AVERROR(ENOMEM);
        goto end;
    }

    // 5. open the output file
    fp_out = fopen(output_filename, "wb+");
    if (!fp_out) {
        av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", output_filename);
        ret = -1;
        goto end;
    }

    // 6. read packets and decode video frames
    while (1) {
        ret = av_read_frame(fmt_ctx, &pkt);
        if (ret < 0) {
            break;
        }
        if (pkt.stream_index == video_stream_idx) {
            ret = avcodec_send_packet(codec_ctx, &pkt);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error sending a packet for decoding");
                break;
            }
            while (1) {
                ret = avcodec_receive_frame(codec_ctx, frame);
                if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
                    break;
                } else if (ret < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Error during decoding");
                    goto end;
                }
                if (frame_count++ % 25 == 0) {
                    // 7. write the decoded YUV420P data to the output file, plane by plane
                    for (int i = 0; i < codec_ctx->height; i++) {
                        fwrite(frame->data[0] + i * frame->linesize[0], 1, codec_ctx->width, fp_out);
                    }
                    for (int i = 0; i < codec_ctx->height / 2; i++) {
                        fwrite(frame->data[1] + i * frame->linesize[1], 1, codec_ctx->width / 2, fp_out);
                    }
                    for (int i = 0; i < codec_ctx->height / 2; i++) {
                        fwrite(frame->data[2] + i * frame->linesize[2], 1, codec_ctx->width / 2, fp_out);
                    }
                }
            }
        }
        av_packet_unref(&pkt);
    }
    if (ret == AVERROR_EOF) {
        ret = 0;  // reaching end of file is the normal way out of the read loop
    }

end:
    avformat_close_input(&fmt_ctx);
    avcodec_free_context(&codec_ctx);
    av_frame_free(&frame);
    if (fp_out) {
        fclose(fp_out);
    }
    return ret;
}
```

This code uses the FFmpeg libraries to decode an MP4 file into YUV video data and write it to a file. Note that it only saves every 25th frame; adjust that interval as needed.