FFmpeg — 10. Sample Program (4): Separating Audio and Video (into AAC and H.264 streams)


参考:https://blog.csdn.net/leixiaohua1020/article/details/39802819

Flow chart

(flow-chart image from the original post omitted)

Program source code

/*
 *
 * This program demuxes the video and audio elementary streams out of a
 * container file and writes them to raw H.264 and AAC (ADTS) files.
 *
 */

#include <stdio.h>

#define __STDC_CONSTANT_MACROS

extern "C"
{
#include "libavdevice/avdevice.h"
#include "libavformat/avformat.h"
}

/*
 FIX: H.264 in some container formats (FLV, MP4, MKV, etc.) needs the
 "h264_mp4toannexb" bitstream filter (BSF), which
  * adds SPS/PPS in front of IDR frames
  * adds the start code ("0,0,0,1") in front of each NALU
 H.264 in some containers (e.g. MPEG-TS) does not need this BSF.
 (A sketch of the newer AVBSFContext-based replacement appears after the listing.)
 */
//'1': Use H.264 Bitstream Filter
#define USE_H264BSF 0

int open_codec_context(int *streamIndex, AVFormatContext *&ofmtCtx, AVFormatContext *ifmtCtx, AVMediaType type)
{
	AVStream *outStream = NULL, *inStream = NULL;
	int ret = -1, index = -1;

	index = av_find_best_stream(ifmtCtx, type, -1, -1, NULL, 0);
	if (index < 0)
	{
		printf("can't find %s stream in input file\n", av_get_media_type_string(type));
		return ret;
	}

	inStream = ifmtCtx->streams[index];

	outStream = avformat_new_stream(ofmtCtx, NULL);
	if (!outStream)
	{
		printf("failed to allocate output stream\n");
		return ret;
	}

	ret = avcodec_parameters_copy(outStream->codecpar, inStream->codecpar);
	if (ret < 0)
	{
		printf("failed to copy codec parametes\n");
		return ret;
	}

	outStream->codecpar->codec_tag = 0;

	if (ofmtCtx->oformat->flags & AVFMT_GLOBALHEADER)
	{
		/* NOTE: AVStream->codec is deprecated (removed in FFmpeg 5.0); the raw
		 * H.264 / ADTS muxers used here normally do not set AVFMT_GLOBALHEADER,
		 * so this branch is rarely taken. */
		outStream->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
	}

	*streamIndex = index;

	return 0;
}


int main(int argc, char *argv[])
{
	AVFormatContext *ifmtCtx = NULL, *ofmtCtxAudio = NULL, *ofmtCtxVideo = NULL;
//	AVCodecContext *codecCtxVideo = NULL, *codecCtxAudio = NULL;
	AVPacket packet;

	int videoIndex = -1, audioIndex = -1;
	int ret = 0;

	char inFilename[128] = "input.mp4";
	char outFilenameAudio[128] = "output.aac";
	char outFilenameVideo[128] = "output.h264";

#if USE_H264BSF
	AVBitStreamFilterContext *h264bsfc = av_bitstream_filter_init("h264_mp4toannexb");
#endif

	//register all devices (not strictly required for demuxing a regular file)
	avdevice_register_all();

	//open the input file
	ret = avformat_open_input(&ifmtCtx, inFilename, 0, 0);
	if (ret < 0)
	{
		printf("can't open input file\n");
		goto end;
	}

	//retrieve stream information
	ret = avformat_find_stream_info(ifmtCtx, 0);
	if (ret < 0)
	{
		printf("can't retrieve input stream information\n");
		goto end;
	}

	//create the output context: video
	avformat_alloc_output_context2(&ofmtCtxVideo, NULL, NULL, outFilenameVideo);
	if (!ofmtCtxVideo)
	{
		printf("can't create video output context");
		goto end;
	}

	//create the output context: audio
	avformat_alloc_output_context2(&ofmtCtxAudio, NULL, NULL, outFilenameAudio);
	if (!ofmtCtxAudio)
	{
		printf("can't create audio output context");
		goto end;
	}

#if 1
	ret = open_codec_context(&videoIndex, ofmtCtxVideo, ifmtCtx, AVMEDIA_TYPE_VIDEO);
	if (ret < 0)
	{
		printf("can't decode video context\n");
		goto end;
	}


	ret = open_codec_context(&audioIndex, ofmtCtxAudio, ifmtCtx, AVMEDIA_TYPE_AUDIO);
	if (ret < 0)
	{
		printf("can't decode video context\n");
		goto end;
	}
#endif

#if 0	//legacy approach using the deprecated AVStream->codec API, kept for reference
	for (i = 0; i < ifmtCtx->nb_streams; ++i)
	{
		AVFormatContext *ofmtCtx;
		AVStream *inStream = ifmtCtx->streams[i];
		AVStream *outStream = NULL;

		if (ifmtCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
		{
			videoIndex = i;
			outStream = avformat_new_stream(ofmtCtxVideo, inStream->codec->codec);
			ofmtCtx = ofmtCtxVideo;
		}
		else if (ifmtCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
		{
			audioIndex = i;
			outStream = avformat_new_stream(ofmtCtxAudio, inStream->codec->codec);
			ofmtCtx = ofmtCtxAudio;
		}
		else
		{
			break;
		}

		if (!outStream)
		{
			printf("failed to allocate output stream\n");
			goto end;
		}

		if (avcodec_copy_context(outStream->codec, inStream->codec) < 0)
		{
			printf("failed to copy context from input to output stream codec context\n");
			goto end;
		}

		outStream->codec->codec_tag = 0;

		if (ofmtCtx->oformat->flags & AVFMT_GLOBALHEADER)
		{
			outStream->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
		}
	}
#endif

	//Dump Format------------------
	printf("\n==============Input Video=============\n");
	av_dump_format(ifmtCtx, 0, inFilename, 0);
	printf("\n==============Output Video============\n");
	av_dump_format(ofmtCtxVideo, 0, outFilenameVideo, 1);
	printf("\n==============Output Audio============\n");
	av_dump_format(ofmtCtxAudio, 0, outFilenameAudio, 1);
	printf("\n======================================\n");

	//open the output file: video
	if (!(ofmtCtxVideo->oformat->flags & AVFMT_NOFILE))
	{
		if (avio_open(&ofmtCtxVideo->pb, outFilenameVideo, AVIO_FLAG_WRITE) < 0)
		{
			printf("can't open output file: %s\n", outFilenameVideo);
			goto end;
		}
	}

	//open the output file: audio
	if (!(ofmtCtxAudio->oformat->flags & AVFMT_NOFILE))
	{
		if (avio_open(&ofmtCtxAudio->pb, outFilenameAudio, AVIO_FLAG_WRITE) < 0)
		{
			printf("can't open output file: %s\n", outFilenameVideo);
			goto end;
		}
	}

	//write the file headers
	if (avformat_write_header(ofmtCtxVideo, NULL) < 0)
	{
		printf("Error occurred when opening video output file\n");
		goto end;
	}

	if (avformat_write_header(ofmtCtxAudio, NULL) < 0)
	{
		printf("Error occurred when opening audio output file\n");
		goto end;
	}


	while (1)
	{
		AVFormatContext *ofmtCtx;
		AVStream *inStream, *outStream;

		if (av_read_frame(ifmtCtx, &packet) < 0)
		{
			break;
		}

		inStream = ifmtCtx->streams[packet.stream_index];

		if (packet.stream_index == videoIndex)
		{
			outStream = ofmtCtxVideo->streams[0];
			ofmtCtx = ofmtCtxVideo;
#if USE_H264BSF
			av_bitstream_filter_filter(h264bsfc, inStream->codec, NULL, &packet.data, &packet.size, packet.data,
					packet.size, 0);
#endif
		}
		else if (packet.stream_index == audioIndex)
		{
			outStream = ofmtCtxAudio->streams[0];
			ofmtCtx = ofmtCtxAudio;
		}
		else
		{
			continue;
		}

		//convert PTS/DTS
		packet.pts = av_rescale_q_rnd(packet.pts, inStream->time_base, outStream->time_base,
				(AVRounding) (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
		packet.dts = av_rescale_q_rnd(packet.dts, inStream->time_base, outStream->time_base,
				(AVRounding) (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
		packet.duration = av_rescale_q(packet.duration, inStream->time_base, outStream->time_base);
		packet.pos = -1;
		packet.stream_index = 0;

		//write
		if (av_interleaved_write_frame(ofmtCtx, &packet) < 0)
		{
			printf("Error muxing packet\n");
			break;
		}

		av_packet_unref(&packet);

	}

#if USE_H264BSF
	av_bitstream_filter_close(h264bsfc);
#endif

	//write the file trailers
	av_write_trailer(ofmtCtxVideo);
	av_write_trailer(ofmtCtxAudio);

	end:

	avformat_close_input(&ifmtCtx);

	if (ofmtCtxVideo && !(ofmtCtxVideo->oformat->flags & AVFMT_NOFILE))
	{
		avio_close(ofmtCtxVideo->pb);
	}

	if (ofmtCtxAudio && !(ofmtCtxAudio->oformat->flags & AVFMT_NOFILE))
	{
		avio_close(ofmtCtxAudio->pb);
	}

	avformat_free_context(ofmtCtxVideo);
	avformat_free_context(ofmtCtxAudio);

	return 0;
}
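
A note on the bitstream-filter path: the av_bitstream_filter_init() / av_bitstream_filter_filter() / av_bitstream_filter_close() calls used when USE_H264BSF is 1 are deprecated and were removed in FFmpeg 5.0. Below is a minimal sketch of the same h264_mp4toannexb step using the newer AVBSFContext API; it assumes FFmpeg 4.x or later, and the helper name open_annexb_bsf is only for illustration (it reuses the inStream / packet names from the program above).

extern "C"
{
#include "libavcodec/bsf.h"		//on older 4.x releases these declarations live in libavcodec/avcodec.h
#include "libavformat/avformat.h"
}

//create and initialize the h264_mp4toannexb filter for a given input stream
static AVBSFContext *open_annexb_bsf(AVStream *inStream)
{
	const AVBitStreamFilter *filter = av_bsf_get_by_name("h264_mp4toannexb");
	AVBSFContext *bsf = NULL;

	if (!filter || av_bsf_alloc(filter, &bsf) < 0)
	{
		return NULL;
	}

	//the filter needs the input stream's codec parameters and time base
	avcodec_parameters_copy(bsf->par_in, inStream->codecpar);
	bsf->time_base_in = inStream->time_base;

	if (av_bsf_init(bsf) < 0)
	{
		av_bsf_free(&bsf);
		return NULL;
	}

	return bsf;
}

//In the read loop, instead of av_bitstream_filter_filter():
//
//	av_bsf_send_packet(bsf, &packet);
//	while (av_bsf_receive_packet(bsf, &packet) == 0)
//	{
//		//rescale timestamps and call av_interleaved_write_frame() as above
//	}
//
//and after the loop: av_bsf_free(&bsf);

With the file names used above, avformat_alloc_output_context2() picks the raw H.264 muxer for output.h264 and the ADTS muxer for output.aac from the file extensions; the ADTS muxer writes the per-frame ADTS headers, which is why the extracted .aac file plays directly. Both outputs can be checked with ffplay output.h264 and ffplay output.aac.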

 
