ffmpeg合并(复用)音频和视频文件,组成mp4

ffmpeg合并(复用)音频和视频文件,组成mp4。程序如下:

/*
合并音频和视频,形成音视频
*/

extern "C"
{
#include "libavutil/avutil.h"
#include "libavformat/avformat.h"
#include "libavformat/avio.h"
#include "libavcodec/avcodec.h"
}


#pragma warning(disable:4996)


int main()
{
	//
	const char *srcMedia1 = "out/t2Video.h264";
	const char *srcMedia2 = "out/T2audio.aac";
	const char *destMedia = "out/T2.mp4";
	char errors[200] = { 0 };
	AVFormatContext *inFormatContext1 = NULL;
	AVFormatContext *inFormatContext2 = NULL;
	AVFormatContext *outFormatContext = NULL;

	//输入上下文1-视频
	int ret = 0;
	av_log_set_level(AV_LOG_INFO);
	av_register_all();
	ret = avformat_open_input(&inFormatContext1, srcMedia1, NULL, NULL);
	if (ret != 0)
	{
		av_strerror(ret, errors, 200);
		av_log(NULL, AV_LOG_WARNING, "error, ret=%d, msg=%s\n", ret, errors);
		return -1;
	}
	avformat_find_stream_info(inFormatContext1, NULL);
	av_dump_format(inFormatContext1, -1, srcMedia1, 0);
	//输入上下文1-音频
	ret = avformat_open_input(&inFormatContext2, srcMedia2, NULL, NULL);
	avformat_find_stream_info(inFormatContext2, NULL);
	av_dump_format(inFormatContext2, -1, srcMedia2, 0);
	//输出上下文
	avformat_alloc_output_context2(&outFormatContext, NULL, NULL, destMedia);
	AVOutputFormat *outFormat = outFormatContext->oformat;
	int stream1 = 0;
	AVStream *inStream1 = NULL;
	//复制流信息
	if(inFormatContext1->nb_streams > 0)
	{
		stream1 = 1;
		inStream1 = inFormatContext1->streams[0];
		AVStream *outStream = avformat_new_stream(outFormatContext, NULL);
		avcodec_parameters_copy(outStream->codecpar, inStream1->codecpar);
		outStream->codecpar->codec_tag = 0;
	}
	int stream2 = 0;
	AVStream *inStream2 = NULL;
	if (inFormatContext2->nb_streams > 0)
	{
		stream2 = 1;
		inStream2 = inFormatContext2->streams[0];
		AVStream *outStream = avformat_new_stream(outFormatContext, NULL);
		avcodec_parameters_copy(outStream->codecpar, inStream2->codecpar);
		outStream->codecpar->codec_tag = 0;
	}
	av_dump_format(outFormatContext, -1, destMedia, 1);
	//打开文件
	avio_open(&outFormatContext->pb, destMedia, AVIO_FLAG_WRITE);
	avformat_write_header(outFormatContext, NULL);
	int64_t curPts1 = 0;
	int64_t curPts2 = 0;
	
	AVPacket avPacket;
	av_init_packet(&avPacket);
	//输入流时间基
	AVRational inStream1time = inStream1->time_base;
	AVRational inStream2time = inStream2->time_base;
	int frameIndex = 0;
	//交替写入音频和视频数据
	while (stream1 || stream2)
	{
		if (stream1 && (!stream2 || av_compare_ts(curPts1, inStream1time, curPts2, inStream2time) <= 0))
		{
			ret = av_read_frame(inFormatContext1, &avPacket);
			if (ret < 0)
			{
				stream1 = 0;
				continue;
			}
			//raw h264无pts,手动添加
			if (avPacket.pts == AV_NOPTS_VALUE)
			{
				AVRational timeBase = inStream1time;
				int64_t calcDuration = AV_TIME_BASE / av_q2d(inStream1->r_frame_rate);
				avPacket.pts = (double)(frameIndex*calcDuration) / (av_q2d(timeBase)*AV_TIME_BASE);
				avPacket.dts = avPacket.pts;
				avPacket.duration = (double)calcDuration / (av_q2d(timeBase)*AV_TIME_BASE);
				frameIndex++;
			}
			curPts1 = avPacket.pts;
			AVStream *outStream = outFormatContext->streams[0];
			avPacket.pts = av_rescale_q_rnd(avPacket.pts, inStream1time, outStream->time_base, AVRounding(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
			avPacket.dts = avPacket.pts;
			avPacket.duration = av_rescale_q(avPacket.duration, inStream1time, outStream->time_base);
			avPacket.pos = -1;
			avPacket.stream_index = 0;
			//av_log(NULL, AV_LOG_INFO, "xxxxxxxxx%d, dts=%lld, pts=%lld, duration=%lld\n", frameIndex, avPacket.dts, avPacket.pts, avPacket.duration);
			stream1 = !av_interleaved_write_frame(outFormatContext, &avPacket);
		}
		else if (stream2)
		{
			ret = av_read_frame(inFormatContext2, &avPacket);
			if (ret < 0)
			{
				stream2 = 0;
				continue;
			}
			//raw aac无pts,手动添加
			if (avPacket.pts == AV_NOPTS_VALUE)
			{
				AVRational timeBase = inStream2time;
				int64_t calcDuration = AV_TIME_BASE / av_q2d(inStream2->r_frame_rate);
				avPacket.pts = (double)(frameIndex*calcDuration) / (av_q2d(timeBase)*AV_TIME_BASE);
				avPacket.dts = avPacket.pts;
				avPacket.duration = (double)calcDuration / (av_q2d(timeBase)*AV_TIME_BASE);
				frameIndex++;
			}
			curPts2 = avPacket.pts;
			AVStream *outStream = outFormatContext->streams[1];
			avPacket.pts = av_rescale_q_rnd(avPacket.pts, inStream2time, outStream->time_base, AVRounding(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
			avPacket.dts = avPacket.pts;
			avPacket.duration = av_rescale_q(avPacket.duration, inStream2time, outStream->time_base);
			avPacket.pos = -1;
			avPacket.stream_index = 1;
			av_log(NULL, AV_LOG_INFO, "xxxxxxxxx%d, size:%5d, dts=%lld, pts=%lld, duration=%lld\n", frameIndex, avPacket.size, avPacket.dts, avPacket.pts, avPacket.duration);
			stream2 = !av_interleaved_write_frame(outFormatContext, &avPacket);
		}
		av_packet_unref(&avPacket);
	}
	ret = av_write_trailer(outFormatContext);
	if (ret != 0)
	{
		av_strerror(ret, errors, 200);
		av_log(NULL, AV_LOG_WARNING, "av_write_trailer error: ret=%d, msg=%s\n", ret, errors);
	}

	//释放资源
	avformat_close_input(&inFormatContext1);
	avformat_close_input(&inFormatContext2);
	avio_close(outFormatContext->pb);

	return 0;
}

相关链接:最简单的基于FFmpeg的封装格式处理:视音频复用器(muxer)_雷霄骅(leixiaohua1020)的专栏-CSDN博客_muxer

基于ffmpeg复用器是一种用于音视频处理的工具,它可以将多个音视频合并成一个媒体文件。ffmpeg是一个开源的跨平台音视频处理工具库,可以处理众多格式的音视频文件。 基于ffmpeg复用器可以用于将多个音频合并成一个音频文件,多个视频合并成一个视频文件,或者将音频视频合并成一个完整的多媒体文件。它可以对音视频流进行解码处理,然后根据需要将其进行复用打包。 复用器的主要功能是将多个音视频流的解码数据进行合并处理,然后将合并后的数据重新打包成一个媒体文件。复用器会根据音视频流的时间轴进行同步处理,确保最终合成的媒体文件能够正常播放。 基于ffmpeg复用器具有高度的灵活性和可定制性。用户可以通过命令行选择需要复用的音视频流,并指定输出格式和参数。可以通过设置参数来调整音视频编码格式、分辨率、码率等参数,以满足不同的需求。 基于ffmpeg复用器还提供了丰富的滤镜效果和编辑功能,例如可以对音频进行混音、混响、降噪处理,对视频进行剪切、合并、旋转等操作。这些功能使得基于ffmpeg复用器成为一个强大的音视频处理工具。 总之,基于ffmpeg复用器是一个功能强大的工具,它可以对各种格式的音视频流进行合并处理,使得用户可以方便地实现音视频的定制和编辑。
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值