Merging raw H.264 + G.711A audio/video streams into an MP4 file with FFmpeg (G.711A to AAC)

        Because FFmpeg's MP4 muxer only supports the H.264 + AAC combination and does not accept H.264 + G.711, the G.711A audio has to be transcoded to AAC first and then muxed together with the H.264 video into the MP4 file. Some posts claim that modifying movenc.c and rebuilding FFmpeg makes it mux H.264 + G.711A directly, but my attempt at that build failed; if you manage to get it working, please share how.

Reference: C++编程-7:ffmpeg支持G711音频和H.264视频数据同步封装进MP4文件 (CSDN blog by 椰果奶茶加冰)

Code:

1. First, transcode the G.711A stream to AAC, using this library: https://github.com/EasyDarwin/EasyAACEncoder

#include <stdio.h>
#include <stdlib.h>
#include "EasyAACEncoderAPI.h"   // EasyAACEncoder SDK header; adjust the name/path to your copy of the library

	InitParam initParam;
	initParam.u32AudioSamplerate=8000;
	initParam.ucAudioChannel=1;
	initParam.u32PCMBitSize=16;
	initParam.ucAudioCodec = Law_ALaw;
	//initParam.ucAudioCodec = Law_ULaw;
	EasyAACEncoder_Handle handle = Easy_AACEncoder_Init( initParam);
	const char* infilename = "src.g711a";  // raw G.711 A-law input
	const char* outAacname = "g711.aac";   // ADTS AAC output

	FILE* fpIn = fopen(infilename, "rb");
	if(NULL == fpIn)
	{
		printf("%s:[%d] open %s file failed\n",__FUNCTION__,__LINE__,infilename);
		return -1;
	}

	FILE* fpOut = fopen(outAacname, "wb");
	if(NULL == fpOut)
	{
		printf("%s:[%d] open %s file failed\n",__FUNCTION__,__LINE__,outAacname);
		fclose(fpIn);
		return -1;
	}

	int gBytesRead = 0;
	int bG711ABufferSize = 500;
	int bAACBufferSize = 4*bG711ABufferSize; // make the AAC output buffer comfortably larger than the input
	unsigned char *pbG711ABuffer = (unsigned char *)malloc(bG711ABufferSize *sizeof(unsigned char));
	unsigned char *pbAACBuffer = (unsigned char*)malloc(bAACBufferSize * sizeof(unsigned char));  
	unsigned int out_len = 0;

	while((gBytesRead = fread(pbG711ABuffer, 1, bG711ABufferSize, fpIn)) > 0)
	{
		// encode one chunk of G.711A; out_len receives the number of AAC bytes produced
		if(Easy_AACEncoder_Encode(handle, pbG711ABuffer, gBytesRead, pbAACBuffer, &out_len) > 0)
		{
			fwrite(pbAACBuffer, 1, out_len, fpOut);
		}
	}

	Easy_AACEncoder_Release(handle);

	free(pbG711ABuffer);
	free(pbAACBuffer);
	fclose(fpIn);
	fclose(fpOut);
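
Because the muxing code in step 2 writes one packet per call, the ADTS stream produced above has to be cut into individual AAC frames before it can be fed to the MP4 writer. Below is a minimal sketch of how one frame's length can be read from its header, assuming the encoder emits ADTS-framed AAC (which is what makes the .aac file playable on its own); AdtsFrameLength is a hypothetical helper name, not part of the SDK.

	// Hypothetical helper: byte length of one ADTS frame, header included.
	// The 13-bit frame_length field spans bytes 3..5 of the ADTS header.
	static int AdtsFrameLength(const unsigned char* p)
	{
		return ((p[3] & 0x03) << 11) | (p[4] << 3) | ((p[5] >> 5) & 0x07);
	}

	// Walking the .aac file frame by frame then looks like:
	//   int pos = 0;
	//   while (pos + 7 <= totalSize) {
	//       int len = AdtsFrameLength(aacData + pos);
	//       WriteVideo(aacData + pos, len, 1);   // type 1 = audio, see step 2
	//       pos += len;
	//   }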

2. Mux the H.264 + AAC streams into an MP4 file. Reference: ffmpeg h264文件和裸流 封装mp4 (Jianshu)

   1) Create the file

int FFmpegTool::CreateMp4(const char* filename)
{
	int ret; // CreateMp4 returns 0 on success, 1 on failure
	const char* pszFileName = filename;
	AVOutputFormat *fmt;
	AVCodec *video_codec;
	AVStream *m_pVideoSt;

	AVCodec *audio_codec;
	AVStream *m_pAudioSt;

	av_register_all();
	avformat_alloc_output_context2(&m_pOc, NULL, NULL, pszFileName);
	if (!m_pOc)
	{
		printf("Could not deduce output format from file extension: using MPEG. \n");
		avformat_alloc_output_context2(&m_pOc, NULL, "mpeg", pszFileName);
	}
	if (!m_pOc)
	{
		return 1;
	}
	fmt = m_pOc->oformat;
	if (fmt->video_codec != AV_CODEC_ID_NONE)
	{
		// add the video and audio streams to the output context
		m_pVideoSt = add_stream(m_pOc, &video_codec, AV_CODEC_ID_H264, 0);
		m_pAudioSt = add_stream(m_pOc, &audio_codec, AV_CODEC_ID_AAC, 1);
	}

	// open the audio and video encoders
	if (m_pAudioSt)
	{
		open_audio(m_pOc, audio_codec, m_pAudioSt);
	}


	if (m_pVideoSt)
	{
		open_video(m_pOc, video_codec, m_pVideoSt);
	}


	printf("==========Output Information==========\n");
	av_dump_format(m_pOc, 0, pszFileName, 1);
	printf("======================================\n");
	/* open the output file, if needed */
	if (!(fmt->flags & AVFMT_NOFILE))
	{
		// open the output file
		ret = avio_open(&m_pOc->pb, pszFileName, AVIO_FLAG_WRITE);
		if (ret < 0)
		{
			printf("could not open %s\n", pszFileName);
			return 1;
		}
	}
	/* Write the stream header, if any */
	ret = avformat_write_header(m_pOc, NULL);
	if (ret < 0)
	{
		printf("Error occurred when opening output file");
		return 1;
	}
}

   

bool isIdrFrame(uint8_t* buf, int len) {
	// buf points at the first byte of a NAL unit (start code already stripped)
	switch (buf[0] & 0x1f) {
	case 7: // SPS
		return true;
	case 8: // PPS
		return true;
	case 5: // IDR slice
		return true;
	case 1: // non-IDR slice
		return false;
	default:
		return false;
	}
}

// Decide whether an H.264 access unit (Annex-B, with start codes) contains a key frame
bool FFmpegTool::judgeKeyFrame(uint8_t* buf, int size) {
	// walk the start codes and check the SPS/PPS/IDR NAL units in front of the slice data
	int last = 0;
	for (int i = 2; i <= size; ++i) {
		if (i == size) {
			if (last) {
				bool ret = isIdrFrame(buf + last, i - last);
				if (ret) {
					return true;
				}
			}
		}
		else if (buf[i - 2] == 0x00 && buf[i - 1] == 0x00 && buf[i] == 0x01) {
			if (last) {
				int nalSize = i - last - 3;
				if (buf[i - 3]) ++nalSize;  // 3-byte start code (no leading zero byte)
				bool ret = isIdrFrame(buf + last, nalSize);
				if (ret) {
					return true;
				}
			}
			last = i + 1;
		}
	}
	return false;
}
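
A quick sanity check of the key-frame detection (hypothetical data: the bytes after each NAL header are placeholders, and tool is an FFmpegTool instance):

	// An Annex-B unit starting with an SPS NAL (nal_unit_type 7) should be reported
	// as a key frame; a plain non-IDR slice (type 1) should not.
	uint8_t sps_au[]   = { 0x00, 0x00, 0x00, 0x01, 0x67, 0x42, 0x00, 0x1F };
	uint8_t slice_au[] = { 0x00, 0x00, 0x00, 0x01, 0x41, 0x9A, 0x02, 0x04 };
	bool k1 = tool.judgeKeyFrame(sps_au,   sizeof(sps_au));    // expected: true
	bool k2 = tool.judgeKeyFrame(slice_au, sizeof(slice_au));  // expected: false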

// Add a video (type 0) or audio (type 1) stream to the output context
AVStream * FFmpegTool::add_stream(AVFormatContext *oc, AVCodec **codec, enum AVCodecID codec_id, int type)
{
	AVCodecContext *c;
	AVStream *st;
	/* find the encoder */
	*codec = avcodec_find_encoder(codec_id);
	if (!*codec)
	{
		printf("could not find encoder for '%s' \n", avcodec_get_name(codec_id));
		exit(1);
	}
	st = avformat_new_stream(oc, *codec);
	if (!st)
	{
		printf("could not allocate stream \n");
		exit(1);
	}
	st->id = oc->nb_streams - 1;
	c = st->codec;

	if (type == 0)
		m_vi_nstream = st->index;   // remember the video stream index
	else
		m_ai_nstream = st->index;   // remember the audio stream index

	AVRational time_base;

	switch ((*codec)->type)
	{
	case AVMEDIA_TYPE_AUDIO:
		c->sample_fmt = AV_SAMPLE_FMT_S16P;
		//c->bit_rate = 128000;
		c->sample_rate = m_sample_rate;
		c->channels = m_channel;
		c->codec_id = AV_CODEC_ID_AAC;
		c->channel_layout = AV_CH_LAYOUT_STEREO; // should be consistent with c->channels (AV_CH_LAYOUT_MONO for a single channel)
		time_base = { 1, c->sample_rate };
		st->time_base = time_base;
		break;
	case AVMEDIA_TYPE_VIDEO:
		c->codec_id = AV_CODEC_ID_H264;
		c->bit_rate = m_bit_rate;
		c->width = m_width;
		c->height = m_height;
		c->time_base.den = m_fps;
		c->time_base.num = 1;
		c->gop_size = 1;
		c->pix_fmt = AV_PIX_FMT_YUV420P;
		time_base = { 1, m_fps };
		st->time_base = time_base;
		if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO)
		{
			c->max_b_frames = 2;
		}
		if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO)
		{
			c->mb_decision = 2;
		}
		break;
	default:
		break;
	}
	if (oc->oformat->flags & AVFMT_GLOBALHEADER)
	{
		c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
	}
	return st;
}
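
Note that add_stream configures the stream through the st->codec field, which matches older FFmpeg releases but is deprecated in newer ones. A rough sketch (assuming an FFmpeg 3.1 or later build; not a drop-in replacement for the code above) of the same setup with the newer API, where a separate AVCodecContext is configured and then copied into st->codecpar:

	// Sketch only: configure an independent encoder context, then copy it into the stream.
	AVCodecContext *enc = avcodec_alloc_context3(*codec);
	enc->codec_id  = codec_id;
	enc->width     = m_width;                              // same values as above
	enc->height    = m_height;
	enc->time_base = { 1, m_fps };
	enc->pix_fmt   = AV_PIX_FMT_YUV420P;
	if (oc->oformat->flags & AVFMT_GLOBALHEADER)
		enc->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
	avcodec_open2(enc, *codec, NULL);                      // open the encoder
	avcodec_parameters_from_context(st->codecpar, enc);    // copy parameters into the stream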

// Open the video encoder
void FFmpegTool::open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st)
{
	int ret;
	AVCodecContext *c = st->codec;
	/* open the codec */
	ret = avcodec_open2(c, codec, NULL);
	if (ret < 0)
	{
		printf("could not open video codec:%d", ret);
	}
}

// Open the audio encoder
void FFmpegTool::open_audio(AVFormatContext *oc, AVCodec *codec, AVStream *st)
{
	int ret;
	AVCodecContext *c = st->codec;
	/* open the codec */
	ret = avcodec_open2(c, codec, NULL);
	if (ret < 0)
	{
		printf("could not open audio codec:%d", ret);
		//exit(1);
	}
}

2) Write the audio and video packets

// Write one video or audio packet into the MP4 (type 0 = video, 1 = audio)
void FFmpegTool::WriteVideo(void* data, int nLen, int type)
{

	AVStream *pst;

	// pick the target stream: video or audio
	if (type == 0)
		pst = m_pOc->streams[m_vi_nstream];
	else
		pst = m_pOc->streams[m_ai_nstream];

	// Init packet
	AVPacket pkt;
	av_init_packet(&pkt);
	int isI = judgeKeyFrame((uint8_t*)data, nLen);

	pkt.flags |= isI ? AV_PKT_FLAG_KEY : 0;
	pkt.stream_index = pst->index; // index of the stream this packet belongs to
	pkt.data = (uint8_t*)data;
	pkt.size = nLen;

	// drop everything until the first key frame has arrived
	if (m_waitkey) {
		if (0 == (pkt.flags & AV_PKT_FLAG_KEY)) {
			return;
		}
		else
			m_waitkey = 0;
	}

	// duration of one frame in AV_TIME_BASE (microsecond) units
	int64_t calc_duration = (double)AV_TIME_BASE / m_fps;

	// presentation timestamp, converted into the stream's time-base units
	pkt.pts = (double)(m_frame_index*calc_duration) / (double)(av_q2d(pst->time_base)*AV_TIME_BASE);

	// DTS equals PTS because the stream contains no B-frames
	pkt.dts = pkt.pts;
	// frame duration in stream time-base units
	pkt.duration = (double)calc_duration / (double)(av_q2d(pst->time_base)*AV_TIME_BASE);

	if (type == 0) {
		// advance the video frame counter and remember the video PTS
		cur_pts_v = pkt.pts;
		m_frame_index++;
	}
	else
	{   // remember the audio PTS so audio and video can be interleaved
		cur_pts_a = pkt.pts;
	}

	// rescale the timestamps into the output stream's time base
	pkt.pts = av_rescale_q_rnd(pkt.pts, pst->time_base, pst->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
	pkt.dts = av_rescale_q_rnd(pkt.dts, pst->time_base, pst->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
	pkt.duration = av_rescale_q(pkt.duration, pst->time_base, pst->time_base);
	pkt.pos = -1;
	pkt.stream_index = pst->index;

	if (av_interleaved_write_frame(m_pOc, &pkt) < 0) {
		printf("cannot write frame\n");
	}

	av_free_packet(&pkt);
}
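
As a quick sanity check of the timestamp arithmetic, assume m_fps = 25 and the 1/25 stream time base set in add_stream: calc_duration = AV_TIME_BASE / 25 = 40000 (microseconds per frame), av_q2d(pst->time_base) = 0.04, so pkt.pts = m_frame_index * 40000 / (0.04 * 1000000) = m_frame_index and pkt.duration = 1. In other words, each video frame advances the PTS by exactly one tick of the 1/25 time base. Also note that av_rescale_q_rnd is called here with the same time base as source and destination, so it is effectively a no-op; it only becomes meaningful if the incoming timestamps use a different time base than the output stream.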

  3) Close the file

void FFmpegTool::CloseMp4()
{
	m_waitkey = -1;
	m_vi_nstream = -1;
	m_ai_nstream = -1;
	if (m_pOc)
		av_write_trailer(m_pOc);
	if (m_pOc && !(m_pOc->oformat->flags & AVFMT_NOFILE))
		avio_close(m_pOc->pb);
	if (m_pOc)
	{
		avformat_free_context(m_pOc);
		m_pOc = NULL;
	}
}
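
Putting the pieces together, the calling sequence looks roughly like the sketch below. ReadOneH264Frame and ReadOneAdtsFrame are hypothetical helpers that stand in for whatever framing logic cuts the raw H.264 into access units and the .aac output of step 1 into ADTS frames; how often audio is pulled relative to video depends on your streams.

	// Sketch only: the helpers fill 'buf' and return the frame length, or 0 at end of stream.
	FFmpegTool tool;
	if (tool.CreateMp4("out.mp4") != 0)
		return -1;

	static uint8_t buf[512 * 1024];
	int len;
	// Feed the writer roughly in timestamp order; WriteVideo keeps cur_pts_v / cur_pts_a,
	// which can be compared (e.g. with av_compare_ts) to decide which stream to pull next.
	while ((len = ReadOneH264Frame(buf, sizeof(buf))) > 0)
	{
		tool.WriteVideo(buf, len, 0);                        // type 0 = video
		if ((len = ReadOneAdtsFrame(buf, sizeof(buf))) > 0)
			tool.WriteVideo(buf, len, 1);                    // type 1 = audio
	}
	tool.CloseMp4();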

The code above covers the main parts of the implementation and is meant as a reference. A few parameters still have to be set to match your own streams, such as the audio sample rate and channel count and the video frame rate and resolution (a sample configuration is sketched below). I have added comments for the parts I understand; if anything is wrong, corrections are very welcome.
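
For the G.711A source used in step 1, one plausible set of values for those members (names taken from the code above, numbers purely illustrative) would be:

	// Illustration only: match these to your actual streams.
	m_sample_rate = 8000;            // G.711 audio is sampled at 8 kHz
	m_channel     = 1;               // mono
	m_fps         = 25;              // video frame rate
	m_width       = 1920;            // video resolution
	m_height      = 1080;
	m_bit_rate    = 2 * 1024 * 1024; // video bit rate written into the codec context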

Note: because this code comes from a company project, I can only publish the core implementation here rather than the full source.
