ffmpeg采集摄像头,h264压缩,保存为mp4文件

接上篇:ffmpeg采集摄像头保存为h264裸流文件,可以用potplayer、VLC播放

我们更进一步,保存为更通用的mp4文件

// Compatibility shim: CODEC_FLAG_GLOBAL_HEADER was removed from newer FFmpeg
// headers; its replacement AV_CODEC_FLAG_GLOBAL_HEADER has the same value (1 << 22).
#define CODEC_FLAG_GLOBAL_HEADER (1 << 22)

/*
 * encodeMP4 - push one raw frame into the H.264 encoder and mux every packet
 * it produces into the MP4 output file.
 *
 * enc_ctx    - opened H.264 encoder context
 * inFmtCtx   - capture (input) format context; its stream time_base and
 *              r_frame_rate are used when timestamps must be synthesized
 * outFmtCtx  - MP4 (output) format context; header must already be written
 * frame      - raw YUV frame to encode, or NULL to flush/drain the encoder
 * pkt        - caller-provided packet reused as scratch output buffer
 * frameIndex - running frame counter used to fabricate a PTS when the
 *              encoder emits none
 *
 * Exits the process on encoder errors (matches the original sample's style).
 */
static void encodeMP4(AVCodecContext* enc_ctx, AVFormatContext* inFmtCtx, AVFormatContext* outFmtCtx, AVFrame* frame,
                      AVPacket* pkt, int frameIndex)
{
	int ret;

	/* send the frame to the encoder (a NULL frame starts the flush) */
	if (frame)
		printf("Send frame %3lld\n", (long long)frame->pts); /* BUG FIX: "%3" had no conversion letter -> undefined behavior */

	ret = avcodec_send_frame(enc_ctx, frame);
	if (ret < 0)
	{
		fprintf(stderr, "Error sending a frame for encoding\n");
		exit(1);
	}

	while (ret >= 0)
	{
		ret = avcodec_receive_packet(enc_ctx, pkt);
		if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
			return; /* encoder needs more input, or is fully drained */
		else if (ret < 0)
		{
			fprintf(stderr, "Error during encoding\n");
			exit(1);
		}

		AVStream* in_stream = inFmtCtx->streams[pkt->stream_index];
		AVRational timeBase = in_stream->time_base;
		AVStream* out_stream = outFmtCtx->streams[pkt->stream_index];

		/* If the packet carries no PTS, synthesize one from the frame index
		 * and the input frame rate, and mirror it into the DTS. */
		if (pkt->pts == AV_NOPTS_VALUE)
		{
			AVRational time_base1 = timeBase;
			/* duration between two frames, in AV_TIME_BASE (microsecond) units */
			int64_t calc_duration = (double)AV_TIME_BASE / av_q2d(in_stream->r_frame_rate);
			pkt->pts = (double)(frameIndex * calc_duration) / (double)(av_q2d(time_base1) * AV_TIME_BASE);
			pkt->dts = pkt->pts;
			pkt->duration = (double)calc_duration / (double)(av_q2d(time_base1) * AV_TIME_BASE);
		}

		/* Rescale PTS/DTS/duration from the input stream's time base into the
		 * output (MP4) stream's time base. */
		pkt->pts = av_rescale_q_rnd(pkt->pts, timeBase, out_stream->time_base,
		                            (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
		pkt->dts = av_rescale_q_rnd(pkt->dts, timeBase, out_stream->time_base,
		                            (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
		pkt->duration = av_rescale_q(pkt->duration, timeBase, out_stream->time_base);

		pkt->pos = -1; /* byte position unknown; let the muxer fill it in */

		/* BUG FIX: arguments were swapped relative to the format string and
		 * %d was used for the 64-bit pts value (undefined behavior). */
		printf("Write packet pts=%lld size=%d\n", (long long)pkt->pts, pkt->size);

		if (av_interleaved_write_frame(outFmtCtx, pkt) < 0)
		{
			printf("Error muxing packet\n");
		}

		av_packet_unref(pkt);
	}
}

/*
 * Capture 250 frames from a DirectShow webcam, convert them to YUV420P,
 * encode them with H.264 (libx264) and mux the result into "1.mp4".
 *
 * Returns 0 on success, a negative code on any setup failure.
 *
 * NOTE(review): this sample uses several deprecated FFmpeg 3.x APIs
 * (streams[]->codec, avcodec_copy_context, avcodec_decode_video2,
 * avpicture_fill); kept intentionally so it builds against the same
 * FFmpeg version as the original article.
 */
int main()
{
	avdevice_register_all();

	AVFormatContext* camFmtCtx = avformat_alloc_context();
	AVDictionary* options = NULL;
	//av_dict_set(&options, "list_devices", "true", 0);
	//av_dict_set_int(&options, "rtbufsize", 18432000, 0);
	AVInputFormat* iformat = av_find_input_format("dshow");
	puts("Device Option Info======");
	int ret = avformat_open_input(&camFmtCtx, "video=Vimicro USB 2.0 PC Camera (Venus)", iformat, &options);
	if (ret != 0)
	{
		av_dict_free(&options);
		return -1;
	}
	// Probe the device so per-stream parameters (codec, resolution, ...) are filled in.
	ret = avformat_find_stream_info(camFmtCtx, NULL);
	if (ret < 0)
	{
		cout << "无法获取流的信息" << endl;
		return -1;
	}
	// Locate the first video stream delivered by the camera.
	int videoindex = -1;
	for (unsigned int i = 0; i < camFmtCtx->nb_streams; i++) // BUG FIX: nb_streams is unsigned
	{
		if (camFmtCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
		{
			videoindex = i;
			break;
		}
	}
	// BUG FIX: videoindex was used without a check; -1 would index out of bounds.
	if (videoindex < 0)
	{
		cout << "no video stream found" << endl;
		return -1;
	}
	// Open a decoder for the camera's native pixel/codec format
	// (legacy streams[]->codec context, see NOTE above).
	AVCodecContext* camDecodeCtx = camFmtCtx->streams[videoindex]->codec;

	AVCodecID codecID = camFmtCtx->streams[videoindex]->codecpar->codec_id;
	AVCodec* codec = avcodec_find_decoder(codecID);
	if (codec == NULL)
	{
		cout << "没有解码器" << endl;
		return -1;
	}

	ret = avcodec_open2(camDecodeCtx, codec, NULL);
	if (ret < 0)
	{
		cout << "avodec_open2 error" << endl;
		return -1;
	}
	cout << "解码器打开成功" << endl;

	// H.264 encoder used by encodeMP4().
	AVCodecID encodeID = AV_CODEC_ID_H264;
	AVCodec* encodec = avcodec_find_encoder(encodeID);
	//avcodec_find_encoder_by_name();
	if (!encodec)
	{
		cout << "encodec == NULL" << endl;
	}
	AVCodecContext* encodeCtx = avcodec_alloc_context3(encodec);
	if (!encodeCtx)
	{
		cout << "enc == NULL" << endl;
	}

	encodeCtx->bit_rate = 400000;
	encodeCtx->width = camDecodeCtx->width;
	encodeCtx->height = camDecodeCtx->height;
	encodeCtx->time_base = {1, 25}; // fixed 25 fps output
	encodeCtx->framerate = {25, 1};
	encodeCtx->gop_size = 10;
	encodeCtx->max_b_frames = 1;
	encodeCtx->pix_fmt = AV_PIX_FMT_YUV420P;
	// x264 presets: "slow" for quality, "zerolatency" to avoid frame buffering delay.
	av_opt_set(encodeCtx->priv_data, "preset", "slow", 0);
	av_opt_set(encodeCtx->priv_data, "tune", "zerolatency", 0);

	ret = avcodec_open2(encodeCtx, encodec, NULL);
	if (ret < 0)
	{
		cout << "encodec open error" << endl;
		exit(-1);
	}

	// FILE* fp = nullptr;
	// fopen_s(&fp, "1.h264", "wb");
	// --- MP4 output setup (BUG FIX: these markers used "/..." which is a syntax error) ---
	AVFormatContext* outFmtCtx = nullptr;
	const char* fileName = "1.mp4";
	avformat_alloc_output_context2(&outFmtCtx, NULL, NULL, fileName);
	if (!outFmtCtx)
	{
		printf("Could not create output context\n");
		return -4;
	}
	// Mirror every input stream into the output container.
	for (unsigned int i = 0; i < camFmtCtx->nb_streams; i++)
	{
		AVStream* in_stream = camFmtCtx->streams[i];
		AVStream* out_stream = avformat_new_stream(outFmtCtx, in_stream->codec->codec);
		if (!out_stream)
		{
			printf("Failed allocating output stream\n");
			return -5;
		}
		// Copy the settings of AVCodecContext (legacy API).
		if (avcodec_copy_context(out_stream->codec, in_stream->codec) < 0)
		{
			printf("Failed to copy context from input to output stream codec context\n");
			return -6;
		}
		out_stream->codec->codec_tag = 0; // let the muxer pick the right tag
		if (outFmtCtx->oformat->flags & AVFMT_GLOBALHEADER)
		{
			out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
		}

		// Required: the muxer reads codecpar, which must describe the H.264
		// encoder output (not the camera's raw format).
		avcodec_parameters_from_context(out_stream->codecpar, encodeCtx);
	}

	av_dump_format(outFmtCtx, 0, fileName, 1);

	if (!(outFmtCtx->oformat->flags & AVFMT_NOFILE))
	{
		ret = avio_open(&outFmtCtx->pb, fileName, AVIO_FLAG_WRITE);
		if (ret < 0)
		{
			printf("Could not open output file '%s'", fileName);
			return -7;
		}
	}

	if (avformat_write_header(outFmtCtx, NULL) < 0)
	{
		printf("Error occurred when opening output file\n");
		return -8;
	}
	// --- end MP4 output setup ---

	AVPacket* packetIn = av_packet_alloc();
	AVPacket* packetOut = av_packet_alloc();
	AVFrame* pFrameOut = av_frame_alloc();

	int got_picture;

	// Camera frames arrive in the device's native pixel format; convert to
	// YUV420P for the H.264 encoder.
	struct SwsContext* img_convert_ctx = sws_getContext(camDecodeCtx->width, camDecodeCtx->height,
	                                                    camDecodeCtx->pix_fmt, camDecodeCtx->width,
	                                                    camDecodeCtx->height, AV_PIX_FMT_YUV420P,
	                                                    /*SWS_FAST_BILINEAR*/SWS_BICUBIC, NULL, NULL, NULL);

	unsigned char* out_buffer = (unsigned char*)av_malloc(
		av_image_get_buffer_size(AV_PIX_FMT_YUV420P, camDecodeCtx->width, camDecodeCtx->height, 16));

	const int totalFrames = 250;
	for (int i = 0; i < totalFrames; i++)
	{
		ret = av_read_frame(camFmtCtx, packetIn); // grab one compressed packet from the camera

		if (ret >= 0 && packetIn->stream_index == videoindex)
		{
			AVFrame* pFrameYUV = av_frame_alloc();

			pFrameYUV->format = AV_PIX_FMT_YUV420P;
			pFrameYUV->width = camDecodeCtx->width;
			pFrameYUV->height = camDecodeCtx->height;

			// Point pFrameYUV's planes into the preallocated out_buffer.
			avpicture_fill((AVPicture*)pFrameYUV, out_buffer, AV_PIX_FMT_YUV420P, camDecodeCtx->width,
			               camDecodeCtx->height);

			avcodec_decode_video2(camDecodeCtx, pFrameOut, &got_picture, packetIn);
			if (got_picture)
			{
				sws_scale(img_convert_ctx, (const unsigned char* const*)pFrameOut->data, pFrameOut->linesize, 0,
				          camDecodeCtx->height,
				          pFrameYUV->data, pFrameYUV->linesize);
			}
			// BUG FIX: pts was never set; with time_base {1,25} the frame
			// index is the correct presentation timestamp.
			pFrameYUV->pts = i;

			//encode(encodeCtx, pFrameYUV, packetOut, fp); // raw .h264 variant from the previous article
			encodeMP4(encodeCtx, camFmtCtx, outFmtCtx, pFrameYUV, packetOut, i);

			av_frame_free(&pFrameYUV); // BUG FIX: was av_free(), which leaks frame internals
		}
		if (ret >= 0)
			av_packet_unref(packetIn); // BUG FIX: packet was never unreferenced -> per-frame leak
	}

	// BUG FIX: drain the encoder (NULL frame) so buffered frames are not lost,
	// then write the MP4 trailer.
	encodeMP4(encodeCtx, camFmtCtx, outFmtCtx, NULL, packetOut, totalFrames);
	av_write_trailer(outFmtCtx);

	if (outFmtCtx && !(outFmtCtx->oformat->flags & AVFMT_NOFILE))
		avio_close(outFmtCtx->pb);
	avformat_free_context(outFmtCtx);

	av_frame_free(&pFrameOut); // BUG FIX: was av_free()
	av_packet_free(&packetIn);
	av_packet_free(&packetOut);
	sws_freeContext(img_convert_ctx);
	avcodec_free_context(&encodeCtx);
	av_free(out_buffer);

	//fclose(fp);

	//avcodec_free_context(&camDecodeCtx);
	// avformat_close_input() also frees the context and NULLs the pointer,
	// so the original's extra avformat_free_context(camFmtCtx) was removed.
	avformat_close_input(&camFmtCtx);
	av_dict_free(&options);
}

  • 0
    点赞
  • 25
    收藏
    觉得还不错? 一键收藏
  • 1
    评论
敬告:该系列的课程在抓紧录制更新中,敬请大家关注。敬告:本课程项目仅供学习参考,请不要直接商用,概不负责任何法律责任。 该系列的课程涉及:FFmpeg,WebRTC,SRS,Nginx,Darwin,Live555,等。包括:音视频、流媒体、直播、Android、视频监控28181、等。 我将带领大家一起来学习使用FFmpeg开发视频监控项目,并动手操练。具体内容包括: 一、视频监控的架构和流程二、FFmpeg4.3+SDL2+Qt5开发环境的搭建三、FFmpeg的SDK编程回顾总结并操练四、SDL2.0的编程回顾总结并操练五、颜色空间转换RGB和YUV的原理与实战六、Qt5+FFmpeg本地摄像头采集预览实战七、代码封装:摄像头h264/5编码并存储八、Qt5+FFmpeg单路网络摄像头采集预览九、Qt5+FFmpeg单路网络摄像头采集预览录制会看十、onvif与GB/T-28181的简介  音视频与流媒体是一门很复杂的技术,涉及的概念、原理、理论非常多,很多初学者不学 基础理论,而是直接做项目,往往会看到c/c++的代码时一头雾水,不知道代码到底是什么意思,这是为什么呢?   因为没有学习音视频和流媒体的基础理论,就比如学习英语,不学习基本单词,而是天天听英语新闻,总也听不懂。 所以呢,一定要认真学习基础理论,然后再学习播放器、转码器、非编、流媒体直播、视频监控、等等。   梅老师从事音视频与流媒体行业18年;曾在永新视博、中科大洋、百度、美国Harris广播事业部等公司就职,经验丰富;曾亲手主导广电直播全套项目,精通h.264/h.265/aac,曾亲自参与百度app上的网页播放器等实战产品。  目前全身心自主创业,主要聚焦音视频+流媒体行业,精通音视频加密、流媒体在线转码快编等热门产品。  

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值