Recording the Desktop with FFmpeg (Using a Queue)

Visual Studio version: 2017
FFmpeg version:
ffmpeg version N-102642-g864d1ef2fc Copyright © 2000-2021 the FFmpeg developers
built with gcc 8.1.0 (x86_64-win32-seh-rev0, Built by MinGW-W64 project)
configuration: --arch=x86_64 --prefix=/home/ffmpeg_static_x64 --disable-debug
libavutil      57.  0.100 / 57.  0.100
libavcodec     59.  1.100 / 59.  1.100
libavformat    59.  2.101 / 59.  2.101
libavdevice    59.  0.100 / 59.  0.100
libavfilter     8.  0.101 /  8.  0.101
libswscale      6.  0.100 /  6.  0.100
libswresample   4.  0.100 /  4.  0.100

As for the FFmpeg .lib and .dll files, I have uploaded them to CSDN as a free download.

I have previously written about recording the desktop with FFmpeg and about recording microphone audio with FFmpeg.

The next step is to mux the audio and the video into one file. The initial plan is to create two threads in main: one grabs microphone audio, the other grabs desktop video, and the main thread merges the data. That naturally calls for audio and video data queues.
This article covers the queue-based approach for desktop recording with FFmpeg.
I use FFmpeg's built-in AVFifoBuffer structure. After a video frame has been captured, it is enqueued like this:

av_fifo_generic_write(fifo_video, pFrameYUV->data[0], y_size, NULL);
av_fifo_generic_write(fifo_video, pFrameYUV->data[1], y_size / 4, NULL);
av_fifo_generic_write(fifo_video, pFrameYUV->data[2], y_size / 4, NULL);

This write pattern is tied to the video format being AV_PIX_FMT_YUV420P: in YUV420P data, the Y plane is four times the size of the U plane and four times the size of the V plane.
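For example, at 1920x1080 the per-plane byte counts work out as follows (simple arithmetic that follows from the 2x2 chroma subsampling of YUV420P):

int width = 1920, height = 1080;
int y_size = width * height;               // 2073600 bytes: full-resolution luma
int u_size = y_size / 4;                   // 518400 bytes: chroma at half width and half height
int v_size = y_size / 4;                   // 518400 bytes
int one_frame = y_size + u_size + v_size;  // 3110400 bytes per YUV420P frame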

To take data off the queue, the code uses av_fifo_generic_read(fifo_video, out_buffer_yuv420, frame_size, NULL);
Here out_buffer_yuv420 receives the YUV data written by av_fifo_generic_write above, and it is allocated to hold exactly one YUV420 frame:
uint8_t *out_buffer_yuv420 = (uint8_t *)av_malloc(frame_size);

The following statement in main establishes the mapping between out_buffer_yuv420 and pFrameYUVInMain, so filling out_buffer_yuv420 effectively fills pFrameYUVInMain as well:

av_image_fill_arrays(pFrameYUVInMain->data, pFrameYUVInMain->linesize, out_buffer_yuv420, AV_PIX_FMT_YUV420P, pCodecCtx_Video->width, pCodecCtx_Video->height, 1);
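With the align argument set to 1 there is no padding between planes, so the call is conceptually equivalent to the following (a simplified illustration of the resulting layout, not the library's actual implementation; y_size stands for width * height):

pFrameYUVInMain->data[0] = out_buffer_yuv420;                       // Y plane
pFrameYUVInMain->data[1] = out_buffer_yuv420 + y_size;              // U plane
pFrameYUVInMain->data[2] = out_buffer_yuv420 + y_size + y_size / 4; // V plane
pFrameYUVInMain->linesize[0] = pCodecCtx_Video->width;              // Y stride
pFrameYUVInMain->linesize[1] = pCodecCtx_Video->width / 2;          // U stride
pFrameYUVInMain->linesize[2] = pCodecCtx_Video->width / 2;          // V stride

This is also why a single av_fifo_generic_read can restore a whole frame: the consumer buffer has exactly the same packed plane layout that the producer wrote with its three av_fifo_generic_write calls.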

The full code is shown below:

// FfmpegTest.cpp : Defines the entry point for the console application.
//

#include <Windows.h>
#include <conio.h>

#ifdef	__cplusplus

extern "C"
{
#endif
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#include "libavdevice/avdevice.h"
#include "libavutil/audio_fifo.h"
#include "libavutil/imgutils.h"

#pragma comment(lib, "avcodec.lib")
#pragma comment(lib, "avformat.lib")
#pragma comment(lib, "avutil.lib")
#pragma comment(lib, "avdevice.lib")
#pragma comment(lib, "avfilter.lib")

	//#pragma comment(lib, "avfilter.lib")
	//#pragma comment(lib, "postproc.lib")
	//#pragma comment(lib, "swresample.lib")
#pragma comment(lib, "swscale.lib")
#ifdef __cplusplus
};
#endif

AVFormatContext	*pFormatCtx_Video = NULL, *pFormatCtx_Out = NULL;
AVCodecContext	*pCodecCtx_Video = NULL;
AVCodec			*pCodec_Video = NULL;
AVFifoBuffer	*fifo_video = NULL;
int VideoIndex;


AVCodecContext	*pCodecEncodeCtx_Video = NULL;
AVCodec			*pCodecEncode_Video = NULL;


SwsContext *img_convert_ctx;
int frame_size = 0;

uint8_t *picture_buf = NULL, *frame_buf = NULL;

bool bCap = true;

int iPicCount = 0;


CRITICAL_SECTION VideoSection;


DWORD WINAPI ScreenCapThreadProc(LPVOID lpParam);



int OpenVideoCapture()
{
	const AVInputFormat *ifmt = av_find_input_format("gdigrab");
	//Options can be passed in here, e.g. to set the capture frame rate
	AVDictionary *options = NULL;
	av_dict_set(&options, "framerate", "25", NULL);
	av_dict_set(&options, "probesize", "50000000", NULL);
	//av_dict_set(&options,"offset_x","20",0);
	//The distance from the top edge of the screen or desktop
	//av_dict_set(&options,"offset_y","40",0);
	//Video frame size. The default is to capture the full screen
	//av_dict_set(&options,"video_size","320x240",0);
	if (avformat_open_input(&pFormatCtx_Video, "desktop", ifmt, &options) != 0)
	{
		printf("Couldn't open input stream.(无法打开视频输入流)\n");
		return -1;
	}
	if (avformat_find_stream_info(pFormatCtx_Video, NULL) < 0)
	{
		printf("Couldn't find stream information.(无法获取视频流信息)\n");
		return -1;
	}
	if (pFormatCtx_Video->streams[0]->codecpar->codec_type != AVMEDIA_TYPE_VIDEO)
	{
		printf("Couldn't find video stream information.(无法获取视频流信息)\n");
		return -1;
	}
	pCodec_Video = (AVCodec *)avcodec_find_decoder(pFormatCtx_Video->streams[0]->codecpar->codec_id);
	if (pCodec_Video == NULL)
	{
		printf("Codec not found.\n");
		return -1;
	}

	pCodecCtx_Video = avcodec_alloc_context3(pCodec_Video);
	//Copy the stream parameters (width, height, pixel format, ...) into the
	//decoder context before opening it
	avcodec_parameters_to_context(pCodecCtx_Video, pFormatCtx_Video->streams[0]->codecpar);

	if (avcodec_open2(pCodecCtx_Video, pCodec_Video, NULL) < 0)
	{
		printf("Could not open codec.\n");
		return -1;
	}

	/* put sample parameters */
	pCodecCtx_Video->bit_rate = 400000;
	/* resolution must be a multiple of two */
	//pCodecCtx_Video->width = 352;
	//pCodecCtx_Video->height = 288;
	pCodecCtx_Video->width = 1920;
	pCodecCtx_Video->height = 1080;
	/* frames per second */
	AVRational timeBase;
	timeBase.num = 1;
	timeBase.den = 25;
	pCodecCtx_Video->time_base = timeBase;

	AVRational frameRate;
	frameRate.den = 1;
	frameRate.num = 25;
	pCodecCtx_Video->framerate = frameRate;

	/* emit one intra frame every ten frames
	 * check frame pict_type before passing frame
	 * to encoder, if frame->pict_type is AV_PICTURE_TYPE_I
	 * then gop_size is ignored and the output of encoder
	 * will always be I frame irrespective to gop_size
	 */
	pCodecCtx_Video->gop_size = 25;
	pCodecCtx_Video->max_b_frames = 1;
	pCodecCtx_Video->pix_fmt = AV_PIX_FMT_YUV420P;


	img_convert_ctx = sws_getContext(pCodecCtx_Video->width, pCodecCtx_Video->height, (AVPixelFormat)pFormatCtx_Video->streams[0]->codecpar->format,
		pCodecCtx_Video->width, pCodecCtx_Video->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);

	frame_size = av_image_get_buffer_size(pCodecCtx_Video->pix_fmt, pCodecCtx_Video->width, pCodecCtx_Video->height, 1);
	//Allocate enough space to buffer 30 frames
	fifo_video = av_fifo_alloc(30 * av_image_get_buffer_size(AV_PIX_FMT_YUV420P, pCodecCtx_Video->width, pCodecCtx_Video->height, 1));

	return 0;
}



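//Converts a wide string (e.g. a DirectShow device name) to UTF-8.
//Kept over from the microphone-capture code; it is not called in this file.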
static char *dup_wchar_to_utf8(wchar_t *w)
{
	char *s = NULL;
	int l = WideCharToMultiByte(CP_UTF8, 0, w, -1, 0, 0, 0, 0);
	s = (char *)av_malloc(l);
	if (s)
		WideCharToMultiByte(CP_UTF8, 0, w, -1, s, l, 0, 0);
	return s;
}



int OpenOutPut()
{
	AVStream *pVideoStream = NULL, *pAudioStream = NULL;
	const char *outFileName = "test.mp4";
	avformat_alloc_output_context2(&pFormatCtx_Out, NULL, NULL, outFileName);


	if (pFormatCtx_Video->streams[0]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
	{
		VideoIndex = 0;
		pVideoStream = avformat_new_stream(pFormatCtx_Out, NULL);

		if (!pVideoStream)
		{
			printf("can not new stream for output!\n");
			return -1;
		}

		AVRational timeBase;
		timeBase.num = 1;
		timeBase.den = 25; //must match the capture frame rate, otherwise playback speed is wrong
		pVideoStream->time_base = timeBase;


		pCodecEncode_Video = (AVCodec *)avcodec_find_encoder(pFormatCtx_Out->oformat->video_codec);
		if (!(pCodecEncode_Video)) {
			fprintf(stderr, "Could not find encoder for '%s'\n",
				avcodec_get_name(pFormatCtx_Out->oformat->video_codec));
			exit(1);
		}

		pCodecEncodeCtx_Video = avcodec_alloc_context3(pCodecEncode_Video);
		if (!pCodecEncodeCtx_Video) {
			fprintf(stderr, "Could not alloc an encoding context\n");
			exit(1);
		}

		pCodecEncodeCtx_Video->time_base = timeBase;
		pCodecEncodeCtx_Video->codec_id = pFormatCtx_Out->oformat->video_codec;

		pCodecEncodeCtx_Video->bit_rate = 400000;
		/* Resolution must be a multiple of two. */
		//pCodecEncodeCtx_Video->width = 352;
		//pCodecEncodeCtx_Video->height = 288;
		pCodecEncodeCtx_Video->width = 1920;
		pCodecEncodeCtx_Video->height = 1080;
		/* timebase: This is the fundamental unit of time (in seconds) in terms
		 * of which frame timestamps are represented. For fixed-fps content,
		 * timebase should be 1/framerate and timestamp increments should be
		 * identical to 1. */

		pCodecEncodeCtx_Video->gop_size = 25; /* emit one intra frame every twenty-five frames at most */
		pCodecEncodeCtx_Video->pix_fmt = AV_PIX_FMT_YUV420P;

		if ((avcodec_open2(pCodecEncodeCtx_Video, pCodecEncode_Video, NULL)) < 0)
		{
			printf("can not open the encoder\n");
			return -1;
		}
	}


	if (!(pFormatCtx_Out->oformat->flags & AVFMT_NOFILE))
	{
		if (avio_open2(&pFormatCtx_Out->pb, outFileName, AVIO_FLAG_WRITE, nullptr, nullptr) < 0)
		{
			printf("can not open output file handle!\n");
			return -1;
		}
	}

	//The output stream must describe what the *encoder* produces, so copy the
	//parameters from the encoder context (not from the capture/decoder context)
	avcodec_parameters_from_context(pVideoStream->codecpar, pCodecEncodeCtx_Video);


	int iWriteResult = avformat_write_header(pFormatCtx_Out, NULL);
	if (iWriteResult < 0)
	{
		printf("can not write the header of the output file!\n");
		return -1;
	}

	return 0;
}




int main(int argc, char* argv[])
{
	avdevice_register_all();
	if (OpenVideoCapture() < 0)
	{
		return -1;
	}

	if (OpenOutPut() < 0)
	{
		return -1;
	}


	InitializeCriticalSection(&VideoSection);

	AVFrame *pFrameYUVInMain = av_frame_alloc();


	uint8_t *out_buffer_yuv420 = (uint8_t *)av_malloc(frame_size);

	av_image_fill_arrays(pFrameYUVInMain->data, pFrameYUVInMain->linesize, out_buffer_yuv420, AV_PIX_FMT_YUV420P, pCodecCtx_Video->width, pCodecCtx_Video->height, 1);

	CreateThread(NULL, 0, ScreenCapThreadProc, 0, 0, NULL);
	AVPacket packet = { 0 };

	int ret = 0;

	while (iPicCount < 1000)
	{
		if (av_fifo_size(fifo_video) >= frame_size)
		{
			EnterCriticalSection(&VideoSection);
			av_fifo_generic_read(fifo_video, out_buffer_yuv420, frame_size, NULL);
			LeaveCriticalSection(&VideoSection);

			pFrameYUVInMain->width = pCodecCtx_Video->width;
			pFrameYUVInMain->height = pCodecCtx_Video->height;
			pFrameYUVInMain->format = AV_PIX_FMT_YUV420P;
			//The frame pts is expressed in the encoder time base (1/25), so it
			//simply advances by one per captured frame
			pFrameYUVInMain->pts = iPicCount;

			ret = avcodec_send_frame(pCodecEncodeCtx_Video, pFrameYUVInMain);
			if (ret < 0)
			{
				fprintf(stderr, "Error sending a frame for encoding\n");
				break;
			}

			ret = avcodec_receive_packet(pCodecEncodeCtx_Video, &packet);
			if (ret == AVERROR(EAGAIN))
			{
				//The encoder needs more input before it can emit a packet
				iPicCount++;
				continue;
			}
			else if (ret < 0)
			{
				fprintf(stderr, "Error during encoding\n");
				break;
			}

			//Rescale the timestamps from the encoder time base to the stream
			//time base before muxing
			av_packet_rescale_ts(&packet, pCodecEncodeCtx_Video->time_base, pFormatCtx_Out->streams[VideoIndex]->time_base);
			packet.stream_index = VideoIndex;

			ret = av_interleaved_write_frame(pFormatCtx_Out, &packet);
			av_packet_unref(&packet);

			iPicCount++;
		}
		else
		{
			Sleep(1); //avoid spinning at 100% CPU while the queue is empty
		}
	}

	printf("main end\n");
	av_frame_free(&pFrameYUVInMain);
	av_write_trailer(pFormatCtx_Out);
	avio_close(pFormatCtx_Out->pb);
	avformat_free_context(pFormatCtx_Out);

	return 0;
}



DWORD WINAPI ScreenCapThreadProc(LPVOID lpParam)
{
	AVFrame *pFrame;
	pFrame = av_frame_alloc();

	AVFrame *pFrameYUV = av_frame_alloc();
	int frame_size = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, pCodecCtx_Video->width, pCodecCtx_Video->height, 1);
	uint8_t *out_buffer_yuv420 = (uint8_t *)av_malloc(frame_size);
	av_image_fill_arrays(pFrameYUV->data, pFrameYUV->linesize, out_buffer_yuv420, AV_PIX_FMT_YUV420P, pCodecCtx_Video->width, pCodecCtx_Video->height, 1);

	int y_size = pCodecCtx_Video->width * pCodecCtx_Video->height;

	AVPacket packet = { 0 };
	int ret = 0;
	while (iPicCount < 1000)
	{
		av_packet_unref(&packet);
		if (av_read_frame(pFormatCtx_Video, &packet) < 0)
		{
			continue;
		}

		ret = avcodec_send_packet(pCodecCtx_Video, &packet);

		if (ret >= 0)
		{
			ret = avcodec_receive_frame(pCodecCtx_Video, pFrame);
			if (ret == AVERROR(EAGAIN))
			{
				continue;
			}
			else if (ret == AVERROR_EOF)
			{
				break;
			}
			else if (ret < 0) {
				fprintf(stderr, "Error during decoding\n");
				break;
			}


			int iScale = sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx_Video->height, pFrameYUV->data, pFrameYUV->linesize);

			if (av_fifo_space(fifo_video) >= frame_size)
			{
				EnterCriticalSection(&VideoSection);
				av_fifo_generic_write(fifo_video, pFrameYUV->data[0], y_size, NULL);
				av_fifo_generic_write(fifo_video, pFrameYUV->data[1], y_size / 4, NULL);
				av_fifo_generic_write(fifo_video, pFrameYUV->data[2], y_size / 4, NULL);
				LeaveCriticalSection(&VideoSection);
			}
			
		}


	}

	av_frame_free(&pFrame);
	av_frame_free(&pFrameYUV);


	return 0;
}
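One gap in the listing: main never drains the encoder before av_write_trailer, so any frames still buffered inside the encoder are silently dropped. A minimal flush sketch that could be placed just before av_write_trailer (not part of the original code, but it follows the standard send/receive draining pattern):

	//Signal end of stream, then collect the remaining buffered packets
	avcodec_send_frame(pCodecEncodeCtx_Video, NULL);
	AVPacket flushPacket = { 0 };
	while (avcodec_receive_packet(pCodecEncodeCtx_Video, &flushPacket) == 0)
	{
		av_packet_rescale_ts(&flushPacket, pCodecEncodeCtx_Video->time_base, pFormatCtx_Out->streams[VideoIndex]->time_base);
		flushPacket.stream_index = VideoIndex;
		av_interleaved_write_frame(pFormatCtx_Out, &flushPacket);
		av_packet_unref(&flushPacket);
	}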






