ffmpeg 采集摄像头和麦克风,并推流

参考网上的代码,实现了音视频直播推流

但还是存在部分问题:延时和图像声音不匹配。

参考:

https://blog.csdn.net/n_fly/article/details/90899094  (上篇博客发送图像没声音没图像,看懂了这篇帖子是解决问题的关键因素)

https://blog.csdn.net/n_fly/article/details/90899094  (这篇博客博主上传的代码和贴出来的不一样,不过给了我继续解决问题的信心)

最后希望大家能多多分享自己的学习经验,解决这个问题我花了快一个月。 还是通过付钱的方式获得了源码。知识付费啊,CSDN的太贵了。

/*
from: tanbo - jlrmyy
摄像头采集和麦克风录音,并用nginx推流
后期再换成ffmpeg4.0接口
存在问题:
1.延时
2.图像声音不匹配
*/

#include "stdafx.h"

#ifdef	__cplusplus
extern "C"
{
#endif
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#include "libavdevice/avdevice.h"
#include "libavutil/audio_fifo.h"
#include "libswresample/swresample.h"

#pragma comment(lib, "avcodec.lib")
#pragma comment(lib, "avformat.lib")
#pragma comment(lib, "avutil.lib")
#pragma comment(lib, "avdevice.lib")
#pragma comment(lib, "avfilter.lib")

//#pragma comment(lib, "avfilter.lib")
//#pragma comment(lib, "postproc.lib")
#pragma comment(lib, "swresample.lib")
#pragma comment(lib, "swscale.lib")
#ifdef __cplusplus
};
#endif

// ---- Global state shared between the main mux loop and the two capture threads ----
// Input (camera / microphone) and output (RTMP/FLV) format contexts.
AVFormatContext	*pFormatCtx_Video = NULL, *pFormatCtx_Audio = NULL, *pFormatCtx_Out = NULL;
AVCodecContext	*pCodecCtx_Video;      // camera stream's decoder context
AVCodec			*pCodec_Video;         // camera stream's decoder
AVFifoBuffer	*fifo_video = NULL;    // raw YUV420P frames, producer: ScreenCapThreadProc, consumer: main
AVAudioFifo		*fifo_audio = NULL;    // decoded PCM samples, producer: AudioCapThreadProc, consumer: main
int VideoIndex, AudioIndex;            // stream indices in pFormatCtx_Out

// Guard fifo_video / fifo_audio respectively (the buffers themselves are not thread-safe).
CRITICAL_SECTION AudioSection, VideoSection;



SwsContext *img_convert_ctx;           // camera pix_fmt -> YUV420P converter
struct SwrContext *au_convert_ctx;     // audio resampler (allocated lazily in the audio thread)
int frame_size = 0;                    // byte size of one raw video frame stored in fifo_video


uint8_t *picture_buf = NULL, *frame_buf = NULL;  // main thread's staging buffer for fifo reads

bool bCap = true;                      // capture-running flag; cleared on keypress to stop the threads

DWORD WINAPI ScreenCapThreadProc( LPVOID lpParam );
DWORD WINAPI AudioCapThreadProc( LPVOID lpParam );
// Convert a wide (UTF-16) string to a freshly allocated UTF-8 string.
// Needed because FFmpeg's dshow device names must be UTF-8.
// Caller owns the result (release with av_free); returns NULL on OOM.
static char *dup_wchar_to_utf8(wchar_t *w)
{
	int needed = WideCharToMultiByte(CP_UTF8, 0, w, -1, NULL, 0, NULL, NULL);
	char *utf8 = (char *)av_malloc(needed);
	if (utf8 != NULL)
		WideCharToMultiByte(CP_UTF8, 0, w, -1, utf8, needed, NULL, NULL);
	return utf8;
}
// Open the DirectShow camera, open its decoder, and prepare the
// YUV420P converter plus a ~30-frame FIFO of raw converted frames.
// Returns 0 on success, -1 on failure.
int OpenVideoCapture()
{
	AVInputFormat *ifmt = av_find_input_format("dshow");

	//Set own video device's name
	char * psCameraName = dup_wchar_to_utf8(L"video=USB2.0 PC CAMERA");

	AVDictionary *options = NULL;
	// Enlarge dshow's real-time capture buffer (default 3041280 bytes)
	// so a slow consumer does not trigger "real-time buffer too full" drops.
	av_dict_set(&options, "rtbufsize", "30412800", 0);
	if (avformat_open_input(&pFormatCtx_Video, psCameraName, ifmt, &options) != 0)
	{
		printf("Couldn't open input stream.(无法打开视频输入流)\n");
		return -1;
	}
	if(avformat_find_stream_info(pFormatCtx_Video,NULL)<0)
	{
		printf("Couldn't find stream information.(无法获取视频流信息)\n");
		return -1;
	}
	// This code assumes stream 0 is the video stream (true for a dshow video-only device).
	if (pFormatCtx_Video->streams[0]->codec->codec_type != AVMEDIA_TYPE_VIDEO)
	{
		printf("Couldn't find video stream information.(无法获取视频流信息)\n");
		return -1;
	}
	pCodecCtx_Video = pFormatCtx_Video->streams[0]->codec;
	pCodec_Video = avcodec_find_decoder(pCodecCtx_Video->codec_id);
	if(pCodec_Video == NULL)
	{
		printf("Codec not found.(没有找到解码器)\n");
		return -1;
	}
	if(avcodec_open2(pCodecCtx_Video, pCodec_Video, NULL) < 0)
	{
		printf("Could not open codec.(无法打开解码器)\n");
		return -1;
	}

	// Converter from the camera's native pixel format to the YUV420P the
	// H.264 encoder consumes.  (AV_PIX_FMT_YUV420P replaces the deprecated
	// PIX_FMT_YUV420P alias, matching the fifo allocation below.)
	img_convert_ctx = sws_getContext(pCodecCtx_Video->width, pCodecCtx_Video->height, pCodecCtx_Video->pix_fmt, 
		pCodecCtx_Video->width, pCodecCtx_Video->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL); 

	// BUGFIX: frame_size must describe the YUV420P frames that are actually
	// stored in fifo_video.  The original computed it from the camera's
	// native pix_fmt (e.g. YUYV422), so the fifo-drain comparisons in the
	// main loop used the wrong byte count.
	frame_size = avpicture_get_size(AV_PIX_FMT_YUV420P, pCodecCtx_Video->width, pCodecCtx_Video->height);
	// Reserve room for 30 converted frames.
	fifo_video = av_fifo_alloc(30 * avpicture_get_size(AV_PIX_FMT_YUV420P, pCodecCtx_Video->width, pCodecCtx_Video->height));

	return 0;
}

// Open the DirectShow microphone and its decoder.
// Returns 0 on success, -1 on failure.
int OpenAudioCapture()
{
	// Find the DirectShow input format.
	AVInputFormat *pAudioInputFmt = av_find_input_format("dshow");
	
	// Device name must be UTF-8; this one is truncated exactly as Windows
	// reports it ("麦克风 (Realtek High Definition Au").
	char * psDevName = dup_wchar_to_utf8(L"audio=麦克风 (Realtek High Definition Au");

	if (avformat_open_input(&pFormatCtx_Audio, psDevName, pAudioInputFmt,NULL) < 0)
	{
		printf("Couldn't open input stream.(无法打开音频输入流)\n");
		return -1;
	}

	if(avformat_find_stream_info(pFormatCtx_Audio,NULL)<0)  
		return -1; 
	
	// This code assumes stream 0 is the audio stream (true for a dshow audio-only device).
	if(pFormatCtx_Audio->streams[0]->codec->codec_type != AVMEDIA_TYPE_AUDIO)
	{
		printf("Couldn't find video stream information.(无法获取音频流信息)\n");
		return -1;
	}

	AVCodec *tmpCodec = avcodec_find_decoder(pFormatCtx_Audio->streams[0]->codec->codec_id);
	// BUGFIX: the original only printed on failure and fell through with an
	// unopened decoder; AudioCapThreadProc would then crash inside
	// avcodec_decode_audio4.  Fail fast instead.
	if (tmpCodec == NULL || avcodec_open2(pFormatCtx_Audio->streams[0]->codec, tmpCodec, NULL) < 0)
	{
		printf("can not find or open audio decoder!\n");
		return -1;
	}

	return 0;
}

int OpenOutPut()
{
	AVStream *pVideoStream = NULL, *pAudioStream = NULL;
	const char *outFileName = "rtmp://localhost:1935/live/room";
	avformat_alloc_output_context2(&pFormatCtx_Out, NULL, "flv", outFileName);
	//const char *outFileName = "d:/test.flv";
	//avformat_alloc_output_context2(&pFormatCtx_Out, NULL, NULL, outFileName);

	if (pFormatCtx_Video->streams[0]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
	{
		AVCodecContext *videoCodecCtx;
		VideoIndex = 0;
		pVideoStream = avformat_new_stream(pFormatCtx_Out, NULL);

		if (!pVideoStream)
		{
			printf("can not new stream for output!\n");
			return -1;
		}

		//set codec context param
		pVideoStream->codec->codec = avcodec_find_encoder(AV_CODEC_ID_H264);
		pVideoStream->codec->height = pFormatCtx_Video->streams[0]->codec->height;
		pVideoStream->codec->width = pFormatCtx_Video->streams[0]->codec->width;
		
		pVideoStream->codec->me_range = 16;
		pVideoStream->codec->max_qdiff = 4;
		pVideoStream->codec->qmin = 10;
		pVideoStream->codec->qmax = 51;
		pVideoStream->codec->qcompress = 0.6;

		pVideoStream->codec->time_base = pFormatCtx_Video->streams[0]->codec->time_base;
		pVideoStream->codec->sample_aspect_ratio = pFormatCtx_Video->streams[0]->codec->sample_aspect_ratio;
		// take first format from list of supported formats
		pVideoStream->codec->pix_fmt = pFormatCtx_Out->streams[VideoIndex]->codec->codec->pix_fmts[0];

		//open encoder
		if (!pVideoStream->codec->codec)
		{
			printf("can not find the encoder!\n");
			return -1;
		}

		if (pFormatCtx_Out->oformat->flags & AVFMT_GLOBALHEADER)
			pVideoStream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;

		if ((avcodec_open2(pVideoStream->codec, pVideoStream->codec->codec, NULL)) < 0)
		{
			printf("can not open the encoder\n");
			return -1;
		}
	}

	if(pFormatCtx_Audio->streams[0]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
	{
		AVCodecContext *pOutputCodecCtx;
		AudioIndex = 1;
		pAudioStream = avformat_new_stream(pFormatCtx_Out, NULL);

		//pAudioStream->codec->codec = avcodec_find_encoder(pFormatCtx_Out->oformat->audio_codec);
		pAudioStream->codec->codec = avcodec_find_encoder(AV_CODEC_ID_AAC);
		pOutputCodecCtx = pAudioStream->codec;

		pOutputCodecCtx->sample_rate = pFormatCtx_Audio->streams[0]->codec->sample_rate;
		pOutputCodecCtx->channel_layout = pFormatCtx_Audio->streams[0]->codec->channel_layout;
		pOutputCodecCtx->channels = av_get_channel_layout_nb_channels(pAudioStream->codec->channel_layout);
		if(pOutputCodecCtx->channel_layout == 0)
		{
			pOutputCodecCtx->channel_layout = AV_CH_LAYOUT_STEREO;
			pOutputCodecCtx->channels = av_get_channel_layout_nb_channels(pOutputCodecCtx->channel_layout);

		}
		//pOutputCodecCtx->sample_fmt = AV_SAMPLE_FMT_S16P;
		pOutputCodecCtx->sample_fmt = pAudioStream->codec->codec->sample_fmts[0];
		
		AVRational time_base={1, pAudioStream->codec->sample_rate};
		pAudioStream->time_base = time_base;

		pOutputCodecCtx->codec_tag = 0;  
		if (pFormatCtx_Out->oformat->flags & AVFMT_GLOBALHEADER)  
			pOutputCodecCtx->flags |= CODEC_FLAG_GLOBAL_HEADER;

		if (avcodec_open2(pOutputCodecCtx, pOutputCodecCtx->codec, 0) < 0)
		{
			//编码器打开失败,退出程序
			return -1;
		}
	}

	if (!(pFormatCtx_Out->oformat->flags & AVFMT_NOFILE))
	{
		if(avio_open(&pFormatCtx_Out->pb, outFileName, AVIO_FLAG_WRITE) < 0)
		{
			printf("can not open output file handle!\n");
			return -1;
		}
	}

	if(avformat_write_header(pFormatCtx_Out, NULL) < 0)
	{
		printf("can not write the header of the output file!\n");
		return -1;
	}

	return 0;
}

// Entry point: open camera, microphone and RTMP output, spawn the two
// capture threads, then run the mux loop that alternately drains the
// video and audio FIFOs (whichever stream has the older timestamp),
// encodes, and interleaves packets into the output until a key is pressed.
int _tmain(int argc, _TCHAR* argv[])
{
	av_register_all();
	avdevice_register_all();
	avformat_network_init();
	if (OpenVideoCapture() < 0)
	{
		return -1;
	}
	if (OpenAudioCapture() < 0)
	{
		return -1;
	}
	if (OpenOutPut() < 0)
	{
		return -1;
	}
	
	InitializeCriticalSection(&VideoSection);
	InitializeCriticalSection(&AudioSection);

	// Staging frame + buffer: raw fifo bytes are read into picture_buf and
	// wrapped by `picture` for the encoder.
	AVFrame *picture = av_frame_alloc();
	int size = avpicture_get_size(pFormatCtx_Out->streams[VideoIndex]->codec->pix_fmt, 
		pFormatCtx_Out->streams[VideoIndex]->codec->width, pFormatCtx_Out->streams[VideoIndex]->codec->height);
	picture_buf = new uint8_t[size];

	avpicture_fill((AVPicture *)picture, picture_buf, 
		pFormatCtx_Out->streams[VideoIndex]->codec->pix_fmt, 
		pFormatCtx_Out->streams[VideoIndex]->codec->width, 
		pFormatCtx_Out->streams[VideoIndex]->codec->height);

	//start the video capture thread
	CreateThread( NULL, 0, ScreenCapThreadProc, 0, 0, NULL);
	//start the audio capture thread
	CreateThread( NULL, 0, AudioCapThreadProc, 0, 0, NULL);
	// Last written pts of each stream (in its output time base); used to
	// decide which stream to service next.
	int64_t cur_pts_v=0,cur_pts_a=0;
	int VideoFrameIndex = 0, AudioFrameIndex = 0;

	while(1)
	{
		// Any keypress stops capturing; crudely sleep to let the capture
		// threads notice bCap and exit before we start draining.
		if (_kbhit() != 0 && bCap)
		{
			bCap = false;
			Sleep(2000);
		}
		if (fifo_audio && fifo_video)
		{
			int sizeAudio = av_audio_fifo_size(fifo_audio);
			int sizeVideo = av_fifo_size(fifo_video);
			// Once capture has stopped and both fifos are drained below one
			// frame's worth of data, leave the loop.
			if (av_audio_fifo_size(fifo_audio) <= pFormatCtx_Out->streams[AudioIndex]->codec->frame_size && 
				av_fifo_size(fifo_video) <= frame_size && !bCap)
			{
				break;
			}
		}

		// Service whichever stream is behind (classic a/v interleaving test).
		if(av_compare_ts(cur_pts_v, pFormatCtx_Out->streams[VideoIndex]->time_base, 
			cur_pts_a,pFormatCtx_Out->streams[AudioIndex]->time_base) <= 0)
		{
			// Video is behind: read one raw frame from the fifo.
			// If capture stopped and video is exhausted, push cur_pts_v to
			// INT64_MAX so only audio gets serviced from now on.
			if (av_fifo_size(fifo_video) < frame_size && !bCap)
			{
				cur_pts_v = 0x7fffffffffffffff;
			}
			if(av_fifo_size(fifo_video) >= size)
			{
				EnterCriticalSection(&VideoSection);
				av_fifo_generic_read(fifo_video, picture_buf, size, NULL);
				LeaveCriticalSection(&VideoSection);
				
				avpicture_fill((AVPicture *)picture, picture_buf, 
					pFormatCtx_Out->streams[VideoIndex]->codec->pix_fmt, 
					pFormatCtx_Out->streams[VideoIndex]->codec->width, 
					pFormatCtx_Out->streams[VideoIndex]->codec->height);
				
				//pts = n * ((1 / timbase)/ fps);
				// NOTE(review): frame rate is hard-coded to 15 fps here; if the
				// camera delivers a different rate this produces a/v drift —
				// likely related to the sync problems mentioned in the header.
				picture->pts = VideoFrameIndex * ((pFormatCtx_Video->streams[0]->time_base.den / pFormatCtx_Video->streams[0]->time_base.num) / 15);
				int got_picture = 0;
				AVPacket pkt;
				av_init_packet(&pkt);
				
				pkt.data = NULL;
				pkt.size = 0;
				int ret = avcodec_encode_video2(pFormatCtx_Out->streams[VideoIndex]->codec, &pkt, picture, &got_picture);
				if(ret < 0)
				{
					// Encode error: skip this frame.
					continue;
				}
				
				if (got_picture==1)
				{
					int cal_duration = av_q2d(pFormatCtx_Video->streams[0]->r_frame_rate);
 					pkt.stream_index = VideoIndex;
					// Rescale pts/dts from the input stream's time base to the
					// output stream's time base.
					pkt.pts = av_rescale_q_rnd(picture->pts , pFormatCtx_Video->streams[0]->time_base,
						pFormatCtx_Out->streams[VideoIndex]->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));  
					pkt.dts = av_rescale_q_rnd(pkt.dts, pFormatCtx_Video->streams[0]->time_base,
						pFormatCtx_Out->streams[VideoIndex]->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX)); ;

					// Duration of one frame at the assumed 15 fps.
					pkt.duration = ((pFormatCtx_Out->streams[0]->time_base.den / pFormatCtx_Out->streams[0]->time_base.num) / 15);

					cur_pts_v = pkt.pts;

					ret = av_interleaved_write_frame(pFormatCtx_Out, &pkt);
					av_free_packet(&pkt);
					VideoFrameIndex++;
				}
				
			}
		}
		else
		{
			// Audio is behind: read one encoder-frame's worth of samples.
			if (NULL == fifo_audio)
			{
				continue;// fifo not yet created by the audio thread
			}
			// If capture stopped and audio is exhausted, push cur_pts_a to
			// INT64_MAX so only video gets serviced from now on.
			if (av_audio_fifo_size(fifo_audio) < pFormatCtx_Out->streams[AudioIndex]->codec->frame_size && !bCap)
			{
				cur_pts_a = 0x7fffffffffffffff;
			}
			if(av_audio_fifo_size(fifo_audio) >= 
				(pFormatCtx_Out->streams[AudioIndex]->codec->frame_size > 0 ? pFormatCtx_Out->streams[AudioIndex]->codec->frame_size : 1024))
			{
				// Build a frame matching the encoder's expected layout
				// (1024 samples is the AAC default when frame_size is unset).
				AVFrame *frame;
				frame = av_frame_alloc();
				frame->nb_samples = pFormatCtx_Out->streams[AudioIndex]->codec->frame_size>0 ? pFormatCtx_Out->streams[AudioIndex]->codec->frame_size: 1024;
				frame->channel_layout = pFormatCtx_Out->streams[AudioIndex]->codec->channel_layout;
				frame->format = pFormatCtx_Out->streams[AudioIndex]->codec->sample_fmt;
				frame->sample_rate = pFormatCtx_Out->streams[AudioIndex]->codec->sample_rate;
				av_frame_get_buffer(frame, 0);

				EnterCriticalSection(&AudioSection);
				av_audio_fifo_read(fifo_audio, (void **)frame->data, 
					(pFormatCtx_Out->streams[AudioIndex]->codec->frame_size > 0 ? pFormatCtx_Out->streams[AudioIndex]->codec->frame_size : 1024));
				LeaveCriticalSection(&AudioSection);

				int audio_framesize = frame->nb_samples;

				AVPacket pkt_out;
				av_init_packet(&pkt_out);
				int got_picture = -1;
				pkt_out.data = NULL;
				pkt_out.size = 0;

				// Audio pts counts samples: frame index * samples per frame.
				frame->pts = AudioFrameIndex * pFormatCtx_Out->streams[AudioIndex]->codec->frame_size;
				if (avcodec_encode_audio2(pFormatCtx_Out->streams[AudioIndex]->codec, &pkt_out, frame, &got_picture) < 0)
				{
					printf("can not decoder a frame");
				}
				av_frame_free(&frame);
				if (got_picture) 
				{
					//AVRational time_base = pFormatCtx_Out->streams[AudioIndex]->time_base;//{ 1, 1000 };
					//AVRational time_base_q = { 1, AV_TIME_BASE };
					//pkt_out.pts = av_rescale_q(AudioFrameIndex * pFormatCtx_Out->streams[AudioIndex]->codec->frame_size, time_base_q, time_base);
					//pkt_out.dts = pkt_out.pts;
					//pkt_out.duration = av_rescale_q(pFormatCtx_Out->streams[AudioIndex]->codec->frame_size, time_base_q, time_base); //(double)(calc_duration)*(double)(av_q2d(time_base_q)) / (double)(av_q2d(time_base));



					pkt_out.stream_index = AudioIndex;
					// Rescale sample-count pts from the codec time base to the
					// stream time base.
					pkt_out.pts = av_rescale_q_rnd(AudioFrameIndex * audio_framesize, pFormatCtx_Out->streams[AudioIndex]->codec->time_base,
						pFormatCtx_Out->streams[AudioIndex]->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
					pkt_out.dts = pkt_out.pts;
					pkt_out.duration = av_rescale_q_rnd(audio_framesize, pFormatCtx_Out->streams[AudioIndex]->codec->time_base,
						pFormatCtx_Out->streams[AudioIndex]->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
					cur_pts_a = pkt_out.pts;
					
					int ret = av_interleaved_write_frame(pFormatCtx_Out, &pkt_out);
					av_free_packet(&pkt_out);
					AudioFrameIndex++;
				}
				
			}
		}
	}

	// ---- Teardown: free buffers, finalize the container, close inputs. ----
	delete[] picture_buf;

	av_fifo_free(fifo_video);
	av_audio_fifo_free(fifo_audio);

	av_write_trailer(pFormatCtx_Out);

	avio_close(pFormatCtx_Out->pb);
	avformat_free_context(pFormatCtx_Out);

	if (pFormatCtx_Video != NULL)
	{
		avformat_close_input(&pFormatCtx_Video);
		pFormatCtx_Video = NULL;
	}
	if (pFormatCtx_Audio != NULL)
	{
		avformat_close_input(&pFormatCtx_Audio);
		pFormatCtx_Audio = NULL;
	}

	return 0;
}

// Video capture thread: reads camera packets, decodes them, converts to
// YUV420P and appends the three planes to fifo_video.  Runs until the
// main thread clears bCap.
DWORD WINAPI ScreenCapThreadProc( LPVOID lpParam )
{
	AVPacket packet;
	int got_picture;
	// av_frame_alloc replaces the deprecated avcodec_alloc_frame (the rest
	// of the file already uses av_frame_alloc).
	AVFrame	*pFrame = av_frame_alloc();
	AVFrame *picture = av_frame_alloc();

	int size = avpicture_get_size(pFormatCtx_Out->streams[VideoIndex]->codec->pix_fmt, 
		pFormatCtx_Out->streams[VideoIndex]->codec->width, pFormatCtx_Out->streams[VideoIndex]->codec->height);

	// BUGFIX: use a private conversion buffer.  The original aimed
	// `picture` at the global picture_buf, which the main thread
	// simultaneously reads fifo data into -- an unsynchronized data race
	// that corrupts frames.
	uint8_t *conv_buf = new uint8_t[size];
	avpicture_fill((AVPicture *)picture, conv_buf, 
		pFormatCtx_Out->streams[VideoIndex]->codec->pix_fmt, 
		pFormatCtx_Out->streams[VideoIndex]->codec->width, 
		pFormatCtx_Out->streams[VideoIndex]->codec->height);

	av_init_packet(&packet);
	int height = pFormatCtx_Out->streams[VideoIndex]->codec->height;
	int width = pFormatCtx_Out->streams[VideoIndex]->codec->width;
	int y_size=height*width;
	while(bCap)
	{
		packet.data = NULL;
		packet.size = 0;
		if (av_read_frame(pFormatCtx_Video, &packet) < 0)
		{
			continue;
		}
		if(packet.stream_index == 0)
		{
			if (avcodec_decode_video2(pCodecCtx_Video, pFrame, &got_picture, &packet) < 0)
			{
				printf("Decode Error.(解码错误)\n");
				av_free_packet(&packet);	// BUGFIX: original leaked the packet here
				continue;
			}
			if (got_picture)
			{
				// Convert the decoded frame to YUV420P into conv_buf.
				sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, 
					pFormatCtx_Out->streams[VideoIndex]->codec->height, picture->data, picture->linesize);
				
				// Drop the frame if the fifo is full rather than block.
				if (av_fifo_space(fifo_video) >= size)
				{
					EnterCriticalSection(&VideoSection);					
					// YUV420P: Y plane is w*h bytes, U and V are w*h/4 each.
					av_fifo_generic_write(fifo_video, picture->data[0], y_size, NULL);
					av_fifo_generic_write(fifo_video, picture->data[1], y_size/4, NULL);
					av_fifo_generic_write(fifo_video, picture->data[2], y_size/4, NULL);
					LeaveCriticalSection(&VideoSection);
				}
			}
		}
		av_free_packet(&packet);
	}
	delete[] conv_buf;
	av_frame_free(&pFrame);
	av_frame_free(&picture);
	return 0;
}

// Audio capture thread: reads microphone packets, decodes them, resamples
// to the output encoder's format when it differs from the capture format,
// and appends the samples to fifo_audio.  Runs until bCap is cleared.
//
// BUGFIX: lines 602/608 of the original contained bare Chinese text
// outside any comment -- a hard compile error -- and the swr_alloc_set_opts
// call had its output and input channel layouts swapped.
DWORD WINAPI AudioCapThreadProc( LPVOID lpParam )
{
	AVPacket pkt;
	AVFrame *frame = av_frame_alloc();
	int gotframe;
	while(bCap)
	{
		pkt.data = NULL;
		pkt.size = 0;
		if(av_read_frame(pFormatCtx_Audio,&pkt) < 0)
		{
			continue;
		}
		
		if (avcodec_decode_audio4(pFormatCtx_Audio->streams[0]->codec, frame, &gotframe, &pkt) < 0)
		{
			av_free_packet(&pkt);
			printf("can not decoder a frame");
			break;
		}
		av_free_packet(&pkt);

		if (!gotframe)
		{
			continue;// decoder produced no frame yet; feed it more input
		}
		if (NULL == fifo_audio)
		{
			// Lazily create the fifo sized for ~30 decoded frames.
			fifo_audio = av_audio_fifo_alloc(pFormatCtx_Audio->streams[0]->codec->sample_fmt,
				pFormatCtx_Audio->streams[0]->codec->channels, 30 * frame->nb_samples);
		}

		AVCodecContext *outCtx = pFormatCtx_Out->streams[AudioIndex]->codec;
		AVCodecContext *inCtx = pFormatCtx_Audio->streams[0]->codec;
		if (outCtx->sample_fmt != inCtx->sample_fmt
			|| outCtx->channels != inCtx->channels
			|| outCtx->sample_rate != inCtx->sample_rate)
		{
			// Input and output audio formats differ: resample before queueing.
			// Repair missing channel metadata on the decoded frame.
			if (frame->channels > 0 && frame->channel_layout == 0)
				frame->channel_layout = av_get_default_channel_layout(frame->channels);
			else if (frame->channels == 0 && frame->channel_layout > 0)
				frame->channels = av_get_channel_layout_nb_channels(frame->channel_layout);

			// First (layout, fmt, rate) triple describes the OUTPUT, second
			// the INPUT -- the original had the two layouts swapped.
			au_convert_ctx = swr_alloc_set_opts(NULL,
				outCtx->channel_layout, outCtx->sample_fmt, outCtx->sample_rate,
				frame->channel_layout, inCtx->sample_fmt, inCtx->sample_rate,
				0,
				NULL);
			if (au_convert_ctx == NULL || swr_init(au_convert_ctx) < 0)
			{
				swr_free(&au_convert_ctx);
				continue;
			}

			// Worst-case number of output samples, accounting for samples
			// buffered inside the resampler.
			int64_t dst_nb_samples = av_rescale_rnd(
				swr_get_delay(au_convert_ctx, frame->sample_rate) + frame->nb_samples,
				outCtx->sample_rate,
				frame->sample_rate,
				AV_ROUND_UP);

			// Allocate a per-channel output buffer large enough for
			// dst_nb_samples (the original allocated only frame->nb_samples
			// but then wrote dst_nb_samples to the fifo -- an overread).
			uint8_t **converted_samples;
			converted_samples = (uint8_t **)calloc(outCtx->channels,
				sizeof(*converted_samples));
			av_samples_alloc(converted_samples, NULL,
				outCtx->channels,
				(int)dst_nb_samples,
				outCtx->sample_fmt, 0);

			int nb = swr_convert(au_convert_ctx,
				converted_samples, (int)dst_nb_samples,
				(const uint8_t**)frame->extended_data, frame->nb_samples);

			if (nb > 0)
			{
				EnterCriticalSection(&AudioSection);
				// Queue only the samples actually produced by the resampler.
				av_audio_fifo_realloc(fifo_audio, av_audio_fifo_size(fifo_audio) + nb);
				av_audio_fifo_write(fifo_audio, (void **)converted_samples, nb);
				LeaveCriticalSection(&AudioSection);
			}
			av_freep(&converted_samples[0]);
			free(converted_samples);	// BUGFIX: original leaked the pointer array
			swr_free(&au_convert_ctx);	// BUGFIX: original leaked one SwrContext per frame
		}
		else
		{
			// Formats match: copy the decoded samples straight into the fifo
			// (drop the frame if the fifo is full rather than block).
			if (av_audio_fifo_space(fifo_audio) >= frame->nb_samples)
			{
				EnterCriticalSection(&AudioSection);
				av_audio_fifo_write(fifo_audio, (void **)frame->data, frame->nb_samples);
				LeaveCriticalSection(&AudioSection);
			}
		}
	}
	av_frame_free(&frame);
	return 0;
}

 

  • 2
    点赞
  • 31
    收藏
    觉得还不错? 一键收藏
  • 4
    评论
敬告:该系列的课程在抓紧录制更新中,敬请大家关注。敬告: 该系列的课程涉及:FFmpeg,WebRTC,SRS,Nginx,Darwin,Live555,等。包括:音视频、流媒体、直播、Android、视频监控28181、等。  我将带领大家一起来学习:采集麦克风、PCM重采样、AAC或MP3音频编码存储、并直播。具体内容包括:1.FFmpeg采集麦克风并存储为PCM。2.PCM重采样原理及实战。3.采集麦克风并实时转码AAC或MP3并保存文件。4.采集麦克风并实时转码AAC并可以直播。 音视频与流媒体是一门很复杂的技术,涉及的概念、原理、理论非常多,很多初学者不学 基础理论,而是直接做项目,往往会看到c/c++代码时一头雾水,不知道代码到底是什么意思,这是为什么呢? 因为没有学习音视频和流媒体的基础理论,就比如学习英语,不学习基本单词,而是天天听英语新闻,总也听不懂。所以呢,一定要认真学习基础理论,然后再学习播放器、转码器、非编、流媒体直播、视频监控、等等。 梅老师从事音视频与流媒体行业18年;曾在永新视博、中科大洋、百度、美国Harris广播事业部等公司就职,经验丰富;曾亲手主导广电直播全套项目,精通h.264/h.265/aac,曾亲自参与百度app上的网页播放器等实战产品。 目前全身心自主创业,主要聚焦音视频+流媒体行业,精通音视频加密、流媒体在线转码快编等热门产品。
要使用ffmpeg采集摄像头数据并进行推流,你可以使用以下命令: 1. 首先,使用命令`ffmpeg -list_devices true -f dshow -i dummy`获取摄像头的名称\[1\]。 2. 然后,使用以下命令来获取视频流并推流: ``` ffmpeg -f dshow -i video="摄像头名称" -vcodec libx264 -acodec copy -preset:v ultrafast -tune:v zerolatency -f flv <推流地址> ``` 这个命令将会获取摄像头的视频流,并使用libx264编码器进行视频编码,音频则直接复制。推流地址是你要推送到的目标地址\[1\]。 另外,如果你使用的是Linux系统,可以使用以下命令来采集摄像头数据并推流: ``` ./ffmpeg -f video4linux2 -r 12 -s 640x480 -i /dev/video0 -vcodec libx264 -f flv rtmp://127.0.0.1:1935/live/live ``` 这个命令将会采集/dev/video0设备的视频流,并使用libx264编码器进行视频编码,然后将视频流推送到rtmp://127.0.0.1:1935/live/live地址\[2\]。 希望以上信息对你有帮助! #### 引用[.reference_title] - *1* [FFMPEG采集摄像头推流方法说明](https://blog.csdn.net/az44yao/article/details/98104615)[target="_blank" data-report-click={"spm":"1018.2226.3001.9630","extra":{"utm_source":"vip_chatgpt_common_search_pc_result","utm_medium":"distribute.pc_search_result.none-task-cask-2~all~insert_cask~default-1-null.142^v91^insertT0,239^v3^insert_chatgpt"}} ] [.reference_item] - *2* *3* [linux FFMPEG 摄像头采集数据推流](https://blog.csdn.net/hanhui22/article/details/109842044)[target="_blank" data-report-click={"spm":"1018.2226.3001.9630","extra":{"utm_source":"vip_chatgpt_common_search_pc_result","utm_medium":"distribute.pc_search_result.none-task-cask-2~all~insert_cask~default-1-null.142^v91^insertT0,239^v3^insert_chatgpt"}} ] [.reference_item] [ .reference_list ]
评论 4
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值