ffmpeg录制麦克风声音和pc内部声音(如播放歌曲)---混音--修正

该博客介绍了如何使用ffmpeg解决麦克风和系统声音混合时因采样率不同导致的播放速度不匹配问题。通过重采样将系统声音从48000Hz转换为44100Hz,然后进行音频混合,确保输出文件的正确播放。同时,文章展示了使用C++和ffmpeg库实现这一过程的详细代码,包括音频捕获、重采样、混合和输出。
摘要由CSDN通过智能技术生成

之前写过一篇混音的文章,ffmpeg录制麦克风声音和pc内部声音(如播放歌曲)—混音
但是有瑕疵,主要是因为本地麦克风设备的采样频率是44100,而pc内部声音的采样率是48000,所以当最终输出的文件的采样率为44100时,文件里面的声音听起来会播放的慢一些。故在音频混合前,先将pc内部声音重采样成44100,重采样的转换过程如下:

uint8_t* out_buffer = (uint8_t*)frame_audio_inner_resample->data[0];

			int nb = swr_convert(audio_convert_ctx, &out_buffer, dst_nb_samples, (const uint8_t**)frame_audio_inner->data, frame_audio_inner->nb_samples);

这里的 out_buffer 只引用了 data[0],没有引用 data[1],因为 sample_fmt 设置的是 AV_SAMPLE_FMT_S16——这是打包(交错)模式而非平面模式,所有声道的采样交错存放在 data[0] 一个缓冲区中。

本人在代码里面专门新建了一个线程AudioInnerResampleThreadProc,用于将系统声音从48000重采样到44100,然后将重采样数据入队列,主线程将麦克风声音和重采样的系统声音进行混合,写入文件。

下面是具体代码:

// FfmpegAudioTest.cpp : 此文件包含 "main" 函数。程序执行将在此处开始并结束。
//

#include <Windows.h>
#include <conio.h>

#ifdef	__cplusplus
extern "C"
{
#endif
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#include "libswresample/swresample.h"
#include "libavdevice/avdevice.h"
#include "libavutil/audio_fifo.h"
#include "libavutil/avutil.h"
#include "libavutil/fifo.h"
#include "libavutil/frame.h"
#include "libavutil/opt.h"   /* av_opt_set_bin / av_opt_set_int / av_opt_set_sample_fmt */

#include "libavfilter/avfilter.h"
#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"

#pragma comment(lib, "avcodec.lib")
#pragma comment(lib, "avformat.lib")
#pragma comment(lib, "avutil.lib")
#pragma comment(lib, "avdevice.lib")
#pragma comment(lib, "avfilter.lib")
#pragma comment(lib, "postproc.lib")
#pragma comment(lib, "swresample.lib")
#pragma comment(lib, "swscale.lib")
#ifdef __cplusplus
};
#endif




/* ---- Global state shared by main() and the three worker threads ---- */

/* Demuxer contexts: system ("inner") audio device, microphone device,
 * and the output (mux) context for the mixed file. */
AVFormatContext	*pFormatCtx_AudioInner = NULL;
AVFormatContext	*pFormatCtx_AudioMic = NULL;
AVFormatContext *pFormatCtx_OutputAudio = NULL;

/* Decoder contexts for the inner-audio and microphone capture streams. */
AVCodecContext *pReadCodecContext = NULL;
AVCodecContext *pReadMicCodecContext = NULL;

/* Index of the (single) audio stream in the output file. */
int AudioIndex = 0;

/* Encoder (AAC for .mp4 output) and its context. */
AVCodecContext	*pCodecEncodeCtx_Audio = NULL;
AVCodec			*pCodecEncode_Audio = NULL;


/* Sample queues: raw inner audio (48000 Hz), raw mic audio (44100 Hz). */
AVAudioFifo		*fifo_audio_inner = NULL;
AVAudioFifo		*fifo_audio_mic = NULL;

/* Inner audio after resampling to 44100 Hz; consumed by main()'s mix loop. */
AVAudioFifo		*fifo_audio_inner_resample = NULL;

/* 48000 Hz -> 44100 Hz S16 stereo resampler (configured in main()). */
SwrContext *audio_convert_ctx = NULL;

uint8_t *picture_buf = NULL, *frame_buf = NULL;

/* Global run flag; set to false to stop all capture/resample threads. */
bool bCap = true;

/* Counters used for pts generation of the encoded output. */
int AudioFrameIndex = 0;
int AudioMicFrameIndex = 0;


int64_t cur_pts_a = 0;
int64_t cur_pts_a_mic = 0;



/* amix filter graph: two abuffer sources (inner + mic) -> abuffersink. */
AVFilterGraph* _filter_graph = NULL;
AVFilterContext* _filter_ctx_src_inner = NULL;
AVFilterContext* _filter_ctx_src_mic = NULL;
AVFilterContext* _filter_ctx_sink = NULL;



/* Locks guarding the corresponding AVAudioFifo queues above. */
CRITICAL_SECTION AudioSection_inner;
CRITICAL_SECTION AudioSection_mic;

CRITICAL_SECTION AudioSection_inner_resample;


/* Worker threads: capture inner audio, resample it, capture mic audio. */
DWORD WINAPI AudioInnerCapThreadProc(LPVOID lpParam);
DWORD WINAPI AudioInnerResampleThreadProc(LPVOID lpParam);
DWORD WINAPI AudioMicCapThreadProc(LPVOID lpParam);


/*
 * Local mirror of libavfilter's PRIVATE BufferSourceContext, used only to
 * peek at an abuffer source's negotiated parameters via ->priv in main().
 *
 * WARNING: this must match the field layout of the exact FFmpeg build being
 * linked against -- the struct is internal to libavfilter and its layout can
 * change between releases.  Do not reorder, add, or remove fields.
 */
typedef struct BufferSourceContext {
	const AVClass    *bscclass;
	AVFifoBuffer     *fifo;
	AVRational        time_base;     ///< time_base to set in the output link
	AVRational        frame_rate;    ///< frame_rate to set in the output link
	unsigned          nb_failed_requests;
	unsigned          warning_limit;

	/* video only */
	int               w, h;
	enum AVPixelFormat  pix_fmt;
	AVRational        pixel_aspect;
	char              *sws_param;

	AVBufferRef *hw_frames_ctx;

	/* audio only */
	int sample_rate;
	enum AVSampleFormat sample_fmt;
	int channels;
	uint64_t channel_layout;
	char    *channel_layout_str;

	int got_format_from_params;
	int eof;
} BufferSourceContext;



/*
 * Convert a wide (UTF-16) string to a freshly allocated UTF-8 string.
 * dshow device names must be passed to FFmpeg as UTF-8.
 *
 * Returns a string allocated with av_malloc() (caller frees with av_free()),
 * or NULL on conversion/allocation failure.
 *
 * Fix: the original did not check the first WideCharToMultiByte() call; on
 * failure it returned 0 and av_malloc(0) was attempted.
 */
static char *dup_wchar_to_utf8(const wchar_t *w)
{
	char *s = NULL;
	int l = WideCharToMultiByte(CP_UTF8, 0, w, -1, 0, 0, 0, 0);
	if (l <= 0)     /* invalid input or conversion failure */
		return NULL;
	s = (char *)av_malloc(l);
	if (s)
		WideCharToMultiByte(CP_UTF8, 0, w, -1, s, l, 0, 0);
	return s;
}


/* Pick the codec-supported sample rate closest to 44100 Hz.
 * Falls back to 44100 when the codec advertises no explicit list.
 * (Note: this selects the rate nearest 44100, not the highest one.) */
static int select_sample_rate(const AVCodec *codec)
{
	int chosen = 0;

	if (!codec->supported_samplerates)
		return 44100;

	for (const int *rate = codec->supported_samplerates; *rate; rate++) {
		if (!chosen || abs(44100 - *rate) < abs(44100 - chosen))
			chosen = *rate;
	}

	return chosen;
}




/*
 * Select the codec-supported channel layout with the highest channel count,
 * or stereo when the codec does not advertise a list.
 *
 * Fix: the original declared the return type as `int`, silently truncating
 * the 64-bit layout mask (AV_CH_LAYOUT_* values may use bits above 31).
 * Returning uint64_t is source-compatible with the existing callers, which
 * assign the result to uint64_t channel_layout fields.
 */
static uint64_t select_channel_layout(const AVCodec *codec)
{
	const uint64_t *p;
	uint64_t best_ch_layout = 0;
	int best_nb_channels = 0;

	if (!codec->channel_layouts)
		return AV_CH_LAYOUT_STEREO;

	p = codec->channel_layouts;
	while (*p) {
		int nb_channels = av_get_channel_layout_nb_channels(*p);

		if (nb_channels > best_nb_channels) {
			best_ch_layout = *p;
			best_nb_channels = nb_channels;
		}
		p++;
	}
	return best_ch_layout;
}


/*
 * Build the mixing filter graph: two "abuffer" sources (resampled system
 * audio on pad "in0", microphone audio on pad "in1") feeding an
 * "abuffersink" constrained to the encoder's sample format / rate / layout.
 * filter_desc is e.g. "[in0][in1]amix=inputs=2[out]".
 *
 * Returns 0 on success, -1 on failure (any partially built graph and the
 * in/out pad descriptors are freed on every path).
 *
 * Fixes vs. the original:
 *  - avfilter_get_by_name() returns pointers into the static filter
 *    registry; the original av_free()'d them (undefined behavior).
 *  - the string returned by avfilter_graph_dump() was leaked.
 *  - early error returns leaked the AVFilterInOut descriptors and graph.
 */
int InitFilter(const char* filter_desc)
{
	char args_inner[512];
	const char* pad_name_inner = "in0";
	char args_mic[512];
	const char* pad_name_mic = "in1";
	int ret = 0;
	char* dump = NULL;
	AVCodecContext* encodec_ctx = pCodecEncodeCtx_Audio;

	AVFilter* filter_src_spk = (AVFilter *)avfilter_get_by_name("abuffer");
	AVFilter* filter_src_mic = (AVFilter *)avfilter_get_by_name("abuffer");
	AVFilter* filter_sink = (AVFilter *)avfilter_get_by_name("abuffersink");
	AVFilterInOut* filter_output_inner = avfilter_inout_alloc();
	AVFilterInOut* filter_output_mic = avfilter_inout_alloc();
	AVFilterInOut* filter_input = avfilter_inout_alloc();
	_filter_graph = avfilter_graph_alloc();

	if (!filter_src_spk || !filter_src_mic || !filter_sink ||
		!filter_output_inner || !filter_output_mic || !filter_input || !_filter_graph)
	{
		printf("Filter: allocation failure\n");
		goto fail;
	}

	/* Chain the two output pads immediately so that a single
	 * avfilter_inout_free() on the head releases both on every exit path. */
	filter_output_inner->next = filter_output_mic;

	/* Both sources are described with the *microphone* decoder parameters on
	 * purpose: the system audio has already been resampled to the mic's
	 * 44100 Hz / S16 format before it is fed into this graph. */
	sprintf_s(args_inner, sizeof(args_inner), "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%I64x",
		pReadMicCodecContext->time_base.num,
		pReadMicCodecContext->time_base.den,
		pReadMicCodecContext->sample_rate,
		av_get_sample_fmt_name((AVSampleFormat)pReadMicCodecContext->sample_fmt),
		pReadMicCodecContext->channel_layout);

	sprintf_s(args_mic, sizeof(args_mic), "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%I64x",
		pReadMicCodecContext->time_base.num,
		pReadMicCodecContext->time_base.den,
		pReadMicCodecContext->sample_rate,
		av_get_sample_fmt_name((AVSampleFormat)pReadMicCodecContext->sample_fmt),
		pReadMicCodecContext->channel_layout);

	ret = avfilter_graph_create_filter(&_filter_ctx_src_inner, filter_src_spk, pad_name_inner, args_inner, NULL, _filter_graph);
	if (ret < 0)
	{
		printf("Filter: failed to call avfilter_graph_create_filter -- src inner\n");
		goto fail;
	}
	ret = avfilter_graph_create_filter(&_filter_ctx_src_mic, filter_src_mic, pad_name_mic, args_mic, NULL, _filter_graph);
	if (ret < 0)
	{
		printf("Filter: failed to call avfilter_graph_create_filter -- src mic\n");
		goto fail;
	}

	ret = avfilter_graph_create_filter(&_filter_ctx_sink, filter_sink, "out", NULL, NULL, _filter_graph);
	if (ret < 0)
	{
		printf("Filter: failed to call avfilter_graph_create_filter -- sink\n");
		goto fail;
	}

	/* Constrain the sink to exactly what the encoder expects. */
	ret = av_opt_set_bin(_filter_ctx_sink, "sample_fmts", (uint8_t*)&encodec_ctx->sample_fmt, sizeof(encodec_ctx->sample_fmt), AV_OPT_SEARCH_CHILDREN);
	if (ret < 0)
	{
		printf("Filter: failed to call av_opt_set_bin -- sample_fmts\n");
		goto fail;
	}
	ret = av_opt_set_bin(_filter_ctx_sink, "channel_layouts", (uint8_t*)&encodec_ctx->channel_layout, sizeof(encodec_ctx->channel_layout), AV_OPT_SEARCH_CHILDREN);
	if (ret < 0)
	{
		printf("Filter: failed to call av_opt_set_bin -- channel_layouts\n");
		goto fail;
	}
	ret = av_opt_set_bin(_filter_ctx_sink, "sample_rates", (uint8_t*)&encodec_ctx->sample_rate, sizeof(encodec_ctx->sample_rate), AV_OPT_SEARCH_CHILDREN);
	if (ret < 0)
	{
		printf("Filter: failed to call av_opt_set_bin -- sample_rates\n");
		goto fail;
	}

	/* Describe the open pads of our pre-created filters so the parser can
	 * connect them to the labels in filter_desc. */
	filter_output_inner->name = av_strdup(pad_name_inner);
	filter_output_inner->filter_ctx = _filter_ctx_src_inner;
	filter_output_inner->pad_idx = 0;

	filter_output_mic->name = av_strdup(pad_name_mic);
	filter_output_mic->filter_ctx = _filter_ctx_src_mic;
	filter_output_mic->pad_idx = 0;
	filter_output_mic->next = NULL;

	filter_input->name = av_strdup("out");
	filter_input->filter_ctx = _filter_ctx_sink;
	filter_input->pad_idx = 0;
	filter_input->next = NULL;

	ret = avfilter_graph_parse_ptr(_filter_graph, filter_desc, &filter_input, &filter_output_inner, NULL);
	if (ret < 0)
	{
		printf("Filter: failed to call avfilter_graph_parse_ptr\n");
		goto fail;
	}

	ret = avfilter_graph_config(_filter_graph, NULL);
	if (ret < 0)
	{
		printf("Filter: failed to call avfilter_graph_config\n");
		goto fail;
	}

	avfilter_inout_free(&filter_input);
	avfilter_inout_free(&filter_output_inner);   /* frees the mic pad via ->next */

	dump = avfilter_graph_dump(_filter_graph, NULL);
	if (dump)
	{
		printf("%s\n", dump);
		av_free(dump);
	}

	return 0;

fail:
	avfilter_inout_free(&filter_input);
	avfilter_inout_free(&filter_output_inner);
	avfilter_graph_free(&_filter_graph);
	return -1;
}


int OpenAudioCapture()
{
	//查找输入方式
	const AVInputFormat *pAudioInputFmt = av_find_input_format("dshow");

	//以Direct Show的方式打开设备,并将 输入方式 关联到格式上下文
	//const char * psDevName = dup_wchar_to_utf8(L"audio=麦克风 (2- Synaptics HD Audio)");
	char * psDevName = dup_wchar_to_utf8(L"audio=virtual-audio-capturer");

	if (avformat_open_input(&pFormatCtx_AudioInner, psDevName, pAudioInputFmt, NULL) < 0)
	{
		printf("Couldn't open input stream.(无法打开音频输入流)\n");
		return -1;
	}

	if (pFormatCtx_AudioInner->streams[0]->codecpar->codec_type != AVMEDIA_TYPE_AUDIO)
	{
		printf("Couldn't find video stream information.(无法获取音频流信息)\n");
		return -1;
	}


	const AVCodec *tmpCodec = avcodec_find_decoder(pFormatCtx_AudioInner->streams[0]->codecpar->codec_id);

	pReadCodecContext = avcodec_alloc_context3(tmpCodec);

	//pReadCodecContext->sample_rate = select_sample_rate(tmpCodec);
	pReadCodecContext->sample_rate = pFormatCtx_AudioInner->streams[0]->codecpar->sample_rate;
	pReadCodecContext->channel_layout = select_channel_layout(tmpCodec);
	pReadCodecContext->channels = av_get_channel_layout_nb_channels(pReadCodecContext->channel_layout);

	pReadCodecContext->sample_fmt = (AVSampleFormat)pFormatCtx_AudioInner->streams[0]->codecpar->format;
	//pReadCodecContext->sample_fmt = AV_SAMPLE_FMT_FLTP;

	if (0 > avcodec_open2(pReadCodecContext, tmpCodec, NULL))
	{
		printf("can not find or open audio decoder!\n");
	}

	avcodec_parameters_from_context(pFormatCtx_AudioInner->streams[0]->codecpar, pReadCodecContext);

	return 0;
}


int OpenAudioMicCapture()
{
	//查找输入方式
	const AVInputFormat *pAudioInputFmt = av_find_input_format("dshow");

	//以Direct Show的方式打开设备,并将 输入方式 关联到格式上下文
	const char * psDevName = dup_wchar_to_utf8(L"audio=麦克风 (2- Synaptics HD Audio)");

	if (avformat_open_input(&pFormatCtx_AudioMic, psDevName, pAudioInputFmt, NULL) < 0)
	{
		printf("Couldn't open input stream.(无法打开音频输入流)\n");
		return -1;
	}

	if (avformat_find_stream_info(pFormatCtx_AudioMic, NULL) < 0)
	{
		return -1;
	}

	if (pFormatCtx_AudioMic->streams[0]->codecpar->codec_type != AVMEDIA_TYPE_AUDIO)
	{
		printf("Couldn't find video stream information.(无法获取音频流信息)\n");
		return -1;
	}


	const AVCodec *tmpCodec = avcodec_find_decoder(pFormatCtx_AudioMic->streams[0]->codecpar->codec_id);

	pReadMicCodecContext = avcodec_alloc_context3(tmpCodec);

	pReadMicCodecContext->sample_rate = select_sample_rate(tmpCodec);
	pReadMicCodecContext->channel_layout = select_channel_layout(tmpCodec);
	pReadMicCodecContext->channels = av_get_channel_layout_nb_channels(pReadMicCodecContext->channel_layout);

	pReadMicCodecContext->sample_fmt = (AVSampleFormat)pFormatCtx_AudioMic->streams[0]->codecpar->format;
	//pReadCodecContext->sample_fmt = AV_SAMPLE_FMT_FLTP;

	if (0 > avcodec_open2(pReadMicCodecContext, tmpCodec, NULL))
	{
		printf("can not find or open audio decoder!\n");
	}

	avcodec_parameters_from_context(pFormatCtx_AudioMic->streams[0]->codecpar, pReadMicCodecContext);


	return 0;
}


/*
 * Create the output muxer ("InnerAudioMicAudioMix.mp4"), add one audio
 * stream, configure and open the encoder (44100 Hz stereo preferred),
 * open the output file and write the container header.
 * Returns 0 on success, -1 on failure.
 *
 * Fixes vs. the original: NULL checks on the output context, stream and
 * encoder; removed the dead computation of ->channels from a not-yet-set
 * ->channel_layout; removed an unused local; report encoder-open failure.
 */
int OpenOutPut()
{
	AVStream *pAudioStream = NULL;

	const char *outFileName = "InnerAudioMicAudioMix.mp4";
	avformat_alloc_output_context2(&pFormatCtx_OutputAudio, NULL, NULL, outFileName);
	if (!pFormatCtx_OutputAudio)
	{
		printf("can not alloc output context!\n");
		return -1;
	}

	pAudioStream = avformat_new_stream(pFormatCtx_OutputAudio, NULL);
	if (!pAudioStream)
	{
		printf("can not create output stream!\n");
		return -1;
	}

	AudioIndex = 0;

	pCodecEncode_Audio = (AVCodec *)avcodec_find_encoder(pFormatCtx_OutputAudio->oformat->audio_codec);
	if (!pCodecEncode_Audio)
	{
		printf("can not find audio encoder!\n");
		return -1;
	}

	pCodecEncodeCtx_Audio = avcodec_alloc_context3(pCodecEncode_Audio);
	if (!pCodecEncodeCtx_Audio) {
		fprintf(stderr, "Could not alloc an encoding context\n");
		exit(1);
	}

	/* Prefer the encoder's first supported sample format, else FLTP. */
	pCodecEncodeCtx_Audio->sample_fmt = pCodecEncode_Audio->sample_fmts ? pCodecEncode_Audio->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
	pCodecEncodeCtx_Audio->bit_rate = 64000;

	/* Prefer 44100 Hz when the encoder supports it. */
	pCodecEncodeCtx_Audio->sample_rate = 44100;
	if (pCodecEncode_Audio->supported_samplerates) {
		pCodecEncodeCtx_Audio->sample_rate = pCodecEncode_Audio->supported_samplerates[0];
		for (int i = 0; pCodecEncode_Audio->supported_samplerates[i]; i++) {
			if (pCodecEncode_Audio->supported_samplerates[i] == 44100)
				pCodecEncodeCtx_Audio->sample_rate = 44100;
		}
	}

	/* Prefer stereo when the encoder supports it, then derive ->channels
	 * from the final layout.  (The original also computed ->channels from
	 * ->channel_layout BEFORE the layout was set; that dead assignment has
	 * been removed.) */
	pCodecEncodeCtx_Audio->channel_layout = AV_CH_LAYOUT_STEREO;
	if (pCodecEncode_Audio->channel_layouts) {
		pCodecEncodeCtx_Audio->channel_layout = pCodecEncode_Audio->channel_layouts[0];
		for (int i = 0; pCodecEncode_Audio->channel_layouts[i]; i++) {
			if (pCodecEncode_Audio->channel_layouts[i] == AV_CH_LAYOUT_STEREO)
				pCodecEncodeCtx_Audio->channel_layout = AV_CH_LAYOUT_STEREO;
		}
	}
	pCodecEncodeCtx_Audio->channels = av_get_channel_layout_nb_channels(pCodecEncodeCtx_Audio->channel_layout);

	/* Audio streams conventionally use 1/sample_rate as the time base. */
	AVRational timeBase;
	timeBase.den = pCodecEncodeCtx_Audio->sample_rate;
	timeBase.num = 1;
	pAudioStream->time_base = timeBase;

	if (avcodec_open2(pCodecEncodeCtx_Audio, pCodecEncode_Audio, NULL) < 0)
	{
		//编码器打开失败,退出程序
		printf("can not open audio encoder!\n");
		return -1;
	}

	if (!(pFormatCtx_OutputAudio->oformat->flags & AVFMT_NOFILE))
	{
		if (avio_open(&pFormatCtx_OutputAudio->pb, outFileName, AVIO_FLAG_WRITE) < 0)
		{
			printf("can not open output file handle!\n");
			return -1;
		}
	}

	avcodec_parameters_from_context(pAudioStream->codecpar, pCodecEncodeCtx_Audio);

	if (avformat_write_header(pFormatCtx_OutputAudio, NULL) < 0)
	{
		printf("can not write the header of the output file!\n");
		return -1;
	}

	return 0;
}


int main(int argc, char* argv[])
{
	int ret = 0;

	AVSampleFormat sample_fmt = AV_SAMPLE_FMT_S16;
	int iSize = av_get_bytes_per_sample(sample_fmt);


	avdevice_register_all();


	audio_convert_ctx = swr_alloc();
	av_opt_set_channel_layout(audio_convert_ctx, "in_channel_layout", AV_CH_LAYOUT_STEREO, 0);
	av_opt_set_channel_layout(audio_convert_ctx, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0);
	av_opt_set_int(audio_convert_ctx, "in_sample_rate", 48000, 0);
	av_opt_set_int(audio_convert_ctx, "out_sample_rate", 44100, 0);
	av_opt_set_sample_fmt(audio_convert_ctx, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
	av_opt_set_sample_fmt(audio_convert_ctx, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
	//av_opt_set_sample_fmt(audio_convert_ctx, "out_sample_fmt", AV_SAMPLE_FMT_FLTP, 0);

	ret = swr_init(audio_convert_ctx);

	if (OpenAudioCapture() < 0)
	{
		return -1;
	}

	if (OpenAudioMicCapture() < 0)
	{
		return -1;
	}

	if (OpenOutPut() < 0)
	{
		return -1;
	}

	const char* filter_desc = "[in0][in1]amix=inputs=2[out]";
	ret = InitFilter(filter_desc);
	if (ret < 0)
	{
		return -1;
	}

	InitializeCriticalSection(&AudioSection_inner);
	InitializeCriticalSection(&AudioSection_mic);

	InitializeCriticalSection(&AudioSection_inner_resample);
	

	HANDLE hThreadAudioInner = CreateThread(NULL, 0, AudioInnerCapThreadProc, 0, 0, NULL);
	HANDLE hThreadAudioInnerResample = CreateThread(NULL, 0, AudioInnerResampleThreadProc, 0, 0, NULL);
	HANDLE hThreadAudioMic = CreateThread(NULL, 0, AudioMicCapThreadProc, 0, 0, NULL);


	AVFrame *frame_audio_inner_resample = NULL;
	frame_audio_inner_resample = av_frame_alloc();

	AVFrame *frame_audio_mic = NULL;
	frame_audio_mic = av_frame_alloc();


	while (bCap)
	{
		if (NULL == fifo_audio_inner)
		{
			continue;
		}
		if (NULL == fifo_audio_mic)
		{
			continue;
		}
		if (NULL == fifo_audio_inner_resample)
		{
			continue;
		}

		if (AudioFrameIndex > 3000)
		{
			bCap = false;
			break;
		}

		int fifo_inner_size = av_audio_fifo_size(fifo_audio_inner_resample);
		int fifo_mic_size = av_audio_fifo_size(fifo_audio_mic);
		//int frame_inner_min_size = pReadCodecContext->frame_size;
		//int frame_mic_min_size = pReadMicCodecContext->frame_size;

		int frame_inner_min_size = 1024;
		int frame_mic_min_size = 1024;
		
		if (fifo_inner_size >= frame_inner_min_size && fifo_mic_size >= frame_mic_min_size)
		{
			frame_audio_inner_resample->nb_samples = frame_inner_min_size;
			frame_audio_inner_resample->channel_layout = 3;
			frame_audio_inner_resample->format = pFormatCtx_AudioMic->streams[0]->codecpar->format;
			frame_audio_inner_resample->sample_rate = pFormatCtx_AudioMic->streams[0]->codecpar->sample_rate;
			av_frame_get_buffer(frame_audio_inner_resample, 0);

			frame_audio_mic->nb_samples = frame_mic_min_size;
			frame_audio_mic->channel_layout = 3;
			frame_audio_mic->format = pFormatCtx_AudioMic->streams[0]->codecpar->format;
			frame_audio_mic->sample_rate = pFormatCtx_AudioMic->streams[0]->codecpar->sample_rate;
			av_frame_get_buffer(frame_audio_mic, 0);

			int readcount = 0;
			EnterCriticalSection(&AudioSection_mic);
			readcount = av_audio_fifo_read(fifo_audio_mic, (void**)frame_audio_mic->data, frame_mic_min_size);
			LeaveCriticalSection(&AudioSection_mic);


			EnterCriticalSection(&AudioSection_inner_resample);
			readcount = av_audio_fifo_read(fifo_audio_inner_resample, (void **)frame_audio_inner_resample->data, frame_inner_min_size);
			LeaveCriticalSection(&AudioSection_inner_resample);


			
			frame_audio_inner_resample->pts = AudioFrameIndex * pFormatCtx_OutputAudio->streams[AudioIndex]->codecpar->frame_size;
			frame_audio_mic->pts = AudioFrameIndex * pFormatCtx_OutputAudio->streams[AudioIndex]->codecpar->frame_size;

			BufferSourceContext* s = (BufferSourceContext*)_filter_ctx_src_inner->priv;
			bool b1 = (s->sample_fmt != frame_audio_inner_resample->format);
			bool b2 = (s->sample_rate != frame_audio_inner_resample->sample_rate);
			bool b3 = (s->channel_layout != frame_audio_inner_resample->channel_layout);
			bool b4 = (s->channels != frame_audio_inner_resample->channels);

			ret = av_buffersrc_add_frame(_filter_ctx_src_inner, frame_audio_inner_resample);
			if (ret < 0)
			{
				printf("Mixer: failed to call av_buffersrc_add_frame (speaker)\n");
				break;
			}

			ret = av_buffersrc_add_frame(_filter_ctx_src_mic, frame_audio_mic);
			if (ret < 0)
			{
				printf("Mixer: failed to call av_buffersrc_add_frame (microphone)\n");
				break;
			}


			while (1)
			{
				AVFrame* pFrame_out = av_frame_alloc();

				ret = av_buffersink_get_frame_flags(_filter_ctx_sink, pFrame_out, 0);
				if (ret < 0)
				{
					av_frame_free(&pFrame_out);
					//printf("Mixer: failed to call av_buffersink_get_frame_flags\n");
					break;
				}
				if (pFrame_out->data[0] != NULL)
				{
					AVPacket packet_out = { 0 };
					packet_out.data = NULL;
					packet_out.size = 0;


					ret = avcodec_send_frame(pCodecEncodeCtx_Audio, pFrame_out);
					ret = avcodec_receive_packet(pCodecEncodeCtx_Audio, &packet_out);

					packet_out.stream_index = 0;
					packet_out.pts = AudioFrameIndex * pFormatCtx_OutputAudio->streams[AudioIndex]->codecpar->frame_size;
					packet_out.dts = packet_out.pts;
					packet_out.duration = pFormatCtx_OutputAudio->streams[AudioIndex]->codecpar->frame_size;

					AudioFrameIndex++;
					ret = av_interleaved_write_frame(pFormatCtx_OutputAudio, &packet_out);
					if (ret < 0)
					{
						//printf("Mixer: failed to call av_interleaved_write_frame\n");
					}
					av_packet_unref(&packet_out);
				}
				av_frame_free(&pFrame_out);
			}

			//av_frame_free(&frame_audio);
			//av_frame_free(&frame_audio_mic);
		}
	}
	Sleep(100);
	av_write_trailer(pFormatCtx_OutputAudio);

	avio_close(pFormatCtx_OutputAudio->pb);
	avformat_free_context(pFormatCtx_OutputAudio);

	WaitForSingleObject(hThreadAudioInner, 3000);
	WaitForSingleObject(hThreadAudioMic, 3000);
	WaitForSingleObject(hThreadAudioInnerResample, 3000);

	if (pFormatCtx_AudioInner != NULL)
	{
		avformat_close_input(&pFormatCtx_AudioInner);
		pFormatCtx_AudioInner = NULL;
	}

	return 0;
}



/*
 * Worker thread: reads packets from the system ("inner") audio device,
 * decodes them and appends the PCM samples to fifo_audio_inner.  Lazily
 * creates fifo_audio_inner and fifo_audio_inner_resample once the first
 * decoded frame is available (so the frame size is known).
 * Runs until bCap becomes false or the stream ends.
 *
 * Fix vs. the original: AVERROR(EAGAIN) from avcodec_receive_frame() just
 * means "feed more packets", but the old code broke out of the capture loop
 * on it, killing the thread.  The decoded frame is now also freed on exit.
 */
DWORD WINAPI AudioInnerCapThreadProc(LPVOID lpParam)
{
	AVFrame *pFrame = av_frame_alloc();
	AVPacket packet = { 0 };
	int ret = 0;

	while (bCap)
	{
		if (av_read_frame(pFormatCtx_AudioInner, &packet) < 0)
		{
			continue;	/* transient read failure: retry */
		}

		ret = avcodec_send_packet(pReadCodecContext, &packet);
		av_packet_unref(&packet);
		if (ret < 0)
		{
			continue;
		}

		/* Drain every frame this packet produced. */
		while ((ret = avcodec_receive_frame(pReadCodecContext, pFrame)) >= 0)
		{
			if (NULL == fifo_audio_inner)
			{
				fifo_audio_inner = av_audio_fifo_alloc((AVSampleFormat)pFormatCtx_AudioInner->streams[0]->codecpar->format,
					pFormatCtx_AudioInner->streams[0]->codecpar->channels, 30 * pFrame->nb_samples);
			}

			if (NULL == fifo_audio_inner_resample)
			{
				fifo_audio_inner_resample = av_audio_fifo_alloc((AVSampleFormat)pFormatCtx_AudioInner->streams[0]->codecpar->format, pFormatCtx_OutputAudio->streams[0]->codecpar->channels, 3000 * pFrame->nb_samples);
			}

			/* Drop samples when the fifo is full rather than block. */
			if (av_audio_fifo_space(fifo_audio_inner) >= pFrame->nb_samples)
			{
				EnterCriticalSection(&AudioSection_inner);
				av_audio_fifo_write(fifo_audio_inner, (void **)pFrame->data, pFrame->nb_samples);
				LeaveCriticalSection(&AudioSection_inner);
			}
		}

		if (ret == AVERROR_EOF)
		{
			break;		/* capture stream ended */
		}
		if (ret < 0 && ret != AVERROR(EAGAIN))
		{
			fprintf(stderr, "Error during decoding\n");
			break;
		}
	}

	av_frame_free(&pFrame);
	return 0;
}

/*
 * Worker thread: pulls one encoder-frame worth of 48000 Hz samples from
 * fifo_audio_inner, resamples them to 44100 Hz S16 and pushes the result
 * into fifo_audio_inner_resample for the mixing loop in main().
 *
 * Fixes vs. the original:
 *  - writes the number of samples swr_convert() actually produced (nb)
 *    instead of the upper-bound dst_nb_samples (which appended garbage
 *    tail samples to the fifo).
 *  - both temporary frames are freed every iteration (they leaked before).
 *  - waits with Sleep(1) instead of a 100% CPU busy spin, and exits when
 *    capture has stopped even if fewer than one frame of samples remains.
 */
DWORD WINAPI AudioInnerResampleThreadProc(LPVOID lpParam)
{
	int ret = 0;

	while (1)
	{
		if (fifo_audio_inner == NULL)
		{
			if (!bCap)
				break;
			Sleep(1);	/* fifo not created yet */
			continue;
		}

		int frame_size = pFormatCtx_OutputAudio->streams[AudioIndex]->codecpar->frame_size > 0 ?
			pFormatCtx_OutputAudio->streams[AudioIndex]->codecpar->frame_size : 1024;

		if (av_audio_fifo_size(fifo_audio_inner) < frame_size)
		{
			if (!bCap)
				break;	/* capture finished and the fifo is drained */
			Sleep(1);
			continue;
		}

		/* Frame holding raw 48000 Hz samples read from the fifo. */
		AVFrame *frame_audio_inner = av_frame_alloc();
		frame_audio_inner->nb_samples = frame_size;
		frame_audio_inner->channel_layout = pFormatCtx_OutputAudio->streams[AudioIndex]->codecpar->channel_layout;
		frame_audio_inner->format = pFormatCtx_AudioInner->streams[0]->codecpar->format;
		frame_audio_inner->sample_rate = pFormatCtx_AudioInner->streams[0]->codecpar->sample_rate;
		av_frame_get_buffer(frame_audio_inner, 0);

		EnterCriticalSection(&AudioSection_inner);
		av_audio_fifo_read(fifo_audio_inner, (void **)frame_audio_inner->data, frame_size);
		LeaveCriticalSection(&AudioSection_inner);

		/* Upper bound of output samples for this conversion (no delay
		 * compensation: iDelaySamples was always 0 in the original). */
		int dst_nb_samples = av_rescale_rnd(frame_audio_inner->nb_samples, pCodecEncodeCtx_Audio->sample_rate, frame_audio_inner->sample_rate, AV_ROUND_UP);

		/* Destination frame at the encoder rate (44100 Hz). */
		AVFrame *frame_audio_inner_resample = av_frame_alloc();
		frame_audio_inner_resample->nb_samples = pCodecEncodeCtx_Audio->frame_size;
		frame_audio_inner_resample->channel_layout = pCodecEncodeCtx_Audio->channel_layout;
		frame_audio_inner_resample->format = pFormatCtx_AudioInner->streams[0]->codecpar->format;
		frame_audio_inner_resample->sample_rate = pCodecEncodeCtx_Audio->sample_rate;
		av_frame_get_buffer(frame_audio_inner_resample, 0);

		/* AV_SAMPLE_FMT_S16 is packed (non-planar): all channels interleave
		 * in data[0], so only one output plane is passed to swr_convert. */
		uint8_t* out_buffer = frame_audio_inner_resample->data[0];
		int nb = swr_convert(audio_convert_ctx, &out_buffer, dst_nb_samples, (const uint8_t**)frame_audio_inner->data, frame_audio_inner->nb_samples);

		if (nb > 0 && fifo_audio_inner_resample != NULL)
		{
			EnterCriticalSection(&AudioSection_inner_resample);
			ret = av_audio_fifo_write(fifo_audio_inner_resample, (void **)frame_audio_inner_resample->data, nb);
			LeaveCriticalSection(&AudioSection_inner_resample);
		}

		av_frame_free(&frame_audio_inner);
		av_frame_free(&frame_audio_inner_resample);
	}

	return 0;
}


/*
 * Worker thread: reads packets from the microphone device, decodes them and
 * appends the PCM samples to fifo_audio_mic (created lazily from the first
 * decoded frame).  Runs until bCap becomes false or the stream ends.
 *
 * Fix vs. the original: AVERROR(EAGAIN) from avcodec_receive_frame() just
 * means "feed more packets", but the old code broke out of the capture loop
 * on it, killing the thread.  The decoded frame is now also freed on exit.
 */
DWORD WINAPI AudioMicCapThreadProc(LPVOID lpParam)
{
	AVFrame *pFrame = av_frame_alloc();
	AVPacket packet = { 0 };
	int ret = 0;

	while (bCap)
	{
		if (av_read_frame(pFormatCtx_AudioMic, &packet) < 0)
		{
			continue;	/* transient read failure: retry */
		}

		ret = avcodec_send_packet(pReadMicCodecContext, &packet);
		av_packet_unref(&packet);
		if (ret < 0)
		{
			continue;
		}

		/* Drain every frame this packet produced. */
		while ((ret = avcodec_receive_frame(pReadMicCodecContext, pFrame)) >= 0)
		{
			if (NULL == fifo_audio_mic)
			{
				fifo_audio_mic = av_audio_fifo_alloc((AVSampleFormat)pFormatCtx_AudioMic->streams[0]->codecpar->format,
					pFormatCtx_AudioMic->streams[0]->codecpar->channels, 300 * pFrame->nb_samples);
			}

			/* Drop samples when the fifo is full rather than block. */
			if (av_audio_fifo_space(fifo_audio_mic) >= pFrame->nb_samples)
			{
				EnterCriticalSection(&AudioSection_mic);
				av_audio_fifo_write(fifo_audio_mic, (void **)pFrame->data, pFrame->nb_samples);
				LeaveCriticalSection(&AudioSection_mic);
			}
		}

		if (ret == AVERROR_EOF)
		{
			break;		/* capture stream ended */
		}
		if (ret < 0 && ret != AVERROR(EAGAIN))
		{
			fprintf(stderr, "Error during decoding\n");
			break;
		}
	}

	av_frame_free(&pFrame);
	return 0;
}

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值