#include <stdio.h>
#include <MMDeviceAPI.h>
#include <AudioClient.h>
#include <AudioPolicy.h>
#include <thread>
#include "CaptureScreenWindowArea.h"
// Upper bound, in bytes, for one captured packet copied out of the engine buffer.
#define MAX_AUDIO_FRAME_SIZE 192000
// Runtime toggles: set false to pause the corresponding capture loop.
// NOTE(review): read and written across threads with no synchronization —
// these should probably be std::atomic<bool>; confirm before relying on them.
bool capture_audio_button = true;
bool render_audio_button = true;
// Release a COM-style interface through a pointer-to-pointer and null it
// out, making an accidental double release impossible. No-op when the
// pointed-to pointer is already null.
template <class T>
void SafeRelease(T **ppT)
{
	if (*ppT == nullptr)
	{
		return;
	}
	(*ppT)->Release();
	*ppT = nullptr;
}
//采集声卡声音
int render_audio()
{
IAudioClient * _AudioClient;
IAudioCaptureClient *_CaptureClient;
IMMDevice * _Device;
IMMDeviceEnumerator *deviceEnumerator = NULL;
HANDLE _AudioSamplesReadyEvent = NULL;
HRESULT hr = CoInitializeEx(NULL, COINIT_MULTITHREADED);
hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), NULL, CLSCTX_INPROC_SERVER, IID_PPV_ARGS(&deviceEnumerator));
if (FAILED(hr))
{
printf("Unable to retrieve CoCreateInstance %x\n", hr);
return -1;
}
//这里可以调用EnumAudioEndpoints选择使用其它设备
//eRender音频渲染流。音频数据从应用程序流向呈现该流的音频终结点设备。
hr = deviceEnumerator->GetDefaultAudioEndpoint(eRender, eMultimedia, &_Device);
if (FAILED(hr))
{
printf("Unable to retrieve device %x\n", hr);
return -1;
}
SafeRelease(&deviceEnumerator);
_Device->AddRef(); // Since we're holding a copy of the endpoint, take a reference to it. It'll be released in Shutdown();
_AudioSamplesReadyEvent = CreateEventEx(NULL, NULL, 0, EVENT_MODIFY_STATE | SYNCHRONIZE);
if (_AudioSamplesReadyEvent == NULL)
{
printf("Unable to create samples ready event: %d.\n", GetLastError());
return false;
}
hr = _Device->Activate(__uuidof(IAudioClient), CLSCTX_INPROC_SERVER, NULL, reinterpret_cast<void **>(&_AudioClient));
if (FAILED(hr))
{
printf("Unable to activate audio client: %x.\n", hr);
return false;
}
WAVEFORMATEX * _MixFormat;
UINT32 _BufferSize;
hr = _AudioClient->GetMixFormat(&_MixFormat);
printf("声卡:采样率:%d ,通道数 %d ,每个样本字节数 %d ,每秒钟的字节数 %d ,数据快大小 %d \n", _MixFormat->nSamplesPerSec, _MixFormat->nChannels, _MixFormat->wBitsPerSample/8, _MixFormat->nAvgBytesPerSec, _MixFormat->nBlockAlign);
if (FAILED(hr))
{
printf("Unable to get mix format on audio client: %x.\n", hr);
return false;
}
size_t _FrameSize = (_MixFormat->wBitsPerSample / 8) * _MixFormat->nChannels;
//InitializeAudioEngine
hr = _AudioClient->Initialize(AUDCLNT_SHAREMODE_SHARED, AUDCLNT_STREAMFLAGS_LOOPBACK| AUDCLNT_STREAMFLAGS_EVENTCALLBACK, 0, 0, _MixFormat, NULL);
if (FAILED(hr))
{
printf("Unable to initialize audio client: %x.\n", hr);
return false;
}
//
// Retrieve the buffer size for the audio client.
//
hr = _AudioClient->GetBufferSize(&_BufferSize);
if (FAILED(hr))
{
printf("Unable to get audio client buffer: %x. \n", hr);
return false;
}
hr = _AudioClient->SetEventHandle(_AudioSamplesReadyEvent);
if (FAILED(hr))
{
printf("Unable to set ready event: %x.\n", hr);
return false;
}
hr = _AudioClient->GetService(IID_PPV_ARGS(&_CaptureClient));
if (FAILED(hr))
{
printf("Unable to get new capture client: %x.\n", hr);
return false;
}
//开始采集
hr = _AudioClient->Start();
if (FAILED(hr))
{
printf("Unable to get new capture client: %x.\n", hr);
return false;
}
/*FILE *p = NULL;
fopen_s(&p, "1render____audio.pcm", "wb+");*/
while (1)
{
if (render_audio_button == false)
{
continue;
}
//信号可以保证线程睡眠,降低cpu消耗
DWORD waitResult = WaitForSingleObject(_AudioSamplesReadyEvent, INFINITE);
BYTE *pData, *pBuffer;
INT nBufferLenght;
UINT32 framesAvailable;
DWORD flags;
pBuffer = new BYTE[MAX_AUDIO_FRAME_SIZE];
hr = _CaptureClient->GetBuffer(&pData, &framesAvailable, &flags, NULL, NULL);
if (SUCCEEDED(hr))
{
if (framesAvailable != 0)
{
if (flags & AUDCLNT_BUFFERFLAGS_SILENT)
{
//
// Fill 0s from the capture buffer to the output buffer.
//
}
else
{
//
// Copy data from the audio engine buffer to the output buffer.
//
CopyMemory(pBuffer, pData, framesAvailable*_FrameSize);
//fwrite(pBuffer, framesAvailable*_FrameSize, 1, p);
}
}
hr = _CaptureClient->ReleaseBuffer(framesAvailable);
if (FAILED(hr))
{
printf("Unable to release capture buffer: %x!\n", hr);
}
}
delete[] pBuffer;
}
//fclose(p);
CoUninitialize();
return 0;
}
//采集麦克风
int capture_aduio()
{
IAudioClient * _AudioClient;
IAudioCaptureClient *_CaptureClient;
IMMDevice * _Device;
IMMDeviceEnumerator *deviceEnumerator = NULL;
HANDLE _AudioSamplesReadyEvent = NULL;
HRESULT hr = CoInitializeEx(NULL, COINIT_MULTITHREADED);
hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), NULL, CLSCTX_INPROC_SERVER, IID_PPV_ARGS(&deviceEnumerator));
if (FAILED(hr))
{
printf("Unable to retrieve CoCreateInstance %x\n", hr);
return -1;
}
//这里可以调用EnumAudioEndpoints选择使用其它设备
//eCapture:音频捕获流,音频数据从捕获流的音频终结点设备流向应用程序。
hr = deviceEnumerator->GetDefaultAudioEndpoint(eCapture, eMultimedia, &_Device);
if (FAILED(hr))
{
printf("Unable to retrieve device %x\n", hr);
return -1;
}
SafeRelease(&deviceEnumerator);
_Device->AddRef(); // Since we're holding a copy of the endpoint, take a reference to it. It'll be released in Shutdown();
_AudioSamplesReadyEvent = CreateEventEx(NULL, NULL, 0, EVENT_MODIFY_STATE | SYNCHRONIZE);
if (_AudioSamplesReadyEvent == NULL)
{
printf("Unable to create samples ready event: %d.\n", GetLastError());
return false;
}
hr = _Device->Activate(__uuidof(IAudioClient), CLSCTX_INPROC_SERVER, NULL, reinterpret_cast<void **>(&_AudioClient));
if (FAILED(hr))
{
printf("Unable to activate audio client: %x.\n", hr);
return false;
}
WAVEFORMATEX * _MixFormat;
UINT32 _BufferSize;
hr = _AudioClient->GetMixFormat(&_MixFormat);
printf("麦克风:采样率:%d ,通道数 %d ,每个样本字节数 %d ,每秒钟的字节数 %d ,数据快大小 %d \n", _MixFormat->nSamplesPerSec, _MixFormat->nChannels, _MixFormat->wBitsPerSample / 8, _MixFormat->nAvgBytesPerSec, _MixFormat->nBlockAlign);
if (FAILED(hr))
{
printf("Unable to get mix format on audio client: %x.\n", hr);
return false;
}
size_t _FrameSize = (_MixFormat->wBitsPerSample / 8) * _MixFormat->nChannels;
//InitializeAudioEngine
hr = _AudioClient->Initialize(AUDCLNT_SHAREMODE_SHARED, AUDCLNT_STREAMFLAGS_EVENTCALLBACK , 0, 0, _MixFormat, NULL);
if (FAILED(hr))
{
printf("Unable to initialize audio client: %x.\n", hr);
return false;
}
//
// Retrieve the buffer size for the audio client.
//
hr = _AudioClient->GetBufferSize(&_BufferSize);
if (FAILED(hr))
{
printf("Unable to get audio client buffer: %x. \n", hr);
return false;
}
hr = _AudioClient->SetEventHandle(_AudioSamplesReadyEvent);
if (FAILED(hr))
{
printf("Unable to set ready event: %x.\n", hr);
return false;
}
hr = _AudioClient->GetService(IID_PPV_ARGS(&_CaptureClient));
if (FAILED(hr))
{
printf("Unable to get new capture client: %x.\n", hr);
return false;
}
//开始采集
hr = _AudioClient->Start();
if (FAILED(hr))
{
printf("Unable to get new capture client: %x.\n", hr);
return false;
}
/*FILE *p = NULL;
fopen_s(&p, "1capture____audio.pcm", "wb+");*/
while (1)
{
if (capture_audio_button == false)
{
continue;
}
DWORD waitResult = WaitForSingleObject(_AudioSamplesReadyEvent, INFINITE);
BYTE *pData, *pBuffer;
INT nBufferLenght;
UINT32 framesAvailable;
DWORD flags;
pBuffer = new BYTE[MAX_AUDIO_FRAME_SIZE];
hr = _CaptureClient->GetBuffer(&pData, &framesAvailable, &flags, NULL, NULL);
if (SUCCEEDED(hr))
{
if (framesAvailable != 0)
{
if (flags & AUDCLNT_BUFFERFLAGS_SILENT)
{
//
// Fill 0s from the capture buffer to the output buffer.
//
}
else
{
//
// Copy data from the audio engine buffer to the output buffer.
//
CopyMemory(pBuffer, pData, framesAvailable*_FrameSize);
//fwrite(pBuffer, framesAvailable*_FrameSize, 1, p);
//printf("get mic capture frames: %d!\n", framesAvailable);
}
}
hr = _CaptureClient->ReleaseBuffer(framesAvailable);
if (FAILED(hr))
{
printf("Unable to release capture buffer: %x!\n", hr);
}
}
delete[] pBuffer;
}
CoUninitialize();
return 0;
}
// Entry point: prints the desktop resolution, then runs microphone capture
// and sound-card loopback capture on two worker threads.
// Note: the original signature `int main(int argc, void** argv[])` is
// non-standard; fixed to the standard form.
int main(int argc, char *argv[])
{
	int cxScreen = GetSystemMetrics(SM_CXSCREEN);
	int cyScreen = GetSystemMetrics(SM_CYSCREEN);
	printf("桌面宽度:%d 桌面高度 :%d \n", cxScreen, cyScreen);
	std::thread micThread(capture_aduio);
	std::thread loopbackThread(render_audio);
	// Both workers loop forever, so joining blocks indefinitely — same
	// behavior as the original's detach + Sleep(5) polling loop, without
	// waking the main thread 200 times a second.
	micThread.join();
	loopbackThread.join();
	return 0;
}
// windowsAPI 声卡和麦克风分线程采集 — Windows API: sound card and microphone captured on separate threads.
// (Scraped blog trailer, originally published 2020-12-18 21:47:20; commented out so the file compiles.)