WebRTC Native Video Conferencing Tutorial (3): Starting the WebRTC Engine and Capturing Local and Remote Audio Data

By default, once WebRTC starts pushing a stream, the local side has no access to the outgoing data. If you want that data delivered back through local callbacks, there are two approaches:

        1. Derive from AudioDeviceModule and AudioTransport, then pass a pointer to your AudioTransport object into the ADM via RegisterAudioCallback; you will then receive the captured data through the RecordedDataIsAvailable callback.

        A note on this approach: even after you derive from AudioDeviceModule and AudioTransport, WebrtcVoiceEngine still creates a default AudioTransport internally, so the timing of the RegisterAudioCallback call matters — register your own AudioTransport only after the PeerConnection has been created (a sketch of this call order appears after the ADM implementation below).

// MyAudioDeviceTransport.h
#pragma once
#include "audio/audio_transport_impl.h"  // webrtc::AudioTransportImpl

class MyAudioDeviceTransport : public webrtc::AudioTransportImpl
{
public:
	MyAudioDeviceTransport(webrtc::AudioMixer* mixer, webrtc::AudioProcessing* audio_processing);
	~MyAudioDeviceTransport() override;

	int32_t RecordedDataIsAvailable(const void* audioSamples, const size_t nSamples, const size_t nBytesPerSample, const size_t nChannels, const uint32_t samplesPerSec, const uint32_t totalDelayMS,
	const int32_t clockDrift, const uint32_t currentMicLevel, const bool keyPressed, uint32_t& newMicLevel) override;

	int32_t NeedMorePlayData(const size_t nSamples, const size_t nBytesPerSample, const size_t nChannels, const uint32_t samplesPerSec, void* audioSamples, size_t& nSamplesOut,
	int64_t* elapsed_time_ms, int64_t* ntp_time_ms) override;

	void PullRenderData(int bits_per_sample, int sample_rate, size_t number_of_channels, size_t number_of_frames, void* audio_data, int64_t* elapsed_time_ms, int64_t* ntp_time_ms) override;
};
//MyAudioDeviceTransport.cpp
#include "MyAudioDeviceTransport.h"

MyAudioDeviceTransport::MyAudioDeviceTransport(webrtc::AudioMixer* mixer, webrtc::AudioProcessing* audio_processing) : AudioTransportImpl(mixer, audio_processing)
{
}


MyAudioDeviceTransport::~MyAudioDeviceTransport()
{
}

int32_t MyAudioDeviceTransport::RecordedDataIsAvailable(const void* audioSamples, const size_t nSamples, const size_t nBytesPerSample, const size_t nChannels, const uint32_t samplesPerSec,
	const uint32_t totalDelayMS, const int32_t clockDrift, const uint32_t currentMicLevel, const bool keyPressed, uint32_t& newMicLevel)
{
	int32_t nRet = AudioTransportImpl::RecordedDataIsAvailable(audioSamples, nSamples, nBytesPerSample, nChannels, samplesPerSec, totalDelayMS, clockDrift, currentMicLevel, keyPressed, newMicLevel);
	// Add your capture-side data processing here.
	return nRet;
}

int32_t MyAudioDeviceTransport::NeedMorePlayData(const size_t nSamples, const size_t nBytesPerSample, const size_t nChannels, const uint32_t samplesPerSec, void* audioSamples, size_t& nSamplesOut, int64_t* elapsed_time_ms, int64_t* ntp_time_ms)
{
	int32_t nRet = AudioTransportImpl::NeedMorePlayData(nSamples, nBytesPerSample, nChannels, samplesPerSec, audioSamples, nSamplesOut, elapsed_time_ms, ntp_time_ms);
	// Add your playout-side data processing here.
	return nRet;
}

void MyAudioDeviceTransport::PullRenderData(int bits_per_sample, int sample_rate, size_t number_of_channels, size_t number_of_frames, void* audio_data, int64_t* elapsed_time_ms, int64_t* ntp_time_ms)
{
	AudioTransportImpl::PullRenderData(bits_per_sample, sample_rate, number_of_channels, number_of_frames, audio_data, elapsed_time_ms, ntp_time_ms);
	// Add your render-side data processing here.
}
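
To make the processing hook concrete, here is a minimal sketch that dumps the captured PCM to a raw file; the file name and the static FILE* are illustrative assumptions, not part of the tutorial's project:

// Hedged sketch: dump one 10 ms capture buffer to a raw PCM file.
#include <cstdio>

static void DumpCapturedPcm(const void* audioSamples, size_t nSamples, size_t nBytesPerSample)
{
	static FILE* s_fp = std::fopen("capture.pcm", "wb");
	if (s_fp != nullptr)
	{
		// In this callback nBytesPerSample covers all channels of one sample
		// frame, so the buffer size is nSamples * nBytesPerSample bytes.
		std::fwrite(audioSamples, 1, nSamples * nBytesPerSample, s_fp);
	}
}

Calling DumpCapturedPcm(audioSamples, nSamples, nBytesPerSample) at the processing spot in RecordedDataIsAvailable produces a file you can import as raw PCM (16-bit, samplesPerSec, nChannels) in a tool such as Audacity to verify the capture path.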

The following code implements the AudioDeviceModule itself:

//MyAudioDeviceModule.h
#pragma once
#include "modules/audio_device/audio_device_impl.h"  // webrtc::AudioDeviceModuleImpl

class MyAudioDeviceModule : public webrtc::AudioDeviceModuleImpl
{
public:
    MyAudioDeviceModule(webrtc::AudioDeviceModule::AudioLayer audio_layer, webrtc::TaskQueueFactory* task_queue_factory);
    ~MyAudioDeviceModule() override;

    // A custom factory method is needed so that it returns our own ADM type.
    static rtc::scoped_refptr<MyAudioDeviceModule> CreateAudioDeviceModule(webrtc::AudioDeviceModule::AudioLayer audio_layer, webrtc::TaskQueueFactory* task_queue_factory);

    // If you have no extra AudioTransport-related work to do, overriding this method is optional.
    int32_t RegisterAudioCallback(webrtc::AudioTransport* audioCallback) override;
};
// MyAudioDeviceModule.cpp
#include "MyAudioDeviceModule.h"

rtc::scoped_refptr<MyAudioDeviceModule> MyAudioDeviceModule::CreateAudioDeviceModule(webrtc::AudioDeviceModule::AudioLayer audio_layer, webrtc::TaskQueueFactory* task_queue_factory)
{
    // Create the generic reference counted (platform independent) implementation.
    rtc::scoped_refptr<MyAudioDeviceModule> audioDevice(new rtc::RefCountedObject<MyAudioDeviceModule>(audio_layer, task_queue_factory));

    // Ensure that the current platform is supported.
    if (audioDevice->CheckPlatform() == -1)
    {
        return nullptr;
    }

    if (audioDevice->CreatePlatformSpecificObjects() == -1)
    {
        return nullptr;
    }

    // Ensure that the generic audio buffer can communicate with the platform
    // specific parts.
    if (audioDevice->AttachAudioBuffer() == -1)
    {
        return nullptr;
    }
    return audioDevice;
}

MyAudioDeviceModule::MyAudioDeviceModule(webrtc::AudioDeviceModule::AudioLayer audio_layer, webrtc::TaskQueueFactory* task_queue_factory)
    :AudioDeviceModuleImpl(audio_layer, task_queue_factory) 
{
}


MyAudioDeviceModule::~MyAudioDeviceModule()
{
}


int32_t MyAudioDeviceModule::RegisterAudioCallback(webrtc::AudioTransport* audioCallback)
{
    return AudioDeviceModuleImpl::RegisterAudioCallback(audioCallback);
}
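
With both classes in place, the call order mentioned at the start of this section looks roughly like the sketch below; the m_* member names and the RTCConfiguration setup are assumptions for illustration, the point is only that RegisterAudioCallback runs after CreatePeerConnection:

// Hedged sketch of the registration order; all m_* members are illustrative.
m_adm = MyAudioDeviceModule::CreateAudioDeviceModule(
    webrtc::AudioDeviceModule::kPlatformDefaultAudio, m_task_queue_factory.get());
// ... pass m_adm to CreatePeerConnectionFactory as shown later ...

webrtc::PeerConnectionInterface::RTCConfiguration config;
rtc::scoped_refptr<webrtc::PeerConnectionInterface> pc =
    m_peer_connection_factory->CreatePeerConnection(config, nullptr, nullptr, &m_pcObserver);

// Register only now: an earlier registration would be replaced by the default
// AudioTransport that WebrtcVoiceEngine installs internally.
m_adm->RegisterAudioCallback(m_transport.get());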


Alternatively, you can derive from the AudioProcessing implementation class and override its ProcessStream method to get the audio data. Sample code:

    
// MyAudioProcess.h
#pragma once
#include "modules/audio_processing/audio_processing_impl.h"  // webrtc::AudioProcessingImpl
#include "FFmpegRecorder.h"  // the author's recorder class used below (header name assumed)

class MyAudioProcess : public webrtc::AudioProcessingImpl
{
public:
    static MyAudioProcess* Create();

    MyAudioProcess(const webrtc::Config& config);
    ~MyAudioProcess() override;

    int ProcessStream(const int16_t* const src, const webrtc::StreamConfig& input_config, const webrtc::StreamConfig& output_config, int16_t* const dest) override;

    FFmpegRecorder* m_recorder = nullptr;  // set in Create(), used in ProcessStream
};
#include "HBAudioProcess.h"

MyAudioProcess::MyAudioProcess(const webrtc::Config& config) :AudioProcessingImpl(config, nullptr, nullptr, nullptr, nullptr, nullptr)
{
}

MyAudioProcess::~MyAudioProcess()
{
}

int MyAudioProcess::ProcessStream(const int16_t* const src, const webrtc::StreamConfig& input_config, const webrtc::StreamConfig& output_config, int16_t* const dest)
{

	size_t nc = input_config.num_channels();
	int sr = input_config.sample_rate_hz();
	size_t ns = input_config.num_samples();

	if (m_recorder && m_recorder->isRecording())
	{
		m_recorder->AddAudioData(TRUE, src, ns, 2, nc, sr, 0);
	}

	// The base-class call must come last: if it runs before the code above,
	// the PacerThread misbehaves and the process reliably crashes.
	int nRet = AudioProcessingImpl::ProcessStream(src, input_config, output_config, dest);
	return nRet;
}

MyAudioProcess* MyAudioProcess::Create()
{
	webrtc::Config config;

	// Standard implementation.
	MyAudioProcess* apm = new rtc::RefCountedObject<MyAudioProcess>(config);
	if (apm->Initialize() != webrtc::AudioProcessing::kNoError) {
		delete apm;
		return nullptr;
	}

	apm->m_recorder = FFmpegRecorder::getInstance();
	return apm;
}
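
If you take this route, the custom AudioProcessing object replaces the nullptr passed as the last argument of CreatePeerConnectionFactory (shown further below); a hedged sketch:

// Hedged sketch: hand the custom APM to the factory instead of the default.
rtc::scoped_refptr<webrtc::AudioProcessing> apm(MyAudioProcess::Create());
m_peer_connection_factory = webrtc::CreatePeerConnectionFactory(
    m_networkThread.get(), m_workerThread.get(), m_signalingThread.get(),
    m_audioDeviceModule.get(),
    webrtc::CreateBuiltinAudioEncoderFactory(),
    webrtc::CreateBuiltinAudioDecoderFactory(),
    webrtc::CreateBuiltinVideoEncoderFactory(),
    webrtc::CreateBuiltinVideoDecoderFactory(),
    nullptr /* audio mixer: default */,
    apm     /* our AudioProcessing instead of the default */);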


        2. The focus of this article: use CreateAudioDeviceWithDataObserver to bind an observer object to the AudioDeviceModule. This also delivers the audio data, and it is simpler; the example below creates the WebRTC engine this way.
 

On to the example.

First we write the callback object the observer pattern requires. Part of its data handling, and how the audio is actually recorded, will be explained later in this series, so only the class declaration is given here:

// AudioDeviceDataProc.h
#pragma once
#include "modules/audio_device/include/audio_device_data_observer.h"  // webrtc::AudioDeviceDataObserver

class AudioDeviceDataProc : public webrtc::AudioDeviceDataObserver
{
public:
    virtual void OnCaptureData(const void* audio_samples,
        const size_t num_samples,
        const size_t bytes_per_sample,
        const size_t num_channels,
        const uint32_t samples_per_sec) override;

    virtual void OnRenderData(const void* audio_samples,
        const size_t num_samples,
        const size_t bytes_per_sample,
        const size_t num_channels,
        const uint32_t samples_per_sec) override;

    AudioDeviceDataProc() = default;
    virtual ~AudioDeviceDataProc() = default;
};
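
Until the dedicated recording article, here is a hedged sketch of what the implementation might do; forwarding to FFmpegRecorder mirrors the AudioProcessing example above, and the exact AddAudioData parameter convention is an assumption carried over from that example:

// Hedged sketch of the observer implementation; the FFmpegRecorder calls are illustrative.
#include "AudioDeviceDataProc.h"
#include "FFmpegRecorder.h"

void AudioDeviceDataProc::OnCaptureData(const void* audio_samples, const size_t num_samples,
    const size_t bytes_per_sample, const size_t num_channels, const uint32_t samples_per_sec)
{
    // One 10 ms frame of local (microphone) PCM, num_samples * bytes_per_sample bytes in total.
    FFmpegRecorder* recorder = FFmpegRecorder::getInstance();
    if (recorder && recorder->isRecording())
    {
        recorder->AddAudioData(TRUE, static_cast<const int16_t*>(audio_samples), num_samples, 2, num_channels, samples_per_sec, 0);
    }
}

void AudioDeviceDataProc::OnRenderData(const void* audio_samples, const size_t num_samples,
    const size_t bytes_per_sample, const size_t num_channels, const uint32_t samples_per_sec)
{
    // One 10 ms frame of remote (playout) PCM, handled the same way.
    FFmpegRecorder* recorder = FFmpegRecorder::getInstance();
    if (recorder && recorder->isRecording())
    {
        recorder->AddAudioData(FALSE, static_cast<const int16_t*>(audio_samples), num_samples, 2, num_channels, samples_per_sec, 0);
    }
}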

First create WebRTC's three threads. Note that the worker thread must be created by hand, because creating the ADM object requires it:

    std::unique_ptr<rtc::Thread>                            m_networkThread;
    std::unique_ptr<rtc::Thread>                            m_signalingThread;
    std::unique_ptr<rtc::Thread>                            m_workerThread;

    m_networkThread = rtc::Thread::CreateWithSocketServer();
    m_networkThread->SetName("network_thread", nullptr);
    m_networkThread->Start();

    m_workerThread = rtc::Thread::Create();
    m_workerThread->SetName("worker_thread", nullptr);
    m_workerThread->Start();

    m_signalingThread = rtc::Thread::Create();
    m_signalingThread->SetName("signaling_thread", nullptr);
    m_signalingThread->Start();

        Now the engine startup. The ADM must be initialized on the worker thread, so we initialize the ADM object, subscribe to the data, and set up the devices there (this assumes m_task_queue_factory has already been created; see the factory code below):

m_workerThread->Invoke<void>(RTC_FROM_HERE, [&, this] {
    rtc::scoped_refptr<webrtc::AudioDeviceModule> spAdm = webrtc::AudioDeviceModule::Create(webrtc::AudioDeviceModule::kPlatformDefaultAudio, m_task_queue_factory.get());
    m_audioDeviceModule = webrtc::CreateAudioDeviceWithDataObserver(spAdm, std::make_unique<AudioDeviceDataProc>());
    m_audioDeviceModule->Init();

    //int nPlayCount = m_audioDeviceModule->PlayoutDevices();
    //int nRecordCount = m_audioDeviceModule->RecordingDevices();
    m_audioDeviceModule->SetPlayoutDevice(0);    // use playout device 0
    m_audioDeviceModule->SetRecordingDevice(0);  // use recording device 0

    //m_audioDeviceModule->InitPlayout();        // no need to call this; when called, remote audio becomes inaudible (cause not found)
    m_audioDeviceModule->InitRecording();        // initialize the recording device
});
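
Rather than hard-coding device index 0, you can enumerate the devices first; a small hedged sketch using the ADM's device-name API, to be run inside the same worker-thread Invoke:

// Hedged sketch: list the recording devices before picking an index.
int16_t numDevices = m_audioDeviceModule->RecordingDevices();
for (uint16_t i = 0; i < static_cast<uint16_t>(numDevices); ++i)
{
    char name[webrtc::kAdmMaxDeviceNameSize] = {0};
    char guid[webrtc::kAdmMaxGuidSize] = {0};
    if (m_audioDeviceModule->RecordingDeviceName(i, name, guid) == 0)
    {
        // Log or display "name" so the user can pick an index explicitly.
    }
}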

        With the AudioDeviceModule created, the engine is started through WebRTC's global CreatePeerConnectionFactory function (note that m_task_queue_factory, used by the worker-thread block above, is created first):

m_task_queue_factory = webrtc::CreateDefaultTaskQueueFactory();
rtc::scoped_refptr<webrtc::AudioEncoderFactory> spAudioEncoder = webrtc::CreateBuiltinAudioEncoderFactory();
rtc::scoped_refptr<webrtc::AudioDecoderFactory> spAudioDecoder = webrtc::CreateBuiltinAudioDecoderFactory();
std::unique_ptr<webrtc::VideoEncoderFactory> spVideoEncoder = webrtc::CreateBuiltinVideoEncoderFactory();
std::unique_ptr<webrtc::VideoDecoderFactory> spVideoDecoder = webrtc::CreateBuiltinVideoDecoderFactory();
m_peer_connection_factory = webrtc::CreatePeerConnectionFactory(
    m_networkThread.get()      /* network thread, owned by us */,
    m_workerThread.get()       /* worker thread; the ADM was pre-initialized on it */,
    m_signalingThread.get()    /* signaling thread, owned by us */,
    m_audioDeviceModule.get()  /* the observer-wrapped ADM created above */,
    spAudioEncoder,            /* audio encoder factory, default */
    spAudioDecoder,            /* audio decoder factory, default */
    std::move(spVideoEncoder), /* video encoder factory, default */
    std::move(spVideoDecoder), /* video decoder factory, default */
    nullptr,                   /* audio mixer, default */
    nullptr                    /* audio processing, default */
);

With the steps above, the initialization of WebRTC's basic framework is complete. Finally, a Producer for the audio data (video works the same way) must be created through Mediasoup, and the data pushed to the server through a Transport.

First, create the SendTransport and RecvTransport objects from the JSON parameters sent by the server; producers are then created on the SendTransport:

mediasoupclient::Producer* MediasoupMeeting::createProducer(QString kind)
{
    if ((kind == "audio") && m_pSoupDevice->CanProduce(kind.toStdString()))
    {
        // pcf is the WebRTC factory created above: webrtc::PeerConnectionFactoryInterface
        rtc::scoped_refptr<webrtc::AudioSourceInterface> as = pcf->CreateAudioSource(cricket::AudioOptions());
        audio_track = pcf->CreateAudioTrack(rtc::CreateRandomUuid(), as);

        // Push the producer's data out.
        return pSendTransport->Produce(pSoupListener, audio_track, nullptr, nullptr);
    }
    else if ((kind == "video") && m_pSoupDevice->CanProduce(kind.toStdString()))
    {
        rtc::scoped_refptr<webrtc::VideoTrackInterface> localTrack;
        localTrack = MediasoupMeeting::createVideoTrack(rtc::CreateRandomUuid());

        // Three encodings request three simulcast layers.
        std::vector<webrtc::RtpEncodingParameters> encodings;
        encodings.emplace_back(webrtc::RtpEncodingParameters());
        encodings.emplace_back(webrtc::RtpEncodingParameters());
        encodings.emplace_back(webrtc::RtpEncodingParameters());
        return pSendTransport->Produce(pSoupListener, localTrack, &encodings, nullptr);
    }
    return nullptr;
}
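
createVideoTrack is referenced above but not shown in the original; one plausible shape, assuming a webrtc::VideoTrackSourceInterface member (m_videoSource, fed by a camera capturer) already exists, is:

// Hedged sketch: m_videoSource is an assumed rtc::scoped_refptr<webrtc::VideoTrackSourceInterface>.
rtc::scoped_refptr<webrtc::VideoTrackInterface> MediasoupMeeting::createVideoTrack(const std::string& label)
{
    return pcf->CreateVideoTrack(label, m_videoSource);
}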

After receiving the server's new-stream notification (Notice::NewConsumer), run the subscription flow:

mediasoupclient::Consumer* MediasoupMeeting::createConsumer(nlohmann::json json)
{
    // The json parameter is the message body sent by the server.
    QString kind = QString::fromStdString(json["kind"].dump());
    if (kind.contains("audio"))
    {
        // Subscribe to the audio stream sent by the server.
        return pRecvTransport->Consume(pSoupListener, json["id"], json["producerId"], "audio", &json["rtpParameters"]);
    }
    else if (kind.contains("video"))
    {
        return pRecvTransport->Consume(pSoupListener, json["id"], json["producerId"], "video", &json["rtpParameters"]);
    }
    return nullptr;
}
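
Once Consume() returns, the consumer's track can be wired up: audio plays out through the ADM automatically, while video needs a sink. A hedged sketch, where m_renderer is an assumed rtc::VideoSinkInterface<webrtc::VideoFrame> implementation:

// Hedged sketch: attach a renderer to a video consumer's track.
mediasoupclient::Consumer* consumer = createConsumer(json);
if (consumer != nullptr && consumer->GetKind() == "video")
{
    auto* track = static_cast<webrtc::VideoTrackInterface*>(consumer->GetTrack());
    track->AddOrUpdateSink(m_renderer, rtc::VideoSinkWants());
}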

Once the Mediasoup producer and transport flow above has completed, the data side of the audio-recording feature is working.

At this point you should be able to receive audio data through the two observer callbacks below.


Remember these two audio callbacks; the later recording articles fetch their audio frame data here:

virtual void OnCaptureData(...);        // local audio, one 10 ms frame per callback

virtual void OnRenderData(...);         // remote audio, one 10 ms frame per callback
