webrtc-audio-processing pulseaudio最新版本1.0交叉编译到ARM

   最近在研究最新版本的aec3效果,之前0.31的太老了。百度、Google搜索一遍,发现都是基于0.31的,也没有找到1.0版本的demo(此时官网也没有提供)。完成工作任务后,写个博客记录一下,为其它同行少踩坑。

一、先下载源码和配置开发环境:

编译webrtc的时候会依赖链接abseil-cpp库。

git clone git://github.com/abseil/abseil-cpp.git

git clone https://gitlab.freedesktop.org/pulseaudio/webrtc-audio-processing.git

看看编译工具cmake、meson、ninja的版本。

 

 交叉编译器要支持c++14. 这些工具怎么安装可以自行百度。

 二、编译abseil-cpp:

修改CMakeLists.txt文件。加入:(xxx是我自己的目录这里不方便发正式名字,读者要改成自己的交叉编译工具所在的目录)

# Cross-compilation settings added to abseil-cpp's CMakeLists.txt.
# Target: ARM Linux (32-bit by default; aarch64 variant commented out below).
set(CMAKE_SYSTEM_NAME Linux)

# Release build with -O3; CXXFLAGS from the environment are appended.
SET(CMAKE_BUILD_TYPE "Release")
SET(CMAKE_CXX_FLAGS_RELEASE "$ENV{CXXFLAGS} -O3 -Wall")

set(CMAKE_CROSSCOMPILING ON)
# Propagate the C++ standard to abseil targets (required by newer abseil).
set(ABSL_PROPAGATE_CXX_STD ON)


# webrtc-audio-processing 1.0 requires at least C++14.
set(CMAKE_CXX_STANDARD 14)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14")
# arm 32
SET(CMAKE_C_COMPILER   /work/xxx/host-tools/gcc/gcc-linaro-6.3.1-2017.05-x86_64_arm-linux-gnueabihf/bin/arm-linux-gnueabihf-gcc)

SET(CMAKE_CXX_COMPILER /work/xxx/host-tools/gcc/gcc-linaro-6.3.1-2017.05-x86_64_arm-linux-gnueabihf/bin/arm-linux-gnueabihf-g++)

# Sysroot used when resolving libraries/headers for the target.
SET(CMAKE_FIND_ROOT_PATH /work/xxx/ramdisk/sysroot/sysroot-glibc-linaro-2.23-2017.05-arm-linux-gnueabihf/)

# NOTE(review): "arm.v7" is an unusual processor string; it matches the
# -DCMAKE_SYSTEM_PROCESSOR=arm.v7 used on the command line below.
set(CMAKE_SYSTEM_PROCESSOR arm.v7)

# aarch64
#SET(CMAKE_C_COMPILER   /work/xxx/host-tools/gcc/gcc-linaro-6.3.1-2017.05-x86_64_aarch64-linux-gnu/bin/aarch64-linux-gnu-gcc)

#SET(CMAKE_CXX_COMPILER /work/xxx/host-tools/gcc/gcc-linaro-6.3.1-2017.05-x86_64_aarch64-linux-gnu/bin/aarch64-linux-gnu-g++)

#SET(CMAKE_FIND_ROOT_PATH /work/xxx/ramdisk/sysroot/sysroot-glibc-linaro-2.23-2017.05-aarch64-linux-gnu/)

#set(CMAKE_SYSTEM_PROCESSOR aarch64)
cd abseil-cpp/
mkdir build
cd build

编译arm32执行这个:
cmake .. -DCMAKE_INSTALL_PREFIX=/usr/local  -DCMAKE_CXX_STANDARD=14 -DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_SYSTEM_NAME=Linux -DCMAKE_SYSTEM_PROCESSOR=arm.v7

aarch64:
cmake .. -DCMAKE_INSTALL_PREFIX=/usr/local  -DCMAKE_CXX_STANDARD=14 -DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_SYSTEM_NAME=Linux -DCMAKE_SYSTEM_PROCESSOR=aarch64

编译&安装:
 make -j8 && make install

这里安装在/usr/local目录。建议读者都用这个目录。我在这里被坑了很久,刚开始我是安装在/usr,或我自己的/work/install目录。结果在编译webrtc的时候出问题了。也可能和各个系统的环境变量有关系。要研究meson,ninja才能知道根本原因。

三、编译pulseaudio-webrtc-audio-processing

先来看编译脚本:

 #!/bin/bash
# Configure webrtc-audio-processing with meson using the ARM cross file,
# build with ninja, then stage the install tree under DESTDIR.
set -e
meson arm-build --prefix=/work/arm/webrtc-audio-processing/install --cross-file cross_file.txt
ninja -C arm-build
DESTDIR=/work/arm/webrtc-audio-processing/install ninja -C arm-build install

看交叉编译cross_file.txt文件:(如果是64位,需要自己修改为 cpu_family = 'aarch64',cpu = 'armv8a')

[binaries]
c = 'arm-linux-gnueabihf-gcc'
cpp = 'arm-linux-gnueabihf-g++'
ar = 'arm-linux-gnueabihf-ar'
ld = 'arm-linux-gnueabihf-ld'
# Fixed typo: the meson key is 'strip', not 'srtip' — with the typo meson
# silently falls back to the host strip (or none) for the cross build.
strip = 'arm-linux-gnueabihf-strip'
sys_root = '/work/xxx/ramdisk/sysroot/sysroot-glibc-linaro-2.23-2017.05-arm-linux-gnueabihf'
pkg_config_libdir = '/work/xxx/ramdisk/sysroot/sysroot-glibc-linaro-2.23-2017.05-arm-linux-gnueabihf/usr/lib/pkgconfig'
# This cmake entry is required — without it the meson configure step fails
# when probing cmake-based dependencies (abseil-cpp).
cmake = 'cmake'

[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7l'
endian = 'little'

[target_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7l'
endian = 'little'

[build_machine]
system = 'linux'
cpu_family = 'x86_64'
cpu = 'i686'
endian = 'little'

最后修改meson.build,把构建类型改成 'buildtype=release'。

在源码目录执行编译脚本就可以成功了。编译完后执行

arm-linux-gnueabihf-strip libwebrtc-audio-processing-1.so.1缩减so文件大小。

四、把/usr/local/include/absl/ ,webrtc install里的include目录所有头文件拷贝到自己的目录。

测试代码:(-I、-L 后面的路径取决于你把absl和webrtc的安装文件拷到了哪里)

/*
编译命令:
arm-linux-gnueabihf-g++ -o demo demo.cc  -I ./include/webrtc-audio-processing-1/ -I ./include/webrtc-audio-processing-1/modules/ -L ./lib/ -I /work/arm/abseil-cpp/install/include/ -lwebrtc-audio-processing-1
板子上跑起来测试效果:
[root@xxx]/mnt/nfs# cp libwebrtc-audio-processing-1.so.1 /mnt/system/usr/lib/                                      
3rd/                                                                                                                  
[root@xxx]/mnt/nfs# ./demo aec_source_far16k.pcm aec_source_near16k.pcm webrt                                      
c.pcm                                                                                                                 
samples_per_frame =160                                                                                                
bytes_per_frame =320                                                                                                  
delay_ms =95 
*/

#include "api/audio/echo_canceller3_config.h"
#include "api/audio/echo_control.h"

#include "audio_processing/include/audio_processing.h"



#include <iostream>

using namespace webrtc;
using namespace std;


int main(int argc, char* argv[])
{
    // Offline AEC demo: reads a far-end (render) PCM file and a near-end
    // (capture) PCM file, runs both through the WebRTC AudioProcessing
    // module in 10 ms frames, and writes the echo-cancelled capture signal.
    //
    // argv[1]: far-end 16 kHz mono S16 PCM
    // argv[2]: near-end 16 kHz mono S16 PCM
    // argv[3]: output PCM (processed capture)
    // Returns 0 on success, -1 on bad usage or I/O failure.
    if (argc < 4) {
        cout << "usage: " << argv[0] << " <far.pcm> <near.pcm> <out.pcm>" << endl;
        return -1;
    }

    constexpr int kSampleRateHz = 16000;
    constexpr int kNumChannels = 1;

    FILE *fd_far  = fopen(argv[1], "rb");
    FILE *fd_near = fopen(argv[2], "rb");
    FILE *fd_out  = fopen(argv[3], "wb");
    if (!fd_far || !fd_near || !fd_out) {
        cout << "fopen file fail!" << endl;
        // Original only printed and kept running, dereferencing NULL FILE*s.
        if (fd_far)  fclose(fd_far);
        if (fd_near) fclose(fd_near);
        if (fd_out)  fclose(fd_out);
        return -1;
    }

    // APM accepts only linear PCM audio data in chunks of 10 ms.
    int samples_per_frame = kSampleRateHz / 100;                    // 160
    int bits_per_sample = 16;
    int bytes_per_frame = samples_per_frame * bits_per_sample / 8;  // 320
    int NN = samples_per_frame;

    int delay_ms = 95;

    cout << "samples_per_frame =" << samples_per_frame << endl;
    cout << "bytes_per_frame =" << bytes_per_frame << endl;
    cout << "delay_ms =" << delay_ms << endl;

    // Size each buffer to exactly one 10 ms frame instead of the magic 1024.
    int16_t *render_frame      = (int16_t*)malloc(bytes_per_frame);
    int16_t *capture_frame     = (int16_t*)malloc(bytes_per_frame);
    int16_t *capture_frame_out = (int16_t*)malloc(bytes_per_frame);

    webrtc::StreamConfig inStreamConfig  = webrtc::StreamConfig(kSampleRateHz, kNumChannels, false);
    webrtc::StreamConfig outStreamConfig = webrtc::StreamConfig(kSampleRateHz, kNumChannels, false);

    AudioProcessing* apm = AudioProcessingBuilder().Create();

    AudioProcessing::Config config;
    config.echo_canceller.enabled = true;
    config.echo_canceller.mobile_mode = false;   // full AEC3, not AECM

    config.gain_controller1.enabled = true;
    config.gain_controller1.mode =
        AudioProcessing::Config::GainController1::kAdaptiveAnalog;
    config.gain_controller1.analog_level_minimum = 0;
    config.gain_controller1.analog_level_maximum = 255;

    config.gain_controller2.enabled = true;

    config.high_pass_filter.enabled = true;

    config.voice_detection.enabled = true;

    apm->ApplyConfig(config);

    while (1) {
        if (NN != (int)fread(render_frame, sizeof(int16_t), NN, fd_far)) {
            cout << "read far file end NULL" << endl;
            break;
        }
        if (NN != (int)fread(capture_frame, sizeof(int16_t), NN, fd_near)) {
            // Original ignored a short read here and processed stale data.
            cout << "read near file end" << endl;
            break;
        }
        // Render frame arrives bound for the audio HAL (loudspeaker path).
        apm->ProcessReverseStream(render_frame, inStreamConfig, outStreamConfig, NULL);
        // Capture (mic) frame; echo is cancelled against the render stream.
        apm->ProcessStream(capture_frame, inStreamConfig, outStreamConfig, capture_frame_out);

        fwrite(capture_frame_out, sizeof(int16_t), NN, fd_out);
    }

    // NOTE(review): AudioProcessingBuilder().Create() returns a raw pointer in
    // webrtc-audio-processing-1; confirm delete vs Release() for the exact
    // library version in use.
    delete apm;

    free(capture_frame);
    free(capture_frame_out);
    free(render_frame);
    fclose(fd_far);
    fclose(fd_near);
    fclose(fd_out);

    return 0;
}

附上效果图:

初步效果还是可以的。后面还需对接到pcm_read,pcm_write相关的线程里实现通话3A功能。

来个实际对讲例子代码:(完整源码音频文件下载链接:https://download.csdn.net/download/longruic/21342708)

/*
编译:
arm-linux-gnueabihf-g++ -o xxx_audio_webrtc_3a cvi_audio_webrtc_3a.c  -I ./include/webrtc-audio-processing-1/ -I ./include/webrtc-audio-processing-1/modules/ -I ./include/  -I ./include/webrtc-audio-processing-1/ -lwebrtc-audio-processing-1 -L /work/install_32/lib/ -ltinyalsa -L ../../lib/
执行测试
[root@crl6]/mnt/nfs# ./xxx_audio_webrtc_3a ./aec_source_far16k.pcm  ./yuan.pcm ./test.pcm
*/

#include "api/audio/echo_canceller3_config.h"
#include "api/audio/echo_control.h"

#include "audio_processing/include/audio_processing.h"
#include "asoundlib.h"


#include <iostream>

using namespace webrtc;
using namespace std;

// tinyalsa capture (mic) and playback (speaker) handles; opened by
// initCapture()/initPlayback() and closed at the end of main().
struct pcm *capture_handle;
struct pcm *playback_handle;
// NOTE(review): kAudioLevel is never used in this demo.
constexpr int16_t kAudioLevel = 10000;
constexpr int kSampleRateHz = 16000;  // 16 kHz mono S16_LE throughout
constexpr int kNumChannels = 1;
// Bytes per 10 ms frame at 16 kHz mono S16 (160 samples * 2 bytes).
// NOTE(review): also passed to tinyalsa as period_size, which is in frames —
// the units are mixed; verify against the driver configuration.
int  NN = 320;
int pcm_write_size = 0;  // working transfer size in bytes, set in the main loop

void initCapture(){
	// Opens the capture (mic) PCM device — card 0, device 0 — via tinyalsa at
	// 16 kHz mono S16_LE. On success the handle is stored in the global
	// capture_handle; on failure an error is printed and the function returns
	// early (the original fell through and kept using the dead handle).
	int period_size = NN;  // NOTE(review): NN is in bytes elsewhere but used
	                       // here as a period size in frames — confirm units.
	int period_count = 2;
	struct pcm_config capture_config;
	int card = 0;
	int device = 0;

	memset(&capture_config, 0, sizeof(capture_config));
	capture_config.channels = kNumChannels;
	capture_config.rate = kSampleRateHz;
	printf("check period_size period cnt[%d][%d]\n", period_size, period_count);
	capture_config.period_size = period_size;
	capture_config.period_count = period_count;
	capture_config.format = PCM_FORMAT_S16_LE;
	capture_config.start_threshold = 0;
	capture_config.stop_threshold = 13245;
	capture_config.silence_threshold = 0;

	capture_handle = pcm_open(card, device, PCM_IN, &capture_config);
	if (!capture_handle || !pcm_is_ready(capture_handle)) {
		printf("Unable to open PCM device (%s)\n",
			   pcm_get_error(capture_handle));
		return;
	}

	// Report the total device buffer size in bytes (diagnostic only).
	int size = pcm_frames_to_bytes(capture_handle, pcm_get_buffer_size(capture_handle));
	printf("[pcm_get_frame_size][%d]\n", size);
	// Removed: unused locals (s32sample_rate, record_second) and a dead
	// trailing recomputation of `size` whose result was never read.
}

void initPlayback() {
	// Opens the playback (speaker) PCM device — card 1, device 0 — via
	// tinyalsa at 16 kHz mono S16_LE, and records the device buffer size
	// (in bytes) into the global pcm_write_size. On failure an error is
	// printed and pcm_write_size is left unchanged.
	struct pcm_config pcm_config;
	int card = 1;
	int device = 0;

	memset(&pcm_config, 0, sizeof(pcm_config));
	pcm_config.channels = kNumChannels;
	pcm_config.rate = kSampleRateHz;
	pcm_config.period_size = NN;
	pcm_config.period_count = 4;
	pcm_config.format = PCM_FORMAT_S16_LE;
	pcm_config.start_threshold = 0;
	pcm_config.stop_threshold = 0;
	pcm_config.silence_threshold = 0;

	playback_handle = pcm_open(card, device, PCM_OUT, &pcm_config);
	if (!playback_handle || !pcm_is_ready(playback_handle)) {
		// Fixed format specifier: `device` is a signed int, so %d not %u.
		printf("Unable to open PCM card %d device %d (%s)\n",
			card, device, pcm_get_error(playback_handle));
		return;
	}

	pcm_write_size = pcm_frames_to_bytes(playback_handle, pcm_get_buffer_size(playback_handle));
}

int main(int argc, char* argv[])
{
    // Live talkback 3A demo: plays the far-end file argv[1] to the speaker,
    // captures from the mic, runs WebRTC AEC/AGC/HPF/VAD on the capture path,
    // and writes the raw capture to argv[2] and the processed capture to
    // argv[3]. Returns 0 on success, -1 on bad usage or I/O failure.
    if (argc < 4) {
        cout << "usage: " << argv[0] << " <far.pcm> <raw_out.pcm> <proc_out.pcm>" << endl;
        return -1;
    }

    FILE *fd_far      = fopen(argv[1], "rb");
    FILE *fd_yuan_out = fopen(argv[2], "wb");
    FILE *fd_out      = fopen(argv[3], "wb");
    if (!fd_far || !fd_yuan_out || !fd_out) {
        cout << "fopen file fail!" << endl;
        // Original only printed and kept running, dereferencing NULL FILE*s.
        if (fd_far)      fclose(fd_far);
        if (fd_yuan_out) fclose(fd_yuan_out);
        if (fd_out)      fclose(fd_out);
        return -1;
    }

    // APM accepts only linear PCM audio data in chunks of 10 ms.
    int samples_per_frame = kSampleRateHz / 100;                    // 160
    int bits_per_sample = 16;
    int bytes_per_frame = samples_per_frame * bits_per_sample / 8;  // 320
    int delay_ms = 95;
    int err = -1;

    initCapture();
    initPlayback();

    cout << "samples_per_frame =" << samples_per_frame << endl;
    cout << "bytes_per_frame =" << bytes_per_frame << endl;
    cout << "delay_ms =" << delay_ms << endl;

    // One 10 ms frame (NN bytes) per buffer; the original malloc(321) was an
    // odd magic size one byte larger than the frame.
    int16_t *render_frame      = (int16_t*)malloc(NN);
    int16_t *capture_frame     = (int16_t*)malloc(NN);
    int16_t *capture_frame_out = (int16_t*)malloc(NN);

    webrtc::StreamConfig inStreamConfig  = webrtc::StreamConfig(kSampleRateHz, kNumChannels, false);
    webrtc::StreamConfig outStreamConfig = webrtc::StreamConfig(kSampleRateHz, kNumChannels, false);

    AudioProcessing* apm = AudioProcessingBuilder().Create();

    AudioProcessing::Config config;
    config.echo_canceller.enabled = true;
    config.echo_canceller.mobile_mode = false;   // full AEC3, not AECM

    config.gain_controller1.enabled = true;
    config.gain_controller1.mode =
        AudioProcessing::Config::GainController1::kAdaptiveAnalog;
    config.gain_controller1.analog_level_minimum = 0;
    config.gain_controller1.analog_level_maximum = 255;

    config.gain_controller2.enabled = true;

    config.high_pass_filter.enabled = true;

    config.voice_detection.enabled = true;

    apm->ApplyConfig(config);

    while (1) {
        pcm_write_size = NN;   // one 10 ms frame, in bytes
        if (pcm_write_size != (int)fread((char*)render_frame, 1, pcm_write_size, fd_far)) {
            cout << "read far file end" << endl;
            break;
        }

        err = pcm_write(playback_handle, (char*)render_frame, pcm_write_size);
        if (err < 0) {  // old tinyalsa API: 0 on success, negative on failure
            printf("[%s]\n", pcm_get_error(playback_handle));
        }

        // Feed the loudspeaker signal to the reverse (render) stream so the
        // AEC knows which echo to cancel.
        apm->ProcessReverseStream(render_frame, inStreamConfig, outStreamConfig, NULL);

        err = pcm_read(capture_handle, (char*)capture_frame, pcm_write_size);
        if (err < 0) {  // original tested `err > 0`, which never fires with
                        // the old tinyalsa API (errors are negative)
            printf("[%s]\n", pcm_get_error(capture_handle));
        }

        apm->ProcessStream(capture_frame, inStreamConfig, outStreamConfig, capture_frame_out);

        fwrite((char*)capture_frame, 1, pcm_write_size, fd_yuan_out);  // raw mic
        fwrite((char*)capture_frame_out, 1, pcm_write_size, fd_out);   // processed

    }

    // NOTE(review): confirm delete vs Release() for this library version.
    delete apm;

    free(capture_frame);
    free(capture_frame_out);
    free(render_frame);

    fclose(fd_far);
    fclose(fd_yuan_out);
    fclose(fd_out);

    pcm_close(capture_handle);
    pcm_close(playback_handle);

    return 0;
}

  • 4
    点赞
  • 15
    收藏
    觉得还不错? 一键收藏
  • 6
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 6
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值