使用 WebRTC 处理降噪、去回声、增益、均衡等
MainActivity代码:
import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioRecord;
import android.media.AudioTrack;
import android.media.MediaRecorder;
import android.os.Bundle;
import android.support.v7.app.AppCompatActivity;
import android.util.Log;
import android.view.View;
import android.widget.CheckBox;
import android.widget.CompoundButton;
import android.widget.SeekBar;
/**
* Desc:
*/
public class MainActivity extends AppCompatActivity implements View.OnClickListener {
SeekBar skbVolume;//控制音量大小
boolean isProcessing = true;//判断是否录放
boolean isRecording = false;//判断是否录放
static final int FREQUENCY = 44100;
static final int CHANNELCONFIGURATION = AudioFormat.CHANNEL_CONFIGURATION_MONO;
static final int AUDIOENCODING = AudioFormat.ENCODING_PCM_16BIT;
int recBufSize, playBufSize;
AudioRecord audioRecord;
AudioTrack audioTrack;
private WebrtcProcessor mProcessor;
@Override
public void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
//获得录音缓存大小
recBufSize = AudioRecord.getMinBufferSize(FREQUENCY, CHANNELCONFIGURATION, AUDIOENCODING);
Log.e("", "recBufSize:" + recBufSize);
//获得播放缓存大小
playBufSize = AudioTrack.getMinBufferSize(FREQUENCY, CHANNELCONFIGURATION, AUDIOENCODING);
//创建录音和播放实例
audioRecord = new AudioRecord(MediaRecorder.AudioSource.MIC, FREQUENCY, CHANNELCONFIGURATION, AUDIOENCODING, recBufSize);
audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, FREQUENCY, CHANNELCONFIGURATION, AUDIOENCODING, playBufSize, AudioTrack.MODE_STREAM);
findViewById(R.id.btnRecord).setOnClickListener(this);
findViewById(R.id.btnStop).setOnClickListener(this);
skbVolume = (SeekBar) this.findViewById(R.id.skbVolume);
skbVolume.setMax(100);//音量调节的最高值
skbVolume.setProgress(50);//seekbar的位置
audioTrack.setStereoVolume(0.3f,0.3f);//设置当前音量
skbVolume.setOnSeekBarChangeListener(new SeekBar.OnSeekBarChangeListener() {
@Override
public void onStopTrackingTouch(SeekBar seekBar) {
float vol = (float) (seekBar.getProgress()) / (float) (seekBar.getMax());
audioTrack.setStereoVolume(vol, vol);//设置音量
}
@Override
public void onStartTrackingTouch(SeekBar seekBar) {
}
@Override
public void onProgressChanged(SeekBar seekBar, int progress, boolean fromUser) {
}
});
((CheckBox) findViewById(R.id.cb_ap)).setOnCheckedChangeListener(new CompoundButton.OnCheckedChangeListener() {
@Override
public void onCheckedChanged(CompoundButton view, boolean checked) {
isProcessing = checked;
}
});
initProccesor();
}
@Override
protected void onDestroy() {
releaseProcessor();
android.os.Process.killProcess(android.os.Process.myPid());
super.onDestroy();
}
@Override
public void onClick(View v) {
if (v.getId() == R.id.btnRecord) {
isRecording = true;
//启动线程,开始录音和一边播放
new RecordPlayThread().start();
} else if (v.getId() == R.id.btnStop) {
isRecording = false;
}
}
class RecordPlayThread extends Thread {
public void run() {
try {
short[] buffer = new short[recBufSize / 2];
audioRecord.startRecording();//开始录制
audioTrack.play();//开始播放
while (isRecording) {
//第一步:从MIC保存数据到缓冲区
int bufferReadResult = audioRecord.read(buffer, 0, recBufSize / 2);
short[] tmpBuf_src = new short[bufferReadResult];
System.arraycopy(buffer, 0, tmpBuf_src, 0, bufferReadResult);
//第二步:进行处理
if (isProcessing) {
processData(tmpBuf_src);
}
//写入数据即播放
audioTrack.write(tmpBuf_src, 0, tmpBuf_src.length);
}
audioTrack.stop();
audioRecord.stop();
} catch (Exception t) {
t.printStackTrace();
}
}
}
;
/**
* 初始化降噪
*/
private void initProccesor() {
mProcessor = new WebrtcProcessor();
mProcessor.init(FREQUENCY);
}
/**
* 释放降噪资源
*/
private void releaseProcessor() {
if (mProcessor != null) {
mProcessor.release();
}
}
/**
* 处理需要降噪的音频数据
*
* @param data
*/
private void processData(byte[] data) {
if (mProcessor != null) {
mProcessor.processNoise(data);
}
}
/**
* 处理需要降噪的音频数据
*
* @param data
*/
private void processData(short[] data) {
if (mProcessor != null) {
mProcessor.processNoise(data);
}
}
}
主要实现降噪的 WebrtcProcessor 代码:
import android.util.Log;
/**
* Desc:
*/
public class WebrtcProcessor {
static {
try {
//加载降噪库
System.loadLibrary("webrtc");
} catch (UnsatisfiedLinkError e) {
Log.e("TAG", e.getMessage());
}
}
/**
* 处理降噪
*
* @param data
*/
public void processNoise(byte[] data) {
if (data == null) return;
int newDataLength = data.length / 2;
if (data.length % 2 == 1) {
newDataLength += 1;
}
//此处是将字节数据转换为short数据
short[] newData = new short[newDataLength];
for (int i = 0; i < newDataLength; i++) {
byte low = 0;
byte high = 0;
if (2 * i < data.length) {
low = data[2 * i];
}
if ((2 * i + 1) < data.length) {
high = data[2 * i + 1];
}
newData[i] = (short) (((high << 8) & 0xff00) | (low & 0x00ff));
}
// 交给底层处理
processNoise(newData);
//处理完之后, 又将short数据转换为字节数据
for (int i = 0; i < newDataLength; i++) {
if (2 * i < data.length) {
data[2 * i] = (byte) (newData[i] & 0xff);
}
if ((2 * i + 1) < data.length) {
data[2 * i + 1] = (byte) ((newData[i] >> 8) & 0xff);
}
}
}
/**
* 初始化降噪设置
*
* @param sampleRate 采样率
* @return 是否初始化成功
*/
public native boolean init(int sampleRate);
/**
* 处理降噪
*
* @param data
* @return
*/
public native boolean processNoise(short[] data);
/**
* 释放降噪资源
*/
public native void release();
}
主要是根据以上两个类实现的,更多细节请查看源码: