概述
最近的项目需要用到前端录制音频,然后转为base64编码或wav文件发送给后端,下面讲一下实现过程。首先调用navigator.getUserMedia获取麦克风,不过这个方法已被弃用,浏览器会弹出警告,可以改用navigator.mediaDevices.getUserMedia,不过警告归警告,旧方法还是能用的。获得麦克风以后再new一个recorder,写好采样率和采样位数。这种recorder实现有很多,我这里提供一个HZRecorder,最初版本也不知道是谁写的,我是在其他人改过的基础上重新优化的,代码如下:
/**
 * Records mono PCM audio from a getUserMedia stream and encodes it as WAV.
 *
 * @param {MediaStream} stream  microphone stream from getUserMedia
 * @param {Object} [config]
 * @param {number} [config.sampleBits=16]   output sample size: 8 or 16 bits
 * @param {number} [config.sampleRate=16000] desired output sample rate (Hz)
 */
export function HZRecorder(stream, config) {
    config = config || {};
    config.sampleBits = config.sampleBits || 16;    // output sample size: 8 or 16
    config.sampleRate = config.sampleRate || 16000; // desired output rate, e.g. 16 kHz
    // Prefer the standard AudioContext; fall back to the webkit-prefixed one.
    var context = new (window.AudioContext || window.webkitAudioContext)();
    var audioInput = context.createMediaStreamSource(stream);
    var createScript = context.createScriptProcessor || context.createJavaScriptNode;
    var recorder = createScript.apply(context, [1024, 1, 1]);
    // FIX: the original returned the non-existent `context.sampleBits`
    // (always undefined); the device input rate lives on `context.sampleRate`.
    this.loginputSampleRate = function () {
        return context.sampleRate;
    };
    var audioData = {
        size: 0                              // total cached sample count
        , buffer: []                         // cached Float32Array chunks
        , inputSampleRate: context.sampleRate // device input rate (Hz)
        , inputSampleBits: 16                // input sample size: 8 or 16
        , outputSampleRate: config.sampleRate // requested output rate (Hz)
        , outputSampleBits: config.sampleBits // output sample size: 8 or 16
        , input: function (data) {
            // Copy the chunk: the processor reuses its buffer between callbacks.
            this.buffer.push(new Float32Array(data));
            this.size += data.length;
        }
        // Integer decimation factor actually applied when down-sampling.
        // FIX: clamped to >= 1 — the original yielded 0 (division by zero /
        // infinite loop) whenever the input rate was below the requested rate.
        , decimation: function () {
            return Math.max(1, Math.floor(this.inputSampleRate / this.outputSampleRate));
        }
        , compress: function (clearBuff) { // merge cached chunks, then decimate
            // Merge
            var merged = new Float32Array(this.size);
            var offset = 0;
            for (var i = 0; i < this.buffer.length; i++) {
                merged.set(this.buffer[i], offset);
                offset += this.buffer[i].length;
            }
            // Decimate: keep every `step`-th sample (no low-pass filtering).
            var step = this.decimation();
            // FIX: length must be an integer; the original could pass a
            // fractional length to the Float32Array constructor.
            var length = Math.floor(merged.length / step);
            var result = new Float32Array(length);
            for (var index = 0, j = 0; index < length; index++, j += step) {
                result[index] = merged[j];
            }
            // Optionally empty the cache after every fetch.
            if (clearBuff) {
                this.buffer = [];
                this.size = 0;
            }
            return result;
        }
        , encodeWAV: function (clearBuff) {
            // FIX: the header must advertise the rate the decimation actually
            // produced, not min(input, output) — e.g. 44100 Hz input with a
            // requested 16000 Hz really yields 22050 Hz; writing 16000 made
            // the file play back at the wrong speed.
            var sampleRate = this.inputSampleRate / this.decimation();
            var sampleBits = Math.min(this.inputSampleBits, this.outputSampleBits);
            var bytes = this.compress(clearBuff);
            var dataLength = bytes.length * (sampleBits / 8);
            var buffer = new ArrayBuffer(44 + dataLength);
            var data = new DataView(buffer);
            var channelCount = 1; // mono
            var offset = 0;
            var writeString = function (str) {
                for (var i = 0; i < str.length; i++) {
                    data.setUint8(offset + i, str.charCodeAt(i));
                }
            };
            // RIFF chunk descriptor
            writeString('RIFF'); offset += 4;
            // Remaining file size: total size - 8
            data.setUint32(offset, 36 + dataLength, true); offset += 4;
            writeString('WAVE'); offset += 4;
            // "fmt " sub-chunk
            writeString('fmt '); offset += 4;
            // fmt chunk size, always 16 for PCM
            data.setUint32(offset, 16, true); offset += 4;
            // Audio format: 1 = linear PCM
            data.setUint16(offset, 1, true); offset += 2;
            // Channel count
            data.setUint16(offset, channelCount, true); offset += 2;
            // Sample rate (per channel)
            data.setUint32(offset, sampleRate, true); offset += 4;
            // Byte rate = channels * sampleRate * bytesPerSample
            data.setUint32(offset, channelCount * sampleRate * (sampleBits / 8), true); offset += 4;
            // Block align = channels * bytesPerSample
            data.setUint16(offset, channelCount * (sampleBits / 8), true); offset += 2;
            // Bits per sample
            data.setUint16(offset, sampleBits, true); offset += 2;
            // "data" sub-chunk
            writeString('data'); offset += 4;
            data.setUint32(offset, dataLength, true); offset += 4;
            // Sample payload
            if (sampleBits === 8) {
                // FIX: 8-bit WAV PCM is UNSIGNED (0..255, silence = 128); the
                // original used setInt8 with a meaningless endianness argument
                // and an obfuscated rescale expression.
                for (var i = 0; i < bytes.length; i++, offset++) {
                    var s = Math.max(-1, Math.min(1, bytes[i]));
                    data.setUint8(offset, Math.round((s + 1) * 127.5));
                }
            } else {
                for (var i2 = 0; i2 < bytes.length; i2++, offset += 2) {
                    var s2 = Math.max(-1, Math.min(1, bytes[i2]));
                    data.setInt16(offset, s2 < 0 ? s2 * 0x8000 : s2 * 0x7FFF, true);
                }
            }
            return new Blob([data], { type: 'audio/wav' });
        }
    };
    // Start (or resume) capturing: mic -> processor -> destination.
    this.start = function () {
        audioInput.connect(recorder);
        recorder.connect(context.destination);
    };
    // Pause capturing; cached audio is kept and can still be fetched.
    this.pause = function () {
        recorder.disconnect();
    };
    // Stop capturing AND discard everything recorded so far.
    // NOTE: call getBlob() BEFORE stop() if you need the audio.
    this.stop = function () {
        recorder.disconnect();
        audioData.buffer = [];
        audioData.size = 0;
    };
    // FIX: this was an empty no-op; it now actually drops the cached audio
    // without disconnecting the processing graph.
    this.clearBuffer = function () {
        audioData.buffer = [];
        audioData.size = 0;
    };
    /**
     * Build a WAV Blob from the cached audio.
     * @param {boolean} [clearBuff=false] also empty the cache afterwards
     * @returns {Blob} audio/wav
     */
    this.getBlob = function (clearBuff) {
        clearBuff = clearBuff || false;
        return audioData.encodeWAV(clearBuff);
    };
    // Play the current recording back through the given <audio> element.
    this.play = function (audio) {
        audio.src = window.URL.createObjectURL(this.getBlob());
    };
    // Convenience alias used when sending the recording to a server.
    this.upload = function () {
        return this.getBlob();
    };
    // Capture callback: cache each mono chunk delivered by the processor.
    recorder.onaudioprocess = function (e) {
        audioData.input(e.inputBuffer.getChannelData(0));
    };
}
要注意停止录音前(stop方法)需要先用getBlob获取并保存好录音的blob文件,不然可就清空了,如果不想清空可以使用pause方法,这时还能继续获取,使用完再清空也是可以的。
补充一下,Blob 是二进制大对象(Binary Large Object)格式;wav 文件会有一个 44 字节的文件头,里面记录着通道数、采样率、音频长度等信息。如果发送的文件长度只有 44 字节,那就说明只有文件头而没有音频内容。
如何调用HZRecorder为我所用呢?
var that = this
this.$nextTick(() => {
  try {
    // Normalize the vendor-prefixed browser APIs before use.
    window.AudioContext = window.AudioContext || window.webkitAudioContext;
    navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia;
    window.URL = window.URL || window.webkitURL;
    that.audio_context = new AudioContext();
    console.log('navigator.getUserMedia ' + (navigator.getUserMedia ? 'available.' : 'not present!'));
  } catch (e) {
    alert('No web audio support in this browser!');
  }
  var onStream = function (stream) {
    that.recorder1 = new HZRecorder(stream, {
      sampleBits: 16,
      sampleRate: 16000
    })
    console.log('初始化完成');
  };
  var onError = function (e) {
    console.log('No live audio input: ' + e);
  };
  // FIX: prefer the modern promise-based mediaDevices API (the article
  // itself notes navigator.getUserMedia is deprecated); keep the legacy
  // callback API only as a fallback for old browsers.
  if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
    navigator.mediaDevices.getUserMedia({ audio: true }).then(onStream).catch(onError);
  } else {
    navigator.getUserMedia({ audio: true }, onStream, onError);
  }
})
以 Vue 为例,要注意生命周期中 DOM 挂载的时机:如果 DOM 还没有挂载完成,上面的初始化代码要写在 $nextTick() 的回调里面。
然后就可以在需要的地方愉快的调用啦
data() {
return {
recorder1: HZRecorder,
timeCount: 1,
clearRecorder: false,
}
},
methods: {
/* 开始监听录音 */
startMonitor() {
this.recorder1.start;
let interval = setInterval(() => {
if (this.clearRecorder) {
clearInterval(interval);
}
this.timeCount = this.timeCount + 1;
console.log(this.timeCount);
if (this.timeCount % 3 == 0) {
let param = this.recorder1.getBlob();
this.postHelloWav(param);
this.recorder1.stop();
this.recorder1.start();
}
}, 1000)
},
/* 开始一段录音 */
startRecording() {
this.textarea = "";
this.recorder1.start();
setTimeout(() => {
this.$message.success("录音结束");
let param = this.recorder1.getBlob();
this.recorder1.stop();
this.postWav(param);
this.clearRecorder = false;
this.startMonitor();
}, 5000)
},
}
这里有两个方法,监听录音和简单的一段式录音,监听录音使用setInterval,每3s录制一次,一段式录音使用setTimeout,录制5s。
其中postWav和postHelloWav是我用于传送语音至后端的,不需要的同学可以自己删掉啦