功能实现思路:主要使用浏览器的录音功能,将录制的音频实时传递给后台,实现对讲功能。
class Recorder {
  /**
   * Captures raw PCM audio from a microphone MediaStream so it can be
   * streamed to a server (no container format — server handles framing).
   * @param {MediaStream} stream - microphone stream from getUserMedia
   */
  constructor(stream) {
    this.stream = stream;
    this.sampleBits = 16; // output sample size in bits (8 or 16)
    this.sampleRate = 8000; // output sample rate in Hz
    this.context = new AudioContext();
    this.audioInput = this.context.createMediaStreamSource(stream);
    // NOTE: createScriptProcessor is deprecated (AudioWorklet is the modern
    // replacement) but is kept for broad browser compatibility.
    this.recorder = this.context.createScriptProcessor(4096, 1, 1);
    // NOTE(review): nothing in this class assigns recorder.onaudioprocess;
    // the page code (useWebSocket) is expected to feed captured frames into
    // audioData.input() — confirm against the caller.
    this.audioData = {
      size: 0, // total number of buffered samples
      buffer: [], // buffered Float32Array chunks
      // BUGFIX: use the real context rate (often 44100) instead of a
      // hard-coded 48000 — otherwise the decimation below produces
      // wrong-speed audio on devices that do not run at 48 kHz.
      inputSampleRate: this.context.sampleRate,
      inputSampleBits: 16, // input sample size in bits (8 or 16)
      outputSampleRate: this.sampleRate, // output sample rate (comment fixed — was swapped)
      oututSampleBits: this.sampleBits, // output sample bits (typo'd name kept for compatibility)
      /** Drop all buffered audio. */
      clear: function () {
        this.buffer = [];
        this.size = 0;
      },
      /**
       * Append one chunk of captured samples.
       * @param {Float32Array|number[]} data - samples in [-1, 1]
       */
      input: function (data) {
        this.buffer.push(new Float32Array(data));
        this.size += data.length;
      },
      /**
       * Merge all buffered chunks and decimate them down to the
       * output sample rate.
       * @returns {Float32Array} decimated samples
       */
      compress: function () {
        // Merge the chunk list into one contiguous array.
        var data = new Float32Array(this.size);
        var offset = 0;
        for (var i = 0; i < this.buffer.length; i++) {
          data.set(this.buffer[i], offset);
          offset += this.buffer[i].length;
        }
        // Decimate: keep every Nth sample. Math.floor, not parseInt —
        // parseInt coerces the number through a string representation.
        var compression = Math.floor(this.inputSampleRate / this.outputSampleRate);
        if (compression < 1) compression = 1; // never "upsample" by skipping 0
        // Floor so the result length is a valid integer even when the
        // buffered length is not an exact multiple of the ratio.
        var length = Math.floor(data.length / compression);
        var result = new Float32Array(length);
        var index = 0;
        var j = 0;
        while (index < length) {
          result[index] = data[j];
          j += compression;
          index++;
        }
        return result;
      },
      /**
       * Encode the buffered audio as little-endian 16-bit signed PCM.
       * No further format processing — the server handles anything else.
       * @returns {Blob} raw PCM bytes
       */
      encodePCM: function () {
        var sampleBits = Math.min(this.inputSampleBits, this.oututSampleBits);
        var bytes = this.compress();
        var dataLength = bytes.length * (sampleBits / 8);
        var buffer = new ArrayBuffer(dataLength);
        var data = new DataView(buffer);
        var offset = 0;
        // NOTE: the loop writes 16-bit samples unconditionally; with
        // 8-bit settings dataLength would be too small — both defaults
        // are 16 here, so this holds as long as the defaults stand.
        for (var i = 0; i < bytes.length; i++, offset += 2) {
          // Clamp to [-1, 1] then scale to the signed 16-bit range.
          var s = Math.max(-1, Math.min(1, bytes[i]));
          data.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7fff, true);
        }
        return new Blob([data]);
      },
    };
  }
  /** Start routing microphone audio through the processing node. */
  start() {
    this.audioInput.connect(this.recorder);
    // Connecting to destination is required in some browsers for the
    // ScriptProcessorNode's audio processing to actually run.
    this.recorder.connect(this.context.destination);
  }
  /** Stop processing (disconnect the processing node). */
  stop() {
    this.recorder.disconnect();
  }
  /** @returns {Blob} everything captured so far as 16-bit PCM */
  getBlob() {
    return this.audioData.encodePCM();
  }
  /** Discard everything captured so far. */
  clear() {
    this.audioData.clear();
  }
}
export default Recorder
1. 页面引入上面的js
2. 先连接ws,再初始化Recorder这个类
/**
* @author elongpaox
* @method begin 语音开始
*/
begin() {
navigator.getUserMedia =
navigator.getUserMedia || navigator.webkitGetUserMedia;
if (!navigator.getUserMedia) {
alert("浏览器不支持音频输入");
} else {
navigator.getUserMedia(
{
audio: true
},
mediaStream => {
this.record = new Recorder(mediaStream);
this.useWebSocket();
},
error => {
this.microphoneFlag = false;
this.$message.warning("语音对讲打开失败!");
switch (error.message || error.name) {
case "PERMISSION_DENIED":
case "PermissionDeniedError":
console.info("用户拒绝提供信息。");
break;
case "NOT_SUPPORTED_ERROR":
case "NotSupportedError":
console.info("浏览器不支持硬件设备。");
break;
case "MANDATORY_UNSATISFIED_ERROR":
case "MandatoryUnsatisfiedError":
console.info("无法发现指定的硬件设备。");
break;
default:
console.info(
"无法打开麦克风。异常信息:" +
(error.code || error.name)
);
break;
}
}
);
}
},
3. 调用start方法,开始录音。(useWebSocket)
4. 处理数据并发送数据
/**
* @author elongpaox
* @method sendData 语音数据处理
*/
sendData() {
//对以获取的数据进行处理(分包)
var reader = new FileReader();
reader.onload = e => {
var outbuffer = e.target.result;
var arr = new Int8Array(outbuffer);
if (arr.length > 0 && this.videoWs.readyState == 1) {
var tmparr = new Int8Array(1024);
var j = 0;
for (var i = 0; i < arr.byteLength; i++) {
tmparr[j++] = arr[i];
if ((i + 1) % 1024 == 0) {
this.videoWs.send(tmparr);
if (arr.byteLength - i - 1 >= 1024) {
tmparr = new Int8Array(1024);
} else {
tmparr = new Int8Array(arr.byteLength - i - 1);
}
j = 0;
}
if (i + 1 == arr.byteLength && (i + 1) % 1024 != 0) {
this.videoWs.send(tmparr);
}
}
}
};
reader.readAsArrayBuffer(this.record.audioData.encodePCM());
this.record.audioData.clear(); //每次发送完成则清理掉旧数据
},