前后端使用websocket实时传输录音的音频流,
websocket的代码就不在这里提了
下面是封装的一个HZRecorder.js文件
function HZRecorder() {
  // Map vendor-prefixed implementations onto their standard names so the
  // rest of this module can rely on window.URL and navigator.getUserMedia
  // without per-browser branching.
  window.URL = window.URL || window.webkitURL
  var candidates = [
    navigator.getUserMedia,
    navigator.webkitGetUserMedia,
    navigator.mozGetUserMedia,
    navigator.msGetUserMedia
  ]
  // First defined implementation wins; undefined if none exists.
  navigator.getUserMedia = candidates.find(function (fn) {
    return fn
  })
}
function HZRecorderInit(stream, config) {
  // Wraps a MediaStream in a ScriptProcessor-based recorder that
  // decimates microphone PCM down to the configured rate and streams
  // each captured frame over config.ws as base64-encoded raw PCM.
  //
  // config.sampleBits       - output bit depth, 8 or 16 (default 16)
  // config.sampleRate       - output sample rate in Hz (default 16000)
  // config.ws               - WebSocket to push audio frames to (optional)
  // config.sid / config.imageId - opaque ids echoed in every frame
  config = config || {}
  config.sampleBits = config.sampleBits || 16 // sample bit depth: 8 or 16
  config.sampleRate = config.sampleRate || 16000 // sample rate: 16 kHz

  var context = new (window.webkitAudioContext || window.AudioContext)()
  var audioInput = context.createMediaStreamSource(stream)
  var createScript =
    context.createScriptProcessor || context.createJavaScriptNode
  // NOTE(review): 30 input/output channels is unusual — most browsers
  // only guarantee 1-2 here; confirm this value is intentional.
  var recorder = createScript.apply(context, [16384, 30, 30])

  var audioData = {
    size: 0, // total samples buffered so far
    buffer: [], // buffered Float32Array chunks
    inputSampleRate: context.sampleRate, // hardware capture rate
    inputSampleBits: 16, // capture bit depth
    outputSampleRate: config.sampleRate, // rate after decimation
    outputSampleBits: config.sampleBits, // bit depth after conversion

    // Buffer one frame, ship it over the WebSocket (status 1 = more
    // frames follow), then reset so each send carries a single frame.
    input: function (data) {
      this.buffer.push(new Float32Array(data))
      this.size += data.length
      if (config.ws != null) {
        var reader = new FileReader()
        // Attach the handler before starting the read (the read is
        // async, but this ordering removes any doubt).
        reader.onloadend = function () {
          // Strip the "data:audio/...;base64," prefix — the server
          // expects bare base64 PCM.
          var audioBase64 = reader.result.replace(
            /^data:audio\/\w+;base64,/,
            ''
          )
          config.ws.send(
            JSON.stringify({
              sid: config.sid,
              imageId: config.imageId,
              audioBuffer: audioBase64,
              status: 1
            })
          )
        }
        // encodeWAV reads this.buffer (via compress), not an argument.
        reader.readAsDataURL(this.encodeWAV())
      }
      this.clear()
    },

    // Flatten buffered chunks, then decimate from inputSampleRate down
    // to outputSampleRate by keeping every Nth sample (no filtering).
    compress: function () {
      var merged = new Float32Array(this.size)
      var offset = 0
      for (var i = 0; i < this.buffer.length; i++) {
        merged.set(this.buffer[i], offset)
        offset += this.buffer[i].length
      }
      // FIX: Math.floor replaces parseInt on a number; clamp to >= 1 so
      // an output rate above the input rate cannot yield a zero step.
      var step = Math.max(
        1,
        Math.floor(this.inputSampleRate / this.outputSampleRate)
      )
      // FIX: integer length (was fractional, relying on typed-array
      // out-of-range writes being silently dropped).
      var length = Math.floor(merged.length / step)
      var result = new Float32Array(length)
      var j = 0
      for (var index = 0; index < length; index++) {
        result[index] = merged[j]
        j += step
      }
      return result
    },

    // Drop all buffered audio.
    clear: function () {
      this.size = 0
      this.buffer = []
    },

    // Encode the buffered samples as headerless little-endian PCM.
    // FIX: previously allocated 44 + dataLength bytes but never wrote a
    // header, shipping 44 trailing zero bytes; after that fix this is
    // identical to encodeWAV, so it simply delegates.
    encodeWAVNoHead: function () {
      return this.encodeWAV()
    },

    // Encode the buffered (compressed) samples as raw little-endian PCM
    // wrapped in a Blob. Despite the name and MIME type, no RIFF/WAV
    // header is written — the payload is headerless PCM.
    encodeWAV: function () {
      var sampleBits = Math.min(this.inputSampleBits, this.outputSampleBits)
      var samples = this.compress()
      var view = new DataView(
        new ArrayBuffer(samples.length * (sampleBits / 8))
      )
      var offset = 0
      if (sampleBits === 8) {
        for (var i = 0; i < samples.length; i++, offset++) {
          // Clamp to [-1, 1], scale to signed 16-bit, then requantize
          // to unsigned 8-bit (0..255).
          var s = Math.max(-1, Math.min(1, samples[i]))
          var val = s < 0 ? s * 0x8000 : s * 0x7fff
          // FIX: Math.floor replaces parseInt(number); setInt8 takes no
          // endianness argument (byte order is meaningless for 1 byte).
          view.setInt8(offset, Math.floor(255 / (65535 / (val + 32768))))
        }
      } else {
        for (var k = 0; k < samples.length; k++, offset += 2) {
          var sample = Math.max(-1, Math.min(1, samples[k]))
          view.setInt16(
            offset,
            sample < 0 ? sample * 0x8000 : sample * 0x7fff,
            true
          )
        }
      }
      return new Blob([view], { type: 'audio/wav' })
    }
  }

  // Start capture: wire mic -> processor -> destination so that
  // onaudioprocess begins firing.
  this.start = function () {
    audioInput.connect(recorder)
    recorder.connect(context.destination)
  }

  // Stop capture and tell the server the stream is finished
  // (status 2 = final frame, empty payload).
  this.stop = function () {
    var apiRequest = {
      sid: config.sid,
      imageId: config.imageId,
      audioBuffer: '',
      status: 2
    }
    config.ws.send(JSON.stringify(apiRequest))
    recorder.disconnect()
  }

  // Audio capture callback; mono — only channel 0 is recorded.
  recorder.onaudioprocess = function (e) {
    audioData.input(e.inputBuffer.getChannelData(0))
  }
}
// 是否支持录音
// Request microphone access and hand a ready recorder to `callback`.
// FIX: prefer the standard promise-based navigator.mediaDevices.getUserMedia
// (legacy navigator.getUserMedia is deprecated and removed from modern
// browsers), falling back to the prefixed legacy API installed by the
// HZRecorder constructor. Modern DOMException names (NotAllowedError,
// NotFoundError) are mapped onto the existing alerts.
HZRecorder.prototype.get = function (callback, config) {
  if (!callback) {
    return
  }
  // Success path: wrap the stream in a recorder and deliver it.
  var onStream = function (stream) {
    callback(new HZRecorderInit(stream, config))
  }
  // Failure path: surface a human-readable reason for the denial.
  var onError = function (error) {
    switch (error.code || error.name) {
      case 'PERMISSION_DENIED':
      case 'PermissionDeniedError':
      case 'NotAllowedError': // modern name for a denied permission
        alert('用户拒绝提供信息。')
        break
      case 'NOT_SUPPORTED_ERROR':
      case 'NotSupportedError':
        alert('浏览器不支持硬件设备。')
        break
      case 'MANDATORY_UNSATISFIED_ERROR':
      case 'MandatoryUnsatisfiedError':
      case 'NotFoundError': // modern name for "no matching device"
        alert('无法发现指定的硬件设备。')
        break
      default:
        alert('无法打开麦克风。异常信息:' + (error.code || error.name))
        break
    }
  }
  if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
    navigator.mediaDevices
      .getUserMedia({ audio: true }) // audio only
      .then(onStream, onError)
  } else if (navigator.getUserMedia) {
    navigator.getUserMedia({ audio: true }, onStream, onError)
  } else {
    alert('当前浏览器不支持录音功能。')
  }
}
// Export a shared singleton so every importer talks to the same recorder
// (constructing it also installs the getUserMedia compatibility shims).
const recorderSingleton = new HZRecorder()
export default recorderSingleton
怎么调用
1.首先引入
import HZRecorder from './HZRecorder.js'
2.调用get函数,传入callback和config
// 获取Recorder实例 【config是一些websocket接口需要的参数以及采样数位、采样率的配置】
HZRecorder.get(function (rec) {
recorderInstance = rec
// 调用start函数开启录音
recorderInstance.start()
}, config)
3.直接调用stop函数来停止录音
recorderInstance.stop()