在一个项目中,前端使用 MediaRecorder 录制的 WAV 文件缺少 WAV 文件头,导致后端的语音识别模型报错。后来改用 Recorder(一个原生 JS 实现的 Web 端录音库,可在其 Gitee 仓库获取)解决了该问题。
期间曾尝试在 MediaRecorder 的输出前手动加入 WAV 文件头,但音频出现了损坏(播放时有刺啦声)。我看 Recorder 也是给录音数据加上 WAV 文件头,为什么下面这种方法就不行?希望有大佬解惑。
handleYy() {
// 检查浏览器是否支持录音功能
if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
navigator.mediaDevices.getUserMedia({ audio: true })
.then((stream) => {
// 创建一个MediaRecorder实例
if (this.mediaRecorder === null) {
this.mediaRecorder = new MediaRecorder(stream)
}
let audioTracks = stream.getAudioTracks()
// let sampleRate = 8000 // 假设的采样率
// let numChannels = 1 // 单声道
// let sampleDepth = 16 // 16 位深度
let sampleRate = null // 假设的采样率
let numChannels = null // 单声道
let sampleDepth = null // 16 位深度
if (audioTracks.length > 0) {
const settings = audioTracks[0].getSettings()
sampleRate = settings.sampleRate // 采样率
numChannels = settings.channelCount // 声道数
// 采样深度通常固定为16位,但您也可以从settings中获取
sampleDepth = settings.sampleSize // 采样深度(如果可用)
}
// 监听MediaRecorder的状态变化
this.state = this.mediaRecorder.state
switch (this.state) {
case 'inactive':
this.mediaRecorder.start()
break
case 'recording':
this.mediaRecorder.stop()
this.mediaRecorder.ondataavailable = async(event) => {
if (event.data.size > 0) {
// 将录制的音频数据转换为Blob
const audioBlob = new Blob([event.data], { type: 'audio/wav' })
// 创建一个URL,可以用于下载或播放音频
// const audioUrl = URL.createObjectURL(audioBlob);
// 创建 WAV 文件头
let buffer = new ArrayBuffer(44)
let view = new DataView(buffer)
// RIFF chunk descriptor
this.writeString(view, 0, 'RIFF')
view.setUint32(4, 36 + audioBlob.size, true)
this.writeString(view, 8, 'WAVE')
// FMT sub-chunk
this.writeString(view, 12, 'fmt ')
view.setUint32(16, 16, true) // Sub-chunk size
view.setUint16(20, 1, true) // Audio format (1 = PCM)
view.setUint16(22, numChannels, true)
view.setUint32(24, sampleRate, true)
view.setUint32(28, sampleRate * numChannels * (sampleDepth / 8), true) // Byte rate
view.setUint16(32, numChannels * (sampleDepth / 8), true) // Block align
view.setUint16(34, sampleDepth, true) // Bits per sample
// Data sub-chunk
this.writeString(view, 36, 'data')
view.setUint32(40, audioBlob.size, true)
// 将 WAV 文件头和音频数据合并
let wavBlob = new Blob([buffer, audioBlob], { 'type': 'audio/wav' })
let wavURL = window.URL.createObjectURL(wavBlob)
// let wavAudio = new Audio(wavURL)
// 在这里你可以使用audioUrl来做任何事情,比如播放或下载录音
// console.log('Recorded audio URL:', audioBlob);
// console.log(audioUrl)
// const res = await generationVoice({ file:audioBlob })
// console.log('res',res)
// 在这里你可以使用audioUrl来做任何事情,比如播放或下载录音
console.log('Recorded audio URL:', wavURL)
const res = await generationVoice({ file: wavBlob })
console.log('res', res)
}
}
console.log('Recorder is recording')
break
case 'stopped':
console.log('Recorder is stopped')
break
default:
console.log('Unknown state:', this.state)
}
console.log(this.mediaRecorder)
// 监听数据流事件
// 自动开始录制
console.log('Recording started')
// 当你想停止录制时
// mediaRecorder.stop();
})
.catch(function(err) {
// 处理错误情况
console.error('Error accessing media devices:', err)
})
} else {
console.log('Your browser does not support the MediaRecorder API.')
}
},