前言
javascript video to audio。前端视频转(提取)音频。使用FileReader加载视频,然后decodeAudioData对其进行解码,并使用OfflineAudioContext重新渲染,最后将audiobuffer转换为wav。
demo:https://237005722.github.io/video-to-audio/
一、引用参考
- 参考:https://github.com/mdn/webaudio-examples/tree/master/offline-audio-context-promise
- 引用:https://github.com/Jam3/audiobuffer-to-wav
二、示例代码
1.视频转换(提取)音频
/***
* video-to-audio
* creator: qc
* reference: https://github.com/mdn/webaudio-examples/tree/master/offline-audio-context-promise
*/
const videoToAudio = async(file) => {
try {
console.log('videoToAudio file', file)
const fileData = new Blob([file]) // video file
const arrayBuffer = await new Promise((resolve) => {
const reader = new FileReader()
reader.onload = ()=> {
const arrayBuffer = reader.result
resolve(arrayBuffer)
}
reader.readAsArrayBuffer(fileData)
})
console.log('arrayBuffer', arrayBuffer)
const audioContext = new(window.AudioContext || window.webkitAudioContext || window.mozAudioContext || window.msAudioContext)()
const decodedAudioData = await audioContext.decodeAudioData(arrayBuffer)
console.log('decodedAudioData', decodedAudioData)
const fileDuration = durationTrans(decodedAudioData.duration)
console.log('fileDuration', fileDuration)
const offlineAudioContext = new OfflineAudioContext(decodedAudioData.numberOfChannels, decodedAudioData.sampleRate * decodedAudioData.duration, decodedAudioData.sampleRate)
const soundSource = offlineAudioContext.createBufferSource()
soundSource.buffer = decodedAudioData
soundSource.connect(offlineAudioContext.destination)
soundSource.start()
const renderedBuffer = await offlineAudioContext.startRendering()
console.log('renderedBuffer', renderedBuffer) // outputs audiobuffer
const wav = audioBufferToWav(renderedBuffer)
const fileType = `wav`
const fileName = `${
file.name}.${
fileType}`
downloadWav(wav, fileName)
return {
fileName, fileType, fileDuration }
} catch (error)