H5: recording and compressing audio with WebRTC + the Audio API

https://juejin.cn/post/7243240618788012090
Waveform demo: https://xiangyuecn.github.io/Recorder/assets/%E5%B7%A5%E5%85%B7-%E4%BB%A3%E7%A0%81%E8%BF%90%E8%A1%8C%E5%92%8C%E9%9D%99%E6%80%81%E5%88%86%E5%8F%91Runtime.html?jsname=teach.source_stream.capture_stream

   <button onclick="record()">开始录音</button>
    <button onclick="stopRecord()">结束录音</button>
    <!-- <button onclick="resetRecord()">重置录音</button> -->
    <audio class="audio-node" id="audio" autoplay></audio>
    <script>
 class Recoder {
  constructor (sampleRate) {
    this.leftDataList = []
    this.rightDataList = []
    this.mediaPlayer = null
    this.audioContext = null
    this.source = null
    this.sampleRate = sampleRate || 44100
  }
  startRecord () {
    return new Promise((resolve, reject) => {
      window.navigator.mediaDevices.getUserMedia({
        audio: {
          sampleRate: 8000, // sample rate
          channelCount: 1, // number of channels
          audioBitsPerSecond: 64, // note: this is a MediaRecorder option, not a standard getUserMedia constraint
          volume: 1.0, // volume (a non-standard constraint; most browsers ignore it)
          autoGainControl: true
        }
      }).then(mediaStream => {
        console.log(mediaStream,'mediaStream')
        this.mediaPlayer = mediaStream
        this.beginRecord(mediaStream)
        resolve()
      }).catch(err => {
        // If the user has no microphone, denies permission, or the device fails to connect,
        // an error is thrown here; err.name tells you which kind of error it was
        console.error(err)
        reject(err)
      })
    })
  }
 
  beginRecord (mediaStream) {
    let audioContext = new (window.AudioContext || window.webkitAudioContext)()
    // Create a MediaStreamAudioSourceNode from the stream
    let mediaNode = audioContext.createMediaStreamSource(mediaStream)
    console.log(mediaNode, 'mediaNode')
    // Create a ScriptProcessorNode (jsNode) to receive the raw PCM data
    // audioContext.sampleRate = 8000
    console.log(audioContext, 'audioContext')
    let jsNode = this.createJSNode(audioContext)
    console.log(jsNode, 'jsnode')
    // jsNode must be connected to the destination so its outputBuffer gets consumed,
    // otherwise the onaudioprocess callback never fires; since we never write to the
    // outputBuffer, no sound is actually played through the speakers
    jsNode.connect(audioContext.destination)
    jsNode.onaudioprocess = this.onAudioProcess.bind(this)
    // Connect mediaNode to jsNode
    mediaNode.connect(jsNode)
    this.audioContext = audioContext
  }
 
  onAudioProcess (event) {
    console.log('is recording')
    // Grab the input buffer's channel data (a Float32Array)
    let audioBuffer = event.inputBuffer
    let leftChannelData = audioBuffer.getChannelData(0)
    // let rightChannelData = audioBuffer.getChannelData(1)

    // The underlying buffer is reused, so the data must be cloned
    this.leftDataList.push(leftChannelData.slice(0))
    // this.rightDataList.push(rightChannelData.slice(0))
  }
 
  createJSNode (audioContext) {
    const BUFFER_SIZE = 4096
    const INPUT_CHANNEL_COUNT = 1
    const OUTPUT_CHANNEL_COUNT = 1
    // createJavaScriptNode is deprecated; prefer createScriptProcessor (itself superseded by AudioWorklet)
    let creator = audioContext.createScriptProcessor || audioContext.createJavaScriptNode
    creator = creator.bind(audioContext)
    return creator(BUFFER_SIZE, INPUT_CHANNEL_COUNT, OUTPUT_CHANNEL_COUNT)
  }
 
  playRecord (arrayBuffer) {
    let blob = new Blob([new Int8Array(arrayBuffer)], {
      type: 'audio/wav' // the buffer built by createWavFile is a WAV file
    })
    let blobUrl = URL.createObjectURL(blob)
    this.source = blob
    this.blobUrl = blobUrl
    // document.querySelector('.audio-node').src = blobUrl
    return blobUrl
  }
 
  stopRecord () {
    // Stop recording: merge the collected chunks, downsample, and build a WAV file
    let leftData = this.mergeArray(this.leftDataList)
    //let rightData = this.mergeArray(this.rightDataList)
    let allData = this.interSingleData(leftData)
    let wavBuffer = this.createWavFile(allData)
 
    let source = this.playRecord(wavBuffer)
    this.resetRecord()
    return source
  }
 
  transformArrayBufferToBase64 (buffer) {
    var binary = ''
    var bytes = new Uint8Array(buffer)
    for (var len = bytes.byteLength, i = 0; i < len; i++) {
      binary += String.fromCharCode(bytes[i])
    }
    return window.btoa(binary)
  }
 
  // Stop the tracks and reset the recorder's state
  resetRecord () {
    this.leftDataList = []
    this.rightDataList = []
    this.audioContext.close()
    this.mediaPlayer.getAudioTracks().forEach(track => {
      track.stop()
      this.mediaPlayer.removeTrack(track)
    })
  }
 
  createWavFile (audioData) {
    let channelCount = 1
    const WAV_HEAD_SIZE = 44
    const sampleBits = 16
    let sampleRate = this.sampleRate

    // 16-bit samples take 2 bytes each
    let buffer = new ArrayBuffer(audioData.length * 2 + WAV_HEAD_SIZE)
    // A DataView is needed to write into the buffer
    let view = new DataView(buffer)
    // Write the WAV header
    // RIFF chunk descriptor/identifier
    this.writeUTFBytes(view, 0, 'RIFF')
    // RIFF chunk length (36 + data length in bytes)
    view.setUint32(4, 36 + audioData.length * 2, true)
    // RIFF type
    this.writeUTFBytes(view, 8, 'WAVE')
    // format chunk identifier
    // FMT sub-chunk
    this.writeUTFBytes(view, 12, 'fmt ')
    // format chunk length
    view.setUint32(16, 16, true)
    // sample format (1 = PCM)
    view.setUint16(20, 1, true)
    // channel count (mono here)
    view.setUint16(22, channelCount, true)
    // sample rate
    view.setUint32(24, sampleRate, true)
    // byte rate (sample rate * block align)
    view.setUint32(28, sampleRate * channelCount * 2, true)
    // block align (channel count * bytes per sample)
    view.setUint16(32, channelCount * 2, true)
    // bits per sample
    view.setUint16(34, sampleBits, true)
    // data sub-chunk
    // data chunk identifier
    this.writeUTFBytes(view, 36, 'data')
    // data chunk length
    view.setUint32(40, audioData.length * 2, true)

    console.log(view, 'view')
    // Convert the Float32 samples ([-1, 1]) to 16-bit signed PCM
    let length = audioData.length
    let index = 44
    let volume = 1
    for (let i = 0; i < length; i++) {
      view.setInt16(index, audioData[i] * (0x7FFF * volume), true)
      index += 2
    }
    return buffer
  }
 
  writeUTFBytes (view, offset, string) {
    var lng = string.length
    for (var i = 0; i < lng; i++) {
      view.setUint8(offset + i, string.charCodeAt(i))
    }
  }
 
  // Downsample: pick nearest-neighbour samples to convert the data from the
  // AudioContext's sample rate down to the target sample rate
  interSingleData (left) {
    let inputSampleRate = this.audioContext.sampleRate
    let outputSampleRate = this.sampleRate
    let step = inputSampleRate / outputSampleRate
    let outputLength = Math.ceil(left.length * outputSampleRate / inputSampleRate)
    let result = new Float32Array(outputLength)
    let sourceIndex = 0
    for (let i = 0; i < outputLength; i++) {
      result[i] = left[Math.floor(sourceIndex)]
      sourceIndex += step
    }
    return result
  }
 
  // Interleave the left and right channel data
  interleaveLeftAndRight (left, right) {
    let totalLength = left.length + right.length
    let data = new Float32Array(totalLength)
    for (let i = 0; i < left.length; i++) {
      let k = i * 2
      data[k] = left[i]
      data[k + 1] = right[i]
    }
    return data
  }
 
  mergeArray (list) {
    let length = list.length * list[0].length
    let data = new Float32Array(length)
    let offset = 0
    for (let i = 0; i < list.length; i++) {
      data.set(list[i], offset)
      offset += list[i].length
    }
    return data
  }
 
  // Play a local audio file; intended as the change handler of an <input type="file">,
  // which is where this.files and this.value come from
  playMusic () {
    if (!this.value) {
      return
    }
    // Generate a blob URL directly from the File object
    let blobUrl = URL.createObjectURL(this.files[0])
    document.querySelector('.audio-node').src = blobUrl
  }
 
  play (arrayBuffer) {
    // Safari needs the webkit prefix
    let AudioContext = window.AudioContext || window.webkitAudioContext
    let audioContext = new AudioContext()
    // Create an AudioBufferSourceNode via the AudioContext factory method
    let audioNode = audioContext.createBufferSource()
    // Decode the audio; a Promise could be used, but older Safari requires the callback form
    audioContext.decodeAudioData(arrayBuffer, function (audioBuffer) {
      audioNode.buffer = audioBuffer
      audioNode.connect(audioContext.destination)
      // Start playback from 0s
      audioNode.start(0)
    })
  }
}
let recoder = new Recoder(8000)
function record() {
  recoder.startRecord()
}
 
function stopRecord() {
    recoder.stopRecord()
    let source = recoder.source
    let formData = new FormData()
    formData.append('audio', source)
    // formData is ready to be sent to a server; see the upload sketch after this <script> block
    let audio = document.getElementById('audio')
    audio.src = recoder.blobUrl
}
</script>
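
The `stopRecord` handler above builds a `FormData` object but never actually sends it. Below is a minimal sketch of how the recorded blob could be uploaded; the `/upload` endpoint and the `audio` field name are assumptions, so adapt them to your own backend.

```javascript
// Minimal upload sketch: POST the recorded WAV blob to a server.
// '/upload' and the 'audio' field name are hypothetical; replace them with your API.
function uploadRecording (blob) {
  const formData = new FormData()
  formData.append('audio', blob, 'record.wav')
  return fetch('/upload', { method: 'POST', body: formData })
    .then(response => {
      if (!response.ok) throw new Error('Upload failed: ' + response.status)
      return response
    })
}

// Usage after stopping the recording:
// uploadRecording(recoder.source).then(() => console.log('uploaded')).catch(console.error)
```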
 Adapted from a Juejin article.

 1. Many examples online are missing a stop feature; a stop function has been added here.

 2. Using an 8000 Hz sample rate and a single channel, the audio is nearly 1/12 of its original size (my device records at roughly 44 kHz); see the rough calculation after this list.

 3. For a detailed explanation, see the "H5 audio analysis" write-up.
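
As a rough sanity check on the size claim (assuming the source is 44.1 kHz stereo 16-bit PCM and the output is 8 kHz mono 16-bit WAV; the exact ratio depends on your device):

```javascript
// Uncompressed PCM bytes per second = sampleRate * channelCount * bytesPerSample
const sourceBytesPerSec = 44100 * 2 * 2 // 176400 B/s (44.1 kHz, stereo, 16-bit)
const targetBytesPerSec = 8000 * 1 * 2  //  16000 B/s (8 kHz, mono, 16-bit)
console.log(sourceBytesPerSec / targetBytesPerSec) // ≈ 11, i.e. roughly 1/11 to 1/12 of the original size
```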
WebRTC (Web Real-Time Communication) is an open-source project that lets web browsers do real-time voice calls, video calls, and peer-to-peer file sharing. Below is a simple recording example that uses the MediaRecorder API to capture an audio stream and save it as a file.

```javascript
// HTML: an audio element for playing back the recording
// <audio id="audioPlayer" controls></audio>

// Grab the audio element
var audioPlayer = document.querySelector('#audioPlayer');

// Request an audio stream
navigator.mediaDevices.getUserMedia({ audio: true, video: false })
  .then(function (stream) {
    // Bind the stream to the audio element so it can be played back
    audioPlayer.srcObject = stream;

    // Record the stream with the MediaRecorder API
    var mediaRecorder = new MediaRecorder(stream);

    // Array to collect the recorded data chunks
    var chunks = [];

    // dataavailable fires whenever a chunk of audio data is ready
    mediaRecorder.ondataavailable = function (event) {
      chunks.push(event.data);
    };

    // stop fires when recording ends
    mediaRecorder.onstop = function () {
      // Combine all chunks into a single audio file
      var audioBlob = new Blob(chunks, { 'type': 'audio/ogg' });

      // Create an object URL for playback
      var audioUrl = URL.createObjectURL(audioBlob);
      audioPlayer.src = audioUrl;

      // Create a download link so the user can save the recording
      var downloadLink = document.createElement('a');
      downloadLink.href = audioUrl;
      downloadLink.download = 'recorded-audio.ogg';
      document.body.appendChild(downloadLink);
      downloadLink.click();
      document.body.removeChild(downloadLink);
    };

    // Start recording
    mediaRecorder.start();

    // Stop recording after 60 seconds
    setTimeout(function () {
      mediaRecorder.stop();
    }, 60000);
  }).catch(function (error) {
    console.log('Error:', error);
  });
```

This code first obtains an audio stream via `navigator.mediaDevices.getUserMedia`, then records it with a `MediaRecorder` object. After `start()` is called, the `MediaRecorder` collects data and fires a `dataavailable` event whenever a chunk is ready, which is pushed into an array. Calling `stop()` triggers the `stop` event, at which point all chunks are combined into a single audio file and a download link is created for the user. Note that this is a basic example; a real application will likely need more error handling, more user interaction, and work to stay compatible with different browsers' implementations.
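
Browser support for specific container/codec combinations in MediaRecorder varies, so it can be worth probing before constructing the recorder. Here is a small sketch using the standard `MediaRecorder.isTypeSupported` check; the candidate list below is only an example:

```javascript
// Pick the first mimeType the current browser can actually record with
function createRecorder (stream) {
  const candidates = ['audio/webm;codecs=opus', 'audio/ogg;codecs=opus', 'audio/mp4']
  const mimeType = candidates.find(t => MediaRecorder.isTypeSupported(t))
  return mimeType ? new MediaRecorder(stream, { mimeType }) : new MediaRecorder(stream)
}

// Usage:
// navigator.mediaDevices.getUserMedia({ audio: true })
//   .then(stream => { const recorder = createRecorder(stream); recorder.start() })
```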