js录制音频

整体思路

1、监听用户onTouchStart事件,设置一个定时器记录是否长按,然后调用JSBridge开始录制事件
2、通过JSBridge监听录音过程,拿到录音的数据,前端用数组变量存放
3、监听用户onTouchEnd松开事件,清除定时器,处理录音数组转换成一个文件上传到oss

难点

难点一:将base64录音片段转为WAV文件
首先将一系列Base64编码的音频段合并成一个PCM数据流;
然后创建一个WAV文件的头部信息;
最后合并WAV文件头和PCM数据

难点二:TypedArray数据的合并
TypedArray: 存储的是固定类型的数值数据,如整数或浮点数。
Array: 可以存储任何类型的数据,包括数字、字符串、对象等

开始录音

  /**
   * 开始录音
   */
  const handleTouchStart = (event) => {
    event.preventDefault();
    timerId = setTimeout(() => {
      setLongPress(true);
      console.log('handleTouchStart 长按了');
      JSBridge(XX.startRecording', {
        numberOfChannels: 1, // 声道数
        // sampleRate: 16000, // 采样率
        sampleRate: 44100, // 更改采样率为 44100 Hz
        bitsPerChannel: 16, // 位深
        format: 'PCM',
      }).then(() => {
        setRecordStatus('dialog_listening');
      });
    }, 100); // 长按时长,这里设置为100ms
  };

监听录音过程

 /**
  * Recording-progress callback: each native event either carries a base64
  * PCM chunk (accumulated into baseArrayRef) or an error (shown via Toast,
  * and the accumulated chunks are discarded).
  */
 const onRecordChange = (event) => {
    console.log(event);

    const error = event?.error || {};
    const pcm = event?.param?.pcm;

    if (!error.errorCode) {
      // Happy path: stash this base64 PCM fragment for later merging.
      baseArrayRef.current.push(pcm);
      return;
    }

    Toast.show({
      type: 'error',
      content: `录制失败,${error.errorMsg}`,
    });
    // Drop any partial recording on error.
    baseArrayRef.current = [];
  };

  useEffect(() => {
    // Listen for PCM chunks streamed from the native recorder.
    document.addEventListener('RecordingDataBufferTransfer', onRecordChange);

    return () => {
      // BUG FIX: the original cleanup never removed this listener, leaking
      // it (and firing on a dead component) after unmount.
      document.removeEventListener('RecordingDataBufferTransfer', onRecordChange);
      // Also cancel a still-pending long-press timer.
      if (timerId !== null) clearTimeout(timerId);
    };
  }, []);

结束录制

/**
   * Touch-end handler: cancel the pending long-press timer; if a long
   * press was in progress, stop the native recording, detach the data
   * listener, reset the UI state and kick off the merge/upload.
   * @returns
   */
  const handleTouchEnd = (event) => {
    // Cancel the long-press timer if it has not fired yet.
    if (timerId !== null) {
      clearTimeout(timerId)
      timerId = null
    }
    // A short tap never started recording — nothing to stop.
    if (!longPress) return;

    setLongPress(false);
    console.log('handleTouchEnd 松开了');

    JSBridge('XX.stopRecording').then(() => {
      // Stop receiving PCM chunks before merging what we have.
      document.removeEventListener('RecordingDataBufferTransfer', onRecordChange);
      setRecordStatus('dialog_sleep');
      onMerge();
    });
  };

音频波动动画

VoiceAnimation/index.tsx

import cls from 'classnames';
import debounce from 'lodash/debounce';
import { useLayoutEffect, useMemo, useRef } from 'react';
import styles from './index.module.less';

interface IProps {
  status: string;
}

/**
 * Voice animation container: renders five bars whose motion is driven by
 * the dialog status string (sleep / idle / listening / thinking / responding).
 */
export default function (props: IProps) {
  const { status = 'dialog_sleep' } = props;
  // Stable five-slot placeholder array; only the indices are used.
  const list = useMemo(() => new Array(5).fill(true), []);

  return (
    <div className={cls(styles.voice, status)}>
      {list.map((_, index) => (
        // FIX: list children need a stable `key` (the original omitted it,
        // triggering React's missing-key warning and risky reconciliation).
        <AnimationItem key={index} status={status} index={index} />
      ))}
    </div>
  );
}

/**
 * Map a dialog status to the inline style (and optional transitionend
 * handler) applied to one animation bar. The handlers re-trigger new
 * transitions on each transitionend, looping the animation for as long
 * as the parent element still carries the matching status class.
 *
 * BUG FIX: lodash `debounce(fn, wait, options)` — the original passed the
 * options object in the `wait` slot, so `{ leading: true, trailing: false }`
 * was silently ignored and the handler ran on the trailing edge instead.
 * An explicit `wait` of 0 restores the intended leading-edge behavior.
 */
function getTransationByStatus(status: string, index?) {
  return {
    dialog_sleep: {
      transition: 'all 0.3s',
      height: '8px',
      transform: 'translateY(0)',
    },
    dialog_idle: {
      transition: 'all 0.3s',
      height: '8px',
      transform: 'translateY(0)',
    },
    // Listening: bars bounce up/down in alternating phase.
    dialog_listening: {
      transition: 'all 0.3s',
      height: '24px',
      transform: index % 2 ? 'translateY(8px)' : 'translateY(-8px)',
      onTransitionEnd: debounce(
        (event) => {
          // Stop looping once the container left the listening state.
          if (
            event.target.parentElement.className.indexOf('dialog_listening') ===
            -1
          )
            return;
          event.target.style.transitionDuration = '0.5s';
          event.target.style.height = '24px';
          // Flip the vertical offset to continue the bounce.
          event.target.style.transform =
            event.target.style.transform === 'translateY(8px)'
              ? 'translateY(-8px)'
              : 'translateY(8px)';
        },
        0,
        {
          leading: true,
          trailing: false,
        },
      ),
    },
    // Thinking: staircase of heights that oscillates via the lookup table.
    dialog_thinking: {
      transition: 'all 0.3s',
      height: `${[52, 44, 36, 28, 24][index]}px`,
      transform: 'translateY(0)',
      onTransitionEnd: debounce(
        (event) => {
          if (
            event.target.parentElement.className.indexOf('dialog_thinking') ===
            -1
          )
            return;
          event.target.style.transitionDuration = '0.5s';
          // Swap each height for its mirrored counterpart to animate back.
          event.target.style.height = {
            '52px': '24px',
            '44px': '28px',
            '36px': '32px',
            '32px': '36px',
            '28px': '44px',
            '24px': '52px',
          }[event.target.style.height];
        },
        0,
        {
          leading: true,
          trailing: false,
        },
      ),
    },
    // Responding: random jitter, faster transitions.
    dialog_responding: {
      transition: 'all 0.2s',
      height: `${Math.random() * (index + 1) * 10 + 24}px`,
      transform: 'translateY(0)',
      onTransitionEnd: debounce(
        (event) => {
          if (
            event.target.parentElement.className.indexOf(
              'dialog_responding',
            ) === -1
          )
            return;
          event.target.style.transitionDuration = '0.15s';
          event.target.style.height = `${Math.random() * (index + 1) * 10 + 24}px`;
        },
        0,
        {
          leading: true,
          trailing: false,
        },
      ),
    },
  }[status];
}

// A single animated bar. On each status change it snaps back to the neutral
// 24px shape, then applies the style and transitionend handler returned by
// getTransationByStatus; the handler keeps restarting transitions, so the
// looping animation is driven by native DOM events rather than React state.
function AnimationItem({ status, index }: { status: string; index?: number }) {
  const div = useRef<any>();

  useLayoutEffect(() => {
    const container = div.current as HTMLDivElement;
    // Reset to the neutral shape so the next transition has a known start.
    function reset() {
      container.ontransitionend = (e) => {};
      container.style.transition = 'all .1s';
      container.style.height = '24px';
      container.style.transform = 'translateY(0)';
    }

    reset();

    // Pull the per-status style; onTransitionEnd defaults to a no-op.
    const { onTransitionEnd = () => {}, ...style } =
      getTransationByStatus(status, index) || {};

    container.ontransitionend = onTransitionEnd;

    // Apply the status style imperatively, bypassing React's style prop.
    for (let prop in style) {
      container.style[prop] = style[prop];
    }

    return () => {};
    // NOTE(review): `index` is read above but omitted from the deps array —
    // safe only if index never changes for a mounted item; confirm.
  }, [status]);

  return (
    <div ref={div} className={styles.item} style={{ width: 24, height: 24 }} />
  );
}

VoiceAnimation/index.module.less

// Container for the five animated voice bars.
.voice {
  display: flex;
  justify-content: center;
  align-items: center;
  height: 56px;

  // One bar; width/height are set inline by the component, so the
  // commented values below are intentionally disabled.
  .item {
    // width: 24px;
    // height: 24px;
    background-color: var(--TY-Text-Brand-1);
    border-radius: 20px;
    margin: 0 4px;
    transform: translateY(0);
  }
}

// Recursive Less mixin: staggers animation-delay across the first @n children.
// NOTE(review): no `animation` property is declared in this file, so the
// delay only matters if an animation is attached elsewhere — confirm usage.
.loop(@n, @i: 0) when (@i <= @n) {
  &:nth-child(@{i}) {
    animation-delay: (@i * 0.2s);
  }
  .loop(@n, (@i + 1));
}

一个完整的音频录制——播放的例子

<!DOCTYPE html>
<html lang="en">

<head>
  <meta charset="UTF-8">
  <meta http-equiv="X-UA-Compatible" content="ie=edge">
  <title>pcmtowav</title>
</head>

<body>
  <div>
    getUserMedia需要https,使用localhost或127.0.0.1时,可用http。
  </div>
  <button id="start">开始录音</button>
  <button id="end">结束录音</button>
  <button id="play">播放录音</button>
</body>
<script>
  // Shared recorder state for the demo page.
  var context = null,    // AudioContext, created on "start"
    inputData = [],      // recorded Float32Array chunks
    size = 0,            // total number of samples collected
    audioInput = null,   // MediaStreamSource node for the microphone
    recorder = null,     // ScriptProcessor node delivering raw PCM
    dataArray;           // NOTE(review): never assigned or read — appears unused

  // "Start" button: open the microphone and begin collecting raw PCM
  // chunks from a ScriptProcessor node.
  document.getElementById('start').addEventListener('click', function () {
    context = new (window.AudioContext || window.webkitAudioContext)();
    // Reset the previous take.
    inputData = [];
    // BUG FIX: `size` was never reset, so a second recording allocated a
    // buffer sized for all takes combined in decompress().
    size = 0;
    // 4096-frame buffer, 1 input channel, 1 output channel.
    recorder = context.createScriptProcessor(4096, 1, 1);

    recorder.onaudioprocess = function (e) {
      var data = e.inputBuffer.getChannelData(0);
      // Copy the chunk — the underlying buffer is reused between callbacks.
      inputData.push(new Float32Array(data));
      size += data.length;
    }

    navigator.mediaDevices.getUserMedia({
      audio: true
    }).then((stream) => {
      audioInput = context.createMediaStreamSource(stream);
      // BUG FIX: the original wired the graph in a `.then` chained AFTER
      // `.catch`, which also ran on rejection and crashed on a null
      // audioInput. Connect only on success instead.
      audioInput.connect(recorder);
      recorder.connect(context.destination);
    }).catch((err) => {
      console.log('error');
    });
  });
  // "Stop" button: detaching the processor ends onaudioprocess callbacks.
  document.getElementById('end').addEventListener('click', () => {
    recorder.disconnect();
  });
  // "Play" button: stop capturing, wrap the PCM in a WAV container, decode
  // it back into an AudioBuffer and play it through the speakers.
  document.getElementById('play').addEventListener('click', function () {
    recorder.disconnect();
    if (0 !== size) {
      // combine the recorded chunks
      // var data = combine(inputData, size);		
      inputSampleRate = context.sampleRate;
      context.decodeAudioData(encodeWAV().buffer, function (buffer) {
        // decodeAudioData supports promises; the three-argument callback
        // form is used here only for older-browser compatibility.
        playSound(buffer);
      }, function () {
        console.log('error');
      });
      // console.log(data.buffer);
    }
  });
  // ----------------------
  // 以下是增加的内容

  var inputSampleRate = 0;   // input sample rate, taken from the AudioContext
  var oututSampleBits = 16;  // output bit depth (name is a typo of "output",
                             // kept because encodePCM/encodeWAV reference it)

  // 数据简单处理
  function decompress() {
    // 合并
    var data = new Float32Array(size);
    var offset = 0; // 偏移量计算
    // 将二维数据,转成一维数据
    for (var i = 0; i < inputData.length; i++) {
      data.set(inputData[i], offset);
      offset += inputData[i].length;
    }
    return data;
  };
  // Quantise the Float32 [-1, 1] samples into 8- or 16-bit PCM bytes,
  // returned as a DataView over a fresh buffer.
  function encodePCM() {
    let bytes = decompress(),
      sampleBits = oututSampleBits,
      offset = 0,
      dataLength = bytes.length * (sampleBits / 8),
      buffer = new ArrayBuffer(dataLength),
      data = new DataView(buffer);

    if (sampleBits === 8) {
      // 8-bit WAV PCM is unsigned: scale negatives by 128, positives by 127,
      // then shift up by 128 to land in [0, 255].
      for (var i = 0; i < bytes.length; i++, offset++) {
        var s = Math.max(-1, Math.min(1, bytes[i]));
        var val = s < 0 ? s * 128 : s * 127;
        // FIX: use setUint8 for unsigned bytes (the original called setInt8
        // with a bogus endianness flag — single-byte writes take none), and
        // Math.floor instead of parseInt on a number.
        data.setUint8(offset, Math.floor(val + 128));
      }
    } else {
      // 16-bit PCM is signed little-endian: scale negatives by 32768,
      // positives by 32767.
      for (var i = 0; i < bytes.length; i++, offset += 2) {
        var s = Math.max(-1, Math.min(1, bytes[i]));
        data.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7FFF, true);
      }
    }

    return data;
  }

  // Build a complete WAV (RIFF) file as a DataView: a 44-byte header
  // followed by the PCM body from encodePCM().
  function encodeWAV() {
    var sampleRate = inputSampleRate;
    var sampleBits = oututSampleBits;
    var bytes = encodePCM();

    var buffer = new ArrayBuffer(44 + bytes.byteLength);
    var data = new DataView(buffer);

    var channelCount = 1;   // mono
    var offset = 0;

    // RIFF chunk descriptor
    writeString(data, offset, 'RIFF'); offset += 4;
    // Remaining file length = total - 8 ("RIFF" tag + this length field)
    data.setUint32(offset, 36 + bytes.byteLength, true); offset += 4;
    writeString(data, offset, 'WAVE'); offset += 4;
    // "fmt " sub-chunk
    writeString(data, offset, 'fmt '); offset += 4;
    // fmt chunk size, always 16 for plain PCM
    data.setUint32(offset, 16, true); offset += 4;
    // audio format: 1 = linear PCM
    data.setUint16(offset, 1, true); offset += 2;
    data.setUint16(offset, channelCount, true); offset += 2;
    data.setUint32(offset, sampleRate, true); offset += 4;
    // byte rate = channels * sampleRate * bytesPerSample
    data.setUint32(offset, channelCount * sampleRate * (sampleBits / 8), true); offset += 4;
    // block align = channels * bytesPerSample
    data.setUint16(offset, channelCount * (sampleBits / 8), true); offset += 2;
    // bits per sample
    data.setUint16(offset, sampleBits, true); offset += 2;
    // "data" sub-chunk
    writeString(data, offset, 'data'); offset += 4;
    data.setUint32(offset, bytes.byteLength, true); offset += 4;

    // Append the PCM body in one bulk copy. FIX: the original copied byte
    // by byte and passed bogus endianness flags to the single-byte
    // getUint8/setUint8 accessors, which take none.
    new Uint8Array(buffer, 44).set(
      new Uint8Array(bytes.buffer, bytes.byteOffset, bytes.byteLength),
    );

    return data;
  }

  // Wrap the encoded WAV bytes in a Blob suitable for upload or playback.
  function getWAVBlob() {
    const wav = encodeWAV();
    return new Blob([wav], { type: 'audio/wav' });
  }
  // Play a decoded AudioBuffer through the default audio output.
  function playSound(buffer) {
    const source = context.createBufferSource();
    source.buffer = buffer;               // attach the decoded samples
    source.connect(context.destination);  // route to the speakers
    source.start();
  }

  // Write the string's char codes as bytes into the DataView, starting at
  // the given byte offset (used for the ASCII tags in the WAV header).
  function writeString(data, offset, str) {
    let i = 0;
    while (i < str.length) {
      data.setUint8(offset + i, str.charCodeAt(i));
      i += 1;
    }
  }
  // Concatenate several ArrayBuffer views (DataViews or typed arrays) into
  // one new view built with the given constructor (e.g. DataView).
  function combineDataView(resultConstructor, ...arrays) {
    let totalLength = 0,
      offset = 0;
    // FIX: the original summed `arr.length || arr.byteLength`, which counts
    // ELEMENTS for typed arrays (wrong for element sizes > 1 byte) while
    // the copy loop used byteLength; byteLength is correct for every view.
    for (let arr of arrays) {
      totalLength += arr.byteLength;
    }
    let buffer = new ArrayBuffer(totalLength),
      result = new resultConstructor(buffer);
    for (let arr of arrays) {
      // Generalized: typed arrays are wrapped so getInt8 works on them too.
      const view = arr instanceof DataView
        ? arr
        : new DataView(arr.buffer, arr.byteOffset, arr.byteLength);
      for (let i = 0, len = view.byteLength; i < len; ++i) {
        result.setInt8(offset, view.getInt8(i));
        offset += 1;
      }
    }

    return result;
  }
</script>

</html>
  • 8
    点赞
  • 16
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
Html5网页纯JavaScript录制MP3音频 <html> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <title>Html5网页JavaScript录制MP3音频</title> <meta charset="utf-8" /> </head> <body> Html5网页JavaScript录制MP3音频 录制 停止 上传 调试信息: [removed][removed] [removed] var recorder = new MP3Recorder({ debug:true, funOk: function () { btnStart.disabled = false; log('初始化成功'); }, funCancel: function (msg) { log(msg); recorder = null; } }); var mp3Blob; function funStart(button) { btnStart.disabled = true; btnStop.disabled = false; btnUpload.disabled = true; log('录音开始...'); recorder.start(); } function funStop(button) { recorder.stop(); btnStart.disabled = false; btnStop.disabled = true; btnUpload.disabled = false; log('录音结束,MP3导出中...'); recorder.getMp3Blob(function (blob) { log('MP3导出成功'); mp3Blob = blob; var url = URL.createObjectURL(mp3Blob); var div = document.createElement('div'); var au = document.createElement('audio'); var hf = document.createElement('a'); au.controls = true; au.src = url; hf.href = url; hf.download = new Date().toISOString() + '.mp3'; hf[removed] = hf.download; div.appendChild(au); div.appendChild(hf); recordingslist.appendChild(div); }); } function log(str) { recordingslist[removed] += str + ''; } function funUpload() { var fd = new FormData(); var mp3Name = encodeURIComponent('audio_recording_' + new Date().getTime() + '.mp3'); fd.append('mp3Name', mp3Name); fd.append('file', mp3Blob); var xhr = new XMLHttpRequest(); xhr.onreadystatechange = function () { if (xhr.readyState == 4 && xhr.status == 200) { recordingslist[removed] += '上传成功:' + mp3Name + ''; } }; xhr.open('POST', 'upload.ashx'); xhr.send(fd); } [removed] </body> </html> [javascript] view plain copy 在CODE上查看代码片派生到我的代码片 (function (exports) { var MP3Recorder = function (config) { var recorder = this; config = config || {}; config.sampleRate = config.sampleRate || 44100; config.bitRate = config.bitRate || 128; navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || 
navigator.mozGetUserMedia || navigator.msGetUserMedia; if (navigator.getUserMedia) { navigator.getUserMedia({ audio: true }, function (stream) { var context = new AudioContext(), microphone = context.createMediaStreamSource(stream), processor = context.createScriptProcessor(16384, 1, 1),//bufferSize大小,输入channel数,输出channel数 mp3ReceiveSuccess, currentErrorCallback; config.sampleRate = context.sampleRate; processor.onaudioprocess = function (event) { //边录音边转换 var array = event.inputBuffer.getChannelData(0); realTimeWorker.postMessage({ cmd: 'encode', buf: array }); }; var realTimeWorker = new Worker('js/worker-realtime.js'); realTimeWorker.onmessage = function (e) { switch (e.data.cmd) { case 'init': log('初始化成功'); if (config.funOk) { config.funOk(); } break; case 'end': log('MP3大小:', e.data.buf.length); if (mp3ReceiveSuccess) { mp3ReceiveSuccess(new Blob(e.data.buf, { type: 'audio/mp3' })); } break; case 'error': log('错误信息:' + e.data.error); if (currentErrorCallback) { currentErrorCallback(e.data.error); } break; default: log('未知信息:', e.data); } }; recorder.getMp3Blob = function (onSuccess, onError) { currentErrorCallback = onError; mp3ReceiveSuccess = onSuccess; realTimeWorker.postMessage({ cmd: 'finish' }); }; recorder.start = function () { if (processor && microphone) { microphone.connect(processor); processor.connect(context.destination); log('开始录音'); } } recorder.stop = function () { if (processor && microphone) { microphone.disconnect(); processor.disconnect(); log('录音结束'); } } realTimeWorker.postMessage({ cmd: 'init', config: { sampleRate: config.sampleRate, bitRate: config.bitRate } }); }, function (error) { var msg; switch (error.code || error.name) { case 'PERMISSION_DENIED': case 'PermissionDeniedError': msg = '用户拒绝访问麦客风'; break; case 'NOT_SUPPORTED_ERROR': case 'NotSupportedError': msg = '浏览器不支持麦客风'; break; case 'MANDATORY_UNSATISFIED_ERROR': case 'MandatoryUnsatisfiedError': msg = '找不到麦客风设备'; break; default: msg = '无法打开麦克风,异常信息:' + (error.code || 
error.name); break; } if (config.funCancel) { config.funCancel(msg); } }); } else { if (config.funCancel) { config.funCancel('当前浏览器不支持录音功能'); } } function log(str) { if (config.debug) { console.log(str); } } } exports.MP3Recorder = MP3Recorder; })(window);
getUserMedia是一个Web API,用于在浏览器中访问用户的媒体设备,例如摄像头和麦克风。通过getUserMedia,您可以使用JavaScript代码从用户的设备中获取音频、视频或者音视频流。 要使用getUserMedia录制音频,您可以按照以下步骤进行操作: 1. 获取用户媒体设备的许可:首先,您需要请求用户的许可来访问其麦克风。您可以使用`navigator.mediaDevices.getUserMedia`方法来请求许可,并指定要获取的媒体类型为音频。示例代码如下: ```javascript navigator.mediaDevices.getUserMedia({ audio: true }) .then(function(stream) { // 许可已获得,可以开始录制音频 }) .catch(function(error) { // 发生错误,无法访问用户的麦克风 }); ``` 2. 录制音频:一旦用户授予了访问麦克风的许可,您将获得一个媒体流对象。您可以使用这个流对象来录制音频。示例代码如下: ```javascript navigator.mediaDevices.getUserMedia({ audio: true }) .then(function(stream) { const mediaRecorder = new MediaRecorder(stream); // 开始录制音频 mediaRecorder.start(); // 在录制过程中,可以监听dataavailable事件来获取录制音频数据 mediaRecorder.addEventListener('dataavailable', function(event) { const audioData = event.data; // 处理音频数据 }); // 在录制完成后,可以监听stop事件来停止录制 mediaRecorder.addEventListener('stop', function() { // 录制已停止 }); }) .catch(function(error) { // 发生错误,无法访问用户的麦克风 }); ``` 3. 停止录制:当您想要停止录制音频时,可以调用`mediaRecorder.stop()`方法。停止录制后,将触发`stop`事件。 这就是使用getUserMedia录制音频的基本步骤。您可以根据需要进行进一步的处理和操作,例如保存录制音频文件或进行实时处理。
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值