基于阿里云实时语音识别 WebSocket 协议说明开发,HTML+JS 实现

该代码实现了一个使用阿里云服务的实时录音功能,通过WebSocket与服务器交互,将录音转化为文本。当用户点击开始按钮时,开始录音并发送数据到服务器进行语音识别,识别结果实时显示。结束按钮则停止录音并关闭连接。

摘要生成于 C知道 ,由 DeepSeek-R1 满血版支持, 前往体验 >

<!DOCTYPE html>
<!-- Demo page: real-time speech recognition against Aliyun NLS.
     recorder.js captures microphone PCM; ws.js streams it over a
     WebSocket and renders the transcription into #status. -->
<html>
<head>
  <meta charset="UTF-8">
  <title>阿里云实时录音</title>
  <style>
    /* Scrollable panel that receives one <p> per recognized sentence. */
    #status {
        position: relative;
        padding: 10px 24px;
        height: 408px;
        /* NOTE(review): flex-direction/justify-content/align-items have no
           effect without display:flex — confirm whether flex was intended. */
        flex-direction: row;
        justify-content: center;
        align-items: center;
        background: #f8f8f8;
        overflow-y: auto;
      }

      /* Transcription text styling (class is not referenced by ws.js;
         presumably applied to result paragraphs elsewhere — verify). */
      .result-text{
          width: 100%;
          height: 100%;
          padding: 6px 0;
          overflow-y: auto;
          font-size: 14px;
          color: #181818;
          line-height: 36px;
      }
  </style>
</head>
<body>

<!-- Recording controls and elapsed-time display. -->
<button id="start">开始</button>
<button id="stop">结束</button>  
<span> 时间: <span id="time"></span> </span>
<br/>
<!-- Recognition results are appended here as <p> elements by ws.js. -->
<div id="status"></div>

<!-- recorder.js: js-audio-recorder bundle; ws.js: page logic below. -->
<script  type="text/javascript" src="recorder.js"></script>
<script  type="text/javascript" src="ws.js"></script>
<script>

</script>
</body>
</html>
// ws.js
// DOM handles for the control buttons, result panel and elapsed-time label.
const startBtn = document.getElementById('start');
const stopBtn = document.getElementById('stop');
const statusDiv = document.getElementById('status');
const timeSpan = document.getElementById('time');

// Recorder instance (js-audio-recorder) configured to match the PCM
// parameters later requested from Aliyun in websocketOnOpen().
let recorder = new Recorder({
  sampleBits: 16,                 // sample width in bits (default 16)
  sampleRate: 16000,              // sample rate in Hz (default 16000)
  numChannels: 1,                 // channel count, 1 or 2 (default 1)
  compiling: true                 // convert to PCM while recording (default false)
});


// Aliyun credentials, populated asynchronously by getToken().
let token = null;
let appkey = null;

var interval; // timer handle for the periodic audio-frame sender

// WebSocket session state.
let websocket = null; // active WebSocket instance
let timer_websocket = null; // polling timer for fetching recognition results
let websocket_task_id = null; // 32-char session id; must stay constant for the whole request
let websocket_audio2txt_time = 0; // time a sentence finished collecting, used for interval checks
let websocket_audio2txt_result_msg = null; // accumulated transcription text
let websocket_audio2txt_result_msg_temp = null; // scratch copy of the transcription text
let websocket_audio2txt_complete_b = false; // true once transcription is complete
let websocket_audio2txt_complete_time_end = 100; // threshold (ms) for deciding text collection finished


// Auth value sent in the Authorization header to the token endpoint.
const cookies  = '';

// Fetch the token information from the backend API (hoisted declaration below).
getToken();
/**
 * Fetch the Aliyun NLS access token and appkey from the backend API and
 * store them in the module-level `token` / `appkey` globals used by
 * initWebSocket() / websocketOnOpen().
 *
 * Fixes: the original mixed `await` with `.then()/.catch()` on the same
 * promise chain and never checked the HTTP status, so a 4xx/5xx response
 * surfaced only as a JSON-parse failure.
 */
async function getToken() {
  const url = 'https://xxxxxx/ali/token';
  try {
    const res = await fetch(url, {
      method: 'GET',
      headers: {
        "Content-Type": "application/json",
        'Authorization': cookies
      }
    });
    if (!res.ok) {
      throw new Error(`HTTP ${res.status}`);
    }
    const body = await res.json();
    token = body.data.token;
    appkey = body.data.appkey;
  } catch (error) {
    console.log('error:', error);
    alert('获取token失败');
  }
}


//开始录音
// Start recording; on success reset the UI and open a fresh recognition
// session. Fixes: the original rejection handler logged a fixed string and
// discarded the actual error object.
startBtn.onclick = function() {
  recorder.start().then(() => {
    timeSpan.innerText = '0s';
    statusDiv.innerHTML = '';
    websocket_audio2txt_result_msg = ""; // reset the accumulated transcript
    initWebSocket();
  }, (error) => {
    // Surface the real failure (e.g. microphone permission denied).
    console.log(`出错了`, error);
  });

  // Refresh the elapsed-time display as the recorder reports progress.
  // params also carries fileSize / vol / data if ever needed.
  recorder.onprogress = function(params) {
    timeSpan.innerText = Math.floor(params.duration) + 's';
  };
};


// Stop recording and end the recognition session.
// Fixes: the original never sent the StopTranscription command —
// websocketSendStop() was defined but never called — so the server-side
// session stayed open and the final result was never flushed
// (TranscriptionCompleted was unreachable).
stopBtn.onclick = function() {
  console.log('结束录音');
  recorder.stop();
  // Stop streaming audio frames before telling the server we are done.
  clearInterval(interval);
  websocketSendStop();
};


// Optional playback button. Fixes: the original referenced `playBtn`
// without ever defining it (the page markup has no #play element either),
// which threw a ReferenceError at script load and aborted everything
// after this statement. Guarding the lookup keeps the page working with
// or without the button.
const playBtn = document.getElementById('play');
if (playBtn) {
  playBtn.onclick = function() {
    console.log('播放录音');
    recorder.play(); // play back the captured audio
  };
}


//初始化websocket
/**
 * (Re)create the WebSocket connection to the Aliyun NLS gateway.
 * Resets the per-session flags, drops any previous connection, then
 * connects with the token obtained by getToken() and wires up the
 * lifecycle callbacks.
 */
function initWebSocket() {
  console.log("初始化weosocket");

  // Reset per-session state.
  websocket_audio2txt_complete_b = false;
  websocket_audio2txt_time = 0;

  // Close and discard any previous connection before reconnecting.
  if (websocket) {
    websocket.close();
    websocket = null;
  }

  // Aliyun NLS gateway endpoint, authenticated via the token query param.
  const wsuri = `wss://nls-gateway.cn-shanghai.aliyuncs.com/ws/v1?token=${token}`;

  websocket = new WebSocket(wsuri);
  websocket.onopen = websocketOnOpen;
  websocket.onmessage = websocketOnMessage;
  websocket.onerror = websocketOnError;
  websocket.onclose = websocketClose;
}

//建立连接
/**
 * Connection-established callback: start a new transcription session.
 * Generates a fresh 32-char task id (kept for the whole session in
 * websocket_task_id) plus a per-message id, and sends the
 * StartTranscription command with audio parameters matching the recorder
 * configuration (16 kHz mono PCM).
 */
function websocketOnOpen() {
  websocket_task_id = getRandomStrNum();

  const startCommand = {
    header: {
      namespace: "SpeechTranscriber",   // fixed value
      name: "StartTranscription",       // request name, fixed value
      appkey: appkey,
      message_id: getRandomStrNum(),    // unique per message
      task_id: websocket_task_id,       // constant for the session
    },
    payload: {
      format: "PCM",                    // uncompressed 16-bit mono PCM
      sample_rate: 16000,               // must match the recorder's rate
      enable_intermediate_result: true, // stream partial results
      enable_punctuation_prediction: true,
      enable_inverse_text_normalization: true, // digits written as numerals
      max_sentence_silence: 500,        // sentence-break silence, 200-2000 ms
    },
  };

  websocketSend(JSON.stringify(startCommand));
}


/****************ws 请求处理 start *********************/
//发送数据
// Transmit data over the socket, but only while the connection is in the
// OPEN state (readyState 1); otherwise log and drop the payload.
function websocketSend(data) {
  if (websocket.readyState !== 1) {
    console.log('websock未连接-------------------');
    return;
  }
  websocket.send(data);
}

//接收数据
/**
 * Dispatch incoming Aliyun NLS events by header.name:
 * - TranscriptionResultChanged: intermediate result for the current sentence.
 * - SentenceBegin: a new sentence started; add a <p> to show its progress.
 * - TranscriptionStarted: server is ready; begin streaming audio every 100 ms.
 * - SentenceEnd: final result for the sentence.
 * - TranscriptionCompleted: server stopped transcribing.
 */
function websocketOnMessage(e) {
  const ret = JSON.parse(e.data);

  switch (ret.header.name) {
    case 'TranscriptionResultChanged':
      console.log('数据在收集中');
      ingText(ret.payload.result);
      break;

    case 'SentenceBegin': {
      console.log('检测到了一句话的开始');
      // Fresh paragraph to display the sentence as it evolves.
      const p = document.createElement("p");
      p.innerText = "";
      statusDiv.appendChild(p);
      break;
    }

    case 'TranscriptionStarted':
      console.log("服务端已经准备好了进行识别,客户端可以发送音频数据了");
      // Pump buffered PCM to the server on a fixed cadence.
      interval = setInterval(() => {
        getPCMAndSend();
      }, 100);
      break;

    case 'SentenceEnd':
      console.log('数据接收结束', ret);
      endText(ret.payload.result);
      break;

    case 'TranscriptionCompleted':
      console.log('服务端已停止了语音转写', ret);
      break;
  }
}

//错误处理
// Connection-error callback. Reconnection is intentionally left disabled;
// only logs the failure.
function websocketOnError(e) {
  console.log("连接建立失败重连");
}

//关闭处理
// Connection-closed callback; logs the close event for diagnostics.
function websocketClose(e) {
  console.log('websocketClose断开连接', e);
}

//wss 连接建立之后发送 StopTranscription指令
/**
 * Send the StopTranscription command, ending the current session.
 * Reuses the session's task_id with a fresh message_id, per the Aliyun
 * real-time transcription protocol.
 */
function websocketSendStop() {
  console.log("向  websocket 发送 Stop指令");

  const stopCommand = {
    header: {
      message_id: getRandomStrNum(),
      task_id: websocket_task_id,
      namespace: "SpeechTranscriber",
      name: "StopTranscription",
      appkey: appkey,
    },
  };

  websocketSend(JSON.stringify(stopCommand));
}


/**
 * Update the last result paragraph with the latest intermediate
 * recognition text.
 *
 * Fixes: the original indexed `ps[ps.length - 1]` unconditionally and
 * threw a TypeError when no <p> existed yet (a TranscriptionResultChanged
 * event can arrive before SentenceBegin created one).
 */
function ingText(text)
{
  let ps = statusDiv.querySelectorAll('p');
  if (ps.length === 0) {
    return; // no sentence paragraph yet — nothing to update
  }
  ps[ps.length - 1].innerText = text;
}
 

//设置定时器-websocket 实时获取语音转文本的结果
/**
 * Record the final text of a finished sentence: update the last result
 * paragraph and append the text to the accumulated transcript
 * (websocket_audio2txt_result_msg).
 *
 * Fixes: guards the last-<p> access so a SentenceEnd arriving without a
 * preceding SentenceBegin no longer throws; the transcript is still
 * accumulated either way.
 */
function endText(text)
{
  let ps = statusDiv.querySelectorAll('p');
  if (ps.length > 0) {
    ps[ps.length - 1].innerText = text;
  }

  // Append to the full transcript.
  websocket_audio2txt_result_msg += text
  console.log('websocket_audio2txt_result_msg:',websocket_audio2txt_result_msg);

  //todo 可以进行匹配语音匹配了
}


/****************ws 请求处理  end *********************/


//获取音频信息,并发送
/**
 * Pull the PCM buffered by the recorder since the last call, split it
 * into frames of at most 3200 bytes (the Aliyun per-frame limit; 1600 or
 * 3200 are the supported sizes), and send each frame over the socket.
 *
 * Fixes: the original computed `blob_size/3200 + 1` iterations, which
 * sent a trailing zero-byte chunk whenever the size was an exact
 * multiple of 3200, and sent an empty frame when no audio was buffered.
 */
function getPCMAndSend()
{
  let NextData = recorder.getNextData();
  let blob = new Blob([NextData])
  let blob_size = blob.size;
  console.log("获取音频信息,并发送,blob_size:"+blob_size , blob);

  const max_blob_size = 3200;
  const chunk_count = Math.ceil(blob_size / max_blob_size);

  // Slice the buffer into frames and send each one.
  for (let i = 0; i < chunk_count; i++) {
      const start = i * max_blob_size;
      const end = Math.min(start + max_blob_size, blob_size);
      const chunk = new Blob([blob.slice(start, end)], {type: 'audio/pcm'});
      websocketSend(chunk);
  }
}


//生成32位随机数UUID
function getRandomStrNum(){
  var s = [];
  var hexDigits = "0123456789abcdef";
  for (var i = 0; i < 32; i++) {
    s[i] = hexDigits.substr(Math.floor(Math.random() * 0x10), 1);
  }
  s[14] = "4";  // bits 12-15 of the time_hi_and_version field to 0010
  s[19] = hexDigits.substr((s[19] & 0x3) | 0x8, 1);  // bits 6-7 of the clock_seq_hi_and_reserved to 01
  s[8] = s[13] = s[18] = s[23];

  var uuid = s.join("");
  return uuid;
}
/*!
 * 
 * js-audio-recorder - js audio recorder plugin
 * 
 * @version v0.5.7
 * @homepage https://github.com/2fps/recorder
 * @author 2fps <echoweb@126.com> (https://www.zhuyuntao.cn)
 * @license MIT
 *         
 */
// Vendored third-party library: js-audio-recorder v0.5.7 (MIT), minified
// webpack/UMD bundle — see the license banner above. Left byte-identical;
// comments below are review annotations only.
// UMD wrapper: exposes the default export as `Recorder` on CommonJS, AMD,
// or the global object (here: window.Recorder, used by ws.js).
!function (t, e) {
    "object" == typeof exports && "object" == typeof module ? module.exports = e() : "function" == typeof define && define.amd ? define([], e) : "object" == typeof exports ? exports.Recorder = e() : t.Recorder = e()
}(this, function () {
    // Standard webpack bootstrap: i() is the module loader, i.m/i.c/i.d/
    // i.r/i.t/i.n/i.o/i.p are the usual webpack runtime helpers.
    return function (t) {
        var e = {};

        function i(n) {
            if (e[n]) return e[n].exports;
            var r = e[n] = {
                i: n,
                l: !1,
                exports: {}
            };
            return t[n].call(r.exports, r, r.exports, i), r.l = !0, r.exports
        }

        return i.m = t, i.c = e, i.d = function (t, e, n) {
            i.o(t, e) || Object.defineProperty(t, e, {
                enumerable: !0,
                get: n
            })
        }, i.r = function (t) {
            "undefined" != typeof Symbol && Symbol.toStringTag && Object.defineProperty(t, Symbol.toStringTag, {
                value: "Module"
            }), Object.defineProperty(t, "__esModule", {
                value: !0
            })
        }, i.t = function (t, e) {
            if (1 & e && (t = i(t)), 8 & e) return t;
            if (4 & e && "object" == typeof t && t && t.__esModule) return t;
            var n = Object.create(null);
            if (i.r(n), Object.defineProperty(n, "default", {
                enumerable: !0,
                value: t
            }), 2 & e && "string" != typeof t)
                for (var r in t) i.d(n, r, function (e) {
                    return t[e]
                }.bind(null, r));
            return n
        }, i.n = function (t) {
            var e = t && t.__esModule ? function () {
                return t.default
            } : function () {
                return t
            };
            return i.d(e, "a", e), e
        }, i.o = function (t, e) {
            return Object.prototype.hasOwnProperty.call(t, e)
        }, i.p = "", i(i.s = 0)
    // Module 0: the Recorder implementation itself.
    }([function (t, e, i) {
        "use strict";
        Object.defineProperty(e, "__esModule", {
            value: !0
        });
        // The Recorder "class" (minified as n / local t). Captures mic audio
        // via WebAudio ScriptProcessor, buffers Float32 samples, and converts
        // them to PCM/WAV. config: sampleBits (8/16), sampleRate, numChannels
        // (1/2), compiling (convert-while-recording).
        var n = function () {
            function t(e) {
                void 0 === e && (e = {}), this.isplaying = !1, this.lBuffer = [], this.rBuffer = [], this.tempPCM = [], this.inputSampleBits = 16, this.playStamp = 0, this.playTime = 0, this.totalPlayTime = 0, this.offset = 0, this.fileSize = 0;
                var i, n = new (window.AudioContext || window.webkitAudioContext);
                this.inputSampleRate = n.sampleRate, this.config = {
                    sampleBits: ~[8, 16].indexOf(e.sampleBits) ? e.sampleBits : 16,
                    sampleRate: ~[8e3, 11025, 16e3, 22050, 24e3, 44100, 48e3].indexOf(e.sampleRate) ? e.sampleRate : this.inputSampleRate,
                    numChannels: ~[1, 2].indexOf(e.numChannels) ? e.numChannels : 1,
                    compiling: !!e.compiling || !1
                }, this.outputSampleRate = this.config.sampleRate, this.oututSampleBits = this.config.sampleBits, this.littleEdian = (i = new ArrayBuffer(2), new DataView(i).setInt16(0, 256, !0), 256 === new Int16Array(i)[0]), t.initUserMedia()
            }

            return t.prototype.initRecorder = function () {
                var t = this;
                this.context && this.destroy(), this.context = new (window.AudioContext || window.webkitAudioContext), this.analyser = this.context.createAnalyser(), this.analyser.fftSize = 2048;
                var e = this.context.createScriptProcessor || this.context.createJavaScriptNode;
                this.recorder = e.apply(this.context, [4096, this.config.numChannels, this.config.numChannels]), this.recorder.onaudioprocess = function (e) {
                    if (t.isrecording && !t.ispause) {
                        var i, n = e.inputBuffer.getChannelData(0),
                            r = null;
                        if (t.lBuffer.push(new Float32Array(n)), t.size += n.length, 2 === t.config.numChannels && (r = e.inputBuffer.getChannelData(1), t.rBuffer.push(new Float32Array(r)), t.size += r.length), t.config.compiling) {
                            var o = t.transformIntoPCM(n, r);
                            t.tempPCM.push(o), t.fileSize = o.byteLength * t.tempPCM.length
                        } else t.fileSize = Math.floor(t.size / Math.max(t.inputSampleRate / t.outputSampleRate, 1)) * (t.oututSampleBits / 8);
                        i = 100 * Math.max.apply(Math, n), t.duration += 4096 / t.inputSampleRate, t.onprocess && t.onprocess(t.duration), t.onprogress && t.onprogress({
                            duration: t.duration,
                            fileSize: t.fileSize,
                            vol: i,
                            data: t.tempPCM
                        })
                    }
                }
            }, t.prototype.start = function () {
                var t = this;
                if (!this.isrecording) return this.clear(), this.initRecorder(), this.isrecording = !0, navigator.mediaDevices.getUserMedia({
                    audio: !0
                }).then(function (e) {
                    t.audioInput = t.context.createMediaStreamSource(e), t.stream = e
                }).then(function () {
                    t.audioInput.connect(t.analyser), t.analyser.connect(t.recorder), t.recorder.connect(t.context.destination)
                })
            }, t.prototype.pause = function () {
                this.isrecording && !this.ispause && (this.ispause = !0)
            }, t.prototype.resume = function () {
                this.isrecording && this.ispause && (this.ispause = !1)
            }, t.prototype.stop = function () {
                this.isrecording = !1, this.audioInput && this.audioInput.disconnect(), this.recorder.disconnect()
            }, t.prototype.play = function () {
                this.stop(), this.source && this.source.stop(), this.isplaying = !0, this.playTime = 0, this.playAudioData()
            }, t.prototype.getPlayTime = function () {
                var t = 0;
                return (t = this.isplaying ? this.context.currentTime - this.playStamp + this.playTime : this.playTime) >= this.totalPlayTime && (t = this.totalPlayTime), t
            }, t.prototype.pausePlay = function () {
                !this.isrecording && this.isplaying && (this.source && this.source.disconnect(), this.playTime += this.context.currentTime - this.playStamp, this.isplaying = !1)
            }, t.prototype.resumePlay = function () {
                this.isrecording || this.isplaying || 0 === this.playTime || (this.isplaying = !0, this.playAudioData())
            }, t.prototype.stopPlay = function () {
                this.isrecording || (this.playTime = 0, this.isplaying = !1, this.source && this.source.stop())
            }, t.prototype.getWholeData = function () {

                return this.tempPCM
            // getNextData: the method ws.js polls every 100 ms. NOTE(review):
            // this looks locally patched relative to upstream 0.5.7 — it
            // drains tempPCM into this.PCM when non-empty, but the tail path
            // returns `new Blob([r])` (raw Float32 samples) instead of the
            // encoded PCM, and `this.offset = t1` is dead once tempPCM was
            // cleared. Behavior kept as-is; confirm against upstream.
            }, t.prototype.getNextData = function () {
                var t1 = this.tempPCM.length,
                    e1 = this.tempPCM.slice(this.offset)

                if (e1.length) {
                    var e = new ArrayBuffer(e1.length * e1[0].byteLength),
                        i = new DataView(e),
                        n = 0;
                    e1.forEach(function (t) {
                        for (var e = 0, r = t.byteLength; e < r; ++e) i.setInt8(n, t.getInt8(e)), n++
                    }), this.PCM = i, this.tempPCM = []
                }
                if (this.PCM) return this.PCM;
                var r = this.flat();
                r = t.compress(r, this.inputSampleRate, this.outputSampleRate), this.PCM = t.encodePCM(r, this.oututSampleBits, this.littleEdian)
                this.offset = t1
                return new Blob([r])
                // return  e1
            }, t.prototype.playAudioData = function () {
                var e = this;
                this.context.decodeAudioData(this.getWAV().buffer, function (t) {
                    e.source = e.context.createBufferSource(), e.source.buffer = t, e.totalPlayTime = e.source.buffer.duration, e.source.connect(e.analyser), e.analyser.connect(e.context.destination), e.source.start(0, e.playTime), e.playStamp = e.context.currentTime
                }, function (e) {
                    t.throwError(e)
                })
            }, t.prototype.getRecordAnalyseData = function () {
                if (this.ispause) return this.prevDomainData;
                var t = new Uint8Array(this.analyser.frequencyBinCount);
                return this.analyser.getByteTimeDomainData(t), this.prevDomainData = t
            }, t.prototype.getPlayAnalyseData = function () {
                return this.getRecordAnalyseData()
            }, t.prototype.getPCM = function () {
                if (this.tempPCM.length) {
                    var e = new ArrayBuffer(this.tempPCM.length * this.tempPCM[0].byteLength),
                        i = new DataView(e),
                        n = 0;
                    this.tempPCM.forEach(function (t) {
                        for (var e = 0, r = t.byteLength; e < r; ++e) i.setInt8(n, t.getInt8(e)), n++
                    }), this.PCM = i, this.tempPCM = []
                }
                if (this.PCM) return this.PCM;
                var r = this.flat();
                return r = t.compress(r, this.inputSampleRate, this.outputSampleRate), this.PCM = t.encodePCM(r, this.oututSampleBits, this.littleEdian)
            }, t.prototype.getPCMBlob = function () {
                return this.stop(), new Blob([this.getPCM()])
            }, t.prototype.downloadPCM = function (t) {
                void 0 === t && (t = "recorder");
                var e = this.getPCMBlob();
                this.download(e, t, "pcm")
            }, t.prototype.getWAV = function () {
                var e = this.getPCM();
                return t.encodeWAV(e, this.inputSampleRate, this.outputSampleRate, this.config.numChannels, this.oututSampleBits, this.littleEdian)
            }, t.prototype.getWAVBlob = function () {
                return this.stop(), new Blob([this.getWAV()], {
                    type: "audio/wav"
                })
            }, t.prototype.downloadWAV = function (t) {
                void 0 === t && (t = "recorder");
                var e = this.getWAVBlob();
                this.download(e, t, "wav")
            }, t.prototype.transformIntoPCM = function (e, i) {
                var n = new Float32Array(e),
                    r = new Float32Array(i),
                    o = t.compress({
                        left: n,
                        right: r
                    }, this.inputSampleRate, this.outputSampleRate);
                return t.encodePCM(o, this.oututSampleBits, this.littleEdian)
            }, t.prototype.destroy = function () {
                return this.stopStream(), this.closeAudioContext()
            }, t.prototype.stopStream = function () {
                this.stream && this.stream.getTracks && (this.stream.getTracks().forEach(function (t) {
                    return t.stop()
                }), this.stream = null)
            }, t.prototype.closeAudioContext = function () {
                return this.context && this.context.close && "closed" !== this.context.state ? this.context.close() : new Promise(function (t) {
                    t()
                })
            }, t.prototype.download = function (e, i, n) {
                try {
                    var r = document.createElement("a");
                    r.href = window.URL.createObjectURL(e), r.download = i + "." + n, r.click()
                } catch (e) {
                    t.throwError(e)
                }
            }, t.prototype.clear = function () {
                this.lBuffer.length = 0, this.rBuffer.length = 0, this.size = 0, this.fileSize = 0, this.PCM = null, this.audioInput = null, this.duration = 0, this.ispause = !1, this.isplaying = !1, this.playTime = 0, this.totalPlayTime = 0, this.source && (this.source.stop(), this.source = null)
            }, t.prototype.flat = function () {
                var t = null,
                    e = new Float32Array(0);
                1 === this.config.numChannels ? t = new Float32Array(this.size) : (t = new Float32Array(this.size / 2), e = new Float32Array(this.size / 2));
                for (var i = 0, n = 0; n < this.lBuffer.length; n++) t.set(this.lBuffer[n], i), i += this.lBuffer[n].length;
                i = 0;
                for (n = 0; n < this.rBuffer.length; n++) e.set(this.rBuffer[n], i), i += this.rBuffer[n].length;
                return {
                    left: t,
                    right: e
                }
            }, t.playAudio = function (t) {
                var e = document.createElement("audio");
                e.src = window.URL.createObjectURL(t), e.play()
            }, t.compress = function (t, e, i) {
                for (var n = e / i, r = Math.max(n, 1), o = t.left, s = t.right, a = Math.floor((o.length + s.length) / n), u = new Float32Array(a), h = 0, c = 0; h < a;) {
                    var l = Math.floor(c);
                    u[h] = o[l], h++, s.length && (u[h] = s[l], h++), c += r
                }
                return u
            }, t.encodePCM = function (t, e, i) {
                void 0 === i && (i = !0);
                var n = 0,
                    r = t.length * (e / 8),
                    o = new ArrayBuffer(r),
                    s = new DataView(o);
                if (8 === e)
                    for (var a = 0; a < t.length; a++, n++) {
                        var u = (h = Math.max(-1, Math.min(1, t[a]))) < 0 ? 128 * h : 127 * h;
                        u = +u + 128, s.setInt8(n, u)
                    } else
                    for (a = 0; a < t.length; a++, n += 2) {
                        var h = Math.max(-1, Math.min(1, t[a]));
                        s.setInt16(n, h < 0 ? 32768 * h : 32767 * h, i)
                    }
                return s
            }, t.encodeWAV = function (t, e, i, n, o, s) {
                void 0 === s && (s = !0);
                var a = i > e ? e : i,
                    u = o,
                    h = new ArrayBuffer(44 + t.byteLength),
                    c = new DataView(h),
                    l = n,
                    p = 0;
                r(c, p, "RIFF"), p += 4, c.setUint32(p, 36 + t.byteLength, s), r(c, p += 4, "WAVE"), r(c, p += 4, "fmt "), p += 4, c.setUint32(p, 16, s), p += 4, c.setUint16(p, 1, s), p += 2, c.setUint16(p, l, s), p += 2, c.setUint32(p, a, s), p += 4, c.setUint32(p, l * a * (u / 8), s), p += 4, c.setUint16(p, l * (u / 8), s), p += 2, c.setUint16(p, u, s), r(c, p += 2, "data"), p += 4, c.setUint32(p, t.byteLength, s), p += 4;
                for (var f = 0; f < t.byteLength;) c.setUint8(p, t.getUint8(f)), p++, f++;
                return c
            }, t.throwError = function (t) {
                throw new Error(t)
            }, t.initUserMedia = function () {
                void 0 === navigator.mediaDevices && (navigator.mediaDevices = {}), void 0 === navigator.mediaDevices.getUserMedia && (navigator.mediaDevices.getUserMedia = function (t) {
                    var e = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia;
                    return e ? new Promise(function (i, n) {
                        e.call(navigator, t, i, n)
                    }) : Promise.reject(new Error("浏览器不支持 getUserMedia !"))
                })
            }, t.getPermission = function () {
                return this.initUserMedia(), navigator.mediaDevices.getUserMedia({
                    audio: !0
                }).then(function (t) {
                    t.getTracks().forEach(function (t) {
                        return t.stop()
                    })
                })
            }, t
        }();

        // Helper: write an ASCII string into a DataView at the given offset
        // (used by encodeWAV for the RIFF/WAVE/fmt/data chunk tags).
        function r(t, e, i) {
            for (var n = 0; n < i.length; n++) t.setUint8(e + n, i.charCodeAt(n))
        }

        e.default = n
    }]).default
});


评论 7
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值