iFlytek text-to-speech example (React)

Using iFlytek's online speech-synthesis (TTS) WebSocket API, this example merges the multiple audio segments returned by the service into a single playable URL.
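
At a high level the pipeline is the following (the function names are the ones defined in audioPlayer.js below; the numbered outline is only an overview, not additional API):

// 1. connectWebSocket(text, callback)   -> opens wss://tts-api.xfyun.cn/v2/tts and collects the base64 audio frames
// 2. decodeAndConcatAudioFiles(frames)  -> decodeAudioData() turns each frame into an AudioBuffer
// 3. concatAudio(buffers)               -> copies all buffers into one long AudioBuffer
// 4. bufferToWave(buffer, length)       -> wraps the PCM data in a WAV header and returns a Blob
// 5. URL.createObjectURL(blob)          -> the URL passed to the callback and assigned to <audio>.src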

index.tsx

import { FC, useRef } from 'react';

import { useMemoizedFn } from 'ahooks';

import { connectWebSocket, textToSpeech } from './audioPlayer';

/**
 * Commonly used audio/video element properties:
 *  volume: writable; gets or sets the current volume, in the range 0–1.
 *  autoplay: writable; gets or sets whether the media starts playing automatically (true/false).
 *  paused: whether the media is currently paused.
 *  currentTime: the current playback position in seconds (fractional); the key value for building a custom progress bar.
 *  duration: the total length of the media, in seconds.
 *  ended: whether playback has finished (true/false).
 *  loop: writable; gets or sets whether playback restarts from the beginning when it ends (true/false).
 *  currentSrc: the URL of the media resource currently in use; read-only.
 *  muted: writable; gets or sets whether the audio is muted (true/false).
 *  controls: writable; gets or sets whether the browser's native controls are shown (true/false).
 *
 */
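
// Illustrative only: a parent's handleTimeUpdate callback could combine the
// currentTime and duration properties listed above to derive playback progress:
//   const onTimeUpdate = (audio: HTMLAudioElement) => {
//     const progress = audio.duration ? audio.currentTime / audio.duration : 0;
//     console.log(`progress: ${(progress * 100).toFixed(1)}%`);
//   };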

interface AudioProps {
  handleCanplay?: (audio: HTMLAudioElement) => void;
  handleDurationchange?: (audio: HTMLAudioElement, event?: unknown) => void;
  handleTimeUpdate?: (audio: HTMLAudioElement, event?: unknown) => void;
  handleEnd?: (audio: HTMLAudioElement, event?: unknown) => void;
}

const AudioPlayer: FC<AudioProps> = ({
  handleCanplay: handleAudioCanPlayer,
  handleDurationchange: handleAudioDurationchange,
  handleTimeUpdate: handleTimeUpdateChange,
  handleEnd: handleAudioEnd,
}) => {
  const audioPlayer = useRef<HTMLAudioElement>(null!);

  const handlePlayAudio = useMemoizedFn(() => {
    connectWebSocket('', (url: any) => {
      audioPlayer.current.src = url;
    });
  });

  const handleTextToSpeech = useMemoizedFn(() => {
    textToSpeech('', (url: string) => {
      audioPlayer.current.src = url;
    });
  });

  // fires when the media has buffered enough to start playing
  const handleCanplay = useMemoizedFn(() => {
    handleAudioCanPlayer?.(audioPlayer.current);
  });

  // fires when the media duration changes
  const handleDurationchange = useMemoizedFn((durationTime) => {
    handleAudioDurationchange?.(audioPlayer.current, durationTime);
  });

  // fires repeatedly while the media is playing
  const handleTimeUpdate = useMemoizedFn((time) => {
    handleTimeUpdateChange?.(audioPlayer.current, time);
  });

  // fires when playback ends
  const handleEnd = useMemoizedFn((time) => {
    handleAudioEnd?.(audioPlayer.current, time);
  });
  return (
    <>
      <audio
        src="https://www.soundhelix.com/examples/mp3/SoundHelix-Song-1.mp3"
        controls
        // muted
        preload="auto"
        id="audioId"
        ref={audioPlayer}
        onCanPlay={handleCanplay}
        onDurationChange={handleDurationchange}
        onTimeUpdate={handleTimeUpdate}
        onEnded={handleEnd}
      >
        浏览器不支持音频播放。
      </audio>

      {/* @ts-ignore */}
      <button onClick={handleTextToSpeech}>有道textToSpeech</button>

      <div>
        <textarea
          id="textarea"
          style={{ height: '200px', width: '500px' }}
          placeholder="请输入您要合成的文本"
          defaultValue="春江花月夜 春江潮水连海平,海上明月共潮生。 滟滟随波千万里,何处春江无月明。 江流宛转绕芳甸,月照花林皆似霰。 空里流霜不觉飞,汀上白沙看不见。 江天一色无纤尘,皎皎空中孤月轮。 江畔何人初见月?江月何年初照人?"
        />
      </div>

      <div>
        <button id="controll_tts" onClick={handlePlayAudio}>
          立即合成
        </button>
      </div>
    </>
  );
};

export default AudioPlayer;
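
For reference, a parent component might consume AudioPlayer like this. This is only a sketch: the Demo component and its log statements are hypothetical; only the callback props come from the interface above.

import AudioPlayer from './index';

const Demo = () => (
  <AudioPlayer
    handleCanplay={(audio) => console.log('ready, duration:', audio.duration)}
    handleTimeUpdate={(audio) => console.log('position:', audio.currentTime)}
    handleEnd={() => console.log('playback finished')}
  />
);

export default Demo;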

audioPlayer.js

// import { useLocation } from 'react-router-dom';
import CryptoJS from 'crypto-js';
import { Base64 } from 'js-base64';

const APPID = 'xxxxxxxx';
const API_SECRET = 'xxxxxxxxxx';
const API_KEY = 'xxxxxxxxx';

// Build the signed (authorized) WebSocket URL for iFlytek TTS
function getWebSocketUrl(apiKey, apiSecret) {
  var url = 'wss://tts-api.xfyun.cn/v2/tts';
  // var host = window.location.host;
  var host = '127.0.0.1:5500'; // hardcoded for local debugging; normally window.location.host

  var date = new Date().toUTCString(); // toGMTString() is deprecated
  var algorithm = 'hmac-sha256';
  var headers = 'host date request-line';
  var signatureOrigin = `host: ${host}\ndate: ${date}\nGET /v2/tts HTTP/1.1`;
  var signatureSha = CryptoJS.HmacSHA256(signatureOrigin, apiSecret);
  var signature = CryptoJS.enc.Base64.stringify(signatureSha);

  var authorizationOrigin = `api_key="${apiKey}", algorithm="${algorithm}", headers="${headers}", signature="${signature}"`;
  var authorization = btoa(authorizationOrigin);
  url = `${url}?authorization=${authorization}&date=${date}&host=${host}`;
  return url;
}
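
// The generated URL has roughly this shape (the values are placeholders, not real credentials):
//   wss://tts-api.xfyun.cn/v2/tts?authorization=<base64 of the api_key/algorithm/headers/signature string>
//     &date=<the same RFC 1123 date that was signed>&host=127.0.0.1:5500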

function encodeText(text, type) {
  if (type === 'unicode') {
    let buf = new ArrayBuffer(text.length * 4);
    let bufView = new Uint16Array(buf);
    for (let i = 0, strlen = text.length; i < strlen; i++) {
      bufView[i] = text.charCodeAt(i);
    }
    let binary = '';
    let bytes = new Uint8Array(buf);
    let len = bytes.byteLength;
    for (let i = 0; i < len; i++) {
      binary += String.fromCharCode(bytes[i]);
    }
    return window.btoa(binary);
  } else {
    return Base64.encode(text);
  }
}
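
// Example: encodeText('你好', 'UTF8') Base64-encodes the UTF-8 bytes E4 BD A0 E5 A5 BD and
// returns '5L2g5aW9'; the 'unicode' branch encodes the UTF-16 code units instead.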

/**
 * WebSocket: send the text to the service and resolve it into a playable audio URL
 * @param textarea: the text to synthesize
 * @param callback: callback that receives the playable URL
 * @param configs: optional speech-synthesis parameters, merged into the `business` block
 */

let ttsWS,
  audioData = [];
export const connectWebSocket = (textarea = '', callback, configs = {}) => {
  audioData = [];
  const url = getWebSocketUrl(API_KEY, API_SECRET);

  if ('WebSocket' in window) {
    ttsWS = new WebSocket(url);
  } else {
    alert('浏览器不支持WebSocket');
    return;
  }

  ttsWS.onopen = (e) => {
    var text = textarea || document.getElementById('textarea').value.trim() || '请输入您要合成的文本';
    var tte = 'UTF8';
    var params = {
      common: {
        app_id: APPID,
      },
      // Parameter reference: https://www.xfyun.cn/doc/tts/online_tts/API.html#%E6%8E%A5%E5%8F%A3%E8%B0%83%E7%94%A8%E6%B5%81%E7%A8%8B
      business: {
        aue: 'lame',
        sfl: 1,
        // aue: 'raw',
        auf: 'audio/L16;rate=16000',
        bgs: 1,
        volume: 100,
        tte,
        ...configs,
      },
      data: {
        status: 2,
        text: encodeText(text, tte),
      },
    };
    ttsWS.send(JSON.stringify(params));
  };

  ttsWS.onmessage = (e) => {
    let jsonData = JSON.parse(e.data);
    // synthesis failed
    if (jsonData.code !== 0) {
      console.error('语音合成失败===', jsonData);
      return;
    }
    // console.log('语音合成success===', jsonData);
    // collect this frame's base64-encoded audio
    audioData.push(jsonData.data.audio);

    // status === 2 marks the last frame: close the socket and build the final URL
    if (jsonData.data.status === 2) {
      ttsWS.close();
      // audio.src = 'data:audio/wav;base64,' + jsonData.data.audio;

      decodeAndConcatAudioFiles(audioData).then((url) => {
        callback?.(url);
      });
    }
  };

  // register error/close handlers once, outside onmessage, so connection
  // failures are reported even if no message ever arrives
  ttsWS.onerror = (e) => {
    console.error('WebSocket链接失败===', e);
  };
  ttsWS.onclose = (e) => {};
};
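
// Example call (sketch): `audioEl` stands for any <audio> element you hold a reference to,
// and the third argument overrides fields of the `business` block above (e.g. volume):
//   connectWebSocket('你好,世界', (url) => { audioEl.src = url; }, { volume: 80 });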

// Call Youdao's online TTS API directly
export const textToSpeech = (textarea = '', callback) => {
  var text = textarea || document.getElementById('textarea').value.trim() || '请输入您要合成的文本';
  // audio.src = "http://tts.baidu.com/text2audio?lan=zh&ie=UTF-8&text='" + encodeURI(textarea); // no longer works
  var url = `https://dict.youdao.com/dictvoice?audio=${encodeURI(text)}&le=zh`;
  callback?.(url);
};

// Create a shared AudioContext (webkit prefix for older Safari)
const AudioContext = window.AudioContext || window.webkitAudioContext;
const audioContext = new AudioContext();
// Note: browsers may keep the context suspended until a user gesture; resume() is best-effort here
audioContext.resume();

/**
 * Decode and concatenate the audio segments
 * audioFiles: array of base64-encoded audio chunks to be concatenated
 */
async function decodeAndConcatAudioFiles(audioFiles) {
  const audioBuffers = []; // decoded AudioBuffers

  for (let i = 0; i < audioFiles.length; i++) {
    const base64Data = audioFiles[i];

    // decode the base64 string into an ArrayBuffer
    const arrayBuffer = Uint8Array.from(atob(base64Data), (c) => c.charCodeAt(0)).buffer;

    // decode the ArrayBuffer into an AudioBuffer
    const audioBuffer = await audioContext.decodeAudioData(arrayBuffer);

    // collect the decoded audio
    audioBuffers.push(audioBuffer);
  }

  const concatAudioBuffer = concatAudio(audioBuffers); // merge the segments into one continuous AudioBuffer
  const newAudioSrc = URL.createObjectURL(bufferToWave(concatAudioBuffer, concatAudioBuffer.length));

  return newAudioSrc; // final playable URL for the <audio> element
}

// Concatenate AudioBuffers; reference: https://www.zhangxinxu.com/wordpress/2023/10/js-audio-audiobuffer-concat-merge/
export const concatAudio = (arrBufferList) => {
  // the AudioBuffers to merge
  const audioBufferList = arrBufferList;
  // maximum channel count across all buffers
  const maxChannelNumber = Math.max(...audioBufferList.map((audioBuffer) => audioBuffer.numberOfChannels));
  // total length in sample frames
  const totalLength = audioBufferList.map((buffer) => buffer.length).reduce((lenA, lenB) => lenA + lenB, 0);

  // create a new AudioBuffer large enough to hold everything
  const newAudioBuffer = audioContext.createBuffer(maxChannelNumber, totalLength, audioBufferList[0].sampleRate);
  // copy every buffer's channel data into the new AudioBuffer
  let offset = 0;

  audioBufferList.forEach((audioBuffer) => {
    for (let channel = 0; channel < audioBuffer.numberOfChannels; channel++) {
      newAudioBuffer.getChannelData(channel).set(audioBuffer.getChannelData(channel), offset);
    }

    offset += audioBuffer.length;
  });

  return newAudioBuffer;
};
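
// Worked example: two decoded segments of 16000 and 8000 frames at a 16 kHz sample rate
// merge into a single AudioBuffer of 24000 frames, i.e. 1.5 s of audio (24000 / 16000).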

// Convert an AudioBuffer to a WAV Blob
function bufferToWave(abuffer, len) {
  var numOfChan = abuffer.numberOfChannels,
    length = len * numOfChan * 2 + 44,
    buffer = new ArrayBuffer(length),
    view = new DataView(buffer),
    channels = [],
    i,
    sample,
    offset = 0,
    pos = 0;

  // write WAVE header
  // "RIFF"
  setUint32(0x46464952);
  // file length - 8
  setUint32(length - 8);
  // "WAVE"
  setUint32(0x45564157);
  // "fmt " chunk
  setUint32(0x20746d66);
  // length = 16
  setUint32(16);
  // PCM (uncompressed)
  setUint16(1);
  setUint16(numOfChan);
  setUint32(abuffer.sampleRate);
  // avg. bytes/sec
  setUint32(abuffer.sampleRate * 2 * numOfChan);
  // block-align
  setUint16(numOfChan * 2);
  // 16-bit (hardcoded in this demo)
  setUint16(16);
  // "data" - chunk
  setUint32(0x61746164);
  // chunk length
  setUint32(length - pos - 4);

  // write interleaved data
  for (i = 0; i < abuffer.numberOfChannels; i++) channels.push(abuffer.getChannelData(i));

  while (pos < length) {
    // interleave channels
    for (i = 0; i < numOfChan; i++) {
      // clamp
      sample = Math.max(-1, Math.min(1, channels[i][offset]));
      // scale to 16-bit signed int
      sample = (sample < 0 ? sample * 32768 : sample * 32767) | 0;
      // write 16-bit sample
      view.setInt16(pos, sample, true);
      pos += 2;
    }
    // next source sample
    offset++;
  }

  // create Blob
  return new Blob([buffer], { type: 'audio/wav' });

  function setUint16(data) {
    view.setUint16(pos, data, true);
    pos += 2;
  }

  function setUint32(data) {
    view.setUint32(pos, data, true);
    pos += 4;
  }
}
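
One housekeeping note: every synthesis run creates a fresh blob URL via URL.createObjectURL, and such URLs stay alive until they are explicitly released. A minimal cleanup sketch is shown below; the setAudioSrc helper is hypothetical and not part of the files above.

let lastUrl = null;

export const setAudioSrc = (audioEl, url) => {
  // release the previous blob URL before pointing the element at the new one
  if (lastUrl) URL.revokeObjectURL(lastUrl);
  lastUrl = url;
  audioEl.src = url;
};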
