提示:文章写完后,目录可以自动生成,如何生成可参考右边的帮助文档
文章目录
前言
此为VUE项目配置科大讯飞语音转写及语音合成功能:通过语音转写将人声转为文字,通过对话库找到对应答复,再由语音合成功能合成回复语音并播放,实现智能问答功能。前端实现中语音转写已实现;语音合成目前文字为写死,稍作修改即可使用。
一、功能配置
1.引入官方demo中的js
IatRecorder.js:
import CryptoJS from 'crypto-js'
import Worker from './transcode.worker.js'
const APPID = '自己的APPID'
const API_SECRET = '自己的API_SECRET'
const API_KEY = '自己的API_KEY'
// Single shared worker that converts raw Float32 microphone frames into
// 16 kHz / 16-bit PCM bytes for the recognizer.
// (Removed a leftover debug `console.log(transWorker)`.)
let transWorker = new Worker()
// Timestamps used only to log how long a recognition session lasted.
var startTime = ''
var endTime = ''
/**
 * Build the authenticated WebSocket URL for the iFLYTEK IAT
 * (speech-to-text) service: HMAC-SHA256 over `host`, `date` and the
 * request line, per the xfyun WebAPI authentication scheme.
 * @returns {Promise<string>} resolves with the ready-to-connect wss:// URL
 */
function getWebSocketUrl () {
  return new Promise((resolve, reject) => {
    // The endpoint host differs per language/region; this one serves zh_cn.
    const host = 'iat-api.xfyun.cn'
    const baseUrl = 'wss://iat-api.xfyun.cn/v2/iat'
    // toUTCString() replaces the deprecated toGMTString() (same output format).
    const date = new Date().toUTCString()
    const algorithm = 'hmac-sha256'
    const headers = 'host date request-line'
    const signatureOrigin = `host: ${host}\ndate: ${date}\nGET /v2/iat HTTP/1.1`
    const signatureSha = CryptoJS.HmacSHA256(signatureOrigin, API_SECRET)
    const signature = CryptoJS.enc.Base64.stringify(signatureSha)
    const authorizationOrigin = `api_key="${API_KEY}", algorithm="${algorithm}", headers="${headers}", signature="${signature}"`
    const authorization = btoa(authorizationOrigin)
    resolve(`${baseUrl}?authorization=${authorization}&date=${date}&host=${host}`)
  })
}
const IatRecorder = class {
  /**
   * Streaming speech-recognition (IAT) recorder for the iFLYTEK WebSocket API.
   * Captures microphone audio, hands raw frames to the transcode worker,
   * streams 16 kHz PCM chunks to the server and accumulates recognized text.
   *
   * Optional hooks the caller may assign:
   *   onWillStatusChange(oldStatus, newStatus) — status transitions
   *   onTextChange(text) — live recognition text
   *
   * @param {Object} [options]
   * @param {string} [options.language='zh_cn'] recognition language
   * @param {string} [options.accent='mandarin'] accent / dialect
   * @param {string} [options.appId=APPID] xfyun application id
   */
  constructor ({language, accent, appId} = {}) {
    let self = this
    this.status = 'null'
    this.language = language || 'zh_cn'
    this.accent = accent || 'mandarin'
    this.appId = appId || APPID
    // PCM bytes produced by the worker, waiting to be streamed.
    this.audioData = []
    // Final recognized text.
    this.resultText = ''
    // Intermediate text needed when dynamic correction (wpgs) is enabled.
    this.resultTextTemp = ''
    transWorker.onmessage = function (event) {
      self.audioData.push(...event.data)
    }
  }
  // Update recorder status, notifying the optional listener on real changes.
  setStatus (status) {
    this.onWillStatusChange && this.status !== status && this.onWillStatusChange(this.status, status)
    this.status = status
  }
  // Store recognition text and push it to the optional onTextChange hook.
  setResultText ({resultText, resultTextTemp} = {}) {
    this.onTextChange && this.onTextChange(resultTextTemp || resultText || '')
    resultText !== undefined && (this.resultText = resultText)
    resultTextTemp !== undefined && (this.resultTextTemp = resultTextTemp)
  }
  // Change recognition language / accent (takes effect on the next session).
  setParams ({language, accent} = {}) {
    language && (this.language = language)
    accent && (this.accent = accent)
  }
  // Open the recognition WebSocket and wire up its event handlers.
  connectWebSocket () {
    return getWebSocketUrl().then(url => {
      let iatWS
      if ('WebSocket' in window) {
        iatWS = new WebSocket(url)
      } else {
        alert('浏览器不支持WebSocket')
        return
      }
      this.webSocket = iatWS
      this.setStatus('init')
      iatWS.onopen = e => {
        this.setStatus('ing')
        // Give the worker a moment to produce the first audio frames.
        setTimeout(() => {
          this.webSocketSend()
        }, 500)
      }
      iatWS.onmessage = e => {
        this.result(e.data)
      }
      iatWS.onerror = e => {
        this.recorderStop()
      }
      iatWS.onclose = e => {
        endTime = Date.parse(new Date())
        console.log('持续时间', endTime - startTime)
        this.recorderStop()
      }
    })
  }
  // Set up browser recording: AudioContext + getUserMedia + ScriptProcessor.
  recorderInit () {
    navigator.getUserMedia =
      navigator.getUserMedia ||
      navigator.webkitGetUserMedia ||
      navigator.mozGetUserMedia ||
      navigator.msGetUserMedia
    // Create the audio context.
    try {
      this.audioContext = new (window.AudioContext || window.webkitAudioContext)()
      this.audioContext.resume()
      if (!this.audioContext) {
        alert('浏览器不支持webAudioApi相关接口')
        return
      }
    } catch (e) {
      if (!this.audioContext) {
        alert('浏览器不支持webAudioApi相关接口')
        return
      }
    }
    // Ask for microphone permission (modern promise API first, legacy fallback).
    if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
      navigator.mediaDevices
        .getUserMedia({
          audio: true,
          video: false
        })
        .then(stream => {
          getMediaSuccess(stream)
        })
        .catch(e => {
          getMediaFail(e)
        })
    } else if (navigator.getUserMedia) {
      navigator.getUserMedia(
        {
          audio: true,
          video: false
        },
        stream => {
          getMediaSuccess(stream)
        },
        function (e) {
          getMediaFail(e)
        }
      )
    } else {
      // Chrome only grants microphone access on secure origins.
      if (navigator.userAgent.toLowerCase().match(/chrome/) && location.origin.indexOf('https://') < 0) {
        alert('chrome下获取浏览器录音功能,因为安全性问题,需要在localhost或127.0.0.1或https下才能获取权限')
      } else {
        alert('无法获取浏览器录音功能,请升级浏览器或使用chrome')
      }
      this.audioContext && this.audioContext.close()
      return
    }
    // Permission granted: route the stream through a ScriptProcessor so raw
    // Float32 frames can be posted to the transcode worker while recording.
    let getMediaSuccess = stream => {
      this.scriptProcessor = this.audioContext.createScriptProcessor(0, 1, 1)
      this.scriptProcessor.onaudioprocess = e => {
        // Only forward audio while a session is active.
        if (this.status === 'ing') {
          transWorker.postMessage(e.inputBuffer.getChannelData(0))
        }
      }
      // MediaStreamAudioSourceNode makes the mic stream processable.
      this.mediaSource = this.audioContext.createMediaStreamSource(stream)
      this.mediaSource.connect(this.scriptProcessor)
      this.scriptProcessor.connect(this.audioContext.destination)
      this.connectWebSocket()
    }
    // Permission denied: release the audio context and close any open socket.
    let getMediaFail = (e) => {
      this.audioContext && this.audioContext.close()
      this.audioContext = undefined
      if (this.webSocket && this.webSocket.readyState === 1) {
        this.webSocket.close()
      }
    }
  }
  // Start (first call) or resume (subsequent calls) recording.
  recorderStart () {
    if (!this.audioContext) {
      this.recorderInit()
    } else {
      this.audioContext.resume()
      this.connectWebSocket()
    }
  }
  // Stop recording.
  recorderStop () {
    // On Safari, resuming after suspend() records silence, so skip suspend there.
    // Bug fix: the original tested `/Chrome/.test(navigator.userAgen)` (typo),
    // which always returned false, so suspend() was skipped on Chrome too
    // (Chrome's UA string also contains "Safari").
    if (!(/Safari/.test(navigator.userAgent) && !/Chrome/.test(navigator.userAgent))) {
      this.audioContext && this.audioContext.suspend()
    }
    this.setStatus('end')
  }
  // Base64-encode a byte buffer for the JSON payload.
  toBase64 (buffer) {
    var binary = ''
    var bytes = new Uint8Array(buffer)
    var len = bytes.byteLength
    for (var i = 0; i < len; i++) {
      binary += String.fromCharCode(bytes[i])
    }
    return window.btoa(binary)
  }
  // Stream audio over the WebSocket: the first frame carries the business
  // parameters (status 0), then 1280-byte chunks every 40 ms (status 1), and
  // a final empty frame (status 2) once recording has ended and drained.
  webSocketSend () {
    if (this.webSocket.readyState !== 1) {
      return
    }
    let audioData = this.audioData.splice(0, 1280)
    var params = {
      common: {
        app_id: this.appId
      },
      business: {
        language: this.language, // extra languages can be enabled in the xfyun console
        domain: 'iat',
        accent: this.accent // extra Chinese dialects can be enabled in the xfyun console
      },
      data: {
        status: 0,
        format: 'audio/L16;rate=16000',
        encoding: 'raw',
        audio: this.toBase64(audioData)
      }
    }
    console.log('参数language:', this.language)
    console.log('参数accent:', this.accent)
    this.webSocket.send(JSON.stringify(params))
    startTime = Date.parse(new Date())
    this.handlerInterval = setInterval(() => {
      // Socket closed underneath us: drop buffered audio and stop the timer.
      if (this.webSocket.readyState !== 1) {
        console.log('websocket未连接')
        this.audioData = []
        clearInterval(this.handlerInterval)
        return
      }
      if (this.audioData.length === 0) {
        console.log('自动关闭', this.status)
        // Recording finished and the buffer is drained: send the end frame.
        if (this.status === 'end') {
          this.webSocket.send(
            JSON.stringify({
              data: {
                status: 2,
                format: 'audio/L16;rate=16000',
                encoding: 'raw',
                audio: ''
              }
            })
          )
          this.audioData = []
          clearInterval(this.handlerInterval)
        }
        return false
      }
      audioData = this.audioData.splice(0, 1280)
      // Intermediate frame.
      this.webSocket.send(
        JSON.stringify({
          data: {
            status: 1,
            format: 'audio/L16;rate=16000',
            encoding: 'raw',
            audio: this.toBase64(audioData)
          }
        })
      )
    }, 40)
  }
  // Parse one server message and merge its text into the running result.
  result (resultData) {
    let jsonData = JSON.parse(resultData)
    if (jsonData.data && jsonData.data.result) {
      let data = jsonData.data.result
      let ws = data.ws
      let str = ''
      for (let i = 0; i < ws.length; i++) {
        str = str + ws[i].cw[0].w
      }
      console.log('识别的结果为:', str)
      // `pgs` is present when dynamic correction (wpgs) is enabled in the
      // console: "apd" appends this piece to the final result; "rpl" replaces
      // part of it (range given by the `rg` field).
      if (data.pgs) {
        if (data.pgs === 'apd') {
          // Promote the temporary text to the final result.
          this.setResultText({
            resultText: this.resultTextTemp
          })
        }
        // Keep the in-progress text in resultTextTemp.
        this.setResultText({
          resultTextTemp: this.resultText + str
        })
      } else {
        this.setResultText({
          resultText: this.resultText + str
        })
      }
    }
    // status 2 marks the server's final frame.
    if (jsonData.code === 0 && jsonData.data.status === 2) {
      this.webSocket.close()
    }
    if (jsonData.code !== 0) {
      this.webSocket.close()
      console.log(`${jsonData.code}:${jsonData.message}`)
    }
  }
  // Public API: begin a recognition session with cleared text buffers.
  start () {
    this.setResultText({resultText: '', resultTextTemp: ''})
    this.recorderStart()
  }
  // Public API: stop recording (the end frame is sent once audio drains).
  stop () {
    this.recorderStop()
  }
}
export default IatRecorder
transcode.worker.js:
// Worker entry point: each message carries one Float32Array of raw samples.
self.onmessage = (e) => {
  transAudioData.transcode(e.data)
}
let transAudioData = {
  /**
   * Convert a Float32 microphone buffer into 16 kHz 16-bit PCM bytes and
   * post them back to the main thread.
   * @param {Float32Array} audioData raw samples from the ScriptProcessor
   */
  transcode (audioData) {
    let output = transAudioData.to16kHz(audioData)
    output = transAudioData.to16BitPCM(output)
    output = Array.from(new Uint8Array(output.buffer))
    self.postMessage(output)
  },
  /**
   * Linearly resample a Float32 buffer down to 16 kHz.
   * @param {Float32Array|number[]} audioData source samples
   * @param {number} [fromRate=44100] source sample rate (generalized from the
   *   original hard-coded 44100; the default preserves old behavior)
   * @returns {Float32Array}
   */
  to16kHz (audioData, fromRate = 44100) {
    const data = new Float32Array(audioData)
    const fitCount = Math.round(data.length * (16000 / fromRate))
    const newData = new Float32Array(fitCount)
    if (fitCount === 0) {
      return newData
    }
    newData[0] = data[0]
    if (fitCount > 1) {
      const springFactor = (data.length - 1) / (fitCount - 1)
      for (let i = 1; i < fitCount - 1; i++) {
        const tmp = i * springFactor
        // Numeric indices; the original used `.toFixed()` which produced
        // string indices (coerced back, but needlessly).
        const before = Math.floor(tmp)
        const after = Math.ceil(tmp)
        const atPoint = tmp - before
        newData[i] = data[before] + (data[after] - data[before]) * atPoint
      }
      newData[fitCount - 1] = data[data.length - 1]
    }
    return newData
  },
  /**
   * Convert Float32 samples in [-1, 1] to little-endian signed 16-bit PCM.
   * @param {Float32Array} input
   * @returns {DataView} view over the packed PCM bytes
   */
  to16BitPCM (input) {
    const dataLength = input.length * (16 / 8)
    const dataView = new DataView(new ArrayBuffer(dataLength))
    let offset = 0
    for (let i = 0; i < input.length; i++, offset += 2) {
      // Clamp, then scale asymmetrically so -1 maps to -0x8000 and 1 to 0x7fff.
      const s = Math.max(-1, Math.min(1, input[i]))
      dataView.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7fff, true)
    }
    return dataView
  }
}
audio.js:
/*
* @Autor: lycheng
* @Date: 2020-01-13 16:12:22
*/
/**
* Created by iflytek on 2019/11/19.
*
* 在线语音合成调用demo
* 此demo只是一个简单的调用示例,不适合用到实际生产环境中
*
* 在线语音合成 WebAPI 接口调用示例 接口文档(必看):https://www.xfyun.cn/doc/tts/online_tts/API.html
* 错误码链接:
* https://www.xfyun.cn/doc/tts/online_tts/API.html
* https://www.xfyun.cn/document/error-code (code返回错误码时必看)
*
*/
// 1. websocket连接:判断浏览器是否兼容,获取websocket url并连接,这里为了方便本地生成websocket url
// 2. 连接websocket,向websocket发送数据,实时接收websocket返回数据
// 3. 处理websocket返回数据为浏览器可以播放的音频数据
// 4. 播放音频数据
// ps: 该示例用到了es6中的一些语法,建议在chrome下运行
// import {downloadPCM, downloadWAV} from 'js/download.js'
import CryptoJS from 'crypto-js'
import TransWorker from './transcode2.worker.js'
// import VConsole from 'vconsole'
import {Base64} from 'js-base64'
// import './index.css'
// Shared worker that turns the service's base64 PCM into playable Float32 data.
const transWorker = new TransWorker()
// APPID / APISecret / APIKey come from console -> My Apps -> TTS (streaming).
const APPID = '你的APPID'
const API_SECRET = '你的API_SECRET'
const API_KEY = '你的API_KEY'
/**
 * Build the authenticated WebSocket URL for the iFLYTEK TTS service:
 * HMAC-SHA256 over `host`, `date` and the request line, per the xfyun
 * WebAPI authentication scheme.
 * @returns {Promise<string>} resolves with the ready-to-connect wss:// URL
 */
function getWebsocketUrl () {
  return new Promise((resolve, reject) => {
    const baseUrl = 'wss://tts-api.xfyun.cn/v2/tts'
    // NOTE(review): the original signs with the page's own host rather than
    // tts-api.xfyun.cn; kept as-is since the demo works this way.
    const host = location.host
    // toUTCString() replaces the deprecated toGMTString() (same output format).
    const date = new Date().toUTCString()
    const algorithm = 'hmac-sha256'
    const headers = 'host date request-line'
    const signatureOrigin = `host: ${host}\ndate: ${date}\nGET /v2/tts HTTP/1.1`
    const signatureSha = CryptoJS.HmacSHA256(signatureOrigin, API_SECRET)
    const signature = CryptoJS.enc.Base64.stringify(signatureSha)
    const authorizationOrigin = `api_key="${API_KEY}", algorithm="${algorithm}", headers="${headers}", signature="${signature}"`
    const authorization = btoa(authorizationOrigin)
    resolve(`${baseUrl}?authorization=${authorization}&date=${date}&host=${host}`)
  })
}
const TTSRecorder = class {
  /**
   * Streaming text-to-speech player for the iFLYTEK TTS WebSocket API.
   * Sends the text in a single request, receives base64 PCM chunks, lets the
   * transcode worker convert them to Float32 at the playback rate, and plays
   * the growing buffer through the Web Audio API.
   *
   * Optional hook the caller may assign:
   *   onWillStatusChange(oldStatus, newStatus)
   *
   * @param {Object} [options]
   * @param {number} [options.speed=30] speech speed
   * @param {number} [options.voice=50] volume
   * @param {number} [options.pitch=50] pitch
   * @param {string} [options.voiceName='xiaoyan'] speaker
   * @param {string} [options.appId=APPID] xfyun application id
   * @param {string} [options.text=''] text to synthesize
   * @param {string} [options.tte='UTF8'] text encoding sent to the service
   * @param {string} [options.defaultText] fallback when text is empty
   */
  constructor ({
    speed = 30,
    voice = 50,
    pitch = 50,
    voiceName = 'xiaoyan',
    appId = APPID,
    text = '',
    tte = 'UTF8',
    defaultText = '请输入您要合成的文本'
  } = {}) {
    this.speed = speed
    this.voice = voice
    this.pitch = pitch
    this.voiceName = voiceName
    this.text = text
    this.tte = tte
    this.defaultText = defaultText
    this.appId = appId
    // Float32 samples ready for playback.
    this.audioData = []
    // Raw signed 16-bit samples as received (kept alongside the converted data).
    this.rawAudioData = []
    // Index of the first not-yet-played sample in audioData.
    this.audioDataOffset = 0
    this.status = 'init'
    transWorker.onmessage = (e) => {
      this.audioData.push(...e.data.data)
      this.rawAudioData.push(...e.data.rawAudioData)
    }
  }
  // Update status and notify the optional listener.
  setStatus (status) {
    this.onWillStatusChange && this.onWillStatusChange(this.status, status)
    this.status = status
  }
  // Update synthesis parameters and reset any audio buffered so far.
  setParams ({speed, voice, pitch, text, voiceName, tte}) {
    speed !== undefined && (this.speed = speed)
    voice !== undefined && (this.voice = voice)
    pitch !== undefined && (this.pitch = pitch)
    text && (this.text = text)
    tte && (this.tte = tte)
    voiceName && (this.voiceName = voiceName)
    this.resetAudio()
  }
  // Open the TTS WebSocket, send the request, and start playback shortly after.
  connectWebSocket () {
    this.setStatus('ttsing')
    return getWebsocketUrl().then(url => {
      let ttsWS
      if ('WebSocket' in window) {
        ttsWS = new WebSocket(url)
      } else {
        alert('浏览器不支持WebSocket')
        return
      }
      this.ttsWS = ttsWS
      ttsWS.onopen = e => {
        this.webSocketSend()
        // Give the worker a head start before playing the first chunk.
        this.playTimeout = setTimeout(() => {
          this.audioPlay()
        }, 1000)
      }
      ttsWS.onmessage = e => {
        this.result(e.data)
      }
      ttsWS.onerror = e => {
        clearTimeout(this.playTimeout)
        this.setStatus('errorTTS')
        alert('WebSocket报错,请f12查看详情')
        console.error(`详情查看:${encodeURI(url.replace('wss:', 'https:'))}`)
      }
      ttsWS.onclose = e => {
        // No action needed; playback continues from the buffered data.
      }
    })
  }
  // Unused placeholder kept for interface compatibility; the conversion is
  // actually done by the transcode worker.
  transToAudioData (audioData) {
  }
  // Send the one-and-only synthesis request (status 2 = complete text).
  webSocketSend () {
    var params = {
      common: {
        app_id: this.appId // APPID
      },
      business: {
        aue: 'raw',
        auf: 'audio/L16;rate=16000',
        vcn: this.voiceName,
        speed: this.speed,
        volume: this.voice,
        pitch: this.pitch,
        bgs: 0,
        tte: this.tte
      },
      data: {
        status: 2,
        text: this.encodeText(
          this.text || this.defaultText,
          this.tte === 'unicode' ? 'base64&utf16le' : ''
        )
      }
    }
    this.ttsWS.send(JSON.stringify(params))
  }
  // Encode text for the `data.text` field: plain Base64 by default, or
  // base64-encoded UTF-16LE when tte === 'unicode'.
  encodeText (text, encoding) {
    switch (encoding) {
      case 'utf16le' : {
        let buf = new ArrayBuffer(text.length * 4)
        let bufView = new Uint16Array(buf)
        for (let i = 0, strlen = text.length; i < strlen; i++) {
          bufView[i] = text.charCodeAt(i)
        }
        return buf
      }
      case 'buffer2Base64': {
        let binary = ''
        let bytes = new Uint8Array(text)
        let len = bytes.byteLength
        for (let i = 0; i < len; i++) {
          binary += String.fromCharCode(bytes[i])
        }
        return window.btoa(binary)
      }
      case 'base64&utf16le' : {
        return this.encodeText(this.encodeText(text, 'utf16le'), 'buffer2Base64')
      }
      default : {
        return Base64.encode(text)
      }
    }
  }
  // Handle one server message: abort on error codes, otherwise forward the
  // base64 audio chunk to the transcode worker.
  result (resultData) {
    let jsonData = JSON.parse(resultData)
    if (jsonData.code !== 0) {
      alert(`合成失败: ${jsonData.code}:${jsonData.message}`)
      console.error(`${jsonData.code}:${jsonData.message}`)
      this.resetAudio()
      return
    }
    transWorker.postMessage(jsonData.data.audio)
    // status 2 marks the final audio frame; the session is complete.
    // (Simplified: the original re-tested `jsonData.code === 0` here, which
    // is always true after the early return above.)
    if (jsonData.data.status === 2) {
      this.ttsWS.close()
    }
  }
  // Stop playback and discard all buffered audio and socket state.
  resetAudio () {
    this.audioStop()
    this.setStatus('init')
    this.audioDataOffset = 0
    this.audioData = []
    this.rawAudioData = []
    this.ttsWS && this.ttsWS.close()
    clearTimeout(this.playTimeout)
  }
  // Lazily create the AudioContext used for playback.
  audioInit () {
    let AudioContext = window.AudioContext || window.webkitAudioContext
    if (AudioContext) {
      this.audioContext = new AudioContext()
      this.audioContext.resume()
      this.audioDataOffset = 0
    }
  }
  // Play whatever has been buffered since the last call; reschedules itself
  // from onended until the buffer is exhausted.
  audioPlay () {
    this.setStatus('play')
    let audioData = this.audioData.slice(this.audioDataOffset)
    this.audioDataOffset += audioData.length
    // 22050 Hz must match the transcode worker's resampling target rate.
    let audioBuffer = this.audioContext.createBuffer(1, audioData.length, 22050)
    let nowBuffering = audioBuffer.getChannelData(0)
    if (audioBuffer.copyToChannel) {
      audioBuffer.copyToChannel(new Float32Array(audioData), 0, 0)
    } else {
      // Fallback for browsers without copyToChannel.
      for (let i = 0; i < audioData.length; i++) {
        nowBuffering[i] = audioData[i]
      }
    }
    let bufferSource = this.bufferSource = this.audioContext.createBufferSource()
    bufferSource.buffer = audioBuffer
    bufferSource.connect(this.audioContext.destination)
    bufferSource.start()
    bufferSource.onended = event => {
      if (this.status !== 'play') {
        return
      }
      if (this.audioDataOffset < this.audioData.length) {
        this.audioPlay()
      } else {
        this.audioStop()
      }
    }
  }
  // Stop the current buffer source and rewind the playback offset.
  audioStop () {
    this.setStatus('endPlay')
    clearTimeout(this.playTimeout)
    this.audioDataOffset = 0
    if (this.bufferSource) {
      try {
        this.bufferSource.stop()
      } catch (e) {
        // Ignore: stop() throws if the source was never started.
      }
    }
  }
  // Public API: replay buffered audio if present, otherwise start a session.
  start () {
    if (this.audioData.length) {
      this.audioPlay()
    } else {
      if (!this.audioContext) {
        this.audioInit()
      }
      if (!this.audioContext) {
        alert('该浏览器不支持webAudioApi相关接口')
        return
      }
      this.connectWebSocket()
    }
  }
  // Public API: stop playback.
  stop () {
    this.audioStop()
  }
}
export default TTSRecorder
transcode2.worker.js:
/*
* @Autor: lycheng
* @Date: 2020-01-13 16:12:22
*/
const TransWorker = (function () {
  // Worker entry point: each message carries one base64 audio chunk.
  self.onmessage = function (e) {
    transcode.transToAudioData(e.data)
  }
  var transcode = {
    /**
     * Decode base64 16 kHz PCM from the TTS service, convert to Float32 and
     * resample to the playback rate, then post both representations back.
     * Bug fix: the original default `toRate` was 22505 — a typo; audio.js
     * plays the result through an AudioBuffer created at 22050 Hz, so the
     * mismatch made playback ~2% off-rate.
     */
    transToAudioData: function (audioDataStr, fromRate = 16000, toRate = 22050) {
      let outputS16 = transcode.base64ToS16(audioDataStr)
      let output = transcode.transS16ToF32(outputS16)
      output = transcode.transSamplingRate(output, fromRate, toRate)
      output = Array.from(output)
      self.postMessage({
        data: output,
        rawAudioData: Array.from(outputS16)
      })
    },
    // Linear-interpolation resampler (same algorithm as the IAT worker).
    transSamplingRate: function (data, fromRate = 44100, toRate = 16000) {
      var fitCount = Math.round(data.length * (toRate / fromRate))
      var newData = new Float32Array(fitCount)
      var springFactor = (data.length - 1) / (fitCount - 1)
      newData[0] = data[0]
      for (let i = 1; i < fitCount - 1; i++) {
        var tmp = i * springFactor
        // Numeric indices; the original used `.toFixed()` string indices.
        var before = Math.floor(tmp)
        var after = Math.ceil(tmp)
        var atPoint = tmp - before
        newData[i] = data[before] + (data[after] - data[before]) * atPoint
      }
      newData[fitCount - 1] = data[data.length - 1]
      return newData
    },
    // Map signed 16-bit samples to floats in [-1, 1].
    transS16ToF32: function (input) {
      var tmpData = []
      for (let i = 0; i < input.length; i++) {
        var d = input[i] < 0 ? input[i] / 0x8000 : input[i] / 0x7fff
        tmpData.push(d)
      }
      return new Float32Array(tmpData)
    },
    // Decode a base64 string into an Int16Array of PCM samples.
    base64ToS16: function (base64AudioData) {
      base64AudioData = atob(base64AudioData)
      const outputArray = new Uint8Array(base64AudioData.length)
      for (let i = 0; i < base64AudioData.length; ++i) {
        outputArray[i] = base64AudioData.charCodeAt(i)
      }
      return new Int16Array(new DataView(outputArray.buffer).buffer)
    }
  }
})()
export default TransWorker
2.页面调用
代码如下:
<template>
<div class="body">
<p>按住录制音频</p>
<!-- Commented-out alternative triggers kept from the original demo -->
<!-- <img src="../../static/image/timg.gif" alt="" @click="translationStart" >-->
<!-- <span @click="translationStart">停止</span>-->
<!-- Speech-to-text controls: start / stop recording -->
<button class="taste-button ready-button" @click="translationStart">开始</button>
<button class="taste-button ready-button" @click="translationEnd">结束</button>
<!-- Text-to-speech controls: synthesize / stop playback -->
<div class="contert">
<button @click="play">开始合成</button>
<button @click="pause">停止播放</button>
</div>
</div>
</template>
<script>
import IatRecorder from '@/assets/js/IatRecorder'
import TtsRecorder from '@/assets/js/audio'
const ttsRecorder = new TtsRecorder()
// Bug fix: IatRecorder's constructor destructures a single options object;
// the original passed three positional strings ('en_us', 'mandarin', appId),
// which were silently ignored and the defaults were used instead.
const iatRecorder = new IatRecorder({language: 'en_us', accent: 'mandarin', appId: '5fe58946'})
export default {
  data () {
    return {}
  },
  mounted () {
  },
  created () {
  },
  methods: {
    // Begin a speech-recognition session.
    translationStart () {
      iatRecorder.start()
    },
    // End the session; the final frame is sent once buffered audio drains.
    translationEnd () {
      iatRecorder.stop()
    },
    // Synthesize and play the configured text.
    play () {
      ttsRecorder.setParams({
        // Text to synthesize (hard-coded placeholder; replace with real reply text).
        text: '1',
        // Speaker: voiceName (omitted -> default 'xiaoyan')
        // Speech speed
        speed: 50,
        // Volume
        voice: 50
      })
      ttsRecorder.start()
    },
    // Stop playback.
    pause () {
      ttsRecorder.stop()
    }
  }
}
</script>
<style scoped>
/* Page container: disable text selection while holding to record. */
.body {
user-select: none;
}
audio {
display: block;
margin-bottom: 10px;
}
#audio-container {
padding: 20px 0;
}
/* Generic button styling carried over from the xfyun demo page. */
.ui-btn {
display: inline-block;
padding: 5px 20px;
font-size: 14px;
line-height: 1.428571429;
box-sizing: content-box;
text-align: center;
border: 1px solid #e8e8e8;
border-radius: 3px;
color: #555;
background-color: #fff;
border-color: #e8e8e8;
white-space: nowrap;
cursor: pointer;
-webkit-user-select: none;
-moz-user-select: none;
-ms-user-select: none;
user-select: none;
}
.ui-btn:hover,
.ui-btn.hover {
color: #333;
text-decoration: none;
background-color: #f8f8f8;
border: 1px solid #ddd;
}
.ui-btn:focus,
.ui-btn:active {
color: #333;
outline: 0;
}
/* Disabled states for the generic button. */
.ui-btn.disabled,
.ui-btn.disabled:hover,
.ui-btn.disabled:active,
.ui-btn[disabled],
.ui-btn[disabled]:hover,
.ui-state-disabled .ui-btn {
cursor: not-allowed;
background-color: #eee;
border-color: #eee;
color: #aaa;
}
/* Primary action button pinned near the bottom of the viewport. */
.ui-btn-primary {
color: #fff;
background-color: #39b54a;
border-color: #39b54a;
position: fixed;
bottom: 1.5rem;
width: 80%;
margin-left: 10%;
padding: 0.5rem 0;
}
.ui-btn-primary:hover,
.ui-btn-primary.hover {
color: #fff;
background-color: #16a329;
border-color: #16a329;
}
.ui-btn-primary:focus,
.ui-btn-primary:active {
color: #fff;
}
.ui-btn-primary.disabled:focus {
color: #aaa;
}
img {
display: block;
width: 40%;
margin: auto;
}
body {
margin: 0;
padding: 0;
}
/* Recording overlay (hidden by default). */
#mask {
width: 43%;
background: rgba(0, 0, 0, 0.05);
padding: 3rem 0 1rem 0;
display: none;
margin: 2rem auto;
margin-top: 51%;
}
#mask p {
text-align: center;
font-size: 0.8rem;
color: rgba(0, 0, 0, 0.5);
}
</style>
二、踩坑+填坑
1.new Worker()报错
IatRecorder.js 中 let transWorker = new Worker() 报错,原因是VUE项目无法直接使用 new Worker 这一特性。直接上解决方案:
首先安装worker-loader的2.0.0版本:
npm install worker-loader@2.0.0 -D
config下的index.js添加配置:
configureWebpack: config => {
config.module.rules.push({
test: /\.worker.js$/,
use: {
loader: 'worker-loader',
options: { inline: true, name: 'workerName.[hash].js' }
}
})
},
parallel: false,
chainWebpack: config => {
config.output.globalObject('this')
}
build下webpack.base.conf.js文件增加配置:
{
test: /\.worker\.js$/,
use: {
loader: 'worker-loader',
}
}
2.webpack版本问题
webpack版本我最后使用的4.44.1,对应webpack-dev-server等版本也尽量修改为适配的版本,更换版本后,重新执行npm install,如果出现错误,手动删除node_modules文件夹,再次执行即可。
package.json代码如下:
"dependencies": {
"@amap/amap-jsapi-loader": "^0.0.1",
"axios": "^0.19.0",
"babel-plugin-syntax-dynamic-import": "^6.18.0",
"crypto-js": "^4.0.0",
"dayjs": "^1.9.6",
"echarts": "^4.2.1",
"element-ui": "^2.13.1",
"jquery": "^3.5.1",
"recorder-core": "^1.1.20112600",
"vue": "^2.6.10",
"vue-cookies": "^1.5.13",
"vue-router": "^3.0.1",
"vue-uuid": "^2.0.2",
"vxe-table": "^1.5.15",
"vxe-table-plugin-element": "^1.1.1",
"webpack-dev-server": "^3.11.0",
"xe-utils": "^1.9.6"
},
"devDependencies": {
"address": "^1.0.3",
"autoprefixer": "^7.1.2",
"babel-core": "^6.22.1",
"babel-eslint": "^8.2.1",
"babel-helper-vue-jsx-merge-props": "^2.0.3",
"babel-loader": "^7.1.1",
"babel-plugin-syntax-jsx": "^6.18.0",
"babel-plugin-transform-runtime": "^6.22.0",
"babel-plugin-transform-vue-jsx": "^3.5.0",
"babel-preset-env": "^1.3.2",
"babel-preset-stage-2": "^6.22.0",
"chalk": "^2.0.1",
"copy-webpack-plugin": "^4.0.1",
"css-loader": "^0.28.0",
"eslint": "^4.15.0",
"eslint-config-standard": "^10.2.1",
"eslint-friendly-formatter": "^3.0.0",
"eslint-loader": "^2.1.2",
"eslint-plugin-import": "^2.7.0",
"eslint-plugin-node": "^5.2.0",
"eslint-plugin-promise": "^3.4.0",
"eslint-plugin-standard": "^3.0.1",
"eslint-plugin-vue": "^4.0.0",
"extract-text-webpack-plugin": "^3.0.0",
"file-loader": "^1.1.4",
"friendly-errors-webpack-plugin": "^1.6.1",
"html-webpack-plugin": "^3.2.0",
"mockjs": "^1.0.1-beta3",
"node-notifier": "^5.1.2",
"node-sass": "^4.12.0",
"optimize-css-assets-webpack-plugin": "^3.2.0",
"ora": "^1.2.0",
"portfinder": "^1.0.13",
"postcss-import": "^11.0.0",
"postcss-loader": "^2.0.8",
"postcss-url": "^7.2.1",
"rimraf": "^2.6.0",
"sass-loader": "^7.1.0",
"semver": "^5.3.0",
"shelljs": "^0.7.6",
"uglifyjs-webpack-plugin": "^1.1.1",
"url-loader": "^0.5.8",
"vue-loader": "^15.9.3",
"vue-style-loader": "^3.0.1",
"vue-template-compiler": "^2.5.2",
"webpack": "^4.44.1",
"webpack-bundle-analyzer": "^2.9.0",
"webpack-cli": "^3.3.12",
"webpack-merge": "^4.1.0",
"worker-loader": "^2.0.0"
}
3.混用import 以及module.exports报错问题
启动项目前台报错:Uncaught TypeError: Cannot assign to read only property 'exports' of object '#&lt;Object&gt;'。
找到项目中的.babelrc文件,plugins增加配置transform-es2015-modules-commonjs。
"plugins": ["transform-vue-jsx", "transform-runtime","transform-es2015-modules-commonjs"]