When a file is very large or the network is unreliable, a dropped connection means the entire upload is lost and has to start over from the beginning, which makes for a terrible user experience. Instead, we can split the file into small chunks on the client, upload the pieces individually, and merge them back together on the server, so a network interruption only costs us a single chunk rather than the whole transfer.
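The server-side half of that idea is just concatenating the chunks back together in order. Here is a minimal Node.js sketch, assuming the chunks of one upload were saved into a directory and named by their index (the directory layout and naming scheme are assumptions for illustration; a real service would also validate the pieces and clean up afterwards):

const fs = require('fs')
const path = require('path')

// merge chunk files named 0, 1, 2, ... inside chunkDir into targetFile
function mergeChunks (chunkDir, targetFile, chunkCount) {
  for (let i = 0; i < chunkCount; i++) {
    // append each chunk in index order so the bytes line up
    fs.appendFileSync(targetFile, fs.readFileSync(path.join(chunkDir, String(i))))
  }
}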
Using Web Workers to spread the chunking work across more threads and speed it up
index.js
const fileDom = document.querySelector('input')
// size of each chunk: 2 MB
const CHUNK_SIZE = 2 * 1024 * 1024
// maximum number of worker threads this machine can usefully run
const MAX_WORKER = navigator.hardwareConcurrency || 4
let finished = 0
const result = []
// when a file is selected, read its total size and compute the number of chunks
fileDom.onchange = function (e) {
const file = e.target.files[0]
const chunkLength = Math.ceil(file.size / CHUNK_SIZE)
// number of chunks each worker is responsible for
const count = Math.ceil(chunkLength / MAX_WORKER)
for (let i = 0; i < MAX_WORKER; i++) {
const myWorker = new Worker('./fileWorker.js', { type: 'module' })
const startIndex = i * count
let endIndex = startIndex + count
if (endIndex > chunkLength) {
endIndex = chunkLength
}
myWorker.postMessage([file, CHUNK_SIZE, startIndex, endIndex])
myWorker.onmessage = function (e) {
// terminate each worker as soon as it finishes
finished++
myWorker.terminate()
e.data.forEach(element => {
result[element.index] = element
});
if (finished === MAX_WORKER) {
// all chunks are collected; hand off to the upload step (see the sketch below)
console.log(result, 'final chunk list')
}
}
}
}
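Where that console.log sits, the real upload would begin. A minimal sketch of that step, assuming a hypothetical /upload endpoint that accepts one chunk per request (the URL and form field names are placeholders, not a fixed API; retries and concurrency limits are omitted for brevity):

async function uploadChunks (chunks) {
  for (const { chunkFile, index } of chunks) {
    const formData = new FormData()
    formData.append('index', index)      // position used by the server-side merge
    formData.append('chunk', chunkFile)  // the Blob produced by file.slice
    await fetch('/upload', { method: 'POST', body: formData })
  }
}

Uploading sequentially keeps the example short; in practice a few requests can be in flight at once, and a failed chunk can be retried on its own, which is the whole point of chunking.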
fileWorker.js
// an MD5 hash of each chunk can be computed later (e.g. with spark-md5) as a fingerprint
// import SparkMD5 from 'spark-md5'
// addEventListener("message", (e) => {
// console.log(e.data, 'e')
// });
// the commented addEventListener above is equivalent to the onmessage handler below
self.onmessage = async function (e) {
// each worker receives the file handle, chunk size, and its assigned index range
const [file, CHUNK_SIZE, startIndex, endIndex] = e.data
const result = []
for (let i = startIndex; i < endIndex; i++) {
const chunk = await getChunk(file, CHUNK_SIZE, i)
result.push(chunk)
}
self.postMessage(result)
}
function getChunk (file, size, index) {
// the per-chunk hash is not computed here yet; see the note and sketch at the end
return new Promise((resolve) => {
const start = index * size
const end = start + size
const chunkFile = file.slice(start, end)
const fr = new FileReader()
fr.onload = function (e) {
// const arrBuffer = e.target.result
// const hash = new SparkMD5.ArrayBuffer()
resolve({
start, end, chunkFile, index
})
}
fr.readAsArrayBuffer(chunkFile)
})
}
The hash value hasn't been wired in yet; that part will be completed in a follow-up.
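As a sketch of what that follow-up could look like: uncomment the spark-md5 import (a bundler or import map has to resolve the bare specifier, since the worker is loaded as a module) and hash each chunk's bytes inside the FileReader callback. SparkMD5.ArrayBuffer is spark-md5's documented incremental hasher for binary data:

import SparkMD5 from 'spark-md5'

function getChunk (file, size, index) {
  return new Promise((resolve) => {
    const start = index * size
    const end = start + size
    const chunkFile = file.slice(start, end)
    const fr = new FileReader()
    fr.onload = function (e) {
      // feed the chunk's ArrayBuffer to the hasher and read out the hex digest
      const spark = new SparkMD5.ArrayBuffer()
      spark.append(e.target.result)
      resolve({ start, end, chunkFile, index, hash: spark.end() })
    }
    fr.readAsArrayBuffer(chunkFile)
  })
}

With a per-chunk hash in hand, the server can verify each piece, and a common trick is to hash the concatenated chunk hashes to get an identifier for the whole file without reading it twice.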