现需要做大于 1G 文件的上传,于是考虑实现切片上传及断点续传等功能,需要和后端友好配合。
直接上代码!!!!
技术栈实现:vue2 element minio springboot
<div v-show="uploadLoading" style="width: 100%">
<div class="progress-block">
<el-progress
:text-inside="true"
:stroke-width="16"
:percentage="percentage"
></el-progress>
</div>
</div>
<el-upload
:action="uploadUrl"
:accept="accept"
:limit="limit"
:on-exceed="handleExceed"
:before-upload="beforeUpload"
:headers="headers"
:show-file-list="false"
:http-request="customHttpRequest"
:file-list="fileList"
class="upload-demo"
:multiple="multiple"
ref="batchUpload"
:disabled="disabled"
/>
<slot name="filePreviewComponent"></slot>
<draggable
class="upload-list"
v-model="fileArr"
filter=".forbid"
animation="300"
@end="onMoveEnd"
>
<!-- 自定义的附件列表 -->
<transition-group>
<div
v-for="(item, index) in fileArr"
:key="index"
class="upload-list-item"
>
<div :title="item.originalName" class="upload-list-item-title">
<img
style="margin-right: 5px"
:src="showTypeTip(item)"
width="15px"
height="18px"
alt=""
/>
<span>{{ item.originalName }}</span>
</div>
<div class="upload_options">
<span class="preview-class" @click="preview(item)">
<i class="el-icon-view"></i>
预览</span
>
</div>
</div>
</transition-group>
</draggable>
<script>
// Upload logic is mixed in via webUploaderMixin so other attachment
// components can reuse the same chunked-upload / resume behavior.
import { webUploaderMixin } from "@/components/upload/webUploader.js";
export default {
  mixins: [webUploaderMixin],
}
</script>
webUploaderMixin.js
// 相关依赖需要下载
import md5 from "@/utils/md5.js"; //计算文件的md5
import axios from 'axios'
import Queue from 'promise-queue-plus';
import { ref } from 'vue'
// 文件上传分块任务的队列(用于移除文件时,停止该文件的上传队列) key:fileUid value: queue object
import { getTaskInfoUpload, initTaskUpload, preSignUrlUpload, preMergeUpload } from '@/api/webUploader.js';
export const webUploaderMixin = {
data() {
return {
// Map of fileUid -> upload queue for that file's chunks. Kept so the
// queue can be stopped when the user removes a file mid-upload.
fileUploadChunkQueue: {}
}
},
methods: {
/**
* el-upload 自定义上传方法入口
*/
async customHttpRequest(options) {
const file = options.file;
const task = await this.getTaskInfo(file); //首先匹配文件的md5,查询该md5是否存在,存在则可以直接添加
if (task) {
const { finished, path, taskRecord, attach } = task
const { fileIdentifier: identifier } = taskRecord
//如果之前已经上传过则直接添加到附件列表
if (finished) {
let attachResponse = {
code: 200,
data: attach
}
//之前已经上传过该附件直接秒传赋值就可以
this.handleSuccess(attachResponse)
return path
} else {
const errorList = await this.handleUpload(file, taskRecord, options)
if (errorList.length > 0) {
this.msgError("文件上传错误");
return;
}
// const { code, data, msg } =
let upLoadRes = await preMergeUpload(identifier)
if (upLoadRes.code === 200) {
//上传完成
this.handleSuccess(upLoadRes)
return path;
} else {
this.msgError("文件上传错误");
}
}
} else {
this.msgError("文件上传错误");
}
},
/**
* 上传逻辑处理,如果文件已经上传完成(完成分块合并操作),则不会进入到此方法中
*/
handleUpload(file, taskRecord, options) {
let lastUploadedSize = 0; // 上次断点续传时上传的总大小
let uploadedSize = 0 // 已上传的大小
const totalSize = file.size || 0 // 文件总大小
let startMs = new Date().getTime(); // 开始上传的时间
const { exitPartList, chunkSize, chunkNum, fileIdentifier } = taskRecord
// 获取从开始上传到现在的平均速度(byte/s)
const getSpeed = () => {
// 已上传的总大小 - 上次上传的总大小(断点续传)= 本次上传的总大小(byte)
const intervalSize = uploadedSize - lastUploadedSize
const nowMs = new Date().getTime()
// 时间间隔(s)
const intervalTime = (nowMs - startMs) / 1000
return intervalSize / intervalTime
}
const uploadNext = async (partNumber) => {
const start = new Number(chunkSize) * (partNumber - 1)
const end = start + new Number(chunkSize)
const blob = file.slice(start, end)
const { code, detailMsg, msg } = await preSignUrlUpload({ identifier: fileIdentifier, partNumber: partNumber })
if (code === 200 && detailMsg) {
await axios.request({
url: detailMsg,
method: 'PUT',
data: blob,
headers: { 'Content-Type': 'application/octet-stream' }
})
return Promise.resolve({ partNumber: partNumber, uploadedSize: blob.size })
}
return Promise.reject(`分片${partNumber}, 获取上传地址失败`)
}
/**
* 更新上传进度
* @param increment 为已上传的进度增加的字节量
*/
const updateProcess = (increment) => {
increment = new Number(increment)
const { onProgress } = options
let factor = 1000; // 每次增加1000 byte
let from = 0;
// 通过循环一点一点的增加进度
while (from <= increment) {
from += factor
uploadedSize += factor
//百分比与 100 进行比较,取较小的值 更新进度
const percent = Math.min((100, Number(Math.round(uploadedSize / totalSize * 100))))
this.percentage = percent ? percent : 0
onProgress({ percent: percent })
}
const speed = getSpeed();
const remainingTime = speed != 0 ? Math.ceil((totalSize - uploadedSize) / speed) + 's' : '未知'
console.log('剩余大小:', (totalSize - uploadedSize) / 1024 / 1024, 'mb');
console.log('当前速度:', (speed / 1024 / 1024).toFixed(2), 'mbps');
console.log('预计完成:', remainingTime);
}
return new Promise(resolve => {
const failArr = [];
const queue = Queue(5, {
"retry": 3, //Number of retries
"retryIsJump": false, //retry now?
"workReject": function (reason, queue) {
failArr.push(reason)
},
"queueEnd": function (queue) {
resolve(failArr);
}
})
// console.log("queue::: ", queue);
this.fileUploadChunkQueue[file.uid] = queue
this.uploadLoading = true
for (let partNumber = 1; partNumber <= chunkNum; partNumber++) {
const exitPart = (exitPartList || []).find(exitPart => exitPart.partNumber == partNumber)
if (exitPart) {
// 分片已上传完成,累计到上传完成的总额中,同时记录一下上次断点上传的大小,用于计算上传速度
lastUploadedSize += new Number(exitPart.size)
updateProcess(exitPart.size)
} else {
queue.push(() => uploadNext(partNumber).then(res => {
// 单片文件上传完成再更新上传进度
updateProcess(res.uploadedSize)
}))
}
}
if (queue.getLength() == 0) {
// 所有分片都上传完,但未合并,直接return出去,进行合并操作
resolve(failArr);
return;
}
queue.start()
})
},
/**
* 获取一个上传任务,没有则初始化一个
*/
async getTaskInfo(file) {
let task;
const identifier = await md5(file);
const { code, data, msg } = await getTaskInfoUpload(identifier);
if (code === 200) {
task = data;
if (!task) {
const initTaskData = {
identifier,
fileName: file.name,
totalSize: file.size,
chunkSize: 10 * 1024 * 1024,
};
const { code, data, msg } = await initTaskUpload(initTaskData);
if (code === 200) {
task = data;
} else {
this.msgError("文件上传错误");
}
}
} else {
this.msgError("文件上传错误");
}
return task;
},
//上传成功
handleSuccess(response, file, fileList) {
if (response.code === 200) {
setTimeout(() => {
this.uploadLoading = false;
}, 800);
let item = response.data;
this.$emit("update:approvalFileList", [...this.fileArr, item]);
if (this.isAllowReturnSuccessEmit) {
this.$emit("uploadBatchSuccess", item);
}
} else {
fileList.pop();
}
this.loading = false;
},
},
};
md5.js
import SparkMD5 from 'spark-md5'
import { Loading } from 'element-ui';
const DEFAULT_SIZE = 20 * 1024 * 1024
/**
 * Compute a file's MD5 incrementally, chunk by chunk, so arbitrarily large
 * files never have to be held in memory at once. Shows a full-screen
 * element-ui loading mask while hashing.
 *
 * @param file      the File (or Blob) to hash
 * @param chunkSize bytes read per FileReader pass (default 20 MB)
 * @returns Promise resolving to the hex MD5 digest; rejects on read error
 */
const md5 = (file, chunkSize = DEFAULT_SIZE) => {
  return new Promise((resolve, reject) => {
    const startMs = new Date().getTime();
    const loading = Loading.service({
      lock: true,
      text: '系统处理中,请稍后!',
      spinner: 'el-icon-loading',
      background: 'rgba(0, 0, 0, 0.7)'
    });
    // Older browsers exposed Blob.slice under a vendor prefix.
    const sliceFn =
      File.prototype.slice ||
      File.prototype.mozSlice ||
      File.prototype.webkitSlice;
    const totalChunks = Math.ceil(file.size / chunkSize);
    const hasher = new SparkMD5.ArrayBuffer(); // incremental MD5 over ArrayBuffers
    const reader = new FileReader();
    let chunkIndex = 0;

    // Kick off an async read of the current chunk; reader.onload continues.
    const readNextChunk = () => {
      console.log('当前part number:', chunkIndex, '总块数:', totalChunks);
      const begin = chunkIndex * chunkSize;
      const finish = Math.min(begin + chunkSize, file.size);
      reader.readAsArrayBuffer(sliceFn.call(file, begin, finish));
    };

    reader.onload = (e) => {
      hasher.append(e.target.result);
      chunkIndex++;
      if (chunkIndex < totalChunks) {
        readNextChunk();
      } else {
        // Finalize the digest (hex string) and tear down the loading mask.
        const digest = hasher.end();
        console.log('文件md5计算结束,总耗时:', (new Date().getTime() - startMs) / 1000, 's')
        loading.close()
        resolve(digest);
      }
    };
    reader.onerror = (e) => {
      loading.close()
      reject(e);
    };

    readNextChunk();
  });
}
export default md5