所用技术栈Vue3+TS+Axios+Arco-design
实现:大文件、多文件分片上传,上传列表信息展示,文件删除,分页等功能,并且能动态控制上传池;当上传池满时等待上传,当有任务完成后再次进入,可以一次性上传多个分片,并发数目前设置为 3。
逻辑代码注释齐全
实现样式如图所示
页面代码
<template>
  <div>
    <!-- Drag-and-drop picker; auto-upload is off so uploads only start on "开始上传". -->
    <a-upload :auto-upload="false" ref="uploadRef" @change="fileChange" :show-file-list="false" multiple draggable>
    </a-upload>
    <!-- Upload list: file name / status / progress / delete action. -->
    <a-table :columns="columns" :data="fileTableData" :scroll="{ y: 400 }">
      <template #status="{ record }">
        <!-- The three states are mutually exclusive, so chain with v-else-if
             instead of three independent v-if checks. -->
        <span v-if="record.status === 'init'">待上传</span>
        <span v-else-if="record.status === 'uploading'">上传中</span>
        <span v-else-if="record.status === 'success'">上传成功</span>
      </template>
      <template #progress="{ record }">
        <!-- Progress is a 0..1 fraction; color ramps from primary to success. -->
        <a-progress :percent="record.progress" :style="{ width: '100%' }" :color="{
          '0%': 'rgb(var(--primary-6))',
          '100%': 'rgb(var(--success-6))',
        }" />
      </template>
      <template #delete="{ record }">
        <!-- Finished files cannot be removed from the list. -->
        <a-button @click="deleteFile(record)" :disabled="record.status === 'success'">删除</a-button>
      </template>
    </a-table>
    <a-button type="primary" @click="submit" :disabled="isdisableUpload">开始上传</a-button>
  </div>
</template>
逻辑代码:相关注解都在代码中
<script setup lang="ts">
import { computed, ref } from "vue";
import SparkMD5 from "spark-md5";
import { Message } from "@arco-design/web-vue";
import axios from "axios";
// Template ref to the <a-upload> component (currently unused beyond registration).
const uploadRef = ref();
// Size of one upload chunk: 1 MiB.
let chunkSize = 1024 * 1024;
// Holds all Blob slices of the file currently being hashed/uploaded.
let chunkRes = ref();
// Name and sampled-MD5 hash of the file currently being processed.
let fileName = ref("");
let fileHash = ref("");
// Index into fileTableData of the file currently being uploaded;
// files are uploaded one after another in list order.
let fileNum = 0;
// Rows shown in the table; each entry comes from a-upload's change event
// (presumably { uid, name, status, progress, file } — see fileChange).
const fileTableData = ref<any>([]);
// Table column definitions; status / progress / delete render through named slots.
const columns = [
  { title: "文件名", dataIndex: "name" },
  { title: "上传状态", dataIndex: "status", slotName: "status" },
  { title: "进度", dataIndex: "progress", slotName: "progress" },
  { title: "删除", dataIndex: "delete", slotName: "delete" },
];
// Remove a file row from the table.
// NOTE(review): rows before the current upload index shift down after splice,
// but the module-level fileNum is not adjusted — deleting a row above the
// uploading one makes fileNum point at the wrong entry. Also the in-flight
// axios requests for a deleted "uploading" row are not cancelled; they keep
// writing progress into whatever row now sits at fileNum. Verify before reuse.
const deleteFile = (record: any) => {
const index = fileTableData.value.findIndex((item: any) => item.uid === record.uid);
// If the deleted file was the one currently uploading, and it was not the
// last entry in the list, immediately start uploading the next file.
if (fileTableData.value[index].status === "uploading") {
fileTableData.value.splice(index, 1);
if (fileTableData.value.length !== index) {
submit();
}
} else {
fileTableData.value.splice(index, 1);
}
};
// a-upload change handler: mirror the component's file list into the table.
const fileChange = async (fileList: any) => {
  fileTableData.value = fileList;
};
// Prepare the file at index fileNum for upload: record its name,
// slice it into chunks, and compute its sampled MD5 fingerprint.
const startFileChunkHash = async () => {
  const currentFile = fileTableData.value[fileNum].file;
  if (!currentFile) return;
  fileName.value = currentFile.name;
  // Slice into chunkSize pieces and keep them for the upload phase.
  chunkRes.value = createFileChunk(currentFile);
  // Hash the chunks; the result identifies the file for instant upload.
  fileHash.value = (await getUniqueHash(chunkRes.value)) as string;
};
// 执行文件分片操作
const createFileChunk = (file: File) => {
const chunkList = [];
let cur = 0;
while (cur < file.size) {
chunkList.push(file.slice(cur, cur + chunkSize));
cur += chunkSize;
}
return chunkList;
};
// 使用hash识别文件,实现秒传的功能
const getUniqueHash = (fileChunks: any) => {
return new Promise((resolve) => {
const spark = new SparkMD5.ArrayBuffer();
const fileReader = new FileReader();
const target: Blob[] = [];
fileChunks.forEach((item: Blob, index: number) => {
if (index === 0 || index === fileChunks.length - 1) {
target.push(item);
} else {
// 当前切片的前面2字节
target.push(item.slice(0, 2));
// 中间的两个字节
target.push(item.slice(chunkSize / 2, chunkSize / 2 + 2));
// 后面的两个字节
target.push(item.slice(chunkSize - 2, chunkSize));
}
});
fileReader.readAsArrayBuffer(new Blob(target)); //将target中的Blob对象转化为ArrayBuffer
fileReader.onload = (e: any) => {
//onload方法是异步的
spark.append(e.target?.result); //SparkMD5 库的 append 方法将读取的 ArrayBuffer 添加到 MD5 计算中
resolve(spark.end());
};
});
};
// 上传分片
const uploadChunk = async (chunkRes: any, existsChunks: string[]) => {
// 处理数据
const data = chunkRes.map((item: Blob, index: number) => {
return {
fileHash: fileHash.value,
chunkHash: `${fileHash.value}-${index}`,
chunk: item,
size: item.size,
};
});
// 添加为formData对象
const formDatas = data.filter((item: any) => !existsChunks.includes(item.chunkHash))
.map((item: any) => {
const formData = new FormData();
formData.append("chunk", item.chunk);
formData.append("chunkHash", item.chunkHash);
formData.append("fileName", fileName.value);
formData.append("fileHash", fileHash.value);
return formData;
});
fileTableData.value[fileNum].status = "uploading";
// 控制它的最大请求书
let max = 3;
const taskPool: any = []; //文件控制池
let index = 0;
while (index < formDatas.length) {
const task = axios.post("http://localhost:3000/upload", formDatas[index]);
taskPool.push(task);
// 当任务执行完成之后,删除taskPool内已完成的数据
task.then(() => {
taskPool.splice(taskPool.findIndex((item: any) => item === task));
fileTableData.value[fileNum].progress = Number((index / formDatas.length).toFixed(2));
});
// 当数组中的数据为3的时候,循环稍等,等其中一个完成再继续,也就是说请求池满了之后,需要等待
if (taskPool.length === max) {
// Promise.race() 方法会等待其中任意一个 Promise 对象状态发生变化,
// 一旦有一个 Promise 对象状态变为 resolved(已完成)或 rejected(已拒绝),
// Promise.race() 就会立即返回,不再等待其他 Promise 对象的状态变化。
await Promise.race(taskPool);
}
index++;
}
//循环结束后,使用 Promise.all() 方法等待所有的上传任务完成。Promise.all就会等待所有的文件全部完成
await Promise.all(taskPool);
// 所有的文件都上传完毕了,实现合并请求
mergeFile();
};
// Ask the server to stitch the uploaded chunks back into one file,
// then report success and move on to the next queued file.
const mergeFile = async () => {
  const payload = {
    fileName: fileName.value,
    fileHash: fileHash.value,
    size: chunkSize,
  };
  const res = await axios.post("http://localhost:3000/merge", payload);
  if (res.status !== 200) return;
  Message.success({
    content: "文件上传成功",
  });
  getResultAndNextUpload();
};
// Mark the current file as finished and start the next one, if any.
const getResultAndNextUpload = () => {
  const finished = fileTableData.value[fileNum];
  finished.progress = 1;
  finished.status = "success";
  // Advance to the next file in the table and continue the pipeline.
  fileNum++;
  if (fileNum < fileTableData.value.length) submit();
};
// 秒传
const secondPass = async () => {
const res = await axios.post("http://127.0.0.1:3000/verify", {
fileName: fileName.value,
fileHash: fileHash.value,
});
return res.data;
};
// Entry point for "开始上传": hash the current file, ask the server what it
// already has, then either finish instantly or upload the missing chunks.
const submit = async () => {
  if (fileTableData.value.length === 0) {
    return;
  }
  await startFileChunkHash();
  try {
    // NOTE(review): secondPass already returns res.data, so this expects the
    // server body to be { data: { shouldUpload, existsChunks } } — confirm
    // against the backend contract.
    const { data } = await secondPass();
    if (!data.shouldUpload) {
      // Server already has the complete file: instant upload.
      Message.success({
        content: "文件秒传成功",
      });
      getResultAndNextUpload();
      return;
    }
    // Await so upload failures are caught below; the original fired and
    // forgot, turning any upload error into an unhandled promise rejection.
    await uploadChunk(chunkRes.value, data.existsChunks);
  } catch (error) {
    console.log(error);
  }
};
// 是否禁用提交按钮
const isdisableUpload = computed(() => {
return fileTableData.value.every((item) => item.status === "success");
});
</script>