首先要安装下列依赖包:
//发送请求
npm install axios
//计算哈希值
npm install spark-md5
//文件上传包
npm install multiparty
//文件操作
npm install fs-extra
vue3前端
<template>
<input @change="shangchuan($event)" type="file"><hr>
<video :src="filePath" style="width: 300px;height: 200px;" controls></video>
</template>
<script setup>
//1.选择文件,进行切片
//2.计算哈希值
//3.上传分片
//4.上传完毕之后,通知服务器合并文件
//5.秒传:发送请求校验哈希值
//6.断点续传:过滤掉已有的切片
import axios from "axios"
import SparkMD5 from "spark-md5"
import {ref} from "vue"
let filePath = ref("") // final URL the backend returns for the uploaded file
const SIZE = 1024*1024 // chunk size: each slice is 1 MB
let filehash = ref("") // sampled MD5 hash of the whole file
let filename = ref("") // original name of the selected file
let cur = 0 // byte offset where the next slice starts
let chunks = [] // Blob slices of the currently selected file
//文件分片函数
// Split the selected file into SIZE-byte Blob slices, filling the
// module-level `chunks` array (last slice may be shorter than SIZE).
// `event` is the change event of the <input type="file"> element.
function fileSlice(event){
    // Bug fix: reset the module-level state first — otherwise selecting
    // a second file appended its slices after the previous file's.
    cur = 0
    chunks = []
    const file = event.target.files[0]
    while (cur < file.size) {
        // Blob.slice clamps the end index, so no out-of-range worries.
        chunks.push(file.slice(cur, cur + SIZE))
        cur += SIZE
    }
}
//哈希值计算
// Compute a sampled MD5 of the file from the global `chunks` array:
// the first and last chunk contribute all their bytes; every middle
// chunk contributes only 2 bytes from its head, middle and tail. This
// keeps hashing large files fast while staying content-sensitive.
// Resolves with the hex digest string; rejects if the read fails.
function calhash(){
    return new Promise((resolve, reject) => {
        const targets = []
        const spark = new SparkMD5.ArrayBuffer()
        const fileReader = new FileReader()
        chunks.forEach((ele, index) => {
            if (index == 0 || index == chunks.length - 1) {
                // first / last chunk: use every byte
                targets.push(ele)
            } else {
                // middle chunk: sample 2 bytes at head, middle and tail
                targets.push(ele.slice(0, 2))
                targets.push(ele.slice(SIZE / 2, SIZE / 2 + 2))
                targets.push(ele.slice(SIZE - 2, SIZE))
            }
        })
        // Register handlers before starting the read.
        fileReader.onload = (e) => {
            spark.append(e.target.result)
            resolve(spark.end())
        }
        // Bug fix: without an onerror handler a failed read left this
        // promise pending forever, silently hanging the caller's await.
        fileReader.onerror = () => reject(fileReader.error)
        fileReader.readAsArrayBuffer(new Blob(targets))
    })
}
//秒传,校验哈希值
// Ask the server whether a file with this hash already exists (instant
// upload) and which chunks it already holds (resumable upload).
// Returns the axios response promise; callers read `res.data`.
// (The original chained a no-op `.then(res => res)` — removed.)
function verify(){
    return axios.post("http://localhost:3000/verify", {
        fileHash: filehash.value,
        fileName: filename.value,
    })
}
//上传分片
// Upload every chunk the server does not yet have, at most `max`
// requests in flight at once, then ask the server to merge them and
// record the resulting public file URL in `filePath`.
// `existChunks`: chunk names ("<fileHash>-<index>") already on the server.
async function uploadChunks(existChunks){
    // Pair each Blob slice with its per-chunk name "<fileHash>-<index>".
    const data = chunks.map((chunk, index) => ({
        fileHash: filehash.value,
        chunkHash: filehash.value + "-" + index,
        chunk
    }))
    // Skip already-uploaded chunks (resumable upload) and wrap the rest
    // in FormData for the multipart endpoint.
    const formDatas = data
        .filter(item => !existChunks.includes(item.chunkHash))
        .map(item => {
            const formData = new FormData()
            formData.append("fileHash", item.fileHash)
            formData.append("chunkHash", item.chunkHash)
            formData.append("chunk", item.chunk)
            return formData
        })
    const max = 6      // maximum number of concurrent requests
    const taskPool = [] // currently in-flight requests
    for (const formData of formDatas) {
        // Bug fix: the original called splice(findIndex(...)) BEFORE
        // pushing the task — findIndex returned -1 and splice(-1)
        // deleted the last pending task; it also awaited Promise.all
        // inside the loop, serializing the uploads. Here each task
        // removes itself from the pool when it completes, and we only
        // race for a free slot when the pool is full.
        const task = axios.post("http://localhost:3000/imgUpload", formData).then((res) => {
            taskPool.splice(taskPool.indexOf(task), 1)
            return res
        })
        taskPool.push(task)
        if (taskPool.length >= max) {
            await Promise.race(taskPool)
        }
    }
    // Drain the remaining in-flight uploads.
    await Promise.all(taskPool)
    // All chunks are on the server: request the merge and record the
    // public URL it returns (awaited so errors propagate to the caller
    // instead of being dropped as a floating promise).
    const res = await axios.post("http://localhost:3000/merge", {
        fileHash: filehash.value,
        fileName: filename.value,
        size: SIZE
    })
    console.log(res.data);
    filePath.value = res.data.filePath
    console.log(filePath.value);
}
// Change handler of the <input type="file">: slice the file, hash it,
// ask the server what already exists, then upload what is missing.
async function shangchuan(event){
    const file = event.target.files[0]
    // Bug fix: the user can close the picker without choosing a file,
    // in which case files[0] is undefined — bail out early.
    if (!file) return
    filename.value = file.name
    // 1. split the file into SIZE-byte chunks (fills the global `chunks`)
    fileSlice(event)
    // 2. compute the sampled MD5 hash of the file
    filehash.value = await calhash()
    // 3. ask the server whether the file / which chunks already exist
    const veri = await verify()
    console.log(veri.data);
    if (veri.data.code == 201) {
        // 201: the whole file is already on the server ("instant upload")
        filePath.value = veri.data.filePath
        console.log(filePath.value);
        return
    }
    if (veri.data.code == 200) {
        // 200: file incomplete — upload only the chunks the server lacks.
        // Bug fix: awaited so failures are not silently dropped as a
        // floating promise.
        await uploadChunks(veri.data.existChunks)
    }
}
</script>
<style>
</style>
node.js后端
var express = require('express');
var router = express.Router();
var multiparty = require("multiparty")
var path = require('path');
var fse = require("fs-extra");
const UPLOAD_DIR = path.resolve(__dirname,"upload")
/* GET home page. */
// Render the default Express index view for the site root.
router.get('/', (req, res) => {
  res.render('index', { title: 'Express' });
});
//提取文件后缀名
// Return the file extension of `filename`, including the dot:
// "movie.mp4" -> ".mp4", "a.tar.gz" -> ".gz".
// Bug fix: a name without any dot used to return its LAST CHARACTER
// (lastIndexOf -> -1, then slice(-1)); it now returns "".
const extractExt = filename => {
  const dot = filename.lastIndexOf(".")
  return dot === -1 ? "" : filename.slice(dot)
}
//上传分片请求
router.post("/imgUpload",(req,res)=>{
var form = new multiparty.Form()
form.parse(req,async function(err,e,imgData){
if(err){
res.send({
code:401,
msg:"上传失败,请重新上传"
})
return
}
let fileHash = e["fileHash"][0]
let chunkHash = e["chunkHash"][0]
//存放临时目录
const chunkPath = path.resolve(UPLOAD_DIR,fileHash)
if(!fse.existsSync(chunkPath)){
await fse.mkdir(chunkPath)
}
//将切片放到这个文件夹里边
let oldPath = imgData["chunk"][0]["path"]
await fse.move(oldPath,path.resolve(chunkPath,chunkHash))
res.status(200).json({
msg:"上传成功"
})
})
})
//合并分片请求
router.post("/merge",async(req,res)=>{
let {fileHash,fileName,size} = req.body
//完整的文件路径
let filePath = path.resolve(UPLOAD_DIR,fileHash + extractExt(fileName))
//如果已经存在,就没必要合并了
if(fse.existsSync(filePath)){
res.status(200).json({
msg:"合并成功"
})
return
}
//哈希值路径不存在
const chunkDir = path.resolve(UPLOAD_DIR,fileHash)
if(!fse.existsSync(chunkDir)){
res.status(401).json({
msg:"合并失败,请重新上传"
})
return
}
//合并操作
//获取每个切片的路径
let chunkPaths = await fse.readdir(chunkDir)
//切片排序
chunkPaths.sort((a,b)=>{
return a.split("-")[1] - b.split("-")[1]
})
var list = chunkPaths.map((chunkName,index)=>{
return new Promise(resolve=>{
//拼接成一条完整路径
let chunkPath = path.resolve(chunkDir,chunkName)
//读取拼接好的路径
let readStream = fse.createReadStream(chunkPath)
//将拼接好的路径写入到filepath中
let writeStream = fse.createWriteStream(filePath,{
//写入的起始位置
start:index * size,
end:(index + 1) * size
})
//合并完成之后删除拼接成的路径
readStream.on('end', async () => {
await fse.unlink(chunkPath)
resolve()
})
//将可读流的数据写入可写流
readStream.pipe(writeStream)
})
})
//等到所有的promise完成之后删除临时切片文件
await Promise.all(list)
await fse.remove(chunkDir);
res.send({
code:200,
msg:"合并成功",
filePath:"http://localhost:3000/routes/upload/" + fileHash + extractExt(fileName)
})
})
//校验哈希值请求
// Report whether a file with this hash already exists on the server.
// 201 => complete file exists ("instant upload", no upload needed);
// 200 => not merged yet; `existChunks` lists chunk names already
// received, so the frontend can skip re-uploading them.
router.post('/verify', async (req, res) => {
  const { fileHash, fileName } = req.body
  // Path the merged file would live at: upload/<hash><ext>
  const filePath = path.resolve(UPLOAD_DIR, fileHash + extractExt(fileName))
  if (!fse.existsSync(filePath)) {
    // Not merged yet: collect any chunks we already hold so the client
    // can resume instead of starting over.
    const chunkDir = path.join(UPLOAD_DIR, fileHash)
    const existChunks = fse.existsSync(chunkDir)
      ? await fse.readdir(chunkDir)
      : []
    res.send({
      code: 200,
      msg: "文件不存在,上传中",
      existChunks
    })
    return
  }
  // The complete file is already on disk — nothing to upload.
  res.send({
    code: 201,
    msg: "文件存在",
    filePath: "http://localhost:3000/routes/upload/" + fileHash + extractExt(fileName)
  })
})
module.exports = router;
最后别忘了在app.js中注册这一行:
将指定路径下的文件夹('routes','upload')设置为静态文件目录,并将其映射到
/routes/upload
路径上,以允许客户端能访问到资源。
app.use("/routes/upload",express.static(path.join(__dirname,'routes','upload')))
这样设置,是因为 upload 文件夹位于 routes 目录之下(即项目中的 routes/upload 路径)。
ok啦!!!