1普通上传FormData(请求头Content-Type='multipart/form-data')
<template>
  <div>
    <input multiple type="file" @change="changes">
  </div>
</template>
<script>
export default {
  name: 'news',
  data() {
    return {
    }
  },
  methods: {
    // Wrap the selected file in a FormData payload for a
    // multipart/form-data upload.
    // NOTE(review): the input allows `multiple`, but only the first
    // file is uploaded here — loop over `files` if all should be sent.
    changes(e){
      const files = e.target.files;
      // Guard: `change` can fire with an empty list (user cancelled
      // the dialog); the original appended `undefined` in that case.
      if (!files.length) return;
      const formData = new FormData();
      formData.append('file', files[0]);
      formData.append('fileName', files[0].name);
      // POST formData to the backend (Content-Type: multipart/form-data)
    }
  }
}
</script>
2普通上传Base64(请求头Content-Type='application/x-www-form-urlencoded')
<template>
  <div>
    <input multiple type="file" @change="changes">
  </div>
</template>
<script>
export default {
  name: 'news',
  data() {
    return {
    }
  },
  methods: {
    // Read the selected file as a Base64 data URL for an
    // application/x-www-form-urlencoded upload.
    changes(e){
      const files = e.target.files;
      // Guard: `change` can fire with an empty list (user cancelled
      // the dialog); reading `undefined` would throw.
      if (!files.length) return;
      const fileReader = new FileReader();
      // Assign handlers before starting the read.
      fileReader.onload = ev => {
        // ev.target.result is a "data:<mime>;base64,..." string;
        // send it to the backend.
      };
      fileReader.onerror = () => {
        // Surface read failures instead of failing silently.
        console.error('failed to read file', files[0].name);
      };
      fileReader.readAsDataURL(files[0]);
    }
  }
}
</script>
如果想加进度条,可以使用axios
// Upload with progress reporting via axios.
// NOTE(review): fragment, not runnable as-is — assumes it sits inside an
// async function and that `axios` and `data` are in scope; 地址/参数 are
// placeholders for the URL and payload.
data = await axios.post(地址, 参数, {
// Fired repeatedly by the browser while the request body uploads.
onUploadProgress(ev) {
// loaded = bytes sent so far; total = total bytes to send.
let {loaded, total } = ev;
}
});
loaded:已经传了多少
total:一共有多少
3大文件上传
思路:通过把文件切成小份传给后端,后端再组合起来
通过SparkMD5插件获取文件hash,hash代表文件的唯一值,实现断点续传,
当上传文件前都要向后端检查当前文件是否存在就是通过hash做对比
// Read a File into an ArrayBuffer and derive its MD5 content hash.
// Resolves with { buffer, HASH, suffix, filename } where filename is
// "<md5>.<ext>" — the content-addressed name used by the backend to
// detect already-uploaded files (dedupe / resumable upload).
// Rejects if the file cannot be read.
const changeBuffer = file => {
    return new Promise((resolve, reject) => {
        const fileReader = new FileReader();
        fileReader.onload = ev => {
            const buffer = ev.target.result;
            const spark = new SparkMD5.ArrayBuffer();
            spark.append(buffer);
            const HASH = spark.end();
            // Bug fix: exec() returns null for names without an
            // extension; the original threw a TypeError on `[1]`.
            const match = /\.([a-zA-Z0-9]+)$/.exec(file.name);
            const suffix = match ? match[1] : '';
            resolve({
                buffer,
                HASH,
                suffix,
                filename: suffix ? `${HASH}.${suffix}` : HASH
            });
        };
        // Bug fix: the original promise never settled on a read
        // failure, leaving callers hanging forever.
        fileReader.onerror = () => {
            reject(new Error(`failed to read ${file.name}`));
        };
        fileReader.readAsArrayBuffer(file);
    });
};
// Split the file into fixed-size chunks for upload.
// NOTE(review): fragment — `file`, `HASH` and `suffix` must already be
// in scope (e.g. taken from the changeBuffer() result above).
let max = 1024 * 100,// chunk size: 100 KB per slice
count = Math.ceil(file.size / max),
index = 0,
chunks = [];
while (index < count) {
chunks.push({
// Blob.slice clamps the end offset, so the final chunk may be smaller.
file: file.slice(index * max, (index + 1) * max),
// "<hash>_<1-based index>.<ext>" lets the backend reassemble in order.
filename: `${HASH}_${index+1}.${suffix}`
});
index++;
}
先向后端检查文件是否已经有了
// Upload each chunk to the server.
for (const chunk of chunks) {
  // (chunks the server already has can be skipped)
  const fm = new FormData();
  fm.append('file', chunk.file);
  fm.append('filename', chunk.filename);
  // send fm to the backend
}