一、说明
要解决的问题是文件太大:如果直接上传一个很大的文件到服务器,会因请求超时导致文件无法上传。此时可以将文件切割成一个个小文件,循环上传到服务器后再拼接成完整的文件,即可解决这个问题,而且还可以通过并发上传提速。
二、前端
下面代码分为3块,获取初始化ID、循环切割文件并上传、判断上传数量并申请合并文件
<template>
  <div class="components-upload">
    <el-upload
      action
      :auto-upload="false"
      :on-change="onChange"
      :accept="'video/*'"
      :show-file-list="false"
      drag
    >
      <i class="el-icon-plus avatar-uploader-icon"></i>
    </el-upload>
    <el-progress v-if="progressFlag" :percentage="loadProgressCount"></el-progress>
  </div>
</template>
<script>
import md5 from 'js-md5'
import { mapGetters } from 'vuex'

// Give up on a part after this many consecutive failures
// (fix: the old code retried forever on every error and spammed error toasts).
const MAX_CHUNK_RETRIES = 3

export default {
  name: 'upload',
  props: {
    className: {
      type: String
    }
  },
  data () {
    return {
      video: '',            // raw File object picked by el-upload
      fileMD5: '',          // md5 of the whole file, used for instant-upload dedup
      progressFlag: true,   // whether to show the progress bar
      loadProgress: 0,      // number of parts uploaded so far (watcher converts to %)
      loadProgressCount: 0, // percentage shown in the progress bar
      chunkCount: 0,        // total number of parts
      chunkSize: 2 * 1024 * 1024, // 2 MB per part
      uploadId: '',         // OSS multipart-upload session id (fix: no hard-coded default)
      videoName: '',
      retryCount: 0         // consecutive failures for the part currently being uploaded
    }
  },
  methods: {
    /**
     * el-upload change handler: hash the file, initialize the multipart
     * upload on the server, then start uploading parts one by one.
     */
    onChange (event) {
      // Reset component state for the new file.
      Object.assign(this.$data, this.$options.data())
      this.video = event.raw
      if (!this.video) return
      this.videoName = this.video.name
      this.chunkCount = Math.ceil(this.video.size / this.chunkSize)
      const reader = new FileReader()
      // Fix: readAsBinaryString is deprecated, and js-md5 UTF-8-encodes plain
      // strings, corrupting bytes > 0x7F — ArrayBuffer hashes the real bytes.
      reader.readAsArrayBuffer(this.video)
      reader.addEventListener('load', e => {
        // The md5 of the whole file is the key for instant upload: if the
        // server already knows this hash it can return the stored URL directly.
        this.fileMD5 = md5(e.target.result)
        // 1. Initialize the multipart upload and obtain an uploadId.
        this.api({
          url: `${this.urlData.weikeUploadInitiate}?fileName=${this.video.name}`,
          method: 'get'
        }).then(data => {
          if (data.status == 200) {
            this.uploadId = data.data
            // 2. Start uploading parts from the first one.
            this.readChunkMD5(0)
          } else {
            this.$message({
              message: `${data.message}`,
              type: 'error'
            })
          }
        })
      })
    },
    // Compute the byte range of part `currentChunk` and slice it off the file.
    getChunkInfo (currentChunk) {
      const start = currentChunk * this.chunkSize
      const end = Math.min(this.video.size, start + this.chunkSize)
      const chunk = this.video.slice(start, end)
      return { start, end, chunk }
    },
    /**
     * Upload part `num` (0-based). On success continue with the next part;
     * once every part is uploaded, ask the server to merge them.
     */
    readChunkMD5 (num) {
      if (num < this.chunkCount) {
        const { chunk } = this.getChunkInfo(num)
        const fetchForm = new FormData()
        fetchForm.append('chunk', num + 1) // OSS part numbers are 1-based
        fetchForm.append('chunks', this.chunkCount)
        fetchForm.append('file', chunk)
        fetchForm.append('md5', this.fileMD5)
        fetchForm.append('objectName', this.videoName)
        fetchForm.append('uploadId', this.uploadId)
        fetchForm.append('curPartSize', chunk.size)
        this.api({
          url: this.urlData.weikeUpload,
          method: 'post',
          data: fetchForm
        }).then(data => {
          if (data.status == 200) {
            this.retryCount = 0
            this.loadProgress++ // watcher recomputes loadProgressCount
            this.readChunkMD5(num + 1)
          } else {
            this.retryChunk(num, data.message)
          }
        }).catch(() => {
          // Network error / timeout: retry the same part (bounded).
          this.retryChunk(num)
        })
      } else {
        // 3. Every part uploaded: ask the server to merge them into one object.
        this.api({
          url: this.urlData.weikeUploadComplete,
          method: 'post',
          data: {
            uploadId: this.uploadId,
            objectName: this.videoName
          }
        }).catch(() => {})
      }
    },
    /**
     * Retry part `num`, giving up after MAX_CHUNK_RETRIES consecutive
     * failures instead of looping forever like the original code did.
     */
    retryChunk (num, message) {
      if (this.retryCount < MAX_CHUNK_RETRIES) {
        this.retryCount++
        this.readChunkMD5(num)
      } else {
        this.$message({
          message: `${message || '上传失败'}`,
          type: 'error'
        })
      }
    }
  },
  watch: {
    // Convert the uploaded-part counter into a 0-100 percentage.
    loadProgress (newVal, oldVal) {
      if (this.loadProgress == 0) {
        this.loadProgressCount = 0
      } else {
        this.loadProgressCount = Math.floor(this.loadProgress / this.chunkCount * 100)
      }
    }
  },
  computed: {
    ...mapGetters([
      'urlData',
      'host'
    ])
  }
}
</script>
<style>
.components-upload .el-upload, .components-upload .el-upload-dragger {
  width: 100% !important;
}
</style>
<style lang="scss" scoped>
.components-upload {
  width: 100%;
}
</style>
三、Java+OSS+Redis实现切片
Controller接参
/**
 * Initialize an OSS multipart upload session.
 *
 * @param fileName original file name, used to build the OSS object key
 * @return the uploadId that uniquely identifies this upload session; the
 *         client must send it back with every part and the final merge call
 */
@GetMapping("/upload/initiate")
@ResponseBody
// fix: handler methods must be public — private handlers break AOP proxying
public Result initiate(@RequestParam("fileName") String fileName) {
    String uploadId = ossUploadService.initiate(fileName);
    return Result.success("uploadId", uploadId);
}
/**
 * Receive one part of a multipart upload and forward it to OSS.
 *
 * @param file    the part's binary content
 * @param request carries the form fields: chunk (1-based part number),
 *                md5 (whole-file hash — can be used for instant upload when the
 *                hash is already known server-side), objectName, uploadId,
 *                curPartSize (size of this part in bytes)
 * @throws IOException if the uploaded part cannot be read
 */
@RequestMapping("/upload")
@ResponseBody
// fix: handler made public; the input stream is now closed via try-with-resources
public Result upload(@RequestParam("file") MultipartFile file, HttpServletRequest request) throws IOException {
    Integer chunk = Integer.valueOf(request.getParameter("chunk"));
    // "chunks" (total part count) is sent by the client but not needed here:
    // OSS assembles parts by part number when the merge endpoint is called.
    String md5 = request.getParameter("md5");
    String objectName = request.getParameter("objectName");
    String uploadId = request.getParameter("uploadId");
    Integer curPartSize = Integer.valueOf(request.getParameter("curPartSize"));
    try (InputStream in = file.getInputStream()) {
        ossUploadService.upload(objectName, uploadId, in, curPartSize, chunk);
    }
    return Result.success();
}
/**
 * Complete a multipart upload: asks OSS to assemble all previously
 * uploaded parts of {@code uploadId} into the final object.
 *
 * @param request JSON body with "objectName" (file name) and "uploadId"
 */
@PostMapping("/upload/complete")
@ResponseBody
// fix: handler methods must be public — private handlers break AOP proxying
public Result complete(@RequestBody Map<String, Object> request) {
    String objectName = (String) request.get("objectName");
    String uploadId = (String) request.get("uploadId");
    ossUploadService.complete(objectName, uploadId);
    return Result.success();
}
impl实现
import cn.hutool.json.JSONString;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.aliyun.oss.OSSClient;
import com.aliyun.oss.model.*;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.data.redis.core.BoundHashOperations;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.stereotype.Service;
import java.io.FileInputStream;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ThreadPoolExecutor;
@Service
public class OssUploadServiceImpl implements OssUploadService {
@Autowired
private OSSClient ossClient
@Value("${aliyun.oss.bucketName}")
private String bucketName;
private String path = "H5/weike/video/";
@Autowired
private RedisTemplate redisTemplate;
/**
* OSS分片上传初始化
* @param objectName
* @return uploadId
*/
@Override
public String initiate(String objectName){
// 创建InitiateMultipartUploadRequest对象。
System.out.println(ossClient);
InitiateMultipartUploadRequest request = new InitiateMultipartUploadRequest(bucketName,path.concat(objectName));
InitiateMultipartUploadResult upresult = ossClient.initiateMultipartUpload(request);
// 返回uploadId,它是分片上传事件的唯一标识。您可以根据该uploadId发起相关的操作,例如取消分片上传、查询分片上传等。
String uploadId = upresult.getUploadId();
return uploadId;
}
/**
* 上传分片
* @param objectName 文件名
* @param uploadId 初始化ID
* @param inputStream 文件流
* @param curPartSize 当前切片大小
* @param partNum 第几个切片
* @return
*/
@Override
public void upload(String objectName, String uploadId, InputStream inputStream, Integer curPartSize, Integer partNum){
UploadPartRequest uploadPartRequest = new UploadPartRequest();
uploadPartRequest.setBucketName(bucketName);
uploadPartRequest.setKey(path.concat(objectName));
uploadPartRequest.setUploadId(uploadId);
uploadPartRequest.setInputStream(inputStream);
// 设置分片大小。除了最后一个分片没有大小限制,其他的分片最小为100 KB。
uploadPartRequest.setPartSize(curPartSize);
// 设置分片号。每一个上传的分片都有一个分片号,取
//
// 值范围是1~10000,如果超出此范围,OSS将返回InvalidArgument错误码。
uploadPartRequest.setPartNumber(partNum);
// 每个分片不需要按顺序上传,甚至可以在不同客户端上传,OSS会按照分片号排序组成完整的文件。
UploadPartResult uploadPartResult = ossClient.uploadPart(uploadPartRequest);
// 每次上传分片之后,OSS的返回结果包含PartETag。PartETag将被保存在partETags中。
PartETag partETag = uploadPartResult.getPartETag();
String string = JSON.toJSONString(partETag);
redisTemplate.opsForList().leftPush(uploadId, string);
}
/**
* 合并分片
* @param objectName
* @param uploadId
*/
@Override
public void complete(String objectName, String uploadId){
List<PartETag> partETags = new ArrayList<>();
Long size = redisTemplate.opsForList().size(uploadId);
List<String> partETagList = redisTemplate.opsForList().range(uploadId, 0, size);
for (String s : partETagList) {
PartETag partETag = JSON.parseObject(s, PartETag.class);
partETags.add(partETag);
}
// 创建CompleteMultipartUploadRequest对象。
// 在执行完成分片上传操作时,需要提供所有有效的partETags。OSS收到提交的partETags后,会逐一验证每个分片的有效性。当所有的数据分片验证通过后,OSS将把这些分片组合成一个完整的文件。
CompleteMultipartUploadRequest completeMultipartUploadRequest =
new CompleteMultipartUploadRequest(bucketName,path.concat(objectName) , uploadId, partETags);
// 完成上传。
ossClient.completeMultipartUpload(completeMultipartUploadRequest);
}
}
四、Java实现本地分片上传(与前端代码不对应)
原理都是一样,接收切片内容保存到本地,然后判断接收完成后将所有切片整合成一个文件
package com.test.controller;
import org.apache.commons.fileupload.FileItem;
import org.apache.commons.fileupload.FileUploadException;
import org.apache.commons.fileupload.disk.DiskFileItemFactory;
import org.apache.commons.fileupload.servlet.ServletFileUpload;
import org.apache.commons.io.FileUtils;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Controller;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.ResponseBody;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.*;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * Chunked upload endpoint backed by local disk storage.
 *
 * Each request carries one chunk plus form fields (chunk index, total chunk
 * count, file name, whole-file md5). Chunks are written to a temp directory
 * as "{chunk}_{md5}{name}"; when the last chunk arrives they are concatenated
 * into the final file and the chunk files are deleted.
 */
@Controller
public class UploadController {
    @Value("${shopk.upload.charset}")
    private String charset;
    @Value("${shopk.upload.temFile}")
    private String temFile; // directory holding chunk files and merged results

    // How long to wait for a missing chunk file before failing the merge
    // (fix: the original while(!file.exists()) loop could hang forever).
    private static final long CHUNK_WAIT_TIMEOUT_MS = 30_000L;

    @RequestMapping("/upload")
    @ResponseBody
    public Object upload(HttpServletRequest request, HttpServletResponse response) throws Exception {
        response.setCharacterEncoding(charset);
        Integer chunk = null;  // 0-based index of this chunk
        Integer chunks = null; // total number of chunks
        String name = null;
        // The whole-file md5 lets the server dedupe: if this hash is already
        // stored, the upload can be skipped and the existing URL returned.
        String md5 = null;
        String filePath = temFile;
        try {
            DiskFileItemFactory fileItemFactory = new DiskFileItemFactory();
            // In-memory buffer threshold before items spill to disk.
            fileItemFactory.setSizeThreshold(1024);
            fileItemFactory.setRepository(new File(filePath));
            ServletFileUpload servletFileUpload = new ServletFileUpload(fileItemFactory);
            // Single file limit: 5 GB (fix: the old comment claimed 5 MB,
            // but the value has always been 5 * 1024^3 bytes).
            servletFileUpload.setFileSizeMax(5L * 1024L * 1024L * 1024L);
            // Whole request limit: 10 GB.
            servletFileUpload.setSizeMax(10L * 1024L * 1024L * 1024L);
            List<FileItem> items = servletFileUpload.parseRequest(request);
            // First pass: read the plain form fields.
            for (FileItem item : items) {
                if (item.isFormField()) {
                    String field = item.getFieldName();
                    if ("chunk".equals(field)) {
                        chunk = Integer.parseInt(item.getString(charset));
                    } else if ("chunks".equals(field)) {
                        chunks = Integer.parseInt(item.getString(charset));
                    } else if ("name".equals(field)) {
                        name = item.getString(charset);
                    } else if ("md5".equals(field)) {
                        md5 = item.getString(charset);
                    }
                }
            }
            // Second pass: persist the chunk's binary content.
            for (FileItem item : items) {
                if (!item.isFormField() && name != null) {
                    String temFileName = (chunk != null) ? chunk + "_" + md5 + name : name;
                    File chunkFile = new File(filePath, temFileName);
                    // A chunk that already exists is skipped — this is what
                    // makes resumed (断点续传) uploads cheap.
                    if (!chunkFile.exists()) {
                        item.write(chunkFile);
                    }
                }
            }
            // Last chunk received -> merge everything into the final file
            // (fix: also guard against a missing "chunks" field to avoid NPE).
            if (chunk != null && chunks != null && chunk.intValue() == chunks.intValue() - 1) {
                mergeChunks(filePath, name, md5, chunks);
            }
            Map<String, Object> result = new HashMap<>();
            result.put("code", 200);
            result.put("message", "成功");
            return result;
        } catch (FileUploadException | UnsupportedEncodingException e) {
            e.printStackTrace();
            Map<String, Object> result = new HashMap<>();
            result.put("code", 1000);
            result.put("message", "失败");
            return result;
        }
    }

    /**
     * Concatenate all chunk files "{i}_{md5}{name}" (i = 0..chunks-1) into
     * "{name}", deleting each chunk after it is appended. Waits a bounded
     * amount of time for chunks that have not been written yet.
     *
     * @throws IOException if a chunk never shows up within the timeout
     */
    private void mergeChunks(String filePath, String name, String md5, int chunks) throws Exception {
        File target = new File(filePath, name);
        // try-with-resources replaces the original manual finally/close.
        try (BufferedOutputStream os = new BufferedOutputStream(new FileOutputStream(target))) {
            for (int i = 0; i < chunks; i++) {
                File part = new File(filePath, i + "_" + md5 + name);
                long deadline = System.currentTimeMillis() + CHUNK_WAIT_TIMEOUT_MS;
                while (!part.exists()) {
                    if (System.currentTimeMillis() > deadline) {
                        throw new IOException("chunk " + i + " never arrived: " + part);
                    }
                    Thread.sleep(100);
                }
                os.write(FileUtils.readFileToByteArray(part));
                os.flush();
                if (!part.delete()) {
                    part.deleteOnExit();
                }
            }
        }
    }
}
参考文章:http://blog.ncmem.com/wordpress/2023/10/23/%e6%96%87%e4%bb%b6%e5%88%86%e7%89%87%e4%b8%8a%e4%bc%a0%e3%80%81%e6%96%ad%e7%82%b9%e7%bb%ad%e4%bc%a0%e3%80%81%e5%a4%a7%e6%96%87%e4%bb%b6%e7%a7%92%e4%bc%a0/
欢迎入群一起讨论