大致处理流程:
1,前端把文件分割成小份(大小自定),每次请求只发送一份,减轻整个传输过程的压力;前端在发送完所有请求后,需要启动一个定时请求轮询后台,判断后台是否已经合并完文件,以便给用户提示上传成功。
2,后台接收完所有的分片后,再将所有分片按序合并写入同一个文件,至此就得到一个完整的大文件。
前端分片发请求:
/**
 * Splits the selected file into 10 MB shards and uploads each shard in its own
 * AJAX request. Once ALL shard requests have been acknowledged by the server,
 * starts checkFile() to poll the backend for merge completion.
 *
 * Fixes vs. previous version:
 *  - checkFile() used to fire when the shard with the LAST INDEX responded;
 *    responses arrive out of order, so polling could start while earlier
 *    shards were still in flight. Now a finished-counter gates it.
 *  - A failed shard request used to be silently ignored, leaving the loading
 *    overlay spinning forever; an error handler now closes it and notifies.
 */
function uploadFile() {
    try {
        let file = $('#file')[0].files[0];
        // Total size of the selected file (bytes).
        let fileTotalSize = file.size;
        if (fileTotalSize > (500 * 1024 * 1024)) {
            layer.msg('文件大小不能超过500M!');
            return;
        }
        // Loading overlay; closed by checkFile() when the backend merge finishes
        // (or by the error handler below on a failed shard).
        let loadIndex = layer.load(1, { shade: [0.1, '#fff'] });
        let fileName = file.name;
        // Shard size: 10 MB per request.
        let shardSize = 10 * 1024 * 1024;
        // Total number of shards.
        let fileShardCount = Math.ceil(fileTotalSize / shardSize);
        // Unique id tying all shards of this upload together on the server.
        let fileUuid = uuid();
        // Number of shard requests that have completed successfully; checkFile()
        // must only start after every shard has reached the server.
        let finishedCount = 0;
        for (let num = 0; num < fileShardCount; num++) {
            // Explicit end offset for the last (possibly short) shard.
            let shardFile = file.slice(num * shardSize, num != fileShardCount - 1 ? (num + 1) * shardSize : fileTotalSize);
            let formData = new FormData();
            formData.append('fileName', fileName);           // original file name
            formData.append('fileShardCount', fileShardCount); // total shard count
            formData.append('fileUuid', fileUuid);           // upload id
            formData.append('shardFile', shardFile);         // shard payload
            formData.append('fileTotalSize', fileTotalSize); // total file size
            formData.append('shardSize', shardSize);         // configured shard size
            formData.append('fileShardSize', shardFile.size); // this shard's size
            formData.append('fileShardNum', num);            // shard index
            formData.append('fileType', $('#fileType').val());
            jQuery.ajax({
                type: 'post',
                url: rootUrl + "/fileInfo/uploadBigFile", // upload endpoint
                data: formData,
                contentType: false, // let the browser set the multipart boundary
                processData: false, // send FormData as-is
                success: function (data) {
                    console.log("分片" + num + "已发送,大小" + shardFile.size);
                    // Only when EVERY shard has been acknowledged do we start
                    // polling the backend for merge completion.
                    if (++finishedCount == fileShardCount) {
                        checkFile(fileUuid, loadIndex);
                    }
                },
                error: function () {
                    // A lost shard means the merge can never complete; stop the
                    // spinner and surface the failure instead of hanging.
                    layer.close(loadIndex);
                    Feng.error("上传失败!请联系开发人员!");
                }
            });
        }
    } catch (e) {
        console.log(e);
        Feng.error("上传失败!请联系开发人员!");
    }
};
后台合并分片:
/**
 * Receives one shard of a chunked upload, persists it to disk as
 * {@code <uuid><shardNum>.tmp}, and atomically accumulates the uploaded byte
 * count in redis. When the accumulated size equals the declared total, one
 * async merge task is dispatched per shard.
 *
 * Fix: a failed shard write previously only printed the stack trace and then
 * STILL counted the shard towards the uploaded total, so the merge would run
 * against a missing/partial temp file. Now the method logs and aborts, leaving
 * the counter untouched (the client can retry / report the failure).
 */
public void uploadBigFile(MultipartFile shardFile, String fileName, String fileUuid,
Long fileTotalSize, Long shardSize, Long fileShardSize, Integer fileShardNum, Integer fileShardCount) {
    // Persist this shard to disk before counting it as uploaded.
    String tmpFilePath = root + uploadBigFilePath + fileUuid + fileShardNum + ".tmp";
    try {
        FileUtil.writeFile(tmpFilePath, shardFile.getBytes());
    } catch (IOException e) {
        // Abort: an unwritten shard must NOT be counted, otherwise the merge
        // below would be triggered with data missing on disk.
        logger.error("大文件上传,分片写入失败,文件名:{},MD5:{},序号:{}", fileName, fileUuid, fileShardNum, e);
        return;
    }
    // Accumulated uploaded size across all shards (redis hincr is atomic, so
    // concurrent shard requests cannot double-trigger the merge).
    long uploadedFileSize = (long) redisUtil.hincr("uploadBigFile", fileUuid, fileShardSize);
    // All bytes received -> dispatch one async merge task per shard.
    if (uploadedFileSize == fileTotalSize) {
        for (int i = 0; i < fileShardCount; i++) {
            // NOTE(review): userName is not a parameter here — presumably an
            // instance field or request-scoped value; verify its source.
            asyncMergeFile(fileUuid, shardSize, i, userName, fileName, fileTotalSize);
        }
    }
}
// Merges ONE shard into the final file using NIO memory-mapped I/O.
// Runs on the "taskExecutor" async pool; one task is dispatched per shard, and
// each task maps the disjoint region starting at num*shardSize of the target
// file, so concurrent tasks write non-overlapping ranges of the same file.
// NOTE(review): relies on the non-public sun.nio.ch.DirectBuffer/Cleaner API to
// unmap the buffers eagerly — verify it is accessible on the deployment JDK.
@Async("taskExecutor")
public void asyncMergeFile(String fileUuid, Long shardSize,
int num,String userName,String fileName,long fileTotalSize) {
String extendName = FileUtil.getExtend(fileName);
// Date-based subdirectory, e.g. "20240101" + separator.
String directoryPath = new DateTime().toString("yyyyMMdd") + File.separator;
// Final merged file path: <root><uploadBigFilePath><yyyyMMdd>/<uuid>.<ext>
String targetFilePath = root + uploadBigFilePath + directoryPath + fileUuid+ "." + extendName;
File filedectoryPath = new File(root + uploadBigFilePath + directoryPath);
// Ensure the date directory exists (mkdirs is safe under concurrent tasks).
if (!filedectoryPath.exists()) {
filedectoryPath.mkdirs();
}
File targetFile = new File(targetFilePath);
RandomAccessFile iraf = null;
FileChannel inChannel = null;
RandomAccessFile oraf = null;
FileChannel outChannel = null;
try {
oraf = new RandomAccessFile(targetFile, "rw");
outChannel = oraf.getChannel();
// Temp file written by uploadBigFile for this shard index.
String shardFilePath = root + uploadBigFilePath + fileUuid + num + ".tmp";
iraf = new RandomAccessFile(shardFilePath, "r");
inChannel = iraf.getChannel();
// Map the whole shard read-only, and the matching region of the target
// file read-write at offset num*shardSize.
MappedByteBuffer inMappedBuf = inChannel.map(MapMode.READ_ONLY, 0, inChannel.size());
MappedByteBuffer outMappedBuf = null;
outMappedBuf = outChannel.map(MapMode.READ_WRITE, num * shardSize, inChannel.size());
// Copy the shard bytes into the mapped region of the target file.
byte[] dst = new byte[(int) inChannel.size()];
inMappedBuf.get(dst);
outMappedBuf.put(dst);
// Eagerly unmap both buffers via the internal Cleaner; otherwise the
// mapping is only released at GC time and the .tmp file may not be
// deletable below (notably on Windows).
Cleaner inMLocalCleaner = ((DirectBuffer) inMappedBuf).cleaner();
if (inMLocalCleaner != null) {
inMLocalCleaner.clean();
}
Cleaner outMLocalCleaner = ((DirectBuffer) outMappedBuf).cleaner();
if (outMLocalCleaner != null) {
outMLocalCleaner.clean();
}
logger.info("大文件上传,合并分片{}完成,文件名:{},MD5:{}", num, fileName, fileUuid);
// Delete this shard's merged temp file.
new File(shardFilePath).delete();
// Remaining un-merged byte count for this upload; the same redis hash
// incremented during upload is decremented here per merged shard.
long surplusFileSize = (long) redisUtil.hdecr("uploadBigFile", fileUuid, inChannel.size());
if(surplusFileSize==0){
// Persist the file record to the database here — the frontend's
// checkFile() polls that record to detect completion.
// NOTE(review): no DB call is visible in this block; presumably
// elided from the listing — verify against the real source.
logger.info("大文件上传,全部分片合并完成,已存库,MD5:{},序号:{}", fileUuid, num);
redisUtil.hdel("uploadBigFile", fileUuid);
}
} catch (Exception e) {
// NOTE(review): on failure the redis counter and temp file are left in
// place, so the upload never reports complete; no retry is attempted.
logger.error("大文件上传,合并分片失败,文件名:{},MD5:{},序号:{}", fileName, fileUuid, num);
e.printStackTrace();
} finally {
// Close channels and files in reverse-open order, logging each failure
// independently so one close error does not skip the rest.
if (inChannel != null) {
try {
inChannel.close();
} catch (IOException e) {
e.printStackTrace();
}
}
if (iraf != null) {
try {
iraf.close();
} catch (IOException e) {
e.printStackTrace();
}
}
if (outChannel != null) {
try {
outChannel.close();
} catch (IOException e) {
e.printStackTrace();
}
}
if (oraf != null) {
try {
oraf.close();
} catch (IOException e) {
e.printStackTrace();
}
}
}
}