断点续传
1. 数据库表设计
-- pfqv_v2_test_prod0603.file_chunk definition
CREATE TABLE `file_chunk` (
  `id` bigint NOT NULL COMMENT '主键ID',
  `chunk_number` int NOT NULL COMMENT '文件块编号',
  `chunk_size` bigint NOT NULL COMMENT '分块大小',
  `current_chunk_size` bigint NOT NULL COMMENT '当前分块大小',
  `filename` varchar(255) NOT NULL COMMENT '文件名',
  `identifier` varchar(255) NOT NULL COMMENT '文件标识,MD5',
  `relative_path` varchar(255) NOT NULL COMMENT '相对路径',
  `total_chunks` int NOT NULL COMMENT '总块数',
  `total_size` bigint NOT NULL COMMENT '总大小',
  `create_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
  `create_by` bigint DEFAULT NULL COMMENT '创建人',
  `update_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
  `update_by` bigint DEFAULT NULL COMMENT '更新人',
  `is_delete` char(1) DEFAULT '0' COMMENT '删除标志(0代表存在 1代表删除)',
  PRIMARY KEY (`id`),
  -- Chunks are looked up (status check) and deleted (post-merge cleanup)
  -- by identifier (the file's MD5); without this index every such query
  -- scans the whole table.
  KEY `file_chunk_identifier_idx` (`identifier`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci COMMENT='文件分片记录表';
-- pfqv_v2_test_prod0603.file_list definition
CREATE TABLE `file_list` (
  `id` bigint NOT NULL COMMENT '主键ID',
  `filename` varchar(200) DEFAULT NULL COMMENT '文件名',
  `identifier` varchar(50) DEFAULT NULL COMMENT '唯一标识,MD5',
  `url` varchar(200) DEFAULT NULL COMMENT '链接',
  `location` varchar(200) DEFAULT NULL COMMENT '本地地址',
  `total_size` bigint DEFAULT NULL COMMENT '文件总大小',
  `create_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
  `create_by` bigint DEFAULT NULL COMMENT '创建人',
  `update_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
  `update_by` bigint DEFAULT NULL COMMENT '更新人',
  `is_delete` char(1) DEFAULT '0' COMMENT '删除标志(0代表存在 1代表删除)',
  PRIMARY KEY (`id`),
  UNIQUE KEY `FILE_UNIQUE_KEY` (`filename`,`identifier`),
  -- The upload-status check and URL lookup filter by identifier alone;
  -- identifier is not a leftmost prefix of FILE_UNIQUE_KEY, so those
  -- queries need their own index.
  KEY `file_list_identifier_idx` (`identifier`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci ROW_FORMAT=DYNAMIC COMMENT='文件列表 - 用于保存断点续传上传成功后的文件';
2. 实体类
domain
package com.ruoyi.codecleanliness5s.domain;
import lombok.Data;
import org.apache.commons.lang3.builder.ToStringBuilder;
import org.apache.commons.lang3.builder.ToStringStyle;
import com.ruoyi.common.core.annotation.Excel;
import com.ruoyi.common.core.web.domain.BaseEntity;
import org.springframework.web.multipart.MultipartFile;
/**
 * File chunk record entity, mapped to table file_chunk.
 * Each row describes one uploaded chunk of a resumable upload.
 *
 * @author ruoyi
 * @date 2024-09-13
 */
@Data
public class FileChunk extends BaseEntity
{
private static final long serialVersionUID = 1L;
/** Primary key ID */
private Long id;
/** Chunk number: 1-based index of this chunk within the file */
@Excel(name = "文件块编号")
private Long chunkNumber;
/** Nominal chunk size in bytes (the last chunk may be larger) */
@Excel(name = "分块大小")
private Long chunkSize;
/** Actual size of this particular chunk in bytes */
@Excel(name = "当前分块大小")
private Long currentChunkSize;
/** File name */
@Excel(name = "文件名")
private String filename;
/** Unique file identifier: the file's MD5 hash */
@Excel(name = "文件标识,MD5")
private String identifier;
/** Relative path of the file when a whole folder is uploaded */
@Excel(name = "相对路径")
private String relativePath;
/** Total number of chunks the file was split into */
@Excel(name = "总块数")
private Long totalChunks;
/** Total file size in bytes */
@Excel(name = "总大小")
private Long totalSize;
/**
 * Binary payload of this chunk (transport only — the file_chunk table has
 * no corresponding column, so this field is never persisted).
 */
private MultipartFile file;
}
package com.ruoyi.codecleanliness5s.domain.response;
import lombok.Data;
import java.util.ArrayList;
import java.util.List;
/**
 * Response body for the resumable-upload status check.
 */
@Data
public class CheckChunkResponse {
/** true when the whole file already exists server-side, so the client can skip uploading ("instant upload") */
private boolean skipUpload = false;
private String url;
/** Chunk numbers already stored; the client avoids re-sending these */
private List<Long> uploaded = new ArrayList<>();
private boolean needMerge = true;
// Currently unused: the front end could use this flag to reject responses
// that return HTTP 200 but are otherwise invalid (the related front-end
// code is commented out).
private boolean result = true;
}
3. 控制层
/**
 * Check the upload status of a file for resumable upload.
 *
 * @param chunk    carries the file's MD5 identifier
 * @param response servlet response (status may be set by the service)
 * @return upload status: whether to skip, and which chunks already exist
 */
@GetMapping("/breakPointUpload")
public CheckChunkResponse breakPointStatus(@ModelAttribute FileChunk chunk, HttpServletResponse response)
{
    // The service looks the file up by its MD5 identifier and reports
    // whether it (or any of its chunks) is already stored.
    return fsUploadFileService.breakPointStatus(chunk, response);
}
/**
 * Upload a single file chunk.
 *
 * @param chunk    chunk payload and metadata
 * @param response servlet response (the service sets error status codes)
 * @return success body containing needMerge/result flags, or an error result
 */
@PostMapping("/breakPointUpload")
public AjaxResult postFileUpload(@ModelAttribute FileChunk chunk, HttpServletResponse response)
{
    int affected = fsUploadFileService.postFileUpload(chunk, response);
    // Guard clause: a non-positive row count means the chunk was not stored.
    if (affected <= 0) {
        return toAjax(affected);
    }
    Map<String, Object> body = new HashMap<>();
    body.put("needMerge", true);
    body.put("result", true);
    return AjaxResult.success(body);
}
/**
 * Merge the previously uploaded chunks of a file into the final file.
 *
 * @param fileInfo identifies the file to merge (filename + MD5 identifier)
 * @return result containing the public URL of the merged file
 */
@PostMapping("/merge")
public AjaxResult merge(@RequestBody FileList fileInfo) {
    AjaxResult mergeResult = fsUploadFileService.mergeFile(fileInfo);
    return mergeResult;
}
4. 应用层
/**
 * Check the resumable-upload status of a file.
 *
 * @param chunk    chunk descriptor carrying the file's MD5 identifier
 * @param response servlet response; set to 201 (Created) when the file
 *                 already exists and the upload can be skipped entirely
 * @return status telling the client whether to skip the upload, and which
 *         chunk numbers are already stored
 */
@Override
public CheckChunkResponse breakPointStatus(FileChunk chunk, HttpServletResponse response) {
    CheckChunkResponse status = new CheckChunkResponse();

    // If the file is already recorded in file_list, tell the client to
    // skip the upload completely ("instant upload").
    FileList fileProbe = new FileList();
    fileProbe.setIdentifier(chunk.getIdentifier());
    List<FileList> existingFiles = fileListMapper.selectFileListList(fileProbe);
    if (existingFiles != null && !existingFiles.isEmpty()) {
        response.setStatus(HttpServletResponse.SC_CREATED);
        status.setSkipUpload(true);
        return status;
    }

    // Otherwise report the chunk numbers already stored, so the client
    // only sends the missing ones.
    FileChunk chunkProbe = new FileChunk();
    chunkProbe.setIdentifier(chunk.getIdentifier());
    List<FileChunk> storedChunks = chunkMapper.selectFileChunkList(chunkProbe);
    if (storedChunks != null && !storedChunks.isEmpty()) {
        List<Long> uploadedNumbers = storedChunks.stream()
                .map(stored -> stored.getChunkNumber())
                .collect(Collectors.toList());
        status.setUploaded(uploadedNumbers);
    }
    return status;
}
/**
 * Persist one uploaded chunk to disk and record it in the database.
 *
 * Every uploaded chunk carries the following metadata:
 *   chunkNumber:      1-based index of this chunk (the first chunk is 1, not 0)
 *   totalChunks:      total number of chunks the file was split into
 *   chunkSize:        nominal chunk size; note the last chunk may be larger
 *   currentChunkSize: actual size of this chunk
 *   totalSize:        total file size
 *   identifier:       unique id of the file (its MD5 hash)
 *   filename:         file name
 *   relativePath:     relative path when a whole folder is uploaded
 * A chunk may be uploaded more than once. That is not standard behaviour,
 * but re-transmission can happen during a real upload and is tolerated.
 *
 * HTTP status codes the client interprets:
 *   200 whole file uploaded
 *   201 this chunk uploaded successfully
 *   500 first chunk failed, cancel the whole file upload
 *   507 server error, automatically retry this chunk
 *
 * @param chunk    chunk payload and metadata
 * @param response servlet response, used to signal a retryable failure (507)
 * @return number of rows inserted into file_chunk (0 on failure)
 */
@Override
@Transactional(rollbackFor = Exception.class)
public int postFileUpload(FileChunk chunk, HttpServletResponse response) {
    MultipartFile file = chunk.getFile();
    int result = 0;
    log.debug("file originName: {}, chunkNumber: {}", file.getOriginalFilename(), chunk.getChunkNumber());
    Path path = Paths.get(generatePath(filePath + folderPath, chunk));
    try {
        // Write the chunk bytes to <uploadDir>/<md5>/<filename>-<chunkNumber>.
        Files.write(path, file.getBytes());
        log.debug("文件 {} 写入成功, md5:{}", chunk.getFilename(), chunk.getIdentifier());
        // Assign the primary key and auditing fields, then record the chunk
        // so breakPointStatus() can report it as already uploaded.
        chunk.setId(IdUtils.longUUID());
        chunk.setCreateBy(SecurityUtils.getUserId().toString());
        result = chunkMapper.insertFileChunk(chunk);
    } catch (IOException e) {
        // Log through the logger instead of printStackTrace(); 507 tells
        // the client to retry this chunk automatically.
        log.error("Failed to write chunk, md5: {}, chunkNumber: {}", chunk.getIdentifier(), chunk.getChunkNumber(), e);
        response.setStatus(507);
        return result;
    }
    return result;
}
/**
 * Build the on-disk path for a chunk:
 * {@code <uploadFolder>/<md5>/<filename>-<chunkNumber>},
 * creating the {@code <uploadFolder>/<md5>} directory if it is missing.
 *
 * @param uploadFolder base upload directory
 * @param chunk        chunk metadata (identifier, filename, chunkNumber)
 * @return full path the chunk file should be written to
 */
private String generatePath(String uploadFolder, FileChunk chunk) {
    // Directory grouping all chunks of one file: <uploadFolder>/<md5>
    String chunkDir = uploadFolder + "/" + chunk.getIdentifier();
    Path dirPath = Paths.get(chunkDir);
    // Files.notExists is the correct existence test; the previous
    // isWritable() check conflated "missing" with "not writable".
    if (Files.notExists(dirPath)) {
        log.info("path not exist,create path: {}", chunkDir);
        try {
            Files.createDirectories(dirPath);
        } catch (IOException e) {
            log.error(e.getMessage(), e);
        }
    }
    // Chunk file name: <filename>-<chunkNumber>
    return chunkDir + "/" + chunk.getFilename() + "-" + chunk.getChunkNumber();
}
/**
 * Merge all uploaded chunks of a file, compress it, upload it to OBS and
 * record the final file in file_list. On success the chunk records and
 * local working files are cleaned up.
 *
 * @param fileInfo identifies the file (filename + MD5 identifier)
 * @return success result carrying the public "url", or an error result
 */
@Override
@Transactional(rollbackFor = Exception.class)
public AjaxResult mergeFile(FileList fileInfo) {
    String filename = fileInfo.getFilename();
    String folder = filePath + folderPath + "/" + fileInfo.getIdentifier();
    String file = folder + "/" + filename;
    String url = null;
    try {
        // 1. If the file is already recorded, return its stored URL directly.
        if (fileListMapper.selectSingleBackFilelist(fileInfo) > 0) {
            Map<String, Object> map = new HashMap<>();
            map.put("url", fileListMapper.selectFileListByIdentifier(fileInfo.getIdentifier()));
            return AjaxResult.success(map);
        }
        // 2. Merge the chunk files into one local file.
        File mergeFile = merge(file, folder, filename);
        MockMultipartFile transferMergeFile = FileToMultipartFile(mergeFile);
        // 3. Compress the merged file.
        MultipartFile compressFile = compress(transferMergeFile);
        // 4. Upload to OBS.
        String objectKey = getObjectKey(transferMergeFile);
        try (FileInputStream fis = new FileInputStream(InputStreamConvertMultipartFileUtil.MultipartFileToFile(compressFile));
             ObsClient obsClient = getObsClient(this.obsClient)) {
            PutObjectResult putObjectResult = obsClient.putObject(bucketName, objectKey, fis);
            obsClient.setObjectAcl(bucketName, objectKey, AccessControlList.REST_CANNED_PUBLIC_READ);
            // Rewrite the internal OBS endpoint to the public-facing domain.
            url = putObjectResult.getObjectUrl().replace(SOURCE_URL, TARGET_URL);
            // 5. Record the file in the database.
            fileInfo.setId(IdUtils.longUUID());
            fileInfo.setLocation(file);
            fileInfo.setUrl(url);
            // BUGFIX: was log.debug("此时的url", url) — no {} placeholder,
            // so the url argument was silently dropped from the log line.
            log.debug("此时的url: {}", url);
            fileInfo.setCreateBy(SecurityUtils.getUserId().toString());
            if (fileListMapper.insertFileList(fileInfo) > 0) {
                // After a successful insert, delete the chunk records for
                // this file to free space.
                FileChunk backChunk = new FileChunk();
                backChunk.setIdentifier(fileInfo.getIdentifier());
                backChunk.setFilename(fileInfo.getFilename());
                backChunk.setUpdateBy(SecurityUtils.getUserId().toString());
                chunkMapper.deleteBackChunkByIdentifier(backChunk);
                // Delete the local merged file (it now lives in OBS).
                if (mergeFile.exists()) {
                    boolean deleted = mergeFile.delete();
                    if (!deleted) {
                        log.error("无法删除本地合并文件: {}", mergeFile.getPath());
                    } else {
                        log.debug("本地合并文件已删除: {}", mergeFile.getPath());
                    }
                }
                // Remove the chunk directory if it is now empty.
                File folderFile = new File(folder);
                if (folderFile.exists() && folderFile.isDirectory()) {
                    String[] files = folderFile.list();
                    // list() returns null when the directory is unreadable.
                    if (files == null) {
                        log.error("无法读取目录: {}", folderFile.getPath());
                    } else if (files.length == 0) {
                        boolean deletedFolder = folderFile.delete();
                        if (!deletedFolder) {
                            log.error("无法删除空目录: {}", folderFile.getPath());
                        } else {
                            log.debug("空目录已删除: {}", folderFile.getPath());
                        }
                    } else {
                        log.debug("目录非空,无法删除: {}", folderFile.getPath());
                    }
                }
            }
        }
    } catch (Exception e) {
        log.error("文件合并或上传错误: {}", e.getMessage(), e);
        return AjaxResult.error("上传失败");
    }
    // 6. Return the public URL to the caller.
    Map<String, Object> map = new HashMap<>();
    map.put("url", url);
    log.debug("此时的urlinfo - - - - {}",url);
    return AjaxResult.success(map);
}
/**
 * Build the OBS object key for an uploaded file: a random UUID plus the
 * file's original extension, under an image- or video-specific prefix.
 *
 * @param file uploaded file whose extension decides the target folder
 * @return object key such as "5S/images/&lt;uuid&gt;.&lt;ext&gt;"
 */
public String getObjectKey(MultipartFile file){
    String extension = FileTypeUtils.getExtension(file);
    // Images and videos are stored under different prefixes.
    String folderName = StringUtils.equalsAnyIgnoreCase(extension, MimeTypeUtils.IMAGE_EXTENSION)
            ? "5S/images/"
            : "5S/video/";
    // Stored name = random uuid + original extension, namespaced by folder.
    return folderName + IdUtils.simpleUUID() + "." + extension;
}
/**
 * Merge all chunk files under {@code folder} into {@code targetFile}.
 *
 * Chunk files are named {@code <filename>-<chunkNumber>}; they are appended
 * in ascending chunk order and each one is deleted once merged.
 *
 * @param targetFile full path of the merged file to create
 * @param folder     directory containing the chunk files
 * @param filename   final file name (excluded from the chunk listing)
 * @return the merged file (may be partial if an IO error occurred; errors
 *         are logged, not thrown)
 */
public File merge(String targetFile, String folder, String filename) {
    File file = new File(targetFile);
    try {
        // Recreate the target from scratch so a retried merge does not
        // fail on a leftover (possibly partial) file from a previous run.
        Files.deleteIfExists(Paths.get(targetFile));
        Files.createFile(Paths.get(targetFile));
        // BUGFIX: the stream returned by Files.list holds an open directory
        // handle and must be closed; try-with-resources guarantees it.
        try (java.util.stream.Stream<Path> chunkPaths = Files.list(Paths.get(folder))) {
            chunkPaths
                .filter(path -> !path.getFileName().toString().equals(filename))
                // Sort ascending by the chunk number after the last '-'.
                // (The previous comparator parsed "-N" including the dash
                // into negative numbers and compared them reversed, which
                // only accidentally produced ascending order.)
                .sorted((a, b) -> {
                    String nameA = a.getFileName().toString();
                    String nameB = b.getFileName().toString();
                    int chunkA = Integer.parseInt(nameA.substring(nameA.lastIndexOf("-") + 1));
                    int chunkB = Integer.parseInt(nameB.substring(nameB.lastIndexOf("-") + 1));
                    return Integer.compare(chunkA, chunkB);
                })
                .forEach(path -> {
                    try {
                        // Append this chunk's bytes to the target file.
                        Files.write(Paths.get(targetFile), Files.readAllBytes(path), StandardOpenOption.APPEND);
                        // Delete the chunk once it has been merged.
                        Files.delete(path);
                    } catch (IOException e) {
                        log.error(e.getMessage(), e);
                    }
                });
        }
    } catch (IOException e) {
        log.error(e.getMessage(), e);
    }
    return file;
}
5. 通用常量
/** Internal OBS endpoint prefix that appears in the URL returned by putObject. */
private static final String SOURCE_URL = "ringpai-oa.obs.cn-north-4xxxx";
/** Public-facing domain that replaces SOURCE_URL in the stored link. */
private static final String TARGET_URL = "asdfgfggf";
/** Sub-directory under the configured upload root where chunk files live. */
private final static String folderPath = "/file";