1、引入的依赖
<!--redis依赖配置-->
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-data-redis</artifactId>
</dependency>
<!--FastDfs-->
<dependency>
<groupId>com.github.tobato</groupId>
<artifactId>fastdfs-client</artifactId>
<version>1.26.7</version>
</dependency>
2、分片上传参数实体类
/**
 * Request parameters carried by one chunk of a chunked (resumable) file upload.
 * One instance describes a single chunk plus enough metadata to reassemble the file.
 */
@Data
@AllArgsConstructor
@NoArgsConstructor
@ToString
public class MultipartFileParam {
/**
 * 1-based index of the chunk being uploaded in this request
 * (the controller treats chunk 1 as the one that creates the file).
 */
private long chunkNumber;
/**
 * Size of each chunk in bytes; used to compute the append offset.
 */
private long chunkSize;
/**
 * Total number of chunks the file was split into.
 */
private long totalChunks;
/**
 * Unique identifier of the whole file, shared by all of its chunks
 * (used as the Redis key suffix for upload progress).
 */
private String identifier;
/**
 * The chunk's binary content as a multipart upload.
 */
private MultipartFile file;
}
3、分片上传文件
分片上传文件的思想:首先,前端先加载检测文件大小,并将文件按照一定大小拆分成若干等分,同时为每一个分片文件按顺序标好序号;然后前端根据文件序号按顺序请求上传文件接口;最后,FastDFS将接收到的文件按顺序追加整合成完整的文件。
在实际工作中,分片上传文件会遇到一些问题,如:
- 前端请求上传文件不总是按照分片序号的顺序去请求
- 重复上传分片文件
以下代码主要解决这两个问题:
//Check how many chunks of this file have already been uploaded (progress lives in Redis).
String checkUploadChunkNum = (String)redisService.get(CommonConstant.uploadChunkNum+identifier);
if (checkUploadChunkNum==null && chunkNumber!=1){
//No progress record exists, yet this is not the first chunk: reject the upload.
throw new SystemException(ResultEnum.ERR.getCode(),"上传失败");
}else if (checkUploadChunkNum!=null && chunkNumber-1<Long.valueOf(checkUploadChunkNum)){
//This chunk's predecessor index is below the recorded progress: the chunk was already uploaded.
throw new SystemException(ResultEnum.ERR.getCode(),"重复上传");
}else if (checkUploadChunkNum!=null && chunkNumber-1>Long.valueOf(checkUploadChunkNum)){
//The chunk arrived ahead of its turn: wait for the preceding chunks to land first.
//At most 100 iterations of 100 ms each, i.e. up to 100*100/1000 = 10 seconds.
int time = 100;
while (time>0){
//Sleep 100 ms between polls.
Thread.sleep(100);
time--;
//Re-read the latest uploaded-chunk index from Redis.
checkUploadChunkNum = (String) redisService.get(CommonConstant.uploadChunkNum+identifier);
if (chunkNumber-1==Long.valueOf(checkUploadChunkNum)){
//The previous chunk has now landed; stop waiting and proceed.
//NOTE(review): if the Redis key expires mid-poll this re-read can return null and
//Long.valueOf would throw NPE — worth a null guard in production code.
break;
}
}
}
完整代码:
@RestController
public class FastDFSController {
/**
* 视频缓存时间,秒:1天;
*/
private final Integer cacheTime = 60 * 60 * 24;
@Autowired
private AppendFileStorageClient appendFileStorageClient;
@Autowired
private RedisService redisService;
@PostMapping("/uploadFile")
public String uploadFile(MultipartFileParam param){
//当前第几分片
long chunkNumber = param.getChunkNumber();
//每个分片的大小
long chunkSize = param.getChunkSize();
//分片总数
long totalChunks = param.getTotalChunks();
//文件唯一标识
String identifier = param.getIdentifier();
//分块文件传输对象
MultipartFile file = param.getFile();
//文件后缀名
String fileName = FileUtil.extName(file.getOriginalFilename());
//历史上传文件大小
long historyUploadSize = (chunkNumber-1)*chunkSize;
//包含组和服务器文件路径,例如:StorePath [group=group1, path=M00/00/00/xxx.xxx]
StorePath storePath = null;
//服务器文件路径,例如:M00/00/00/xxx.xxx
String groundPath;
try{
//先检查已经上传到第几片了
String checkUploadChunkNum = (String)redisService.get(CommonConstant.uploadChunkNum+identifier);
if (checkUploadChunkNum==null && chunkNumber!=1){
//如果当前即将上传的分片不是第一片,而且又查询不到之前的之前已经存在的分片记录,则上传失败
throw new SystemException(ResultEnum.ERR.getCode(),"上传失败");
}else if (checkUploadChunkNum!=null && chunkNumber-1<Long.valueOf(checkUploadChunkNum)){
//当前即将上传的分片序号-1小于已经上传的分片序号,说明已经重复上传了
throw new SystemException(ResultEnum.ERR.getCode(),"重复上传");
}else if (checkUploadChunkNum!=null && chunkNumber-1>Long.valueOf(checkUploadChunkNum)){
//如果当前即将上传的分片序号-1大于已经上传的分片序号,说明提前上传了,需要等待前面序号的分片上传完才能开始上传
//循环最多执行100次,最多执行100*100/1000=10(秒)
int time = 100;
while (time>0){
//每次循环休眠100毫秒
Thread.sleep(100);
time--;
//每次循环检查已经上传分片的最新序号
checkUploadChunkNum = (String) redisService.get(CommonConstant.uploadChunkNum+identifier);
if (chunkNumber-1==Long.valueOf(checkUploadChunkNum)){
//如果当前准备上传的分片序号-1等于已经上传分片的最新序号,则跳出循环
break;
}
}
}
//若上传的是第一片,且只有一片,上传完就结束
if (chunkNumber == 1 ){
if (checkUploadChunkNum !=null){
throw new SystemException(ResultEnum.ERR.getCode(),"第一片已经上传");
}
storePath = appendFileStorageClient.uploadAppenderFile(CommonConstant.DEFAULT_GROUP, file.getInputStream(),file.getSize(), fileName);
//记录当前传入第一片
redisService.set(CommonConstant.uploadChunkNum+identifier,String.valueOf(chunkNumber),cacheTime);
if (storePath == null){
throw new SystemException(ResultEnum.ERR.getCode(),"第一片上传失败");
}
//总共只有一片就返回
if (totalChunks == 1){
//如果只有一片,直接返回结果
return storePath.getPath();
}else {
//记录已存存文件路径
redisService.set(CommonConstant.fastDfsPath+identifier,storePath.getPath(),cacheTime);
return "第:"+chunkNumber+"上传成功";
}
} else {
if (chunkNumber-1 == Long.valueOf(checkUploadChunkNum)){
//获取已存文件路径
groundPath = (String) redisService.get(CommonConstant.fastDfsPath+identifier);
if (groundPath == null){
throw new SystemException(ResultEnum.ERR.getCode(),"获取文件路径失败");
}
//追加文件
appendFileStorageClient.modifyFile(CommonConstant.DEFAULT_GROUP, groundPath, file.getInputStream(),file.getSize(), historyUploadSize);
//修改已存文件片数
redisService.set(CommonConstant.uploadChunkNum+identifier,String.valueOf(chunkNumber),cacheTime);
if (chunkNumber == totalChunks){
//最后一片,返回结果
return groundPath;
}
return "第:"+chunkNumber+"上传成功";
}else {
logger.error("第:"+chunkNumber+"上传失败,原因:当前上传");
throw new SystemException(ResultEnum.ERR.getCode(),"第:"+chunkNumber+"上传失败");
}
}
}catch (Exception e){
e.printStackTrace();
logger.error("第:"+chunkNumber+"上传失败,原因:{}",e);
throw new SystemException(ResultEnum.ERR.getCode(),"第:"+chunkNumber+"上传失败");
}
}
}
4、CommonConstant类
/**
 * Redis key prefixes and the default FastDFS group used by the chunked-upload flow.
 * Pure constants holder — not instantiable.
 */
public final class CommonConstant {

    /** Common prefix for all upload-related Redis keys. */
    private static final String uploading = "Uploading:";

    /** Prefix for per-file Redis keys. */
    private static final String file = uploading + "file:";

    /**
     * Key prefix recording how many chunks of a file have been uploaded
     * (full key: this prefix + file identifier).
     */
    public static final String uploadChunkNum = file + "chunkNum:";

    /**
     * Key prefix recording the FastDFS path the file is stored under
     * (full key: this prefix + file identifier).
     */
    public static final String fastDfsPath = file + "fastDfsPath:";

    /** Default FastDFS storage group. */
    public static final String DEFAULT_GROUP = "group1";

    /** Utility class: prevent instantiation (the original lacked a private ctor). */
    private CommonConstant() {
    }
}