针对于大文件上传进行代码整理,也方便后续使用,方便整理学习
实体类BigFile
// Entity describing one uploaded big file record (persisted after all chunks are merged).
@Data
public class BigFile implements Serializable {

    private static final long serialVersionUID = -6956947981866795431L;

    // Database primary key.
    private Long id;
    // Original file name as supplied by the client.
    private String name;
    // MD5 digest of the whole file; used to identify the upload across chunk requests.
    private String md5;
    // Path where the merged file is stored on disk.
    private String path;
    // NOTE(review): java.util.Date is legacy; java.time.Instant would be preferable,
    // but changing the field type would break the public accessors generated by @Data.
    private Date uploadTime;

    // No-arg constructor required by serialization / mapping frameworks.
    public BigFile() {
    }

    // Convenience constructor for a fully-populated record (id assigned by the DB).
    public BigFile(String name, String md5, String path, Date uploadTime) {
        this.name = name;
        this.md5 = md5;
        this.path = path;
        this.uploadTime = uploadTime;
    }
}
BigFileUploadController
@RestController
@RequestMapping("/bigFileUpload")
// FIX(review): Spring rejects origins = "*" together with allowCredentials = "true"
// (the CORS spec forbids a wildcard origin when credentials are allowed, and Spring
// throws at request time). originPatterns is the supported way to combine the two.
@CrossOrigin(originPatterns = "*", allowCredentials = "true")
public class BigFileUploadController {

    @Autowired
    private BigFileService fileService;

    /**
     * Receives either a whole file (when {@code chunks} is null or 0) or a single
     * chunk of a chunked upload.
     *
     * @param name   original file name
     * @param md5    MD5 digest of the whole file
     * @param size   total size of the whole file in bytes
     * @param chunks total number of chunks; null or 0 means a non-chunked upload
     * @param chunk  zero-based index of this chunk
     * @param file   the uploaded content (whole file or one chunk)
     * @param status client-defined flag; "2" marks the last chunk
     * @return upload result for chunked uploads, null for whole-file uploads
     * @throws IOException if writing the data to disk fails
     */
    @PostMapping("/")
    public ServiceFile upload(String name,
                              String md5,
                              Long size,
                              Integer chunks,
                              Integer chunk,
                              MultipartFile file,
                              String status) throws IOException {
        if (chunks != null && chunks != 0) {
            return fileService.uploadWithBlock(name, md5, size, chunks, chunk, file, status);
        }
        fileService.upload(name, md5, file);
        return null;
    }
}
BigFileService(接口)
/**
 * Service contract for big-file uploads, supporting both whole-file and
 * chunked (resumable) uploads keyed by the file's MD5 digest.
 */
public interface BigFileService {

    /**
     * Stores a complete, non-chunked upload.
     *
     * @param name original file name
     * @param md5  MD5 digest of the file
     * @param file uploaded content
     * @throws IOException if writing to disk fails
     */
    void upload(String name,
                String md5,
                MultipartFile file) throws IOException;

    /**
     * Writes one chunk of a chunked upload into its position in the target file.
     *
     * @param name   original file name
     * @param md5    MD5 digest of the whole file (identifies the upload)
     * @param size   total size of the whole file in bytes
     * @param chunks total number of chunks
     * @param chunk  zero-based index of this chunk
     * @param file   this chunk's content
     * @param status client-defined flag; "2" marks the last chunk
     * @return result object returned to the client
     * @throws IOException if writing to disk fails
     */
    ServiceFile uploadWithBlock(String name,
                                String md5,
                                Long size,
                                Integer chunks,
                                Integer chunk,
                                MultipartFile file,
                                String status) throws IOException;

    /**
     * Reports whether a file with the given MD5 still needs uploading.
     *
     * @param md5 MD5 digest of the file
     * @return true if the file is not yet stored (see implementation)
     */
    boolean checkMd5(String md5);
}
BigFileServiceImpl(接口实现类)
@Service
public class BigFileServiceImpl implements BigFileService {

    /** Client-defined status value marking the last chunk of an upload (constant, so static final). */
    private static final String LAST_NUM = "2";

    @Autowired
    private Econfig econfig;

    /**
     * Stores a non-chunked upload under a freshly generated random name below
     * the configured save directory.
     */
    @Override
    public void upload(String name, String md5, MultipartFile file) throws IOException {
        String path = econfig.getSaveFilePath() + generateFileName();
        BigFileUtils.write(path, file.getInputStream());
        // TODO(review): persist the metadata once the DAO is wired in, e.g.
        //   fileDao.save(new File(name, md5, path, new Date()));
    }

    /**
     * Writes one chunk into its slot of the target file. All chunks of one
     * logical file share the name derived from its MD5, so they land in the
     * same file regardless of arrival order.
     */
    @Override
    public ServiceFile uploadWithBlock(String name, String md5, Long size, Integer chunks, Integer chunk, MultipartFile file, String status) throws IOException {
        String fileName = getFileName(md5, chunks);
        BigFileUtils.writeWithBlok(econfig.getSaveFilePath() + fileName, size, file.getInputStream(), file.getSize(), chunks, chunk);
        addChunk(md5, chunk);
        if (isUploaded(md5)) {
            // Every chunk has arrived: drop the in-memory progress record.
            removeKey(md5);
            // TODO(review): persist the metadata once the DAO is wired in, e.g.
            //   fileDao.save(new File(name, md5, UploadConfig.path + fileName, new Date()));
        }
        ServiceFile sf = new ServiceFile();
        if (LAST_NUM.equals(status)) {
            // TODO: populate the ServiceFile returned to the client for the last chunk.
        }
        return sf;
    }

    /**
     * Reports whether the file still needs uploading. Currently always true
     * because the DAO lookup is not wired in yet.
     */
    @Override
    public boolean checkMd5(String md5) {
        // TODO(review): query the DAO by md5, e.g. return fileDao.getByMd5(md5) == null;
        // (the original built an unused BigFile instance here — removed as dead code)
        return true;
    }
}
需要使用的两个工具类
BigFileUtils
/**
 * File-writing helpers for whole-file and chunked uploads.
 */
public class BigFileUtils {

    private BigFileUtils() {
        // utility class — no instances
    }

    /**
     * Copies the whole input stream to the target path.
     * FIX(review): the original leaked the OutputStream when read/write threw;
     * try-with-resources guarantees it is closed.
     *
     * @param target path of the file to create/overwrite
     * @param src    source of the bytes (not closed here; caller owns it)
     * @throws IOException if reading or writing fails
     */
    public static void write(String target, InputStream src) throws IOException {
        try (OutputStream os = new FileOutputStream(target)) {
            byte[] buf = new byte[8192];
            int len;
            while (-1 != (len = src.read(buf))) {
                os.write(buf, 0, len);
            }
            os.flush();
        }
    }

    /**
     * Writes one chunk into its position inside a pre-sized target file.
     * Assumes every chunk except the last has the same size, so chunk i starts
     * at {@code i * srcSize}; the last chunk is aligned to the end of the file.
     * FIX(review): the original leaked the RandomAccessFile on exception.
     * (Method name "Blok" kept — callers depend on it.)
     *
     * @param target     path of the merged file
     * @param targetSize total size of the merged file in bytes
     * @param src        this chunk's bytes (not closed here; caller owns it)
     * @param srcSize    size of this chunk in bytes
     * @param chunks     total number of chunks
     * @param chunk      zero-based index of this chunk
     * @throws IOException if reading or writing fails
     */
    public static void writeWithBlok(String target, Long targetSize, InputStream src, Long srcSize, Integer chunks, Integer chunk) throws IOException {
        try (RandomAccessFile randomAccessFile = new RandomAccessFile(target, "rw")) {
            randomAccessFile.setLength(targetSize);
            if (chunk == chunks - 1) {
                // Last chunk may be shorter than the others: align it to the file end.
                randomAccessFile.seek(targetSize - srcSize);
            } else {
                randomAccessFile.seek(chunk * srcSize);
            }
            byte[] buf = new byte[8192];
            int len;
            while (-1 != (len = src.read(buf))) {
                randomAccessFile.write(buf, 0, len);
            }
        }
    }

    /**
     * Generates a random file name (a UUID string).
     *
     * @return a 36-character UUID string
     */
    public static String generateFileName() {
        return UUID.randomUUID().toString();
    }
}
UploadUtils
/**
 * In-memory tracking of chunked uploads, keyed by the file's MD5.
 */
public class UploadUtils {

    /**
     * Per-file upload record: the generated on-disk name plus one flag per chunk.
     */
    private static class Value {
        final String name;
        final boolean[] status;

        Value(int n) {
            this.name = generateFileName();
            this.status = new boolean[n];
        }
    }

    // FIX(review): the original used a plain HashMap behind broken double-checked
    // locking — unsafe when chunk requests arrive on concurrent request threads.
    // A ConcurrentHashMap (fully qualified to avoid touching the import block)
    // makes every operation below thread-safe.
    private static final Map<String, Value> chunkMap = new java.util.concurrent.ConcurrentHashMap<>();

    /**
     * Returns true when every chunk of the file identified by key has been recorded.
     *
     * @param key the file's MD5
     * @return true only if a record exists and all of its chunk flags are set
     */
    public static boolean isUploaded(String key) {
        Value value = chunkMap.get(key);
        if (value == null) {
            return false;
        }
        for (boolean done : value.status) {
            if (!done) {
                return false;
            }
        }
        return true;
    }

    /**
     * Whether any chunk of this file has been seen yet.
     */
    private static boolean isExist(String key) {
        return chunkMap.containsKey(key);
    }

    /**
     * Marks one chunk of the file as uploaded.
     * FIX(review): tolerates a concurrent removeKey instead of throwing NPE.
     *
     * @param key   the file's MD5
     * @param chunk zero-based chunk index
     */
    public static void addChunk(String key, int chunk) {
        Value value = chunkMap.get(key);
        if (value != null) {
            value.status[chunk] = true;
        }
    }

    /**
     * Drops the upload record for the given key (no-op if absent).
     */
    public static void removeKey(String key) {
        chunkMap.remove(key);
    }

    /**
     * Returns the generated file name for this upload, creating the record on
     * first call. computeIfAbsent is atomic, replacing the original's broken
     * double-checked locking.
     *
     * @param key    the file's MD5
     * @param chunks total number of chunks (sizes the status array)
     * @return the randomly generated name shared by all chunks of this file
     */
    public static String getFileName(String key, int chunks) {
        return chunkMap.computeIfAbsent(key, k -> new Value(chunks)).name;
    }
}
总结
总体思路:
1,前端进行文件切片
2,前端发送请求中包括
String name----原文件文件名称
String md5----原文件MD5
Long size----原文件总大小
Integer chunks----切块数量
Integer chunk----切块排序
MultipartFile file----切块后文件
String status----是否最后一个切块文件,可自行定义,这里我定义的字符串"2"
3.后端采用RandomAccessFile 类进行文件的接收,针对于该类,有兴趣的小伙伴可以自行查资料学习,挺方便实用的.
4.针对每次上传的切片文件进行合并,状态码等于2时表示最后一个切块,此时使用RandomAccessFile完成整个文件的合并
5.再次解释一下status ,该状态码仅仅是我实现功能时的一个字段,当最后一个切块合并完之后,构建一个ServiceFile类返回给前端,这样方便后续的一些字段存储.
提示
代码中可能有需要优化或者错误地方请指出,勿喷勿喷!!!