Java实现本地的分片上传和断点续传

前言

关于分片上传和断点续传这个需求,但凡涉及到文件服务的应用,无不需要考虑这个问题,我本人之前也发过两篇博客,一篇讲述分片的一些原理,一篇描述了分片的实现。我现在的一个项目里面就需要对文件进行分片上传,在之前我写了文章,这里我不赘述。但我们的项目使用的文件系统是FastDFS,似乎总是出现问题(虽然不一定就是它的问题,但是项目里的其他人似乎觉得就是它的问题),可能有想拿掉它的趋势,自己写文件服务。所以我自己抽取项目里的上传功能,单独抽成了一个工具类,希望以后可以简单的复用,这里感谢孙老板,在百忙之中亲自指导实现!

《FastDfs大文件分片上传和断点续传》
《多线程分段下载》
《大文件分片上传与极速秒传实现》

代码

环境说明

  • JDK8
  • Redis
  • Maven3.X

后台依赖

        <dependency>
            <groupId>com.alibaba</groupId>
            <artifactId>fastjson</artifactId>
            <version>1.2.73</version>
        </dependency>

        <!--redis-->
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-data-redis</artifactId>
        </dependency>

        <dependency>
            <groupId>cn.hutool</groupId>
            <artifactId>hutool-all</artifactId>
            <version>5.7.2</version>
        </dependency>

        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-web</artifactId>
        </dependency>

        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-test</artifactId>
            <scope>test</scope>
        </dependency>

实体

import org.springframework.web.multipart.MultipartFile;

import java.io.Serializable;

/**
 * @Author: 朝花不迟暮
 * @Date: 2021/12/23 21:23
 * @Description:
 */
/**
 * Transfer object for a single chunk of a chunked (resumable) file upload.
 * Bound from the multipart form fields sent by the front-end for every slice.
 */
public class MultipartFileParam implements Serializable {
    private static final long serialVersionUID = 3238600879053243080L;

    private String taskId;        // upload-task id shared by all chunks of one file
    private long chunkNumber;     // 1-based index of the current chunk
    private long chunkSize;       // size of each chunk in bytes
    private long totalChunks;     // total number of chunks for the file
    private long fileSize;        // byte offset already uploaded (sent as "fileSize" by the client)
    private String fileName;      // original file name
    private String identifier;    // unique identifier of the file
    private MultipartFile file;   // binary payload of this chunk

    public String getTaskId() {
        return taskId;
    }

    public void setTaskId(String taskId) {
        this.taskId = taskId;
    }

    public long getChunkNumber() {
        return chunkNumber;
    }

    public void setChunkNumber(long chunkNumber) {
        this.chunkNumber = chunkNumber;
    }

    public long getChunkSize() {
        return chunkSize;
    }

    public void setChunkSize(long chunkSize) {
        this.chunkSize = chunkSize;
    }

    public long getTotalChunks() {
        return totalChunks;
    }

    public void setTotalChunks(long totalChunks) {
        this.totalChunks = totalChunks;
    }

    public long getFileSize() {
        return fileSize;
    }

    public void setFileSize(long fileSize) {
        this.fileSize = fileSize;
    }

    public String getFileName() {
        return fileName;
    }

    public void setFileName(String fileName) {
        this.fileName = fileName;
    }

    public String getIdentifier() {
        return identifier;
    }

    public void setIdentifier(String identifier) {
        this.identifier = identifier;
    }

    public MultipartFile getFile() {
        return file;
    }

    public void setFile(MultipartFile file) {
        this.file = file;
    }
}

分片工具类

import cn.hutool.core.io.FileUtil;
import cn.hutool.core.map.MapUtil;
import cn.hutool.core.util.IdUtil;
import cn.hutool.core.util.StrUtil;
import cn.hutool.crypto.SecureUtil;
import com.zhbcm.outupload.common.constant.UpLoadConstant;
import com.zhbcm.outupload.common.response.ApiResult;
import com.zhbcm.outupload.common.response.ResultUtil;
import com.zhbcm.outupload.config.UploadProperties;
import com.zhbcm.outupload.entity.MultipartFileParam;
import org.apache.tomcat.util.http.fileupload.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Service;
import org.springframework.web.multipart.MultipartFile;

import javax.activation.MimetypesFileTypeMap;
import javax.annotation.Resource;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.*;
import java.net.URL;
import java.net.URLConnection;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.util.HashMap;
import java.util.Map;
import java.util.UUID;

/**
 * @author 朝花不迟暮
 * @version 1.0
 * @date 2021/6/28 23:22
 */
@Service
public class UploadFileUtil {
    private static final Logger log = LoggerFactory.getLogger(UploadFileUtil.class);

    /** TTL in seconds for every redis bookkeeping key: 24 hours. */
    private final Integer cacheTime = 60 * 60 * 24;

    @Resource
    private UploadProperties uploadProperties;
    @Resource
    private RedisUtil redisUtil;

    /**
     * Chunked upload with resume support.
     *
     * <p>The first chunk creates a temp file named {@code <taskId><ext>_tmp} under
     * {@code targetPath} and writes at offset 0; every later chunk is appended at the
     * byte offset the client reports in {@code fileSize}. Progress (last chunk number,
     * temp-file path, bytes received per file md5) is tracked in redis so an interrupted
     * upload can resume. When the last chunk arrives the temp file is renamed to
     * {@code <taskId>.<ext>} and all bookkeeping keys are dropped.
     *
     * @param multipartFileParam chunk descriptor plus the chunk's binary payload
     * @param targetPath         directory the file is assembled into
     * @return result map with key {@code result}: 上传成功 / 上传完毕 / 上传异常
     */
    public ApiResult uploadAppendFile(MultipartFileParam multipartFileParam, String targetPath) {
        Map<String, String> map = new HashMap<>();
        long chunk = multipartFileParam.getChunkNumber();
        long totalChunks = multipartFileParam.getTotalChunks();
        long fileSize = multipartFileParam.getFileSize();
        String taskId = multipartFileParam.getTaskId();
        MultipartFile file = multipartFileParam.getFile();
        String fileName = multipartFileParam.getFileName();
        String extName = FileUtil.extName(fileName);
        String localPath = targetPath + FileUtil.FILE_SEPARATOR;
        try {
            if (chunk == 1) {
                // First chunk: create the target directory and temp file, write at offset 0.
                int dot = fileName.lastIndexOf('.');
                // Tolerate extension-less file names (the original substring call would throw).
                String suffix = dot >= 0 ? fileName.substring(dot) : "";
                File fileDir = new File(localPath);
                if (!fileDir.exists() && !fileDir.mkdirs()) {
                    throw new IOException("cannot create upload directory: " + localPath);
                }
                File tempFile = new File(localPath, taskId + suffix + "_tmp");
                if (!tempFile.exists() && !tempFile.createNewFile()) {
                    throw new IOException("cannot create temp file: " + tempFile.getPath());
                }
                writeChunk(tempFile, 0L, file);
                redisUtil.setObject(UpLoadConstant.chunkNum + taskId, chunk, cacheTime);
                redisUtil.setObject(UpLoadConstant.fastDfsPath + taskId, tempFile.getPath(), cacheTime);
                map.put("result", "上传成功");
            } else {
                // Later chunk: append at the byte offset already uploaded (client-reported).
                String path = (String) redisUtil.getObject(UpLoadConstant.fastDfsPath + taskId);
                writeChunk(new File(path), fileSize, file);
                redisUtil.setObject(UpLoadConstant.chunkNum + taskId, chunk, cacheTime);
            }
            // Record the bytes received so far under the file's md5 so checkMd5 can resume.
            String md5 = (String) redisUtil.getObject(UpLoadConstant.task + taskId);
            HashMap<String, String> redisMap = new HashMap<>();
            redisMap.put("fileSize", fileSize + "");
            redisMap.put("taskId", taskId);
            redisUtil.setHashAsMap(UpLoadConstant.fileMd5 + md5, redisMap, cacheTime);
            if (chunk == totalChunks) {
                // Last chunk: promote the temp file to its final name and drop bookkeeping.
                String path = (String) redisUtil.getObject(UpLoadConstant.fastDfsPath + taskId);
                FileUtil.rename(new File(path), taskId + "." + extName, true);
                map.put("result", "上传完毕");
                clearTask(taskId, md5);
            }
        } catch (IOException e) {
            log.error("chunk upload failed, taskId={}, chunk={}", taskId, chunk, e);
            String md5 = (String) redisUtil.getObject(UpLoadConstant.task + taskId);
            clearTask(taskId, md5);
            map.put("result", "上传异常");
        }
        return ResultUtil.success(map);
    }

    /** Writes one chunk's payload into {@code target} starting at {@code offset}; closes both streams. */
    private void writeChunk(File target, long offset, MultipartFile chunk) throws IOException {
        try (InputStream is = chunk.getInputStream();
             RandomAccessFile raf = new RandomAccessFile(target, "rw")) {
            raf.seek(offset);
            byte[] buffer = new byte[1024 * 10];
            int len;
            while ((len = is.read(buffer)) != -1) {
                raf.write(buffer, 0, len);
            }
        }
    }

    /** Removes every redis key tracking the given task (called on completion and on failure). */
    private void clearTask(String taskId, String md5) {
        redisUtil.del(UpLoadConstant.fileMd5 + md5);
        redisUtil.del(UpLoadConstant.task + taskId);
        redisUtil.del(UpLoadConstant.chunkNum + taskId);
        redisUtil.del(UpLoadConstant.fastDfsPath + taskId);
    }

    /**
     * Pre-upload check: looks up the (client-computed) pseudo-md5 in redis.
     *
     * @param md5 raw client identifier (name+size+lastModified); hashed server-side
     * @return map with {@code fileSize} (bytes already uploaded, 0 for a new file)
     *         and {@code taskId} (existing or freshly minted)
     */
    public Map<String, Object> checkMd5(String md5) {
        Map<String, Object> map = new HashMap<>();
        String fileSize = null;
        String taskId = null;
        md5 = SecureUtil.md5(md5);
        Map redisMap = redisUtil.getMap(UpLoadConstant.fileMd5 + md5);
        if (MapUtil.isNotEmpty(redisMap)) {
            fileSize = ((String) redisMap.get("fileSize"));
            taskId = ((String) redisMap.get("taskId"));
        }
        if (StrUtil.isNotEmpty(fileSize)) {
            // Known file: resume from the recorded offset under the existing task.
            map.put("fileSize", Long.parseLong(fileSize));
        } else {
            // New file: register a fresh task starting at offset 0.
            Map<String, Object> map1 = new HashMap<>();
            taskId = IdUtil.simpleUUID();
            map1.put("fileSize", 0);
            map1.put("taskId", taskId);
            redisUtil.setHashAsMap(UpLoadConstant.fileMd5 + md5, map1, cacheTime);
            redisUtil.setObject(UpLoadConstant.task + taskId, md5, cacheTime);
            map.put("fileSize", 0);
        }
        map.put("taskId", taskId);
        return map;
    }
}

说明:这里面一些工具类和返回类型都是我自己封装的,如果需要可以去码云上复制,我这里不做详细的展示

控制层

import com.zhbcm.outupload.common.response.ApiResult;
import com.zhbcm.outupload.common.response.ResultUtil;
import com.zhbcm.outupload.entity.MultipartFileParam;
import com.zhbcm.outupload.utils.UploadFileUtil;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RestController;
import org.springframework.web.multipart.MultipartFile;

import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.util.Map;

/**
 * @author 朝花不迟暮
 * @version 1.0
 * @date 2021/6/26 20:25
 */
/**
 * REST endpoints for the chunked, resumable upload flow:
 * {@code GET /checkMd5} asks where to resume; {@code POST /chunkUpload} receives one chunk.
 */
@RestController
public class FileController {

    /** Directory uploads are assembled into. TODO: move to configuration (UploadProperties). */
    private static final String TARGET_PATH = "E:\\安装包";

    private final UploadFileUtil uploadFileUtil;

    /** Constructor injection (preferred over field @Autowired: testable, fails fast). */
    public FileController(UploadFileUtil uploadFileUtil) {
        this.uploadFileUtil = uploadFileUtil;
    }

    /**
     * Pre-upload check: returns the byte offset already stored for this identifier
     * (0 for a new file) and the task id to tag subsequent chunks with.
     */
    @GetMapping("/checkMd5")
    public ApiResult checkMd5(String md5) {
        Map<String, Object> map = uploadFileUtil.checkMd5(md5);
        return ResultUtil.success(map);
    }

    /** Receives a single chunk and appends it to the task's temp file. */
    @PostMapping(value = "/chunkUpload")
    public ApiResult chunkUpload(MultipartFileParam multipartFileParam) {
        return uploadFileUtil.uploadAppendFile(multipartFileParam, TARGET_PATH);
    }
}

前端封装

/**
 * Entry point: asks the server where to resume this file, then starts uploading.
 * The identifier is a cheap pseudo-md5 (name + size + lastModified) so huge files
 * don't have to be hashed client-side; the server hashes it again.
 *
 * Fix vs. original: the `if (res.data) ... else upload(0, ...)` branch was dead code —
 * `res.data.fileSize` had already been dereferenced, so `res.data` was always truthy
 * and the else-branch unreachable. Behavior is unchanged.
 */
function chunkUpload(file) {
    let fileMd5 = file.name + file.size + file.lastModified;
    $.ajax({
        url: '/checkMd5',
        type: 'GET',
        data: {"md5": fileMd5},
        success: function (res) {
            if (res) {
                // Resume from the byte offset the server already holds (0 for a new file).
                const start = Number(res.data.fileSize);
                const taskId = res.data.taskId;
                upload(start, taskId, file);
            }
        },
        error: function (msg) {
            alert(msg);
        }
    })
}

/**
 * Uploads the 5 MB slice beginning at byte `start`, then recurses with the next
 * offset until the server reports the file is complete.
 */
function upload(start, taskId, file) {
    // Fixed slice size: 5 MB.
    const chunkBytes = 1024 * 1024 * 5;
    // How many slices the whole file needs.
    const chunkCount = Math.ceil(file.size / chunkBytes);
    // End of this slice, clamped to the file size.
    const stop = Math.min(start + chunkBytes, file.size);
    // The actual binary payload for this request.
    const piece = file.slice(start, stop);
    // 1-based index of this slice (start is always a multiple of chunkBytes).
    const chunkIndex = start / chunkBytes + 1;

    const form = new FormData();
    form.append('file', piece);
    form.append("fileName", file.name);
    form.append("fileSize", start);
    form.append("taskId", taskId);
    form.append("chunkNumber", chunkIndex);
    form.append("chunkSize", chunkBytes);
    form.append("totalChunks", chunkCount);

    $.ajax({
        type: 'POST',
        url: '/chunkUpload',
        data: form,
        contentType: false,  // must be false so the browser sets the multipart boundary
        processData: false,  // must be false so jQuery doesn't serialize the FormData
        success: function (res) {
            if (res.data.result === '上传完毕') {
                alert("分片上传完成");
            } else {
                upload(stop, taskId, file);
            }
        }
    });
}

页面

<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>上传界面</title>
    <!-- jQuery must load BEFORE chunkUpload.js, which uses $ -->
    <script type="text/javascript" src="./js/jquery-3.4.1.js"></script>
    <script type="text/javascript" src="./js/chunkUpload.js"></script>
</head>
<body>
<table border="1px solid red">
    <tr>
        <td>文件1</td>
        <td>
            <input name="file" type="file" id="inputFile"/>
        </td>
    </tr>
    <tr>
        <td></td>
        <td>
            <button onclick="check()">提交</button>
        </td>
    </tr>
</table>
<script type="text/javascript">
    // Grab the selected file and kick off the chunked upload.
    function check() {
        let file = $('#inputFile').get(0).files[0];
        chunkUpload(file);
    }
</script>
</body>
</html>

后续

文件分片上传和断点续传虽然理解起来不是很麻烦,但关键是要自己去实现。这里关于文件MD5唯一校验,有些人认为需要拿整个文件进行md5加密做唯一值,这当然无可厚非,然而弊端也很明显:如果上传文件很大,那么md5加密的过程必然十分耗时,得不偿失。故,我在这里仅使用文件的名称、大小与最后修改时间拼接成的字符串进行md5加密,可以在一定程度上保证唯一性。

码云传送门

  • 8
    点赞
  • 27
    收藏
    觉得还不错? 一键收藏
  • 18
    评论
实现文件分片上传并且断点续传的一种常见方案是使用HTTP协议,将大文件进行分片上传,每个分片的大小可以根据具体情况设置,通常是几十KB到几百KB不等。上传过程中,服务器接收到每个分片后,将其存储到磁盘上。

同时,为了实现断点续传,客户端需要在上传前检查服务器上是否已经存在相同的文件。如果已存在,则客户端需要向服务器发送一个请求,以获取已上传分片的信息,然后继续上传剩余分片。

下面是使用Java语言实现文件分片上传的示例代码(已使用 try-with-resources 保证流在异常时也能正确关闭):

```java
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.net.HttpURLConnection;
import java.net.URL;

public class FileUploader {

    private static final String BOUNDARY = "----WebKitFormBoundary7MA4YWxkTrZu0gW";

    public static void main(String[] args) throws Exception {
        String filePath = "C:\\test\\largeFile.zip";
        String url = "http://localhost:8080/upload";

        File file = new File(filePath);
        long fileSize = file.length();
        int chunkSize = 1024 * 1024; // 1MB
        int chunkCount = (int) Math.ceil((double) fileSize / chunkSize);

        for (int i = 0; i < chunkCount; i++) {
            int start = i * chunkSize;
            int end = (int) Math.min((long) (i + 1) * chunkSize, fileSize);
            uploadChunk(url, file.getName(), i, chunkCount, start, end, file);
        }
    }

    /**
     * 上传单个分片:将分片数据与分片编号/总数一起以 multipart/form-data 发送到服务器。
     */
    private static void uploadChunk(String url, String fileName, int chunkIndex, int chunkCount,
                                    int start, int end, File file) throws Exception {
        URL uploadUrl = new URL(url);
        HttpURLConnection connection = (HttpURLConnection) uploadUrl.openConnection();
        connection.setRequestMethod("POST");
        connection.setDoOutput(true);
        connection.setRequestProperty("Content-Type", "multipart/form-data; boundary=" + BOUNDARY);

        try (DataOutputStream outputStream = new DataOutputStream(connection.getOutputStream());
             FileInputStream inputStream = new FileInputStream(file)) {
            outputStream.writeBytes("--" + BOUNDARY + "\r\n");
            outputStream.writeBytes("Content-Disposition: form-data; name=\"file\"; filename=\"" + fileName + "\"\r\n");
            outputStream.writeBytes("Content-Type: application/octet-stream\r\n\r\n");

            inputStream.skip(start);
            byte[] buffer = new byte[1024];
            int len;
            int uploadedBytes = start;
            while (uploadedBytes < end && (len = inputStream.read(buffer)) != -1) {
                outputStream.write(buffer, 0, len);
                uploadedBytes += len;
            }

            outputStream.writeBytes("\r\n--" + BOUNDARY + "\r\n");
            outputStream.writeBytes("Content-Disposition: form-data; name=\"chunkIndex\"\r\n\r\n");
            outputStream.writeBytes(chunkIndex + "\r\n");
            outputStream.writeBytes("--" + BOUNDARY + "\r\n");
            outputStream.writeBytes("Content-Disposition: form-data; name=\"chunkCount\"\r\n\r\n");
            outputStream.writeBytes(chunkCount + "\r\n");
            outputStream.writeBytes("--" + BOUNDARY + "--\r\n");
            outputStream.flush();
        }

        int responseCode = connection.getResponseCode();
        if (responseCode != HttpURLConnection.HTTP_OK) {
            throw new RuntimeException("Failed to upload chunk: " + chunkIndex);
        }
    }
}
```

上述代码将文件分成若干个分片,每个分片大小为1MB,然后逐个上传到服务器。其中,`uploadChunk()`方法用于上传单个分片,它将分片数据和分片信息一起发送到服务器。服务器需要根据分片信息将所有分片组合成完整的文件。

此外,为了实现断点续传,还需要在服务器端实现一个接口,用于获取已上传分片的信息,并返回给客户端。在上传前,客户端需要向服务器发送一个请求,以获取已上传分片的信息,然后继续上传剩余分片。在上传过程中,客户端需要记录已上传分片的信息,以便在上传失败后能够恢复上传进度。
评论 18
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值