Uploading Large Files in Spring Boot

Large file upload requires three endpoints: one to check upload progress, one to upload a file chunk, and one to merge the chunks.

The rough flow: the user picks a file on the frontend, which splits it into chunks (uploader libraries such as vue-simple-uploader send field names matching the Chunk model below). Every request carries the file's unique identifier, so the progress endpoint can tell the frontend which chunks have already arrived and where to resume. The chunk upload endpoint stores each chunk, and once all chunks have been uploaded, the merge endpoint assembles them and returns the upload result. The code is as follows:
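
The "unique identifier" is the MD5 hash of the whole file (see the identifier field of the Chunk model below). The frontend typically computes it (for example with spark-md5); purely to make the contract concrete, here is a minimal Java sketch using the commons-codec dependency from the list at the end (the client sketch at the end of the article reuses this):

import java.io.FileInputStream;
import java.io.IOException;
import org.apache.commons.codec.digest.DigestUtils;

public class IdentifierSketch {
    //MD5 of the complete file; this is the identifier every endpoint keys on
    public static String md5Of(String path) throws IOException {
        try (FileInputStream in = new FileInputStream(path)) {
            return DigestUtils.md5Hex(in);
        }
    }
}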

Controller layer:

package com.example.demo.bigupload.controller;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.web.bind.annotation.*;
import org.springframework.web.multipart.MultipartFile;
import com.example.demo.bigupload.model.Chunk;
import com.example.demo.bigupload.model.FileInfo;
import com.example.demo.bigupload.service.BigFileUploadService;
import com.alibaba.fastjson.JSONObject;
import lombok.extern.slf4j.Slf4j;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.nio.file.*;
import java.text.SimpleDateFormat;
import java.util.*;
import java.util.stream.Stream;
/**
* @ClassName: BigFileUploadController
* @Description: large file upload
 */
@RestController
@CrossOrigin(origins = "*")
@RequestMapping(value = "/bigfileupload")
@Slf4j
public class BigFileUploadController {
	
    @Autowired
    BigFileUploadService bigFileUploadService;
    
    /**
     * Temporary storage path for file chunks, configured in application.yml
     */
    @Value("${file_temp_path}")
    private String fileTempPath;
 
    /**
     * Final storage path for merged files, configured in application.yml
     */
    @Value("${file_real_path}")
    private String fileRealPath;
    
    /**
     * @Description: check the upload progress of a file
     * @param: chunk(identifier,chunkNumber)
     * @param: response
     * @return: java.lang.Object 
     */
    @GetMapping("/chunk")
    public Object checkChunk(Chunk chunk, HttpServletResponse response) {
        Map<String, Object> result = new HashMap<>(16);
        //用于存放上传成功的块number
        List<Integer> successChunks = new ArrayList<>(50);
        try {
            //判断文件之前是否上传过
            boolean flag = bigFileUploadService.checkChunk(chunk.getIdentifier(), chunk.getChunkNumber());
            if (flag) {
                //未上传,返回信息
                response.setStatus(HttpServletResponse.SC_NOT_MODIFIED);
                result.put("uploaded_chunks", successChunks);
                log.info("demo.bigupload.controller.FileUploadController.checkChunk result:" + result.toString());
                return result;
            }
            //查询成功的块
            successChunks = bigFileUploadService.checkSucessChunks(chunk.getIdentifier());
            if (successChunks != null && !successChunks.isEmpty()) {
                result.put("uploaded_chunks", successChunks);
                log.info("demo.bigupload.controller.FileUploadController.checkChunk result:" + result.toString());
                return result;
            }
        } catch (Exception e) {
            log.error(e.getMessage());
        }
        return result;
    }
    
    /**
     * @Description: upload a file chunk (resumable upload)
     * @param: chunk(filename,identifier,currentChunkSize,chunkNumber,file)
     * @return: java.lang.Object
     */
    @PostMapping("/chunk")
    public Object uploadChunk(Chunk chunk) {
        log.info("demo.bigupload.controller.FileUploadController.uploadChunk params:" + chunk.toString());
        Map<String, Object> result = new HashMap<>(16);
        MultipartFile file = chunk.getFile();
        try {
            byte[] bytes = file.getBytes();
            Path temPath = Paths.get(generatePath(fileTempPath, chunk));
            //先将文件块写入临时路径
            Files.write(temPath, bytes);
            log.debug("文件 {} 写入成功, uuid:{}", chunk.getFilename(), chunk.getIdentifier());
            File temFile = new File(generatePath(fileTempPath, chunk)); 
            /*
             * 判断上传后的文件块大小是否等于上传文件块实际大小。如果是则保存文件块信息
             * 如果不是,删除文件块并失败。
             */
            if(temFile.length() == chunk.getCurrentChunkSize()) {
            	bigFileUploadService.saveChunk(chunk);
            }else {
                Files.delete(temPath);
                result.put("code", "000001");
                result.put("message", "error");
                log.info("文件块大小不匹配,上传失败:" + chunk.toString());
                return result;
            }
            result.put("code", "000000");
            result.put("message", "sucess");
            log.info("demo.bigupload.controller.FileUploadController.uploadChunk result:" + result.toString());
            return result;
        } catch (Exception e) {
            e.printStackTrace();
            result.put("code", "000001");
            result.put("message", "error");
            log.info("demo.bigupload.controller.FileUploadController.uploadChunk result:" + result.toString());
            return result;
        }
    }
 
    /**
     * @Description: merge the uploaded chunks into the final file
     * @param fileInfo(filename,identifier)
     * @return: java.lang.Object
     */
    @PostMapping("/mergeFile")
    public Object mergeFile(@RequestBody FileInfo fileInfo) {
        log.info("demo.bigupload.controller.BigFileUploadController.mergeFile params:" + fileInfo.toString());
        Map<String, Object> result = new HashMap<>(16);
        try {
            //first check whether this file already exists in the file store
            FileInfo file = bigFileUploadService.getFileStoryInfo(fileInfo.getIdentifier());
            if (file == null) {
                //generate an itemId for the stored file and return it to the frontend for later association
                String itemId = UUID.randomUUID().toString().replace("-", "").toLowerCase();
                //original file name
                String filename = fileInfo.getFilename();
                //reassemble the file name
                String fileName = itemId + getFileNameSuffix(filename);
                //parent directory of the file
                String rootPath = fileRealPath + getNewDateToString();
                //final location of the merged file
                String targetPath = rootPath + fileName;
                //temporary directory holding the chunks
                String filepathTem = fileTempPath + "/" + fileInfo.getIdentifier();
                //create the target directory if it does not exist yet
                if (!Files.exists(Paths.get(rootPath))) {
                    Files.createDirectories(Paths.get(rootPath));
                }
                //merge the chunks
                merge(targetPath, filepathTem, filename);
                fileInfo.setItemId(itemId);
                fileInfo.setLocation(targetPath);
                bigFileUploadService.addFileInfo(fileInfo);
                result.put("code", "000000");
                result.put("message", "success");
                //holds the file information
                JSONObject fileArr = new JSONObject();
                //storage location of the file
                fileArr.put("attachPath", targetPath);
                fileArr.put("itemId", itemId);
                result.put("files", fileArr);
            } else {
                //the file was uploaded before: return the existing record
                result.put("code", "000000");
                result.put("message", "success");
                JSONObject fileArr = new JSONObject();
                fileArr.put("attachPath", file.getLocation());
                fileArr.put("itemId", file.getItemId());
                result.put("files", fileArr);
            }
            log.info("demo.bigupload.controller.BigFileUploadController.mergeFile result:" + result.toString());
            return result;
        } catch (Exception e) {
            log.error(e.getMessage(), e);
            result.put("code", "000001");
            result.put("message", "error");
            log.info("demo.bigupload.controller.BigFileUploadController.mergeFile result:" + result.toString());
            return result;
        }
    }
 
    /**
     * @Description: build the temporary path for an uploaded chunk
     */
    private String generatePath(String uploadFolder, Chunk chunk) {
        StringBuilder sb = new StringBuilder();
        sb.append(uploadFolder).append("/").append(chunk.getIdentifier());
        //create the directory if it does not exist
        if (!Files.exists(Paths.get(sb.toString()))) {
            log.info("path not exist,create path: {}", sb.toString());
            try {
                Files.createDirectories(Paths.get(sb.toString()));
            } catch (IOException e) {
                log.error(e.getMessage(), e);
            }
        }
        return sb.append("/")
                .append(chunk.getFilename())
                .append("-")
                .append(chunk.getChunkNumber()).toString();
    }
 
    /**
     * @Description: merge the chunk files
     */
    private void merge(String targetFile, String tempFile, String filename) {
        try {
            //create the target file
            Files.createFile(Paths.get(targetFile));
            //chunks are named "<filename>-<chunkNumber>" by generatePath: sort them
            //by chunk number and append them to the target file in order;
            //the stream is closed so the directory handle does not block the delete below
            try (Stream<Path> chunks = Files.list(Paths.get(tempFile))) {
                chunks.filter(path -> !path.getFileName().toString().equals(filename))
                        .sorted(Comparator.comparingInt((Path path) -> {
                            String name = path.getFileName().toString();
                            return Integer.parseInt(name.substring(name.lastIndexOf("-") + 1));
                        }))
                        .forEach(path -> {
                            try {
                                //append the chunk to the target file
                                Files.write(Paths.get(targetFile), Files.readAllBytes(path), StandardOpenOption.APPEND);
                                //delete the chunk once it has been merged
                                Files.delete(path);
                            } catch (IOException e) {
                                log.error(e.getMessage(), e);
                            }
                        });
            }
            //the chunks are gone, so the now-empty temporary directory can be removed
            Files.deleteIfExists(Paths.get(tempFile));
        } catch (IOException e) {
            log.error(e.getMessage(), e);
        }
    }

    /**
     * @Description: get the file name suffix (extension)
     */
    public String getFileNameSuffix(String fileName) {
        String fileSuffix = "";
        String str1 = ".";
        String str2 = "。";
        if (org.apache.commons.lang.StringUtils.isNotEmpty(fileName)) {
            if (fileName.contains(str1)) {
                fileSuffix = fileName.substring(fileName.lastIndexOf(str1));
            } else if (fileName.contains(str2)) {
                //handle file names that use the full-width period
                fileSuffix = fileName.substring(fileName.lastIndexOf(str2));
            }
        }
        return fileSuffix;
    }
    
    /**
     * @Description: get the current date as a /yyyy/MM/dd/ path segment
     */
    public String getNewDateToString() {
        SimpleDateFormat sf = new SimpleDateFormat("/yyyy/MM/dd/");
        return sf.format(new Date());
    }
    
}
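
To make the directory layout concrete: with the paths from application.yml below, a chunk upload with (hypothetical) identifier abc123, filename demo.zip and chunkNumber 3 is written by generatePath to

D:/dagdata/temp/abc123/demo.zip-3

and after mergeFile the assembled file ends up at D:/dagdata/real/<yyyy>/<MM>/<dd>/<itemId>.zip, where itemId is the generated UUID. merge relies on this <filename>-<chunkNumber> convention when it parses the number after the last "-", so the chunk files must not be renamed on disk.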

Service interface:

package com.example.demo.bigupload.service;

import java.util.List;
import com.example.demo.bigupload.model.Chunk;
import com.example.demo.bigupload.model.FileInfo;
/**
 * @description: large file upload service interface
 **/
public interface BigFileUploadService {

	/**
	 * @description: check whether a chunk of the file has been uploaded before
	 */
	boolean checkChunk(String identifier, Integer chunkNumber);

	/**
	 * @description: query the chunks that were uploaded successfully
	 */
	List<Integer> checkSuccessChunks(String identifier);

	/**
	 * @description: save the chunk record
	 */
	void saveChunk(Chunk chunk);

	/**
	 * @description: check whether the file already exists in the file store
	 */
	FileInfo getFileStoryInfo(String identifier);

	/**
	 * @description: add a stored-file record
	 */
	void addFileInfo(FileInfo fileInfo);

}

Service implementation (ServiceImpl):

package com.example.demo.bigupload.service;

import java.util.ArrayList;
import java.util.List;
import org.apache.commons.lang.StringUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
import com.example.demo.bigupload.mapper.BigFileUploadMapper;
import com.example.demo.bigupload.model.Chunk;
import com.example.demo.bigupload.model.FileInfo;

@Service
public class BigFileUploadServiceImpl implements BigFileUploadService {

	@Value("${file_real_path}")
	private String fileRealPath;

	@Autowired
	private BigFileUploadMapper bigFileUploadMapper;

	/**
	 * @description: check whether a chunk of the file has been uploaded before
	 */
	@Override
	public boolean checkChunk(String identifier, Integer chunkNumber) {
		Chunk chunk = bigFileUploadMapper.checkChunk(identifier, chunkNumber);
		//true means the chunk has not been uploaded yet
		return chunk == null;
	}

	/**
	 * @description: query the chunks that were uploaded successfully
	 */
	@Override
	public List<Integer> checkSuccessChunks(String identifier) {
		List<Integer> resultList = new ArrayList<>();
		List<Chunk> result;
		//isNotBlank: not null, not empty and not whitespace-only, i.e. !isBlank(str)
		if (StringUtils.isNotBlank(identifier)) {
			/*
			 * Difference between isEmpty() and isBlank():
			 * isEmpty(): true only when there are no characters at all, i.e. str == null or str.length() == 0
			 * isBlank(): additionally true for strings made up only of whitespace (meaningless) characters;
			 * in other words, isBlank() covers everything isEmpty() does plus whitespace-only strings.
			 */
			result = bigFileUploadMapper.checkSuccessChunks(identifier);
			if (result != null && !result.isEmpty()) {
				for (Chunk chunk : result) {
					resultList.add(chunk.getChunkNumber());
				}
			}
		}
		return resultList;
	}

	/**
	 * @description: save the chunk record
	 */
	@Override
	public void saveChunk(Chunk chunk) {
		bigFileUploadMapper.saveChunk(chunk);
	}

	/**
	 * @description: check whether the file already exists in the file store
	 */
	@Override
	public FileInfo getFileStoryInfo(String identifier) {
		return bigFileUploadMapper.getFileStoryInfo(identifier);
	}

	/**
	 * @description: add a stored-file record
	 */
	@Override
	public void addFileInfo(FileInfo fileInfo) {
		bigFileUploadMapper.addFileInfo(fileInfo);
	}

}
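
Since the comment in checkSuccessChunks leans on the difference between isEmpty() and isBlank(), here is a quick illustration with the commons-lang StringUtils used above:

import org.apache.commons.lang.StringUtils;

public class StringUtilsDemo {
    public static void main(String[] args) {
        System.out.println(StringUtils.isEmpty(null));  // true
        System.out.println(StringUtils.isEmpty(""));    // true
        System.out.println(StringUtils.isEmpty("   ")); // false: whitespace still counts as characters
        System.out.println(StringUtils.isBlank(null));  // true
        System.out.println(StringUtils.isBlank(""));    // true
        System.out.println(StringUtils.isBlank("   ")); // true: whitespace-only strings are blank
    }
}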

DAO layer (storage does not have to be MySQL; other databases work too):

package com.example.demo.bigupload.mapper;

import java.util.List;
import org.apache.ibatis.annotations.Mapper;
import org.apache.ibatis.annotations.Param;
import com.example.demo.bigupload.model.Chunk;
import com.example.demo.bigupload.model.FileInfo;

@Mapper
public interface BigFileUploadMapper {

	/**
	 * @Description: check whether the chunk already exists
	 */
	public Chunk checkChunk(@Param("identifier")String identifier, @Param("chunkNumber")Integer chunkNumber);

	/**
	 * @Description: query the chunks that were uploaded successfully
	 */
	public List<Chunk> checkSuccessChunks(String identifier);

	/**
	 * @Description: save the chunk record
	 */
	public void saveChunk(Chunk chunk);

	/**
	 * @description: check whether the file already exists in the file store
	 */
	public FileInfo getFileStoryInfo(String identifier);

	/**
	 * @description: add a stored-file record
	 */
	public void addFileInfo(FileInfo fileInfo);
	
}

Entity classes:

Chunk (file chunk)

package com.example.demo.bigupload.model;

import lombok.Data;
import org.springframework.web.multipart.MultipartFile;
 
/**
* @ClassName: Chunk
* @Description: file chunk entity
 */
@Data
public class Chunk {
 
    //number of the current chunk (1 for the first chunk, 2 for the second, ...)
    private Integer chunkNumber;

    //file identifier (MD5 of the whole file)
    private String identifier;

    //configured chunk size
    private Long chunkSize;

    //actual size of the current chunk
    private Long currentChunkSize;

    //total size of the file
    private Long totalSize;

    //total number of chunks
    private Integer totalChunks;

    //file name
    private String filename;

    //relative path of the file
    private String relativePath;

    //file type
    private String type;

    //the uploaded chunk itself
    private MultipartFile file;
 
}

FileInfo (file information)

package com.example.demo.bigupload.model;

import lombok.Data;

/**
* @ClassName: FileInfo
* @Description: file information entity
 */
@Data
public class FileInfo {

    //file name
    private String filename;

    //MD5 of the file
    private String identifier;

    //total size of the file
    private Long totalSize;

    //file type
    private String type;

    //storage location of the file
    private String location;

    //storage identifier of the file
    private String itemId;
	
}

Mapper XML file:

<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<mapper namespace="com.example.demo.bigupload.mapper.BigFileUploadMapper">

	<sql id="Chunk_List">
    	        id,chunkNumber,chunkSize,currentChunkSize,totalSize,totalChunks,identifier,filename,relativePath,type
  	</sql>
  	
  	<sql id="FileInfo_List">
    	        id,filename,identifier,totalSize,type,location,itemId
  	</sql>

	<!-- check whether the chunk already exists -->
	<select id="checkChunk" resultType="Chunk">
		SELECT
		<include refid="Chunk_List" /> 
		FROM chunk
		WHERE chunkNumber = #{chunkNumber} AND identifier = #{identifier}
	</select>
    
	<!-- query the chunks that were uploaded successfully -->
	<select id="checkSuccessChunks" resultType="Chunk">
		SELECT <include refid="Chunk_List" /> 
		FROM chunk 
		WHERE identifier = #{identifier}
	</select>

	<!-- save a chunk record -->
	<insert id="saveChunk" parameterType="Chunk">
		INSERT INTO chunk 
		(chunkNumber,chunkSize,currentChunkSize,totalSize,totalChunks,identifier,filename,relativePath,type)
		VALUES
		(
			#{chunkNumber,jdbcType=INTEGER},
			#{chunkSize,jdbcType=BIGINT},
			#{currentChunkSize,jdbcType=BIGINT},
			#{totalSize,jdbcType=BIGINT},
			#{totalChunks,jdbcType=INTEGER},
			#{identifier,jdbcType=VARCHAR},
			#{filename,jdbcType=VARCHAR},
			#{relativePath,jdbcType=VARCHAR},
			#{type,jdbcType=VARCHAR}
			)
	</insert>
	
	<!-- check whether the file already exists in the file store -->
	<select id="getFileStoryInfo" resultType="FileInfo">
		SELECT 
		<include refid="FileInfo_List" /> 
		FROM fileinfo
		WHERE identifier = #{identifier}
	</select>
    
	<!-- add a stored-file record -->
	<insert id="addFileInfo" parameterType="FileInfo">
    		INSERT INTO fileinfo 
		(filename,identifier,totalSize,type,location,itemId)
		VALUES
		(
			#{filename,jdbcType=VARCHAR},
			#{identifier,jdbcType=VARCHAR},
			#{totalSize,jdbcType=BIGINT},
			#{type,jdbcType=VARCHAR},
			#{location,jdbcType=VARCHAR},
			#{itemId,jdbcType=VARCHAR}
			)
	</insert>
    
</mapper>
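
The mapper assumes a chunk table and a fileinfo table, whose DDL the article does not include. Here is a minimal sketch inferred from the <sql> column lists and the jdbcType hints above; the column types, lengths and indexes are assumptions:

CREATE TABLE chunk (
  id               BIGINT AUTO_INCREMENT PRIMARY KEY,
  chunkNumber      INT,
  chunkSize        BIGINT,
  currentChunkSize BIGINT,
  totalSize        BIGINT,
  totalChunks      INT,
  identifier       VARCHAR(64),
  filename         VARCHAR(255),
  relativePath     VARCHAR(255),
  type             VARCHAR(64),
  KEY idx_chunk_identifier (identifier, chunkNumber)
);

CREATE TABLE fileinfo (
  id         BIGINT AUTO_INCREMENT PRIMARY KEY,
  filename   VARCHAR(255),
  identifier VARCHAR(64),
  totalSize  BIGINT,
  type       VARCHAR(64),
  location   VARCHAR(500),
  itemId     VARCHAR(64),
  KEY idx_fileinfo_identifier (identifier)
);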

application.yml:

server:
  port: 10000 # HTTP port of the server

spring:
  application:
    name: bigFileUpload # application name
  datasource:
    url: jdbc:mysql://localhost:3306/test?useUnicode=true&characterEncoding=UTF-8&useSSL=false&serverTimezone=GMT%2B8
    username: root
    password: root
    driver-class-name: com.mysql.cj.jdbc.Driver # JDBC driver class; by default it is auto-detected from the URL. The 6.x MySQL driver class is com.mysql.cj.jdbc.Driver, the 5.x one is com.mysql.jdbc.Driver
    type: com.alibaba.druid.pool.DruidDataSource # fully qualified name of the connection pool implementation to use
    druid: # connection pool settings
      max-active: 30 # maximum number of connections the pool may hold at the same time
      max-wait: 60000 # timeout for acquiring a connection, in milliseconds
      min-idle: 5 # minimum number of idle connections to keep
      initial-size: 5 # number of connections created when the pool starts
      time-between-eviction-runs-millis: 60000 # interval between idle-connection checks, in milliseconds
      min-evictable-idle-time-millis: 300000 # how long a connection may stay idle in the pool, in milliseconds
      validation-query: SELECT 1 # connection validation query; use SELECT 1 for MySQL, SELECT 1 FROM DUAL for Oracle
      test-while-idle: true # validate connections during idle checks
      test-on-borrow: false # validate connections when they are borrowed
      test-on-return: false # validate connections when the client returns them
      pool-prepared-statements: false # whether to enable PSCache (cursor cache); PSCache greatly improves performance on databases with cursor support such as Oracle, but is best disabled for MySQL
      # max-pool-prepared-statement-per-connection-size: 20
      filters: stat,wall # monitoring/statistics filters; without them the monitoring UI cannot collect SQL statistics, 'wall' is the SQL firewall
      connection-properties: druid.stat.mergeSql=true;druid.stat.slowSqlMillis=500 # enable mergeSql; record slow SQL
      useGlobalDataSourceStat: true # merge the monitoring data of multiple DruidDataSources

mybatis:
  type-aliases-package: com.example.demo.bigupload.model # package MyBatis scans for custom POJO type aliases
  mapper-locations: classpath:mapper/*.xml # where to load mapper.xml files from

file_temp_path: D:/dagdata/temp
file_real_path: D:/dagdata/real
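
One setting worth adding: Spring Boot caps multipart uploads at 1MB per file by default, which is smaller than a typical chunk. A sketch of the relevant keys, assuming Spring Boot 2.x (1.x uses spring.http.multipart.* instead); merge them into the spring section above and size them to your chunk size:

spring:
  servlet:
    multipart:
      max-file-size: 10MB     # per-file (i.e. per-chunk) limit, default 1MB
      max-request-size: 20MB  # limit for the whole multipart request, default 10MB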

Required dependencies:

<dependencies>
	<dependency>
		<groupId>org.springframework.boot</groupId>
		<artifactId>spring-boot-starter-web</artifactId>
	</dependency>
	<dependency>
		<groupId>org.springframework.boot</groupId>
		<artifactId>spring-boot-starter-web-services</artifactId>
	</dependency>
	<dependency>
		<groupId>org.springframework.boot</groupId>
		<artifactId>spring-boot-starter-test</artifactId>
		<scope>test</scope>
	</dependency>
	<dependency>
		<groupId>mysql</groupId>
		<artifactId>mysql-connector-java</artifactId>
		<scope>runtime</scope>
	</dependency>
	<dependency>
		<groupId>org.mybatis.spring.boot</groupId>
		<artifactId>mybatis-spring-boot-starter</artifactId>
		<version>1.3.2</version>
	</dependency>
	<dependency>
		<groupId>org.projectlombok</groupId>
		<artifactId>lombok</artifactId>
		<scope>provided</scope>
	</dependency>
	<dependency>
		<groupId>com.alibaba</groupId>
		<artifactId>druid-spring-boot-starter</artifactId>
		<version>1.1.9</version>
	</dependency>
	<dependency>
		<groupId>commons-lang</groupId>
		<artifactId>commons-lang</artifactId>
		<version>2.6</version>
	</dependency>
	<dependency>
		<groupId>commons-codec</groupId>
		<artifactId>commons-codec</artifactId>
		<version>1.10</version>
	</dependency>
	<dependency>
		<groupId>com.alibaba</groupId>
		<artifactId>fastjson</artifactId>
		<version>1.2.58</version>
	</dependency>
</dependencies>
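
Finally, a minimal, hypothetical client sketch (not part of the original project) that exercises all three steps end to end. It assumes the service runs on localhost:10000 as configured above and reuses the RestTemplate shipped with spring-boot-starter-web plus the commons-codec DigestUtils from the dependency list; the test file path and the 5MB chunk size are placeholders:

import java.io.File;
import java.io.FileInputStream;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import org.apache.commons.codec.digest.DigestUtils;
import org.springframework.core.io.ByteArrayResource;
import org.springframework.util.LinkedMultiValueMap;
import org.springframework.util.MultiValueMap;
import org.springframework.web.client.RestTemplate;

public class UploadClientSketch {

    private static final String BASE = "http://localhost:10000/bigfileupload"; // port from application.yml
    private static final int CHUNK_SIZE = 5 * 1024 * 1024; // 5MB per chunk, an arbitrary choice

    public static void main(String[] args) throws Exception {
        File file = new File("D:/test/big.zip"); // hypothetical test file
        String identifier;
        try (FileInputStream in = new FileInputStream(file)) {
            identifier = DigestUtils.md5Hex(in); // the MD5 identifier every endpoint keys on
        }
        long totalSize = file.length();
        int totalChunks = (int) Math.ceil((double) totalSize / CHUNK_SIZE);
        RestTemplate rest = new RestTemplate();
        byte[] buffer = new byte[CHUNK_SIZE];
        //a real client would first GET /bigfileupload/chunk and skip the chunks
        //already reported in uploaded_chunks; omitted here for brevity
        try (FileInputStream in = new FileInputStream(file)) {
            for (int number = 1; number <= totalChunks; number++) {
                int read = in.read(buffer);
                byte[] chunkBytes = Arrays.copyOf(buffer, read);
                MultiValueMap<String, Object> form = new LinkedMultiValueMap<>();
                //ByteArrayResource needs a file name for the multipart part
                form.add("file", new ByteArrayResource(chunkBytes) {
                    @Override
                    public String getFilename() {
                        return file.getName();
                    }
                });
                form.add("chunkNumber", String.valueOf(number));
                form.add("identifier", identifier);
                form.add("filename", file.getName());
                form.add("currentChunkSize", String.valueOf(chunkBytes.length));
                form.add("chunkSize", String.valueOf(CHUNK_SIZE));
                form.add("totalSize", String.valueOf(totalSize));
                form.add("totalChunks", String.valueOf(totalChunks));
                //upload one chunk
                System.out.println(rest.postForObject(BASE + "/chunk", form, Map.class));
            }
        }
        //every chunk is in place: ask the server to merge them
        Map<String, String> fileInfo = new HashMap<>();
        fileInfo.put("filename", file.getName());
        fileInfo.put("identifier", identifier);
        System.out.println(rest.postForObject(BASE + "/mergeFile", fileInfo, Map.class));
    }
}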

 
