springboot+jpa+vue实现大文件断点续传

1 篇文章 0 订阅
1 篇文章 0 订阅

导入sql,如果里面有数据清除即可

/*
 Navicat Premium Data Transfer

 Source Server         : localhost
 Source Server Type    : MySQL
 Source Server Version : 50723
 Source Host           : localhost:3306
 Source Schema         : test

 Target Server Type    : MySQL
 Target Server Version : 50723
 File Encoding         : 65001

 Date: 05/04/2020 17:43:14
*/

-- Use 4-byte UTF-8 for this session so supplementary characters survive the import.
SET NAMES utf8mb4;
-- Disable FK checks during import so drop/create order between tables does not matter
-- (re-enabled at the end of the script).
SET FOREIGN_KEY_CHECKS = 0;

-- ----------------------------
-- Table structure for chunk
-- ----------------------------
-- One row per uploaded file chunk; `identifier` groups all chunks of one file.
-- NOTE(review): MyISAM provides no transactions or FK enforcement -- presumably the
-- dump tool's choice; confirm InnoDB is not required before production use.
DROP TABLE IF EXISTS `chunk`;
CREATE TABLE `chunk`  (
  `id` bigint(20) NOT NULL,  -- assigned via the hibernate_sequence table, not AUTO_INCREMENT
  `chunk_number` int(11) NOT NULL,  -- 1-based position of this chunk within the file
  `chunk_size` bigint(20) NOT NULL,  -- nominal chunk size in bytes (10 MiB in the sample data)
  `current_chunk_size` bigint(20) NOT NULL,  -- actual byte count of this chunk (last chunk may differ)
  `filename` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
  `identifier` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,  -- groups chunks of one upload
  `relative_path` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
  `total_chunks` int(11) NOT NULL,  -- expected number of chunks for the whole file
  `total_size` bigint(20) NOT NULL,  -- total file size in bytes
  `type` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,  -- MIME type; nullable
  PRIMARY KEY (`id`) USING BTREE
) ENGINE = MyISAM CHARACTER SET = utf8mb4 COLLATE = utf8mb4_general_ci ROW_FORMAT = Dynamic;

-- ----------------------------
-- Records of chunk
-- ----------------------------
-- Sample data: one 318 553 781-byte file split into 30 chunks of 10 MiB
-- (the final chunk, #30, carries the remaining 14 466 741 bytes).
-- Rewritten with an explicit column list (INSERT without one breaks silently when
-- the schema changes) and as a single multi-row statement.
-- NOTE(review): rows id=6 and id=7 both record chunk_number 6 -- presumably a
-- retried upload captured by the dump; harmless since the primary key is `id`.
INSERT INTO `chunk`
  (`id`, `chunk_number`, `chunk_size`, `current_chunk_size`, `filename`, `identifier`, `relative_path`, `total_chunks`, `total_size`, `type`)
VALUES
  (1, 1, 10485760, 10485760, 'f30.mp4', '318553781-f30mp4', 'f30.mp4', 30, 318553781, NULL),
  (2, 2, 10485760, 10485760, 'f30.mp4', '318553781-f30mp4', 'f30.mp4', 30, 318553781, NULL),
  (3, 3, 10485760, 10485760, 'f30.mp4', '318553781-f30mp4', 'f30.mp4', 30, 318553781, NULL),
  (4, 4, 10485760, 10485760, 'f30.mp4', '318553781-f30mp4', 'f30.mp4', 30, 318553781, NULL),
  (5, 5, 10485760, 10485760, 'f30.mp4', '318553781-f30mp4', 'f30.mp4', 30, 318553781, NULL),
  (6, 6, 10485760, 10485760, 'f30.mp4', '318553781-f30mp4', 'f30.mp4', 30, 318553781, NULL),
  (7, 6, 10485760, 10485760, 'f30.mp4', '318553781-f30mp4', 'f30.mp4', 30, 318553781, NULL),
  (8, 7, 10485760, 10485760, 'f30.mp4', '318553781-f30mp4', 'f30.mp4', 30, 318553781, NULL),
  (9, 8, 10485760, 10485760, 'f30.mp4', '318553781-f30mp4', 'f30.mp4', 30, 318553781, NULL),
  (10, 9, 10485760, 10485760, 'f30.mp4', '318553781-f30mp4', 'f30.mp4', 30, 318553781, NULL),
  (11, 10, 10485760, 10485760, 'f30.mp4', '318553781-f30mp4', 'f30.mp4', 30, 318553781, NULL),
  (12, 11, 10485760, 10485760, 'f30.mp4', '318553781-f30mp4', 'f30.mp4', 30, 318553781, NULL),
  (13, 12, 10485760, 10485760, 'f30.mp4', '318553781-f30mp4', 'f30.mp4', 30, 318553781, NULL),
  (14, 13, 10485760, 10485760, 'f30.mp4', '318553781-f30mp4', 'f30.mp4', 30, 318553781, NULL),
  (15, 14, 10485760, 10485760, 'f30.mp4', '318553781-f30mp4', 'f30.mp4', 30, 318553781, NULL),
  (16, 15, 10485760, 10485760, 'f30.mp4', '318553781-f30mp4', 'f30.mp4', 30, 318553781, NULL),
  (17, 16, 10485760, 10485760, 'f30.mp4', '318553781-f30mp4', 'f30.mp4', 30, 318553781, NULL),
  (18, 17, 10485760, 10485760, 'f30.mp4', '318553781-f30mp4', 'f30.mp4', 30, 318553781, NULL),
  (19, 18, 10485760, 10485760, 'f30.mp4', '318553781-f30mp4', 'f30.mp4', 30, 318553781, NULL),
  (20, 19, 10485760, 10485760, 'f30.mp4', '318553781-f30mp4', 'f30.mp4', 30, 318553781, NULL),
  (21, 20, 10485760, 10485760, 'f30.mp4', '318553781-f30mp4', 'f30.mp4', 30, 318553781, NULL),
  (22, 21, 10485760, 10485760, 'f30.mp4', '318553781-f30mp4', 'f30.mp4', 30, 318553781, NULL),
  (23, 22, 10485760, 10485760, 'f30.mp4', '318553781-f30mp4', 'f30.mp4', 30, 318553781, NULL),
  (24, 23, 10485760, 10485760, 'f30.mp4', '318553781-f30mp4', 'f30.mp4', 30, 318553781, NULL),
  (25, 24, 10485760, 10485760, 'f30.mp4', '318553781-f30mp4', 'f30.mp4', 30, 318553781, NULL),
  (26, 25, 10485760, 10485760, 'f30.mp4', '318553781-f30mp4', 'f30.mp4', 30, 318553781, NULL),
  (27, 26, 10485760, 10485760, 'f30.mp4', '318553781-f30mp4', 'f30.mp4', 30, 318553781, NULL),
  (28, 27, 10485760, 10485760, 'f30.mp4', '318553781-f30mp4', 'f30.mp4', 30, 318553781, NULL),
  (29, 28, 10485760, 10485760, 'f30.mp4', '318553781-f30mp4', 'f30.mp4', 30, 318553781, NULL),
  (30, 29, 10485760, 10485760, 'f30.mp4', '318553781-f30mp4', 'f30.mp4', 30, 318553781, NULL),
  (31, 30, 10485760, 14466741, 'f30.mp4', '318553781-f30mp4', 'f30.mp4', 30, 318553781, NULL);

-- ----------------------------
-- Table structure for file_info
-- ----------------------------
-- Metadata for a fully merged file; written by the /uploader/mergeFile endpoint.
DROP TABLE IF EXISTS `file_info`;
CREATE TABLE `file_info`  (
  `id` bigint(20) NOT NULL,  -- assigned via the hibernate_sequence table, not AUTO_INCREMENT
  `filename` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
  `identifier` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,  -- same identifier used by the chunk rows
  `location` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,  -- absolute path of the merged file on disk
  `total_size` bigint(20) NOT NULL,  -- file size in bytes
  `type` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,  -- MIME type, e.g. video/mp4
  PRIMARY KEY (`id`) USING BTREE
) ENGINE = MyISAM CHARACTER SET = utf8mb4 COLLATE = utf8mb4_general_ci ROW_FORMAT = Dynamic;

-- ----------------------------
-- Records of file_info
-- ----------------------------
-- Explicit column list so the row keeps loading if the table gains columns.
INSERT INTO `file_info` (`id`, `filename`, `identifier`, `location`, `total_size`, `type`) VALUES (32, 'f30.mp4', '318553781-f30mp4', 'D:/upload/318553781-f30mp4/f30.mp4', 318553781, 'video/mp4');

-- ----------------------------
-- Table structure for hibernate_sequence
-- ----------------------------
-- Backing table for Hibernate's table-based ID generator (@GeneratedValue on the
-- entities); it is expected to hold a single row with the next available ID.
DROP TABLE IF EXISTS `hibernate_sequence`;
CREATE TABLE `hibernate_sequence`  (
  `next_val` bigint(20) NULL DEFAULT NULL
) ENGINE = MyISAM CHARACTER SET = utf8mb4 COLLATE = utf8mb4_general_ci ROW_FORMAT = Fixed;

-- ----------------------------
-- Records of hibernate_sequence
-- ----------------------------
-- Hibernate's table-based ID generator expects exactly ONE row in this table.
-- The original dump inserted the value twice; with two rows the generator reads an
-- arbitrary one and can hand out duplicate primary keys. Keep a single row, with an
-- explicit column list.
INSERT INTO `hibernate_sequence` (`next_val`) VALUES (33);

SET FOREIGN_KEY_CHECKS = 1;

创建springboot项目,并导入依赖

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <!-- Spring Boot 2.0.3 parent manages the versions of the starters below. -->
    <parent>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-parent</artifactId>
        <version>2.0.3.RELEASE</version>
        <relativePath/> <!-- lookup parent from repository -->
    </parent>
    <groupId>com.example</groupId>
    <artifactId>boot-upload</artifactId>
    <version>0.0.1-SNAPSHOT</version>
    <name>boot-upload</name>
    <description>Demo project for Spring Boot</description>

    <properties>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
        <project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
        <java.version>1.8</java.version>
        <!-- NOTE(review): Lombok 1.16.20 is quite old; presumably pinned for the
             2018-era toolchain -- confirm before upgrading the JDK. -->
        <lombok.version>1.16.20</lombok.version>
    </properties>

    <dependencies>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter</artifactId>
        </dependency>

        <!-- Embedded Tomcat + Spring MVC: serves the /uploader endpoints. -->
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-web</artifactId>
        </dependency>

        <!-- Spring Data JPA / Hibernate: chunk and file_info persistence. -->
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-data-jpa</artifactId>
        </dependency>

        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-test</artifactId>
            <scope>test</scope>
        </dependency>

        <!-- JDBC driver matching the com.mysql.jdbc.Driver class in application.yml. -->
        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
        </dependency>

        <!-- Provides @Data / @Slf4j used throughout the source. -->
        <dependency>
            <groupId>org.projectlombok</groupId>
            <artifactId>lombok</artifactId>
            <version>${lombok.version}</version>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <groupId>org.springframework.boot</groupId>
                <artifactId>spring-boot-maven-plugin</artifactId>
            </plugin>
        </plugins>
    </build>

</project>

创建实体类

/**
 * One uploaded chunk of a large file. Fields are bound automatically from the
 * multipart POST parameters sent by the vue-uploader client.
 */
@Data
@Entity
@Table(name = "chunk")
public class Chunk {
    // Primary key; presumably drawn from the shared hibernate_sequence table
    // (default @GeneratedValue strategy) -- confirm against the Hibernate config.
    @Id
    @GeneratedValue
    private Long id;
    /**
     * Current chunk number, starting from 1.
     */
    @Column(nullable = false)
    private Integer chunkNumber;
    /**
     * Nominal chunk size in bytes.
     */
    @Column(nullable = false)
    private Long chunkSize;
    /**
     * Actual byte count of this chunk (the last chunk may differ from chunkSize).
     */
    @Column(nullable = false)
    private Long currentChunkSize;
    /**
     * Total size of the whole file in bytes.
     */
    @Column(nullable = false)
    private Long totalSize;
    /**
     * File identifier; groups every chunk belonging to one file.
     */
    @Column(nullable = false)
    private String identifier;
    /**
     * Original file name.
     */
    @Column(nullable = false)
    private String filename;
    /**
     * Relative path reported by the client.
     */
    @Column(nullable = false)
    private String relativePath;
    /**
     * Total number of chunks expected for the file.
     */
    @Column(nullable = false)
    private Integer totalChunks;
    /**
     * File MIME type; nullable.
     */
    @Column
    private String type;

    // The chunk payload itself; @Transient keeps it out of the database mapping.
    @Transient
    private MultipartFile file;
}
package com.example.bootupload.model;

import lombok.Data;

import javax.persistence.*;
import java.io.Serializable;

/**
 * Metadata for a fully uploaded (merged) file; one row is written per completed
 * upload by the /uploader/mergeFile endpoint.
 *
 * @author 李云江
 * @date 2020-04-05 17:02
 */
@Data
@Entity
@Table(name = "file_info")
public class FileInfo implements Serializable {
    // Primary key; value comes from the shared hibernate_sequence table.
    @Id
    @GeneratedValue
    private Long id;

    // Original file name.
    @Column(nullable = false)
    private String filename;

    // Same identifier used by this file's chunk rows.
    @Column(nullable = false)
    private String identifier;

    // File size in bytes.
    @Column(nullable = false)
    private Long totalSize;

    // MIME type, e.g. video/mp4.
    @Column(nullable = false)
    private String type;

    // Absolute path of the merged file on disk.
    @Column(nullable = false)
    private String location;
}

定义dao接口和业务层代码
/** Spring Data JPA repository for FileInfo rows; standard CRUD only. */
public interface FileInfoRepository extends JpaRepository<FileInfo,Long> {
}
/**
 * Spring Data JPA repository for Chunk rows. JpaSpecificationExecutor enables the
 * Specification-based lookup used by ChunkServiceImpl.checkChunk.
 */
public interface ChunkRepository extends JpaRepository<Chunk, Long>, JpaSpecificationExecutor<Chunk> {
}
public interface ChunkService {
    /**
     * Persist one uploaded file chunk.
     *
     * @param chunk the chunk record to save
     */
    void saveChunk(Chunk chunk);

    /**
     * Check whether a chunk still needs uploading.
     *
     * @param identifier  file identifier shared by all chunks of one file
     * @param chunkNumber 1-based chunk index
     * @return true when NO matching chunk row exists yet (i.e. the chunk is
     *         missing and must be uploaded) -- note the inverted semantics
     */
    boolean checkChunk(String identifier, Integer chunkNumber);
}
public interface FileInfoService {

    // Persists metadata for a fully merged file and returns the saved entity.
    FileInfo addFileInfo(FileInfo fileInfo);

}
/** Default ChunkService backed by the Spring Data JPA repository. */
@Service
public class ChunkServiceImpl implements ChunkService {
    @Resource
    private ChunkRepository chunkRepository;

    /** Persists one uploaded chunk record. */
    @Override
    public void saveChunk(Chunk chunk) {
        chunkRepository.save(chunk);
    }

    /**
     * Returns {@code true} when NO chunk row matches the given identifier and
     * chunk number -- i.e. the chunk is missing and must be uploaded. Callers
     * (UploadController.checkChunk) rely on this inverted semantics.
     */
    @Override
    public boolean checkChunk(String identifier, Integer chunkNumber) {
        // Combine the two equality predicates with CriteriaBuilder.and(...).
        // The original built a List<Predicate> and then called
        // criteriaQuery.where(...).getRestriction(), which mutates the query
        // object as a side effect just to obtain a Predicate.
        Specification<Chunk> specification = (root, criteriaQuery, criteriaBuilder) ->
                criteriaBuilder.and(
                        criteriaBuilder.equal(root.get("identifier"), identifier),
                        criteriaBuilder.equal(root.get("chunkNumber"), chunkNumber));

        // findOne returns Optional<Chunk> in Spring Data JPA 2.x.
        return !chunkRepository.findOne(specification).isPresent();
    }
}
/** Default FileInfoService: thin delegation to the JPA repository. */
@Service
public class FileInfoServiceImpl implements FileInfoService {
    @Resource
    private FileInfoRepository fileInfoRepository;

    @Override
    public FileInfo addFileInfo(FileInfo fileInfo) {
        return fileInfoRepository.save(fileInfo);
    }
}
控制层controller
@RestController
@RequestMapping("/uploader")
@Slf4j
public class UploadController {
    @Value("${prop.upload-folder}")
    private String uploadFolder;
    @Resource
    private FileInfoService fileInfoService;
    @Resource
    private ChunkService chunkService;

    @PostMapping("/chunk")
    public String uploadChunk(Chunk chunk) {
        MultipartFile file = chunk.getFile();
        log.debug("file originName: {}, chunkNumber: {}", file.getOriginalFilename(), chunk.getChunkNumber());
        try {
            byte[] bytes = file.getBytes();
            Path path = Paths.get(generatePath(uploadFolder, chunk));
            //文件写入指定路径
            Files.write(path, bytes);
            log.debug("文件 {} 写入成功, uuid:{}", chunk.getFilename(), chunk.getIdentifier());
            chunkService.saveChunk(chunk);

            return "文件上传成功";
        } catch (IOException e) {
            e.printStackTrace();
            return "后端异常...";
        }
    }

    @GetMapping("/chunk")
    public Object checkChunk(Chunk chunk, HttpServletResponse response) {
        if (chunkService.checkChunk(chunk.getIdentifier(), chunk.getChunkNumber())) {
            response.setStatus(HttpServletResponse.SC_NOT_MODIFIED);
        }

        return chunk;
    }

    @PostMapping("/mergeFile")
    public String mergeFile(FileInfo fileInfo) {
        String filename = fileInfo.getFilename();
        String file = uploadFolder + "/" + fileInfo.getIdentifier() + "/" + filename;
        String folder = uploadFolder + "/" + fileInfo.getIdentifier();
        merge(file, folder, filename);
        fileInfo.setLocation(file);
        fileInfoService.addFileInfo(fileInfo);

        return "合并成功";
    }
}
FileUtils工具类
/** Path generation and chunk-merging helpers for the upload endpoints. */
@Slf4j
public class FileUtils {
    /**
     * Builds the on-disk path for one chunk:
     * &lt;uploadFolder&gt;/&lt;identifier&gt;/&lt;filename&gt;-&lt;chunkNumber&gt;,
     * creating the &lt;uploadFolder&gt;/&lt;identifier&gt; directory if needed.
     */
    public static String generatePath(String uploadFolder, Chunk chunk) {
        StringBuilder sb = new StringBuilder();
        sb.append(uploadFolder).append("/").append(chunk.getIdentifier());
        // Files.isWritable is false for a missing path, so this doubles as an
        // existence check on the per-upload directory.
        if (!Files.isWritable(Paths.get(sb.toString()))) {
            log.info("path not exist,create path: {}", sb.toString());
            try {
                Files.createDirectories(Paths.get(sb.toString()));
            } catch (IOException e) {
                log.error(e.getMessage(), e);
            }
        }

        return sb.append("/")
                .append(chunk.getFilename())
                .append("-")
                .append(chunk.getChunkNumber()).toString();
    }

    /**
     * Concatenates every chunk file in {@code folder} (named
     * "&lt;filename&gt;-&lt;n&gt;") into {@code targetFile} in ascending chunk
     * order, deleting each chunk after it has been appended.
     * NOTE(review): Files.createFile throws if targetFile already exists, so a
     * repeated merge for the same file fails early rather than appending twice.
     */
    public static void merge(String targetFile, String folder, String filename) {
        try {
            Files.createFile(Paths.get(targetFile));
            // Files.list returns a Stream backed by an open directory handle; the
            // original never closed it, leaking the handle. The Files.list javadoc
            // requires try-with-resources. (Fully qualified to avoid a new import.)
            try (java.util.stream.Stream<Path> chunks = Files.list(Paths.get(folder))) {
                chunks
                        // Skip the freshly created target file itself.
                        .filter(path -> !path.getFileName().toString().equals(filename))
                        // Sort ascending by the numeric suffix after the last '-'.
                        // The original kept the '-' in the substring (parsing
                        // NEGATIVE numbers) and then compared p2 to p1; the two
                        // inversions cancelled out, so it sorted correctly only by
                        // accident. Parse past the '-' and compare naturally.
                        .sorted((o1, o2) -> {
                            String n1 = o1.getFileName().toString();
                            String n2 = o2.getFileName().toString();
                            int c1 = Integer.parseInt(n1.substring(n1.lastIndexOf("-") + 1));
                            int c2 = Integer.parseInt(n2.substring(n2.lastIndexOf("-") + 1));
                            return Integer.compare(c1, c2);
                        })
                        .forEach(path -> {
                            try {
                                // Append this chunk's bytes to the target file.
                                Files.write(Paths.get(targetFile), Files.readAllBytes(path), StandardOpenOption.APPEND);
                                // Remove the chunk once merged.
                                Files.delete(path);
                            } catch (IOException e) {
                                log.error(e.getMessage(), e);
                            }
                        });
            }
        } catch (IOException e) {
            log.error(e.getMessage(), e);
        }
    }
}

配置文件application.yml
server:
  port: 8081
  servlet:
    path: /boot
  # 5 hours in milliseconds -- generous so a long chunked upload is not cut off.
  connection-timeout: 18000000

spring:
  servlet:
    multipart:
      # Must exceed the client's 10 MB chunkSize; the last chunk can approach
      # twice chunkSize, hence 20MB.
      max-file-size: 20MB
      max-request-size: 20MB
  datasource:
    url: jdbc:mysql://localhost:3306/test?characterEncoding=utf-8&useSSL=false
    username: root
    password: 123456
    driver-class-name: com.mysql.jdbc.Driver
  jpa:
    properties:
      hibernate:
        hbm2ddl:
          # NOTE(review): create-drop rebuilds the schema on startup and DROPS it
          # on shutdown, wiping the imported SQL data; consider `update` if data
          # should survive restarts.
          auto: create-drop
    show-sql: false

logging:
  level:
    # Fixed: the original targeted `org.boot.uploader.*`, but this project's base
    # package is com.example.bootupload (see the entity classes), so the debug
    # level never applied to any of its loggers.
    com.example.bootupload: debug
# File storage root directory
prop:
  upload-folder: D:/upload

至此服务端基本完成,前端则借鉴 GitHub 上的 vue-uploader 项目,直接在其示例上修改而成
####### app.vue

<template>
  <!-- vue-uploader component; ref exposed so the raw uploader can be reached from the console. -->
  <uploader :options="options" :file-status-text="statusText" class="uploader-example" ref="uploader"
            @file-complete="fileComplete" @complete="complete"></uploader>
</template>

<script>
  import axios from 'axios'
  import qs from 'qs'

  export default {
    data() {
      return {
        options: {
          // Proxied by the dev server to the Spring Boot backend (see index.js proxyTable).
          target: '/boot/uploader/chunk',
          // Probe each chunk with a GET before uploading (resume / instant-upload support).
          testChunks: true,
          simultaneousUploads: 1,
          // 10 MiB per chunk; must stay below the server's 20MB multipart limit.
          chunkSize: 10 * 1024 * 1024
        },
        attrs: {
          accept: 'image/*'
        },
        statusText: {
          success: '成功了',
          error: '出错了',
          uploading: '上传中',
          paused: '暂停中',
          waiting: '等待中'
        }
      }
    },
    methods: {
      // Fired when the whole upload batch completes.
      complete() {
        console.log('complete', arguments)
      },
      // Fired when one root file (or folder) finishes uploading successfully;
      // asks the backend to merge its chunks into the final file.
      fileComplete() {
        console.log('file complete', arguments)
        const file = arguments[0].file;
        axios.post('/boot/uploader/mergeFile', qs.stringify({
          filename: file.name,
          identifier: arguments[0].uniqueIdentifier,
          totalSize: file.size,
          type: file.type
        })).then(function (response) {
          console.log(response);
        }).catch(function (error) {
          console.log(error);
        });
      }
    },
    mounted() {
      this.$nextTick(() => {
        // Expose the uploader instance globally for debugging.
        window.uploader = this.$refs.uploader.uploader
      })
    }
  }
</script>

配置说明:

  1. target 目标上传 URL,可以是字符串也可以是函数,如果是函数的话,则会传入 Uploader.File 实例、当前块 Uploader.Chunk 以及是否是测试模式,默认值为 ‘/’。
  2. chunkSize 分块时按照该值来分。最后一个上传块的大小可能大于等于 1 倍该值但小于 2 倍该值,默认 1*1024*1024。
  3. testChunks 是否测试每个块是否在服务端已经上传了,主要用来实现秒传、跨浏览器上传等,默认 true。
  4. simultaneousUploads 并发上传数,默认 3。
跨域问题

这里使用了http-proxy-middleware这个node中间件,可以对前端的请求进行转发,转发到指定的路由。
在index.js中进行配置,如下:

dev: {
    env: require('./dev.env'),
    port: 8080,
    autoOpenBrowser: true,
    assetsSubDirectory: '',
    assetsPublicPath: '/',
    proxyTable: {
      // Forward every /boot request to the Spring Boot backend on 8081.
      '/boot': {
        target: 'http://localhost:8081',
        changeOrigin: true  // required when the frontend and backend origins differ (cross-origin)
      }
    },
    // CSS Sourcemaps off by default because relative paths are "buggy"
    // with this option, according to the CSS-Loader README
    // (https://github.com/webpack/css-loader#sourcemaps)
    // In our experience, they generally work as expected,
    // just be aware of this issue when enabling this option.
    cssSourceMap: false
  }

npm install
npm run dev
启动项目就可以了

  • 0
    点赞
  • 4
    收藏
    觉得还不错? 一键收藏
  • 4
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 4
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值