Resumable, Chunked Upload in Java — Source Code (AmazonS3 Edition)

1. Controller layer

@Api(tags = "Chunked file upload and download")
@RestController
@RequestMapping("/upload")
@Slf4j
public class UploadPartController {

    @Autowired
    private UploadPartService uploadPartService;

    @ApiOperation(value = "开启分片上传")
    @PostMapping("/initiateMultipartUpload")
    @GlobalApiResponses
    public Response initiateMultipartUpload( String filename, Integer type){
        return Response.withData(uploadPartService.initiateMultipartUpload(filename,type));
    }

    @ApiOperation(value = "分片上传")
    @PutMapping("/uploadPart")
    @GlobalApiResponses
    public Response uploadPart(@RequestParam("file") MultipartFile multipartFile, FileUploadPart fileUploadPart) throws IOException {
        Integer partResult = uploadPartService.uploadPart(multipartFile,fileUploadPart);
        return Response.withData(partResult);
    }

    @ApiOperation(value = "终止分片上传")
    @PostMapping("/abortUpload")
    @GlobalApiResponses
    public Response abortUpload( FileUploadPart fileUploadPart) throws IOException {
        uploadPartService.abortUpload(fileUploadPart);
        return Response.withData("终止成功");
    }

    @ApiOperation(value = "完成分片上传")
    @PostMapping("/completeUpload")
    @GlobalApiResponses
    public Response completeUpload( FileUploadPart fileUploadPart){
        return Response.withData(uploadPartService.completeUpload(fileUploadPart));
    }

    @ApiOperation(value = "列出正在进行的分段上传")
    @GetMapping("/listMultipartUploads")
    @GlobalApiResponses
    public Response listMultipartUploads() {
        MultipartUploadListing multipartUploadListing = uploadPartService.listMultipartUploads();
        return Response.withData(multipartUploadListing);
    }

@ApiOperation(value = "列出已经上传完成的分段")
    @GetMapping("/listParts")
    @GlobalApiResponses
    public Response listParts(String filename,String fileId){
        List<Integer> uploadSuccessIndex = uploadPartService.listParts(filename, fileId);
        return Response.withData(uploadSuccessIndex);
    }

    @ApiOperation(value = "删除文件")
    @DeleteMapping("/deletePartUploads")
    @GlobalApiResponses
    public Response deletePartUploads(String filename) {
        uploadPartService.deletePartUploads(filename);
        MultipartUploadListing multipartUploadListing = uploadPartService.listMultipartUploads();
        return Response.withData(multipartUploadListing);
    }

    @ApiOperation(value = "下载文件")
    @GetMapping("/download")
    @GlobalApiResponses
    public Response download(String filename) {
        return Response.withData(uploadPartService.download(filename));
    }
}

2. Service layer (interface)

public interface UploadPartService {
    FileInitiateDTO initiateMultipartUpload(String filename, Integer type);

    Integer uploadPart(MultipartFile multipartFile, FileUploadPart fileUploadPart);

    void abortUpload(FileUploadPart fileUploadPart);

    CompleteMultipartUploadResult completeUpload(FileUploadPart fileUploadPart);

    MultipartUploadListing listMultipartUploads();

    void deletePartUploads(String filename);

    List<Integer> listParts(String filename, String fileId);

    String download(String filename);

    Object delete(String filename);
}

3. ServiceImpl (implementation) layer

@Service
public class UploadPartServiceImpl implements UploadPartService {

    @Autowired
    private RedisTemplate<String,Object> redisTemplate;

    @Autowired
    private UserUtil userUtil;

    private final Lock lock = new ReentrantLock();

    @Value("${ceph.bucketName}")
    private String bucketName;

    // GetConnection is statically imported from CephOriginUtil (section 4)
    private static AmazonS3 amazonS3 = GetConnection();

    @Override
    public FileInitiateDTO initiateMultipartUpload(String filename, Integer type) {
        // Prefix the object key with category, user id and timestamp to keep it unique
        filename = FileCategory.getFileCategory(type).toString() + "-" + userUtil.getCurrentJWTUser().getId()
                + "-" + System.currentTimeMillis() + "-" + filename;
        InitiateMultipartUploadRequest initiateMultipartUploadRequest = new InitiateMultipartUploadRequest(bucketName, filename);
        InitiateMultipartUploadResult initiateMultipartUploadResult = amazonS3.initiateMultipartUpload(initiateMultipartUploadRequest);
        return new FileInitiateDTO(initiateMultipartUploadResult.getUploadId(), filename);
    }
    }

    @Override
    public Integer uploadPart(MultipartFile multipartFile, FileUploadPart fileUploadPart) {
        Integer uploadSuccessNumber;
        byte[] md5s;
        InputStream inputStream;
        try {
            // MD5 of the chunk lets S3 verify the part's integrity on arrival
            md5s = MessageDigest.getInstance("MD5").digest(multipartFile.getBytes());
            inputStream = multipartFile.getInputStream();
        } catch (NoSuchAlgorithmException | IOException e) {
            throw new BusinessException("Failed to compute the MD5 digest for the upload part");
        }
        UploadPartRequest uploadPartRequest = new UploadPartRequest()
                .withBucketName(bucketName)
                .withUploadId(fileUploadPart.getFileId())
                .withKey(fileUploadPart.getFilename())
                .withMD5Digest(Base64.encodeAsString(md5s))
                .withPartNumber(fileUploadPart.getIndex())
                .withPartSize(multipartFile.getSize())
                .withInputStream(inputStream);
        UploadPartResult uploadPartResult = amazonS3.uploadPart(uploadPartRequest);
        lock.lock();
        try {
            // Record this part's index in a comma-separated list keyed by the upload id
            if (redisTemplate.opsForValue().get(fileUploadPart.getFileId()) == null) {
                redisTemplate.opsForValue().set(fileUploadPart.getFileId(), fileUploadPart.getIndex());
            } else {
                String uploadSuccessIndex = redisTemplate.opsForValue().get(fileUploadPart.getFileId()).toString();
                // Compare whole tokens so that, e.g., index "1" does not match "11"
                if (!Arrays.asList(uploadSuccessIndex.split(",")).contains(fileUploadPart.getIndex().toString())) {
                    uploadSuccessIndex += "," + fileUploadPart.getIndex();
                    redisTemplate.opsForValue().set(fileUploadPart.getFileId(), uploadSuccessIndex);
                }
            }
            // Cache this part's ETag; completeUpload rebuilds the PartETag list from these keys
            redisTemplate.opsForValue().set(fileUploadPart.getFilename() + fileUploadPart.getIndex(), uploadPartResult.getETag());
            uploadSuccessNumber = redisTemplate.opsForValue().get(fileUploadPart.getFileId()).toString().split(",").length;
        } finally {
            lock.unlock();
        }
        return uploadSuccessNumber;
    }

    @Override
    public void abortUpload(FileUploadPart fileUploadPart) {
        AbortMultipartUploadRequest uploadRequest = new AbortMultipartUploadRequest(bucketName, fileUploadPart.getFilename(), fileUploadPart.getFileId());
        amazonS3.abortMultipartUpload(uploadRequest);
        // Drop the progress bookkeeping for this upload
        redisTemplate.delete(fileUploadPart.getFileId());
        redisTemplate.delete(fileUploadPart.getFilename());
    }

    @Override
    public CompleteMultipartUploadResult completeUpload(FileUploadPart fileUploadPart) {
//        List<PartETag> partETags = (List<PartETag>) map.get(fileUploadPart.getFileId());
       
        String[] uploadSuccessIndexs = redisTemplate.opsForValue().get(fileUploadPart.getFileId()).toString().split(",");
        List<PartETag> partETags = Arrays.stream(uploadSuccessIndexs).map(s -> {
            String eTag = redisTemplate.opsForValue().get(fileUploadPart.getFilename() + s).toString();
            redisTemplate.delete(fileUploadPart.getFilename()+s);
            return new PartETag(Integer.parseInt(s),eTag);
        }).collect(Collectors.toList());
        while(true) {
            if (uploadSuccessIndexs.length!=0 && uploadSuccessIndexs.length == fileUploadPart.getPartTotal()) {
                break;
            }
            try {
                Thread.sleep(100);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
            //            partETags = (List<PartETag>) map.get(fileUploadPart.getFileId());
            partETags = Arrays.stream(uploadSuccessIndexs).map(s -> {
                String eTag = redisTemplate.opsForValue().get(fileUploadPart.getFilename() + s).toString();
                redisTemplate.delete(fileUploadPart.getFilename()+s);
                return new PartETag(Integer.parseInt(s),eTag);
            }).collect(Collectors.toList());
        }
        CompleteMultipartUploadRequest completeMultipartUploadRequest = new CompleteMultipartUploadRequest(bucketName, fileUploadPart.getFilename(), fileUploadPart.getFileId(), partETags);
        CompleteMultipartUploadResult completeMultipartUploadResult = amazonS3.completeMultipartUpload(completeMultipartUploadRequest);
        redisTemplate.delete(fileUploadPart.getFileId());
        redisTemplate.delete(fileUploadPart.getFilename());
        return completeMultipartUploadResult;
    }

    @Override
    public MultipartUploadListing listMultipartUploads() {
        ListMultipartUploadsRequest listMultipartUploadsRequest = new ListMultipartUploadsRequest(bucketName);
        return amazonS3.listMultipartUploads(listMultipartUploadsRequest);
    }

    @Override
    public void deletePartUploads(String filename) {
        amazonS3.deleteObject(bucketName,filename);
    }

    @Override
    public List<Integer> listParts(String filename, String fileId) {
        ListPartsRequest listPartsRequest = new ListPartsRequest(bucketName, filename, fileId);
        PartListing partListing = amazonS3.listParts(listPartsRequest);
        // Part numbers already stored on S3; the client skips these when resuming
        return partListing.getParts().stream().map(PartSummary::getPartNumber).collect(Collectors.toList());
    }

    @Override
    public String download(String filename) {
        // GenerateFileURL is statically imported from CephOriginUtil; returns a presigned URL
        return GenerateFileURL(amazonS3, bucketName, filename);
    }

    @Override
    public Object delete(String filename) {
        return redisTemplate.delete(filename);
    }
}
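
A design note on the progress bookkeeping: the ReentrantLock above only serializes concurrent part uploads within a single JVM, and the comma-separated index string needs that lock to avoid lost updates. If the service ever runs as multiple instances, a Redis Set gives the same bookkeeping atomically on the server side. A hypothetical sketch of what uploadPart's critical section could collapse to (method and key names are mine, not from the original):

    // Hypothetical replacement for the locked section of uploadPart:
    // SADD/SCARD are atomic in Redis, so no application-level lock is needed,
    // and duplicate indices are deduplicated by the set itself.
    private Integer recordPart(String fileId, Integer index, String etagKey, String etag) {
        redisTemplate.opsForSet().add(fileId, index);               // record this part's index
        redisTemplate.opsForValue().set(etagKey, etag);             // cache the part's ETag
        return redisTemplate.opsForSet().size(fileId).intValue();   // parts uploaded so far
    }

completeUpload would then read the indices back with opsForSet().members(fileId) instead of splitting a string.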

4. Utility class

@Slf4j
@Component
public class CephOriginUtil {
    private static String accessKey = "your accessKey";
    private static String secretKey = "your secretKey";
    private static String endpoint = "your endpoint";

    @Value("${ceph.accessKey}")
    public void setAccessKey(String accessKey) {
        CephOriginUtil.accessKey = accessKey;
    }

    @Value("${ceph.secretKey}")
    public void setSecretKey(String secretKey) {
        CephOriginUtil.secretKey = secretKey;
    }

    @Value("${ceph.endpoint}")
    public void setEndpoint(String endpoint) {
        CephOriginUtil.endpoint = endpoint;
    }

    public static AmazonS3 GetConnection(String accessKey, String secretKey, String endpoint) {
        AWSCredentials credentials = new BasicAWSCredentials(accessKey, secretKey);
        AmazonS3 conn = new AmazonS3Client(credentials);
        conn.setEndpoint(endpoint);
        return conn;
    }

    public static AmazonS3 GetConnection(){
        return GetConnection(accessKey, secretKey, endpoint);
    }


    /**
     * Generate a presigned download URL for an object.
     * @param conn the S3 connection
     */
    public static String GenerateFileURL(AmazonS3 conn, String bucketName, String fileName) {
        GeneratePresignedUrlRequest request = new GeneratePresignedUrlRequest(bucketName, fileName);
        return conn.generatePresignedUrl(request).toString();
    }

    /**
     * Delete an object.
     * @param conn the S3 connection
     * @param bucketName the bucket holding the object
     */
    public static void DeleteFile(AmazonS3 conn, String bucketName, String fileName) {
        conn.deleteObject(bucketName, fileName);
    }
}

Entity class (properties a given method does not use can simply be omitted from the request)

@Data
@AllArgsConstructor
@NoArgsConstructor
public class FileUploadPart {
    @ApiModelProperty(value = "id of the upload this chunk belongs to (the multipart uploadId)")
    private String fileId;
    @ApiModelProperty(value = "file name, with extension")
    private String filename;
    @ApiModelProperty(value = "chunk index")
    private Integer index;
    @ApiModelProperty(value = "total number of chunks")
    private Integer partTotal;
}
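
The FileInitiateDTO returned by initiateMultipartUpload and the FileCategory helper used when building the object key are referenced above but not shown in the original. Minimal sketches consistent with how they are used (field names and category values are assumptions):

@Data
@AllArgsConstructor
@NoArgsConstructor
public class FileInitiateDTO {
    @ApiModelProperty(value = "uploadId returned by S3; sent back later as fileId")
    private String fileId;
    @ApiModelProperty(value = "server-generated object key; must be used as filename in later calls")
    private String filename;
}

// Hypothetical category enum; the real mapping of the 'type' code is project-specific
public enum FileCategory {
    IMAGE, VIDEO, DOCUMENT, OTHER;

    public static FileCategory getFileCategory(Integer type) {
        return values()[type];  // assumption: type is a 0-based code from the client
    }
}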

Configuration file (this is the company's configuration, so it has to be written this way)

ceph:
  accessKey: your accessKey
  secretKey: your secretKey
  endpoint: the storage host IP and port (http://ip:port)
  bucketName: your bucketName
  		
5. How resumable upload on Amazon S3 works

Amazon S3 (Amazon Simple Storage Service) is Amazon's elastic, scalable cloud storage service. It offers a simple way to store and retrieve any amount of data, and to improve the upload experience it supports resumable uploads.

Resumable upload means that when a file transfer is interrupted or fails partway through, it can continue from the point of interruption rather than starting over. This matters most for large files and unstable networks: with a traditional single-request upload, any interruption forces the whole file to be re-uploaded.

S3 implements resumability by splitting the file into smaller pieces called parts. Each part is typically 5-10 MB (S3 requires at least 5 MB per part, except the last one). The client first splits the file into parts, then transfers each part individually. When a transfer is interrupted, the indices of the parts that were uploaded successfully are recorded (in this implementation, in Redis). On resume, only the parts that are still missing need to be uploaded, so the file never has to be re-sent from the beginning. Once all parts have arrived, they are recombined into the complete object.

Beyond resumable uploads, S3 has other strengths. It is highly reliable: data is spread across multiple data centers, so the failure of a single data center can be tolerated. It also scales automatically as the volume of data grows.

In short, chunked upload with recorded progress makes file transfers more reliable and efficient: the client can recover from interruptions, and re-uploading entire files is largely avoided.
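
To make the flow concrete, below is a minimal client-side sketch that drives the controller above with Spring's RestTemplate. The base URL, chunk size, and response handling are assumptions; a real client would first call /initiateMultipartUpload to obtain fileId and the server-side filename, and /listParts to learn which indices already succeeded:

import org.springframework.core.io.ByteArrayResource;
import org.springframework.http.HttpEntity;
import org.springframework.http.HttpHeaders;
import org.springframework.http.HttpMethod;
import org.springframework.http.MediaType;
import org.springframework.util.LinkedMultiValueMap;
import org.springframework.util.MultiValueMap;
import org.springframework.web.client.RestTemplate;

import java.io.File;
import java.io.RandomAccessFile;
import java.util.List;

public class ResumableUploadClientSketch {

    private static final String BASE = "http://localhost:8080/upload"; // assumed host and port
    private static final int CHUNK_SIZE = 5 * 1024 * 1024;             // 5 MB per part

    private final RestTemplate rest = new RestTemplate();

    /**
     * Uploads only the missing parts of a file, then completes the multipart upload.
     * fileId and filename come from /initiateMultipartUpload; uploadedParts from /listParts.
     */
    public void resumeUpload(String path, String filename, String fileId,
                             List<Integer> uploadedParts) throws Exception {
        long length = new File(path).length();
        int partTotal = (int) Math.ceil(length / (double) CHUNK_SIZE);
        try (RandomAccessFile file = new RandomAccessFile(path, "r")) {
            for (int index = 1; index <= partTotal; index++) {   // S3 part numbers start at 1
                if (uploadedParts.contains(index)) {
                    continue;                                    // resume: skip finished parts
                }
                long offset = (long) (index - 1) * CHUNK_SIZE;
                byte[] buffer = new byte[(int) Math.min((long) CHUNK_SIZE, length - offset)];
                file.seek(offset);
                file.readFully(buffer);

                MultiValueMap<String, Object> body = new LinkedMultiValueMap<>();
                body.add("file", new ByteArrayResource(buffer) {
                    @Override
                    public String getFilename() { return filename; } // needed for multipart encoding
                });
                body.add("fileId", fileId);
                body.add("filename", filename);
                body.add("index", String.valueOf(index));
                body.add("partTotal", String.valueOf(partTotal));

                HttpHeaders headers = new HttpHeaders();
                headers.setContentType(MediaType.MULTIPART_FORM_DATA);
                rest.exchange(BASE + "/uploadPart", HttpMethod.PUT,
                        new HttpEntity<>(body, headers), String.class);
            }
        }
        // Every part is on the server: ask it to stitch the parts into the final object
        rest.postForObject(BASE + "/completeUpload?fileId={id}&filename={name}&partTotal={total}",
                null, String.class, fileId, filename, partTotal);
    }
}

This is only a sketch: it assumes every part except the last is exactly CHUNK_SIZE, and it omits retries and error handling.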