Integrating AWS S3 (Amazon Simple Storage Service) with Java: File Upload, Multipart Upload, Delete, and Download

Please credit the source when reposting: https://blog.csdn.net/Amor_Leo/article/details/117705800. Thank you.

pom dependencies

    <properties>
        <aws.version>1.11.415</aws.version>
    </properties>

    <dependency>
        <groupId>com.amazonaws</groupId>
        <artifactId>aws-java-sdk-s3</artifactId>
        <version>${aws.version}</version>
    </dependency>

yml configuration

aws:
  s3:
    s3Url: https://xxxx.s3.us-east-2.amazonaws.com/ # S3 endpoint URL
    accessKey: xxxx # access key
    secretKey: xxxx # secret key

Client configuration

import com.amazonaws.ClientConfiguration;
import com.amazonaws.Protocol;
import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.client.builder.AwsClientBuilder;
import com.amazonaws.regions.Regions;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

/**
 * @author LHL
 */
@Configuration
@Slf4j
public class AmazonS3Config {

    /**
     * S3 endpoint URL
     */
    @Value("${aws.s3.s3Url}")
    @Getter
    private String s3Url;

    /**
     * Access key
     */
    @Value("${aws.s3.accessKey}")
    private String accessKey;

    /**
     * Secret key
     */
    @Value("${aws.s3.secretKey}")
    private String secretKey;


    /**
     * Initializes and builds the AmazonS3 client.
     */
    @Bean(name = "amazonS3")
    public AmazonS3 getAmazonS3(){
        log.info("start create s3Client");
        ClientConfiguration config = new ClientConfiguration();
        // HTTPS or HTTP
        config.withProtocol(Protocol.HTTPS);
        // Maximum number of connections the AmazonS3 client may use
        config.setMaxConnections(200);
        // Socket timeout in milliseconds
        config.setSocketTimeout(10000);
        // Number of retries for failed requests
        config.setMaxErrorRetry(2);
        AmazonS3 amazonS3 = AmazonS3ClientBuilder.standard()
                // US_EAST_2: use the region where your bucket is located
                .withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(s3Url, Regions.US_EAST_2.getName()))
                .withClientConfiguration(config)
                .withCredentials(new AWSStaticCredentialsProvider(new BasicAWSCredentials(accessKey, secretKey)))
                .withPathStyleAccessEnabled(true)
                .disableChunkedEncoding()
                .withForceGlobalBucketAccessEnabled(true)
                .build();
        log.info("create s3Client success");
        return amazonS3;
    }


    @Bean(name = "transferManager")
    public TransferManager getTransferManager(){
        return TransferManagerBuilder.standard().withS3Client(getAmazonS3()).build();
    }
}
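As a quick sanity check that the endpoint and credentials are wired up correctly, a small startup bean can list the buckets visible to the account. This is a minimal sketch built on the configuration above; the AmazonS3StartupCheck class and its CommandLineRunner bean are hypothetical additions for illustration, not part of the original setup.

import com.amazonaws.services.s3.AmazonS3;
import lombok.extern.slf4j.Slf4j;
import org.springframework.boot.CommandLineRunner;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

/**
 * Hypothetical startup check (not part of the original post): lists the buckets
 * visible to the configured credentials so a bad endpoint or key fails fast.
 */
@Slf4j
@Configuration
public class AmazonS3StartupCheck {

    @Bean
    public CommandLineRunner s3ConnectivityCheck(AmazonS3 amazonS3) {
        // Runs once at application start; any credential or endpoint problem
        // surfaces here instead of on the first real upload.
        return args -> amazonS3.listBuckets()
                .forEach(bucket -> log.info("visible bucket: {}", bucket.getName()));
    }
}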

AWS S3 utility class

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.Bucket;
import com.amazonaws.services.s3.model.CannedAccessControlList;
import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
import com.amazonaws.services.s3.model.CreateBucketRequest;
import com.amazonaws.services.s3.model.DeleteBucketRequest;
import com.amazonaws.services.s3.model.DeleteObjectRequest;
import com.amazonaws.services.s3.model.GetObjectRequest;
import com.amazonaws.services.s3.model.HeadBucketRequest;
import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
import com.amazonaws.services.s3.model.InitiateMultipartUploadResult;
import com.amazonaws.services.s3.model.ListBucketsRequest;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PartETag;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.model.S3Object;
import com.amazonaws.services.s3.model.UploadPartRequest;
import com.amazonaws.services.s3.model.UploadPartResult;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.Upload;
import lombok.extern.slf4j.Slf4j;
import org.springframework.stereotype.Component;
import org.springframework.web.multipart.MultipartFile;
import javax.annotation.PostConstruct;
import javax.annotation.Resource;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.TimeUnit;

/**
 * AWS S3 file utility class
 *
 * @author LHL
 */
@Slf4j
@Component
public class AmazonS3Util {

    @Resource
    private AmazonS3 amazonS3;

    @Resource
    private TransferManager transferManager;

    private static AmazonS3 s3Client;

    private static TransferManager tmClient;

    @PostConstruct
    public void init() {
        s3Client = amazonS3;
        tmClient = transferManager;
    }

    /**
     * Upload a file in a single request.
     *
     * @author LHL
     * @param multipartFile the file to upload
     * @param bucketName bucket name
     * @return "bucketName/fileKey" on success, null on failure
     */
    public static String uploadFile(MultipartFile multipartFile, String bucketName) {
        InputStream inputStream = null;
        try {
            createBucket(bucketName);
            String fileKey = getFileKey(multipartFile.getOriginalFilename());
            inputStream = multipartFile.getInputStream();
            Date expireDate = new Date(System.currentTimeMillis() + TimeUnit.DAYS.toMillis(30));
            ObjectMetadata metadata = new ObjectMetadata();
            metadata.setHttpExpiresDate(expireDate);
            metadata.setContentType(multipartFile.getContentType());
            metadata.setContentLength(multipartFile.getSize());
            PutObjectRequest request = new PutObjectRequest(bucketName, fileKey, inputStream, metadata);
            // Set the object's access permission (canned ACL)
            request.withCannedAcl(CannedAccessControlList.BucketOwnerFullControl);
            s3Client.putObject(request);
            return bucketName + "/" + fileKey;
        } catch (Exception e) {
            log.error("uploadFile error {} ", e.getMessage());
        } finally {
            if (inputStream != null) {
                try {
                    inputStream.close();
                } catch (Exception e) {
                    log.error("inputStream close error {} ", e.getMessage());
                }
            }
        }
        return null;
    }

    /**
     * Multipart upload using the low-level API.
     *
     * @author LHL
     * @param multipartFile the file to upload
     * @param bucketName bucket name
     * @return "bucketName/keyName" on success, null on failure
     */
    public static String multipartUpload(MultipartFile multipartFile, String bucketName) {
        log.info("开始上传");
        InputStream inputStream = null;
        // Set part size to 5 MB.
        long partSize = 5 * 1024 * 1024;
        try {
            String keyName = getFileKey(multipartFile.getOriginalFilename());
            long contentLength = multipartFile.getSize();
            createBucket(bucketName);
            List<PartETag> partETags = new ArrayList<PartETag>();
            // Initiate the multipart upload.
            ObjectMetadata metadata = new ObjectMetadata();
            Date expireDate = new Date(System.currentTimeMillis() + TimeUnit.DAYS.toMillis(30));
            metadata.setHttpExpiresDate(expireDate);
            metadata.setContentType(multipartFile.getContentType());
            metadata.setContentLength(multipartFile.getSize());
            InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, keyName, metadata);
            InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);
            // Upload the file parts, reading them sequentially from a single stream.
            // (Re-opening the stream per part, and withFileOffset, only apply to
            // file-based uploads; with an InputStream every part would otherwise
            // re-upload the beginning of the file.)
            inputStream = multipartFile.getInputStream();
            long filePosition = 0;
            for (int i = 1; filePosition < contentLength; i++) {
                // Because the last part could be less than 5 MB, adjust the part size as needed.
                partSize = Math.min(partSize, (contentLength - filePosition));
                // Create the request to upload a part.
                UploadPartRequest uploadRequest = new UploadPartRequest()
                        .withBucketName(bucketName)
                        .withKey(keyName)
                        .withUploadId(initResponse.getUploadId())
                        .withPartNumber(i)
                        .withInputStream(inputStream)
                        .withPartSize(partSize);
                // Upload the part and add the response's ETag to our list.
                UploadPartResult uploadResult = s3Client.uploadPart(uploadRequest);
                partETags.add(uploadResult.getPartETag());

                filePosition += partSize;
            }
            // Complete the multipart upload.
            CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(bucketName, keyName, initResponse.getUploadId(), partETags);
            s3Client.completeMultipartUpload(compRequest);
            return bucketName + "/" + keyName;
        } catch (Exception e) {
            log.error("uploadFile error {} ", e.getMessage());
        } finally {
            if (inputStream != null) {
                try {
                    inputStream.close();
                } catch (Exception e) {
                    log.error("inputStream close error {} ", e.getMessage());
                }
            }
        }
        return null;
    }

    /**
     * Multipart upload using the high-level TransferManager API.
     *
     * @author LHL
     * @param multipartFile the file to upload
     * @param bucketName bucket name
     * @return "bucketName/keyName" on success, null on failure
     */
    public static String heightMultipartUpload(MultipartFile multipartFile, String bucketName) {
        log.info("start high-level multipart upload");
        String keyName = getFileKey(multipartFile.getOriginalFilename());
        try {
            createBucket(bucketName);
            ObjectMetadata metadata = new ObjectMetadata();
            Date expireDate = new Date(System.currentTimeMillis() + TimeUnit.DAYS.toMillis(30));
            metadata.setHttpExpiresDate(expireDate);
            metadata.setContentType(multipartFile.getContentType());
            metadata.setContentLength(multipartFile.getSize());
            // TransferManager processes all transfers asynchronously, so this call
            // returns immediately. The shared transferManager bean is reused here
            // instead of building a new instance (and thread pool) per upload.
            Upload upload = tmClient.upload(bucketName, keyName, multipartFile.getInputStream(), metadata);
            log.info("Object upload started");
            // Optionally, wait for the upload to finish before continuing.
            upload.waitForCompletion();
            return bucketName + "/" + keyName;
        } catch (Exception e) {
            log.error("heightMultipartUpload error {} ", e.getMessage());
        }
        return null;
    }


    /**
     * Download a file.
     *
     * @param bucketName bucket name
     * @param fileKey object key
     * @return the object's content stream (the caller is responsible for closing it)
     */
    public static InputStream downloadFile(String bucketName, String fileKey) {
        GetObjectRequest request = new GetObjectRequest(bucketName, fileKey);
        assert s3Client != null;
        S3Object response = s3Client.getObject(request);
        return response.getObjectContent();
    }

    /**
     * Delete an object.
     *
     * @param bucketName bucket name
     * @param fileKey object key
     */
    public static void deleteFile(String bucketName, String fileKey) {
        try {
            DeleteObjectRequest request = new DeleteObjectRequest(bucketName, fileKey);
            s3Client.deleteObject(request);
        } catch (Exception e) {
            log.error("s3Client error {} ", e.getMessage());
        }
    }

    /**
     * List all buckets.
     */
    public static List<Bucket> listFile() {
        ListBucketsRequest request = new ListBucketsRequest();
        assert s3Client != null;
        return s3Client.listBuckets(request);
    }

    /**
     * Check whether a bucket exists.
     *
     * @param bucketName bucket name
     * @return boolean
     */
    public static boolean isExistBucket(String bucketName) {
        try {
            HeadBucketRequest request = new HeadBucketRequest(bucketName);
            s3Client.headBucket(request);
        } catch (Exception e) {
            log.error("s3Client error {} ", e.getMessage());
            return false;
        }
        return true;
    }

    /**
     * Create a bucket if it does not already exist.
     *
     * @param bucketName bucket name
     */
    public static void createBucket(String bucketName) {
        boolean isBucketExists = isExistBucket(bucketName);
        if (!isBucketExists) {
            try {
                CreateBucketRequest request = new CreateBucketRequest(bucketName);
                s3Client.createBucket(request);
            } catch (Exception e) {
                log.error("s3Client error {} ", e.getMessage());
            }
        }
    }

    /**
     * Delete a bucket.
     *
     * @param bucketName bucket name
     */
    public static void deleteBucket(String bucketName) {
        try {
            DeleteBucketRequest request = new DeleteBucketRequest(bucketName);
            s3Client.deleteBucket(request);
        } catch (Exception e) {
            log.error("s3Client error {} ", e.getMessage());
        }
    }

    /**
     * Check whether an object key exists.
     *
     * @param bucketName bucket name
     * @param fileKey object key
     * @return boolean
     */
    public static boolean isExistFileKey(String bucketName, String fileKey) {
        try {
            // doesObjectExist issues a lightweight HEAD request instead of downloading
            // the object, and returns false (rather than throwing) when the key is missing.
            return s3Client.doesObjectExist(bucketName, fileKey);
        } catch (Exception e) {
            log.error("s3Client error {} ", e.getMessage());
            return false;
        }
    }

    /**
     * Generate a unique object key that keeps the original file extension.
     *
     * @param fileName original file name
     * @return String
     */
    private static String getFileKey(String fileName) {
        String[] names = fileName.split("\\.");
        String fileTypeName = names[names.length - 1];
        return UUID.randomUUID().toString().replaceAll("-","") + "." + fileTypeName;
    }

}
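For completeness, here is a minimal sketch of how the utility class might be called from a Spring MVC controller. The controller, its request paths, and the hard-coded bucket name are assumptions for illustration and do not appear in the original post.

import org.springframework.core.io.InputStreamResource;
import org.springframework.http.HttpHeaders;
import org.springframework.http.MediaType;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.DeleteMapping;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;
import org.springframework.web.multipart.MultipartFile;
import java.io.InputStream;

/**
 * Hypothetical usage example: exposes the AmazonS3Util methods over HTTP.
 */
@RestController
@RequestMapping("/s3")
public class AmazonS3Controller {

    // Example bucket name; replace with your own bucket
    private static final String BUCKET = "my-example-bucket";

    /** Single-request upload; returns "bucket/key" or null on failure. */
    @PostMapping("/upload")
    public String upload(@RequestParam("file") MultipartFile file) {
        return AmazonS3Util.uploadFile(file, BUCKET);
    }

    /** Multipart upload via the low-level API. */
    @PostMapping("/multipart-upload")
    public String multipartUpload(@RequestParam("file") MultipartFile file) {
        return AmazonS3Util.multipartUpload(file, BUCKET);
    }

    /** Streams the stored object back to the caller. */
    @GetMapping("/download")
    public ResponseEntity<InputStreamResource> download(@RequestParam("fileKey") String fileKey) {
        InputStream in = AmazonS3Util.downloadFile(BUCKET, fileKey);
        return ResponseEntity.ok()
                .header(HttpHeaders.CONTENT_DISPOSITION, "attachment; filename=\"" + fileKey + "\"")
                .contentType(MediaType.APPLICATION_OCTET_STREAM)
                .body(new InputStreamResource(in));
    }

    /** Deletes the stored object. */
    @DeleteMapping("/delete")
    public void delete(@RequestParam("fileKey") String fileKey) {
        AmazonS3Util.deleteFile(BUCKET, fileKey);
    }
}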
AWS S3 (Amazon Simple Storage Service) is Amazon's highly scalable object storage service. S3 lets users store and retrieve any amount of data in the cloud. Some of its key characteristics:

1. Scalability: S3 provides a highly scalable storage solution that adapts to workloads of any size, whether that means a few gigabytes of personal files or terabytes of data.

2. Security: S3 offers layered security controls to protect data integrity and confidentiality. Access to buckets and objects can be restricted through access permissions, and data can be encrypted to keep it safe in transit and at rest.

3. Reliability: S3 stores data redundantly across multiple devices and facilities and repairs failures automatically, ensuring availability and durability; device failures are handled without data loss.

4. Data access: data stored in S3 can be reached from anywhere. S3 exposes REST (and legacy SOAP) APIs for efficient, low-latency programmatic access, and its web console lets users manage data directly from a browser.

5. Cost efficiency: S3 is billed on demand, according to the storage actually used and the data actually transferred, so there is no need to purchase or maintain hardware up front.

In short, AWS S3 is a reliable, secure, and highly scalable cloud storage solution. With flexible data management and access options, strong security controls, and high durability, it makes it easy to store and manage data of any kind while keeping it safe and available.