S3对象存储使用

1、依赖

因为 AWS SDK 通过 HTTP 请求调用上传、下载等 API,所以需要加上 httpclient 相关的依赖。

<dependency>
 <groupId>com.amazonaws</groupId>
  <artifactId>aws-java-sdk-s3</artifactId>
  <version>1.11.628</version>
</dependency>

<dependency>
  <groupId>org.apache.httpcomponents</groupId>
  <artifactId>httpclient</artifactId>
  <version>4.5.13</version>
</dependency>

<dependency>
  <groupId>org.apache.httpcomponents</groupId>
  <artifactId>httpcore</artifactId>
  <version>4.4.13</version>
</dependency>

2、工具类

一般可以把配置抽取到配置类,方法逻辑放到工具类。
上传和下载方法都提供了多种重载,可以根据不同的业务场景选择合适的重载方式。

import com.amazonaws.AmazonServiceException;
import com.amazonaws.ClientConfiguration;
import com.amazonaws.Protocol;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.client.builder.AwsClientBuilder;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.*;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;
import com.amazonaws.services.s3.transfer.Upload;
import com.amazonaws.util.StringUtils;
import com.mocha.order.enums.PropertiesEnum;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;

import javax.annotation.PostConstruct;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.InputStream;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.Date;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.stream.Collectors;


@Component
public class CosUtil {
    private static final Logger LOGGER = LoggerFactory.getLogger(CosUtil.class);

    // Access key of the COS/S3 account.
    public static String accessKey;
    // Secret key of the COS/S3 account.
    public static String secretKey;

    // Endpoint the SDK sends requests to.
    public static String serviceEndpoint;
    // Region name used when signing requests.
    public static String region;

    static AWSCredentials credentials;
    static AWSStaticCredentialsProvider awsStaticCredentialsProvider;
    static ClientConfiguration config;
    static AwsClientBuilder.EndpointConfiguration endpointConfiguration;
    static AmazonS3 conn;

    // 5 MB part size. Use a multiple of 5 MB and keep individual parts well under 100 MB.
    public static final int FIVE_PARTSIZE = 5242880;

    // 1 MB.
    public static final int ONE_PARTSIZE = 1048576;

    /**
     * Initializes the shared {@link AmazonS3} client from externally supplied
     * configuration. Runs once after the Spring bean is constructed.
     */
    @PostConstruct
    public void init() {
        // Configuration values may come from the database or from a config file.
        final String cosAccessKey = PropertiesEnum.getCosAccessKey();
        final String cosSecretKey = PropertiesEnum.getCosSecretKey();
        final String cosServiceEndpoint = PropertiesEnum.getCosServiceEndpoint();
        final String cosRegion = PropertiesEnum.getCosRegion();

        if (org.apache.commons.lang3.StringUtils.isBlank(cosAccessKey)) {
            LOGGER.error("云Cos配置的AccessKey为空,请保证【COS_ACCESSKEY】值不为空,否则会影响项目相关功能的使用");
        }

        if (org.apache.commons.lang3.StringUtils.isBlank(cosSecretKey)) {
            LOGGER.error("云Cos配置的SecretKey为空,请保证【COS_SECRETKEY】值不为空,否则会影响项目相关功能的使用");
        }

        if (org.apache.commons.lang3.StringUtils.isBlank(cosServiceEndpoint)) {
            LOGGER.error("云Cos配置的ServiceEndpoint为空,请保证【COS_SERVICEENDPOINT】值不为空,否则会影响项目相关功能的使用");
        }

        if (org.apache.commons.lang3.StringUtils.isBlank(cosRegion)) {
            LOGGER.error("云Cos配置的Region为空,请保证【COS_REGION】值不为空,否则会影响项目相关功能的使用");
        }

        // BUGFIX: the original passed cosAccessKey twice (secretKey was never logged)
        // and printed both credentials in clear text. Log masked values instead.
        LOGGER.info("云Cos的配置分别为,accessKey:{},secretKey:{},serviceEndpoint:{},region:{}",
                mask(cosAccessKey), mask(cosSecretKey), cosServiceEndpoint, cosRegion);

        LOGGER.info("开始初始化联通云Cos的配置,开始初始化时间:{}", new Date());

        accessKey = cosAccessKey;
        secretKey = cosSecretKey;
        serviceEndpoint = cosServiceEndpoint;
        region = cosRegion;

        LOGGER.info("结束初始化云Cos的配置,结束初始化时间:{}", new Date());

        credentials = new BasicAWSCredentials(accessKey, secretKey);
        awsStaticCredentialsProvider = new AWSStaticCredentialsProvider(credentials);
        config = new ClientConfiguration();
        endpointConfiguration = new AwsClientBuilder.EndpointConfiguration(serviceEndpoint, region);
        conn = AmazonS3ClientBuilder.standard()
                .withCredentials(awsStaticCredentialsProvider)
                // Legacy v2 signature ("S3SignerType") over plain HTTP, as this endpoint requires.
                .withClientConfiguration(config.withProtocol(Protocol.HTTP).withSignerOverride("S3SignerType"))
                .withEndpointConfiguration(endpointConfiguration)
                .build();
    }

    // Masks a credential for logging: at most the first 4 characters remain visible.
    private static String mask(String secret) {
        if (org.apache.commons.lang3.StringUtils.isBlank(secret)) {
            return secret;
        }
        return secret.substring(0, Math.min(4, secret.length())) + "****";
    }

    /**
     * Checks whether a bucket with the given name exists in this account.
     *
     * @param bucketName bucket name to look for
     * @return true when the bucket is present in the account's bucket list
     */
    public static boolean doesBucketExist(String bucketName) {
        return conn.listBuckets().stream().map(Bucket::getName).collect(Collectors.toList()).contains(bucketName);
    }

    /**
     * Creates a bucket. Re-creating an existing bucket does not delete its contents.
     *
     * @param bucketName name of the bucket to create
     * @return the created bucket
     */
    public static Bucket createBucket(String bucketName) {
        LOGGER.info("创建桶:{}", bucketName);
        return conn.createBucket(bucketName);
    }

    /**
     * Lists every bucket in the account.
     *
     * @return all buckets
     */
    public static List<Bucket> listBuckets() {
        return conn.listBuckets();
    }

    /**
     * Lists and logs every object in a bucket, following pagination until the
     * listing is no longer truncated.
     *
     * @param bucketName bucket to enumerate
     * @return the final (last-page) listing
     */
    public static ObjectListing listObjects(String bucketName) {
        ObjectListing objects = conn.listObjects(bucketName);
        do {
            for (S3ObjectSummary objectSummary : objects.getObjectSummaries()) {
                // Parameterized logging instead of string concatenation.
                LOGGER.info("{}\t{}\t{}", objectSummary.getKey(), objectSummary.getSize(),
                        StringUtils.fromDate(objectSummary.getLastModified()));
            }
            objects = conn.listNextBatchOfObjects(objects);
        } while (objects.isTruncated());
        return objects;
    }

    /**
     * Fetches the metadata of an object without downloading its content.
     *
     * @param bucketName bucket name
     * @param fileName   object key
     * @return the object's metadata
     */
    public static ObjectMetadata getObjectMeta(String bucketName, String fileName) {
        // BUGFIX: the original log message incorrectly said "上传文件" (upload).
        LOGGER.info("获取文件元数据,桶名称:{},文件名称:{}", bucketName, fileName);
        return conn.getObjectMetadata(bucketName, fileName);
    }

    /**
     * Uploads an object described by a fully populated request.
     *
     * @param putObjectRequest prepared upload request
     * @return the upload result
     */
    public static PutObjectResult uploadFile(PutObjectRequest putObjectRequest) {
        LOGGER.info("上传文件桶实体类方式,实体类是:{}", putObjectRequest);
        return conn.putObject(putObjectRequest);
    }

    /**
     * Uploads a local file.
     *
     * @param bucketName target bucket
     * @param keyName    object key
     * @param file       local file to upload
     * @return the upload result
     */
    public static PutObjectResult uploadFile(String bucketName, String keyName, File file) {
        LOGGER.info("上传文件,桶名称:{},文件名称:{}", bucketName, keyName);
        return conn.putObject(bucketName, keyName, file);
    }

    /**
     * Uploads the content of a stream. The stream is not closed here; the
     * caller remains responsible for it.
     *
     * @param bucketName  target bucket
     * @param keyName     object key
     * @param inputStream content to upload
     * @return the upload result
     */
    public static PutObjectResult uploadFile(String bucketName, String keyName, InputStream inputStream) {
        LOGGER.info("上传文件,桶名称:{},文件名称:{}", bucketName, keyName);
        return conn.putObject(bucketName, keyName, inputStream, new ObjectMetadata());
    }

    /**
     * Uploads a plain string as the object content.
     *
     * @param bucketName target bucket
     * @param keyName    object key
     * @param content    string content
     * @return the upload result
     */
    public static PutObjectResult uploadFile(String bucketName, String keyName, String content) {
        LOGGER.info("上传文件,桶名称:{},文件名称:{}", bucketName, keyName);
        return conn.putObject(bucketName, keyName, content);
    }

    /**
     * Multipart upload via {@link TransferManager} (no explicit part size
     * needed; also provides progress tracking, resume and concurrent upload).
     * Blocks until the transfer completes.
     *
     * @param bucketName target bucket
     * @param keyName    object key
     * @param filePath   path of the local file to upload
     * @return the completed upload handle
     * @throws InterruptedException    if the wait is interrupted
     * @throws AmazonServiceException  if S3 rejects the upload
     */
    public static Upload uploadFileByShard(String bucketName, String keyName, String filePath)
            throws InterruptedException, AmazonServiceException {
        TransferManager tm = TransferManagerBuilder.standard().withS3Client(conn).build();
        try {
            LOGGER.info("开始分片上传,桶名称:{},文件名称:{},路径:{}", bucketName, keyName, filePath);
            Upload upload = tm.upload(bucketName, keyName, new File(filePath));
            upload.waitForCompletion();
            LOGGER.info("结束分片上传,桶名称:{},文件名称:{},路径:{}", bucketName, keyName, filePath);
            return upload;
        } catch (AmazonServiceException e) {
            // Log through SLF4J with the stack trace instead of printStackTrace().
            LOGGER.error("分片上传异常,桶名称:{},文件名称:{},路径:{}", bucketName, keyName, filePath, e);
            throw e;
        } catch (InterruptedException e) {
            // Restore the interrupt flag before propagating.
            Thread.currentThread().interrupt();
            LOGGER.error("分片上传异常,桶名称:{},文件名称:{},路径:{}", bucketName, keyName, filePath, e);
            throw e;
        } finally {
            // Release the TransferManager's thread pool; 'false' keeps the shared client alive.
            tm.shutdownNow(false);
        }
    }

    /**
     * Uploads a large file using the low-level multipart API, with parts
     * uploaded concurrently on a small thread pool.
     *
     * @param bucketName  target bucket
     * @param objectName  object key
     * @param file        local file to upload
     * @param minPartSize part size in bytes (e.g. 5242880 for 5 MB); must be positive
     * @return true when the upload completed, false on empty input or failure
     */
    public static boolean uploadMultipartFileByPart(String bucketName, String objectName, File file, int minPartSize) {

        long size = file.length();
        final String fileName = file.getName();

        if (size == 0) {
            LOGGER.error("分片上传的文件:{}为空", fileName);
            return false;
        }
        // BUGFIX: a non-positive part size would make the position loop spin forever.
        if (minPartSize <= 0) {
            LOGGER.error("分片上传的文件:{}的分片大小非法:{}", fileName, minPartSize);
            return false;
        }

        // Starting byte offset of each part; the last part may be shorter.
        List<Long> positions = new ArrayList<>();
        for (long filePosition = 0; filePosition < size; filePosition += minPartSize) {
            positions.add(filePosition);
        }

        LOGGER.info("文件:{},总大小:{}字节,分为{}段", fileName, size, positions.size());

        // Collects the PartETag of every uploaded part; needed to complete the upload.
        List<PartETag> partETags = Collections.synchronizedList(new ArrayList<>());

        // Step 1: initiate the multipart upload, recording the content type
        // derived from the file extension.
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentType(fileName.substring(fileName.lastIndexOf(".") + 1));

        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, objectName, metadata);
        InitiateMultipartUploadResult initResponse = conn.initiateMultipartUpload(initRequest);

        LOGGER.info("分片上传开始,上传的文件是:{}", fileName);

        ExecutorService exec = Executors.newFixedThreadPool(5);
        long begin = System.currentTimeMillis();
        try {
            // Step 2: upload every part concurrently, collecting its PartETag.
            List<Future<?>> futures = new ArrayList<>(positions.size());
            for (int i = 0; i < positions.size(); i++) {
                final int partIndex = i;
                futures.add(exec.submit(() -> {
                    long partStart = System.currentTimeMillis();
                    long offset = positions.get(partIndex);
                    UploadPartRequest uploadRequest = new UploadPartRequest()
                            .withBucketName(bucketName)
                            .withKey(objectName)
                            .withUploadId(initResponse.getUploadId())
                            .withPartNumber(partIndex + 1)
                            .withFileOffset(offset)
                            .withFile(file)
                            .withPartSize(Math.min(minPartSize, size - offset));
                    partETags.add(conn.uploadPart(uploadRequest).getPartETag());
                    LOGGER.info("分片上传的文件时:{},第{}段上传耗时:{}",
                            fileName, partIndex + 1, System.currentTimeMillis() - partStart);
                }));
            }
            // BUGFIX: replaces the busy-wait `while (true)` spin on isTerminated().
            // Waiting on the futures also surfaces any per-part exception, which
            // the original silently lost (leading to a cryptic complete failure).
            for (Future<?> future : futures) {
                future.get();
            }

            // Step 3: complete the upload. Parts must be listed in ascending
            // part-number order; concurrent completion order is arbitrary.
            partETags.sort(Comparator.comparingInt(PartETag::getPartNumber));
            CompleteMultipartUploadRequest compRequest =
                    new CompleteMultipartUploadRequest(bucketName, objectName, initResponse.getUploadId(), partETags);
            conn.completeMultipartUpload(compRequest);

        } catch (Exception e) {
            if (e instanceof InterruptedException) {
                Thread.currentThread().interrupt();
            }
            // Abort so the already-uploaded parts do not keep occupying storage.
            conn.abortMultipartUpload(new AbortMultipartUploadRequest(bucketName, objectName, initResponse.getUploadId()));
            LOGGER.error("分片上传文件:{}异常", fileName, e);
            return false;
        } finally {
            exec.shutdown();
        }
        LOGGER.info("分片上传文件:{}结束,总上传耗时:{}毫秒", fileName, System.currentTimeMillis() - begin);

        return true;
    }

    /**
     * Initiates a multipart upload.
     *
     * @param initRequest initiation request
     * @return initiation response carrying the upload id
     */
    private InitiateMultipartUploadResult initiateMultipartUpload(
            InitiateMultipartUploadRequest initRequest) {
        return conn.initiateMultipartUpload(initRequest);
    }

    /**
     * Uploads one part of a multipart upload.
     *
     * @param uploadRequest part upload request
     * @return part upload result
     * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart">AWS
     * API Documentation</a>
     */
    private UploadPartResult uploadPart(UploadPartRequest uploadRequest) {
        return conn.uploadPart(uploadRequest);
    }

    /**
     * Completes a multipart upload by assembling the uploaded parts.
     *
     * @param compRequest completion request
     * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload">AWS
     * API Documentation</a>
     */
    private CompleteMultipartUploadResult completeMultipartUpload(
            CompleteMultipartUploadRequest compRequest) {
        return conn.completeMultipartUpload(compRequest);
    }

    /**
     * Aborts a multipart upload, releasing its already-uploaded parts.
     *
     * @param uploadRequest abort request
     * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload">AWS
     * API Documentation</a>
     */
    private void abortMultipartUpload(AbortMultipartUploadRequest uploadRequest) {
        conn.abortMultipartUpload(uploadRequest);
    }

    /**
     * Changes the canned ACL of an object.
     *
     * @param bucketName               bucket name
     * @param keyName                  object key
     * @param cannedAccessControlList  canned ACL to apply
     */
    public static void modifyFileAccessAuthority(String bucketName, String keyName,
                                                 CannedAccessControlList cannedAccessControlList) {
        LOGGER.info("修改文件的访问权限,桶名称:{},文件名称:{},权限类型时:{}", bucketName, keyName, cannedAccessControlList);
        conn.setObjectAcl(bucketName, keyName, cannedAccessControlList);
    }

    /**
     * Downloads an object to the given local path.
     *
     * @param bucketName   bucket name
     * @param keyName      object key
     * @param fileSavePath local destination path
     * @return the downloaded object's metadata
     */
    public static ObjectMetadata downloadFileAssignPath(String bucketName, String keyName, String fileSavePath) {
        return conn.getObject(new GetObjectRequest(bucketName, keyName), new File(fileSavePath));
    }

    /**
     * Generates a pre-signed (signed) download URL for an object.
     *
     * @param bucketName bucket name
     * @param keyName    object key
     * @return the pre-signed URL
     */
    public static URL generatorFileUrl(String bucketName, String keyName) {
        GeneratePresignedUrlRequest request = new GeneratePresignedUrlRequest(bucketName, keyName);
        return conn.generatePresignedUrl(request);
    }

    /**
     * Deletes a single object.
     * NOTE: the method name "deleteFie" is a historical typo kept for
     * backward compatibility with existing callers.
     *
     * @param bucketName bucket name
     * @param keyName    object key to delete
     */
    public static void deleteFie(String bucketName, String keyName) {
        LOGGER.info("删除文件,桶名称:{},文件名称:{},删除时间:{}", bucketName, keyName, new Date());
        conn.deleteObject(bucketName, keyName);
    }

    /**
     * Deletes a bucket (must be empty).
     * NOTE: the method name "deleteFie" is a historical typo kept for
     * backward compatibility with existing callers.
     *
     * @param bucketName bucket to delete
     */
    public static void deleteFie(String bucketName) {
        LOGGER.info("删除桶,桶名称:{},删除时间:{}", bucketName, new Date());
        conn.deleteBucket(bucketName);
    }

    /**
     * Demo entry point exercising the main operations end to end.
     * Adjust bucket names and file paths before running locally.
     */
    public static void main(String[] args) {

        String filePath = "G:\\mk\\GDrepo\\repo\\org\\apache\\httpcomponents\\httpcomponents-core\\4.0.1\\httpcomponents-core-4.0.1.pom";

        CosUtil.uploadFile("test", "a.txt", new File(filePath));

        // Upload an "empty folder" (zero-length object whose key ends with '/').
        conn.putObject("aaa", "demo" + "/", "");
        // Upload a file into that folder.
        conn.putObject(new PutObjectRequest("aaa", "demo" + "/" + "a.txt", new File(filePath)));

        // List every bucket of the account.
        for (Bucket b : conn.listBuckets()) {
            System.out.println(b.getName() + "\t" + StringUtils.fromDate(b.getCreationDate()));
        }

        // Create a bucket (re-creating an existing bucket keeps its contents).
        Bucket bucket = conn.createBucket("111");
        System.out.println(bucket.getName());

        // List the objects inside the bucket, page by page.
        ObjectListing objects = conn.listObjects(bucket.getName());
        do {
            for (S3ObjectSummary objectSummary : objects.getObjectSummaries()) {
                System.out.println(objectSummary.getKey() + "\t"
                        + objectSummary.getSize() + "\t"
                        + StringUtils.fromDate(objectSummary.getLastModified()));
            }
            objects = conn.listNextBatchOfObjects(objects);
        } while (objects.isTruncated());

        // Create an object from an in-memory stream.
        ByteArrayInputStream input = new ByteArrayInputStream("Hello World!".getBytes(StandardCharsets.UTF_8));
        conn.putObject(bucket.getName(), "hello.txt", input, new ObjectMetadata());

        // Multipart upload via TransferManager.
        String keyName = "demoya";
        TransferManager tm = TransferManagerBuilder.standard().withS3Client(conn).build();
        try {
            // TransferManager transfers asynchronously; upload() returns immediately.
            System.out.println("开始分片上传");
            Upload upload = tm.upload("my-test-bucket", keyName, new File(filePath));
            // Optionally wait for the upload to finish before continuing.
            upload.waitForCompletion();
            System.out.println("结束分片上传");
        } catch (AmazonServiceException e) {
            // The request was transmitted but S3 could not process it.
            LOGGER.error("分片上传异常", e);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            LOGGER.error("分片上传被中断", e);
        } finally {
            // BUGFIX: without shutting the TransferManager down, its non-daemon
            // threads keep the JVM from exiting. 'false' keeps conn usable.
            tm.shutdownNow(false);
        }

        // Change the object's ACL.
        conn.setObjectAcl(bucket.getName(), "hello.txt", CannedAccessControlList.PublicRead);

        // Download an object to a local file.
        conn.getObject(new GetObjectRequest(bucket.getName(), "hello.txt"), new File("G:\\mk\\hello.txt"));

        // Generate a signed download URL.
        GeneratePresignedUrlRequest request = new GeneratePresignedUrlRequest(bucket.getName(), "hello.txt");
        System.out.println(conn.generatePresignedUrl(request));

        // Delete the object, then the bucket.
        conn.deleteObject(bucket.getName(), "hello.txt");
        conn.deleteBucket(bucket.getName());
    }
}


3、分片设置MD5

为了确保数据在网络传输中不被破坏,可以在 Upload Part 请求中指定 Content-MD5 头。Amazon S3 会根据提供的 MD5 值校验分片数据;如果不匹配,Amazon S3 将返回错误。

import java.io.FileInputStream;
import java.io.InputStream;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Base64;

// ... 省略其他代码 ...

/**
 * Multipart upload variant that attaches a Content-MD5 digest to every
 * UploadPart request so S3 can verify each part's integrity on receipt.
 * NOTE(review): abridged illustration — the elided sections match the full
 * uploadMultipartFileByPart implementation shown earlier in this article
 * (positions/size/exec/initResponse/partETags come from that elided code).
 *
 * @param bucketName  target bucket
 * @param objectName  object key
 * @param file        local file to upload
 * @param minPartSize part size in bytes (e.g. 5242880 for 5 MB)
 * @return true when the upload completed
 */
public static boolean uploadMultipartFileByPart(String bucketName, String objectName, File file, int minPartSize) {
    // ... other code elided ...

    try {
        for (int i = 0; i < positions.size(); i++) {
            int finalI = i;
            exec.execute(() -> {
                long time1 = System.currentTimeMillis();
                // Size of this part: minPartSize, except a possibly shorter final part.
                long partSize = Math.min(minPartSize, (size - positions.get(finalI)));

                // Compute the MD5 digest of just this slice of the file.
                // NOTE(review): calculateMD5 may return null on failure, in
                // which case no checksum is attached — consider failing fast.
                String md5 = calculateMD5(file, positions.get(finalI), partSize);

                UploadPartRequest uploadRequest = new UploadPartRequest()
                        .withBucketName(bucketName)
                        .withKey(objectName)
                        .withUploadId(initResponse.getUploadId())
                        .withPartNumber(finalI + 1)
                        .withFileOffset(positions.get(finalI))
                        .withFile(file)
                        .withPartSize(partSize)
                        .withMd5Digest(md5);  // attach the MD5 checksum

                // Upload the part and record its ETag for the completion step.
                partETags.add(conn.uploadPart(uploadRequest).getPartETag());
                LOGGER.info("分片上传的文件时:{},第{}段上传耗时:{}", fileName, finalI + 1, (System.currentTimeMillis() - time1));
            });
        }

        // ... other code elided ...
    } catch (Exception e) {
        // ... exception handling elided ...
    }
    // ... other code elided ...
    return true;
}

// 计算 MD5 值的方法
private static String calculateMD5(File file, long offset, long length) {
    try (InputStream is = new FileInputStream(file)) {
        is.skip(offset); // 跳过前面的字节

        byte[] buffer = new byte[8192]; // 缓冲区
        MessageDigest md5Digest = MessageDigest.getInstance("MD5");
        long bytesRead = 0;
        int read;

        // 读取分片数据并计算 MD5
        while (bytesRead < length && (read = is.read(buffer, 0, (int)Math.min(buffer.length, length - bytesRead))) != -1) {
            md5Digest.update(buffer, 0, read);
            bytesRead += read;
        }

        // 返回 MD5 的 Base64 编码
        return Base64.getEncoder().encodeToString(md5Digest.digest());
    } catch (Exception e) {
        LOGGER.error("计算 MD5 失败: {}", e.getMessage());
        return null; // 或者抛出异常
    }
}

4、分片过期时间

大文件上传的时候可以调用两个接口实现,一个接口实现分片上传,另一个接口合并分片。但是分片也是有效期的。
为了防止分片长时间不合并而占用存储资源,您可以实施以下策略:

设置有效期:为每个上传的分片设置一个有效期(例如,24小时或48小时)。如果在此时间内未进行合并,则自动删除这些分片。
定期清理:可以定期运行一个后台任务,检查所有上传的分片,并删除那些已经超过有效期且未合并的分片。

### Java 使用 S3Client 操作 Amazon S3 对象存储 #### 创建 S3 客户端实例 为了与 Amazon S3 进行交互,首先需要创建 `S3Client` 的实例。默认情况下会使用签名版本 4 (v4),即 `S3Signer.v4()` 来签署请求[^2]。 ```java import software.amazon.awssdk.auth.credentials.ProfileCredentialsProvider; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.s3.S3Client; public class S3Example { public static void main(String[] args) { Region region = Region.US_EAST_1; // 设置区域 S3Client s3 = S3Client.builder() .region(region) .credentialsProvider(ProfileCredentialsProvider.create()) .build(); System.out.println("成功创建 S3 Client"); } } ``` #### 列出所有存储桶 通过调用 `listBuckets` 方法可以获取当前账户下的全部存储桶列表: ```java import software.amazon.awssdk.services.s3.model.Bucket; import java.util.List; // ... List<Bucket> buckets = s3.listBuckets().buckets(); for (Bucket bucket : buckets) { System.out.printf("%s\n", bucket.name()); } ``` #### 向存储桶上传文件 要向特定的存储桶上传对象(文件),可以通过 `putObject` 请求完成此操作。这里展示了一个简单的例子来说明如何实现这一点: ```java import software.amazon.awssdk.core.sync.RequestBody; import software.amazon.awssdk.services.s3.model.PutObjectRequest; //... String bucketName = "example-bucket"; String key = "myKey"; // 文件名或路径 PutObjectRequest putObReq = PutObjectRequest.builder() .bucket(bucketName) .key(key) .build(); s3.putObject(putObReq, RequestBody.fromFile(new File("/path/to/file"))); System.out.println("文件上传成功!"); ``` #### 下载文件到本地磁盘 下载指定键的对象至本地系统同样简单明了,只需利用 `getObject` API 并指定期望保存的位置即可: ```java import java.nio.file.Paths; //... GetObjectRequest getObjectRequest = GetObjectRequest.builder() .bucket(bucketName) .key(key) .build(); s3.getObject(getObjectRequest, Paths.get("/local/path/destination")); System.out.println("文件下载完毕!"); ``` #### 删除单个对象 当不再需要某个对象时,可通过其对应的键执行删除动作: ```java DeleteObjectRequest deleteObjectRequest = DeleteObjectRequest.builder() .bucket(bucketName) .key(keyToDelete) .build(); s3.deleteObject(deleteObjectRequest); System.out.println("删除成功!"); ``` 以上就是关于如何在 Java使用 `S3Client` 类库来进行基本 CRUD 操作的一些指导[^1]。
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

无敌的黑星星

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值