Elasticsearch Getting Started: A Comprehensive Exercise Walkthrough

pom.xml

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>com.lichun</groupId>
    <artifactId>lichun_elasticsearch</artifactId>
    <version>1.0-SNAPSHOT</version>

    <parent>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-parent</artifactId>
        <version>2.1.4.RELEASE</version>
    </parent>

    <dependencies>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-web</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-data-elasticsearch</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-test</artifactId>
            <scope>test</scope>
        </dependency>
    </dependencies>

</project>
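
The test classes below @Autowire a TransportClient (and, in the last test class, an ElasticsearchTemplate), both of which Spring Boot builds from connection properties. A minimal application.properties sketch for a local single-node cluster; the cluster name and address are assumptions, so adjust them to your environment.

application.properties

# assumed local node; 9300 is the transport port (not the 9200 HTTP port)
spring.data.elasticsearch.cluster-name=elasticsearch
spring.data.elasticsearch.cluster-nodes=127.0.0.1:9300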

ESApplication.java

package com.lichun;

import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;

@SpringBootApplication
public class ESApplication {
    public static void main(String[] args) {
        SpringApplication.run(ESApplication.class, args);
    }
}

Article.java

package com.lichun.pojo;

import org.springframework.data.annotation.Id;
import org.springframework.data.elasticsearch.annotations.Document;
import org.springframework.data.elasticsearch.annotations.Field;
import org.springframework.data.elasticsearch.annotations.FieldType;

import java.io.Serializable;

/**
 * @Field maps a POJO property to a field in the ES index.
 * type: the field's data type; text is analyzed by default (standard analyzer)
 * index: whether the field is indexed (default: true)
 * store: whether the field is stored outside _source (default: false)
 * analyzer: the analyzer used when building the inverted index
 * searchAnalyzer: the analyzer used at search time; usually left unset, defaulting to the one set by analyzer
 */

@Document(indexName = "blog03", type = "article")
public class Article implements Serializable {

    @Id
    private Long id;
    @Field(type = FieldType.Text, index = true, store = false, analyzer = "ik_smart", searchAnalyzer = "ik_smart")
    private String title;
    @Field(type = FieldType.Text, index = true, store = false, analyzer = "ik_smart", searchAnalyzer = "ik_smart")
    private String content;

    @Override
    public String toString() {
        return "Article{" +
                "id=" + id +
                ", title='" + title + '\'' +
                ", content='" + content + '\'' +
                '}';
    }

    public Article() {
    }

    public Article(Long id, String title, String content) {
        this.id = id;
        this.title = title;
        this.content = content;
    }

    public Long getId() {
        return id;
    }

    public void setId(Long id) {
        this.id = id;
    }

    public String getContent() {
        return content;
    }

    public void setContent(String content) {
        this.content = content;
    }

    public String getTitle() {
        return title;
    }

    public void setTitle(String title) {
        this.title = title;
    }
}

ArticleDao.java

package com.lichun.dao;

import com.lichun.pojo.Article;
import org.springframework.data.domain.Page;
import org.springframework.data.domain.Pageable;
import org.springframework.data.elasticsearch.annotations.Query;
import org.springframework.data.elasticsearch.repository.ElasticsearchRepository;

import java.util.List;

/**
 * Type parameters <Article, Long>:
 * the first is the entity type this repository operates on,
 * the second is the type of that entity's document ID
 */
public interface ArticleDao extends ElasticsearchRepository<Article, Long> {
    // Fuzzy (like) query on title
    List<Article> findByTitleLike(String title);

    // Fuzzy query on title, ordered by id ascending
    List<Article> findByTitleLikeOrderByIdAsc(String title);

    // Fuzzy query with sorting and paging
    Page<Article> findByTitleLikeOrderByIdAsc(String title, Pageable pageable);

    // Custom query written in the ES query DSL; the ?0 placeholder is
    // replaced with the first method argument (a keyword matched against title)
    @Query("{\"match\": {\"title\": {\"query\": \"?0\"}}}")
    List<Article> luanxiede(String xiaxiede);
}

ESTest01.java

package com.lichun;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.lichun.pojo.Article;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.xcontent.XContentType;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.junit4.SpringRunner;

import java.util.Map;

@SpringBootTest
@RunWith(SpringRunner.class)
public class ESTest01 {

    // Client API connected to the ES server
    @Autowired
    private TransportClient transportClient;

    // JSON converter
    @Autowired
    private ObjectMapper objectMapper;

    @Test
    public void create() throws Exception {
        Article article = new Article(1L, "爱", "我爱王春婷");

        // arg 1: the index name
        // arg 2: the type name
        // arg 3: the document ID
        IndexResponse indexResponse = transportClient.prepareIndex("blog01", "article", "1")
                .setSource(objectMapper.writeValueAsString(article), XContentType.JSON)
                // execute: store the document on the ES server
                .get();
        System.out.println(indexResponse.getIndex() + ":" + indexResponse.getVersion() + ":" + indexResponse.getType());
    }

    @Test
    public void delete() {
        transportClient.prepareDelete("blog03", "article", "1").get();
    }

    @Test
    public void select() {
        GetResponse documentResponse = transportClient.prepareGet("blog01", "article", "1").get();
        Map<String, Object> sourceAsMap = documentResponse.getSourceAsMap();
        String sourceAsString = documentResponse.getSourceAsString();
        System.out.println(sourceAsMap);
        System.out.println(sourceAsString);
    }
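
    // A minimal partial-update sketch rounding out the CRUD operations
    // (assumes the document indexed in create() above exists):
    // setDoc merges the given fields into the stored document.
    @Test
    public void update() {
        transportClient.prepareUpdate("blog01", "article", "1")
                .setDoc("{\"title\":\"爱(更新)\"}", XContentType.JSON)
                .get();
    }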

}

ESTest02.java

package com.lichun;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.lichun.pojo.Article;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.query.*;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHits;
import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder;
import org.elasticsearch.search.fetch.subphase.highlight.HighlightField;
import org.elasticsearch.search.sort.SortOrder;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.junit4.SpringRunner;

import java.io.IOException;
import java.util.Map;

@SpringBootTest
@RunWith(SpringRunner.class)
public class ESTest02 {

    @Autowired
    private TransportClient transportClient;

    @Autowired
    private ObjectMapper objectMapper;

    // Create an index
    @Test
    public void onlyCreateIndex() {
        transportClient.admin().indices().prepareCreate("blog03").get();
    }

    // Delete an index
    @Test
    public void deleteIndex() {
        transportClient.admin().indices().prepareDelete("blog03").get();
    }

    // Manually create the mapping
    /**
     * Target mapping:
     * "mappings" : {
     *     "article" : {
     *         "properties" : {
     *             "id" : { "type" : "long", "store" : "false" },
     *             "title" : { "type" : "text", "analyzer" : "ik_smart", "index" : "true", "store" : "true" },
     *             "content" : { "type" : "text", "analyzer" : "ik_smart", "index" : "true", "store" : "true" }
     *         }
     *     }
     * }
     */
    @Test
    public void putMapping() throws Exception {
        // 1. create the index
        transportClient.admin().indices().prepareCreate("blog03").get();
        // 2. create the mapping for this index
        PutMappingRequest putMappingRequest = new PutMappingRequest("blog03");
        // set the type
        putMappingRequest.type("article");
        // define the field mappings
        XContentBuilder xContentBuilder = XContentFactory.jsonBuilder()
                .startObject()
                    .startObject("article")
                        .startObject("properties")
                            .startObject("id")
                                .field("type","long")
                                .field("store","false")
                            .endObject()
                            .startObject("title")
                                .field("type","text")
                                .field("store","true")
                                .field("analyzer","ik_smart")
                                .field("index","true")
                            .endObject()
                            .startObject("content")
                                .field("type","text")
                                .field("store","true")
                                .field("analyzer","ik_smart")
                                .field("index","true")
                            .endObject()
                        .endObject()
                    .endObject()
                .endObject();
        putMappingRequest.source(xContentBuilder);
        transportClient.admin().indices().putMapping(putMappingRequest).get();
    }

    // Create a document (add document data to the index)
    @Test
    public void createDocument() throws Exception {
        Article article = new Article(1L, "爱", "我爱王春婷");

        IndexResponse indexResponse = transportClient.prepareIndex("blog03", "article", "1")
                // set the document payload
                .setSource(objectMapper.writeValueAsString(article), XContentType.JSON)
                .get();
        System.out.println(indexResponse.getIndex() + ":" + indexResponse.getVersion() + ":" + indexResponse.getType());
    }

    // Create a document another way: build the JSON directly
    @Test
    public void createDocumentJson() throws Exception {
        XContentBuilder xContentBuilder = XContentFactory.jsonBuilder()
                .startObject()
                .field("id", 2)
                .field("title", "爱情有时候也不是很美好")
                .field("content", "王春婷哔哔哔")
                .endObject();
        transportClient.prepareIndex("blog03", "article", "2")
                .setSource(xContentBuilder)
                .get();
    }

    @Test
    public void deleteDocument() {
        transportClient.prepareDelete("blog03", "article", "2").get();
    }

    // Generate test data: one index request per document
    @Test
    public void createBatch() throws Exception {
        long start = System.currentTimeMillis();
        for (long i = 0; i < 100; i++) {
            Article article = new Article(i, "爱" + i, "我爱王春婷" + i);

            IndexResponse indexResponse = transportClient.prepareIndex("blog03", "article", i + "")
                    // set the document payload
                    .setSource(objectMapper.writeValueAsString(article), XContentType.JSON)
                    .get();
        }
        long end = System.currentTimeMillis();
        System.out.println("Elapsed: " + (end - start) + "ms");
    }

    // Generate test data more efficiently: a single bulk request for all documents
    @Test
    public void createBatch02() throws Exception {
        long start = System.currentTimeMillis();
        BulkRequestBuilder bulkRequestBuilder = transportClient.prepareBulk();
        for (long i = 0; i < 100; i++) {
            Article article = new Article(i, "爱情是美好滴" + i, "我爱王春婷forever" + i);

            String jsonstr = objectMapper.writeValueAsString(article);
            IndexRequest request = new IndexRequest("blog03", "article", i + "");
            request.source(jsonstr, XContentType.JSON);

            bulkRequestBuilder.add(request);

        }
        // execute the bulk import
        bulkRequestBuilder.get();
        long end = System.currentTimeMillis();
        System.out.println("Elapsed: " + (end - start) + "ms");
    }

    // Document queries

    @Test
    public void matchAllQuery() throws Exception {
        // build the query object and set the condition
        QueryBuilder query = QueryBuilders.matchAllQuery(); // like SELECT *

        // execute the search
        SearchResponse response = transportClient
                .prepareSearch("blog03")    // which index to search
                .setTypes("article")        // which type to search
                .setQuery(query)            // the query condition
                .get();                     // execute

        // get the result set
        SearchHits hits = response.getHits();
        System.out.println("Total hits: " + hits.getTotalHits());

        // iterate over the hits
        for (SearchHit hit : hits) {
            // each hit's _source is a JSON document representing an Article
            String articleJson = hit.getSourceAsString();
            System.out.println(articleJson);
            // the JSON can be deserialized back into an Article when needed
            Article article = objectMapper.readValue(articleJson, Article.class);
        }
    }

    // Query string query: the page passes in a free-text search string
    // Characteristics: only string fields are searched; the search text is analyzed (standard analyzer by default)
    @Test
    public void queryStringQuery() {
        SearchResponse searchResponse = transportClient.prepareSearch("blog03")
                .setTypes("article")
                // queryStringQuery(): the text to search for
                // field(): the field to search; if not set, all string fields are searched
                .setQuery(QueryBuilders.queryStringQuery("春婷").field("content"))
                .get();
        SearchHits hits = searchResponse.getHits();
        System.out.println("Total hits: " + hits.getTotalHits());
        for (SearchHit hit : hits) {
            System.out.println("Hit: " + hit.getSourceAsString());
        }
    }

    // Match query
    // Characteristics: the search text is analyzed first, the resulting terms are matched, and the matches are merged
    // The search-time analyzer is kept consistent with the one used to build the inverted index
    @Test
    public void matchQuery() {
        SearchResponse searchResponse = transportClient.prepareSearch("blog03")
                .setTypes("article")
                // arg 1: the field to search
                // arg 2: the text to search for
                .setQuery(QueryBuilders.matchQuery("content", "春婷"))
                .get();
        SearchHits hits = searchResponse.getHits();
        System.out.println("Total hits: " + hits.getTotalHits());
        for (SearchHit hit : hits) {
            System.out.println("Hit: " + hit.getSourceAsString());
        }
    }


    // Multi-field match query
    @Test
    public void multiMatchQuery() {
        SearchResponse searchResponse = transportClient.prepareSearch("blog03")
                .setTypes("article")
                // arg 1: the text to search for
                // remaining args: the fields to search across
                .setQuery(QueryBuilders.multiMatchQuery("春婷", "title", "content"))
                .get();
        SearchHits hits = searchResponse.getHits();
        System.out.println("Total hits: " + hits.getTotalHits());
        for (SearchHit hit : hits) {
            System.out.println("Hit: " + hit.getSourceAsString());
        }
    }



    // Wildcard query (see also the * sketch after this method)
    // * matches any sequence of characters, including the empty one
    // ? matches exactly one character, e.g. 手机?
    @Test
    public void wildcardQuery() {
        SearchResponse searchResponse = transportClient
                .prepareSearch("blog03")
                .setTypes("article")
                .setQuery(QueryBuilders.wildcardQuery("content", "美?"))
                .get();
        SearchHits hits = searchResponse.getHits();
        System.out.println("Total hits: " + hits.getTotalHits());
        for (SearchHit hit : hits) {
            System.out.println("Hit: " + hit.getSourceAsString());
        }
    }
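
    // A small companion sketch for the * wildcard described above
    // (assumes the same index and test data):
    @Test
    public void wildcardStarQuery() {
        SearchHits hits = transportClient
                .prepareSearch("blog03")
                .setTypes("article")
                .setQuery(QueryBuilders.wildcardQuery("content", "美*"))
                .get()
                .getHits();
        System.out.println("Total hits: " + hits.getTotalHits());
    }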

    // Fuzzy query
    // Matches terms similar to the given word (within an edit distance)
    @Test
    public void fuzzyQuery() {
        SearchResponse searchResponse = transportClient
                .prepareSearch("blog03")
                .setTypes("article")
                // "forarer" is presumably a misspelling of the indexed "forever", to demonstrate fuzziness
                .setQuery(QueryBuilders.fuzzyQuery("content", "forarer"))
                .get();
        SearchHits hits = searchResponse.getHits();
        System.out.println("Total hits: " + hits.getTotalHits());
        for (SearchHit hit : hits) {
            System.out.println("Hit: " + hit.getSourceAsString());
        }
    }

    // Range query
    @Test
    public void rangeQuery() {
        // 1. build the query, set the condition, execute
        SearchResponse searchResponse = transportClient
                .prepareSearch("blog03")
                .setTypes("article")
                // gte: >=, lte: <=
                // from/to take a second flag marking the bound as inclusive
                .setQuery(QueryBuilders.rangeQuery("id").from(5, true).to(10, true))
                .get();
        SearchHits hits = searchResponse.getHits();
        System.out.println("Total hits: " + hits.getTotalHits());
        for (SearchHit hit : hits) {
            System.out.println("Hit: " + hit.getSourceAsString());
        }
    }

    // Term query
    // Characteristics: the search text is not analyzed; it is matched against the indexed terms as a whole
    @Test
    public void termQuery() {
        // 1. build the query, set the condition, execute
        SearchResponse searchResponse = transportClient
                .prepareSearch("blog03")
                .setTypes("article")
                // arg 1: the field to search; arg 2: the exact term to match (no analysis)
                .setQuery(QueryBuilders.termQuery("title", "美好"))
                .get();
        SearchHits hits = searchResponse.getHits();
        System.out.println("Total hits: " + hits.getTotalHits());
        for (SearchHit hit : hits) {
            System.out.println("Hit: " + hit.getSourceAsString());
        }
    }

    // Bool query (combining multiple conditions; a sketch of the other clauses follows this method)
    // Condition 1: term query title = 美好
    // Condition 2: id in the range [10, 15)
    @Test
    public void boolQuery() {
        TermQueryBuilder queryBuilder1 = QueryBuilders.termQuery("title", "美好");
        RangeQueryBuilder queryBuilder2 = QueryBuilders.rangeQuery("id").gte(10).lt(15);

        BoolQueryBuilder boolQueryBuilder = QueryBuilders.boolQuery();
        // must      must match, like AND (scored)
        // mustNot   must not match, like NOT
        // should    should match, like OR
        // filter    must match, like AND, but does not affect the score
        boolQueryBuilder.filter(queryBuilder1);
        boolQueryBuilder.filter(queryBuilder2);

        SearchResponse searchResponse = transportClient
                .prepareSearch("blog03")
                .setTypes("article")
                .setQuery(boolQueryBuilder)
                .get();
        SearchHits hits = searchResponse.getHits();
        System.out.println("Total hits: " + hits.getTotalHits());
        for (SearchHit hit : hits) {
            System.out.println("Hit: " + hit.getSourceAsString());
        }
    }
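
    // A minimal sketch combining the other bool clauses (assumes the same index and data):
    // must ANDs and scores, mustNot excludes, should is optional and boosts the score.
    @Test
    public void boolQueryMustShould() {
        BoolQueryBuilder bool = QueryBuilders.boolQuery()
                .must(QueryBuilders.matchQuery("content", "王春婷"))
                .mustNot(QueryBuilders.termQuery("title", "美好"))
                .should(QueryBuilders.rangeQuery("id").lt(20));
        SearchHits hits = transportClient
                .prepareSearch("blog03")
                .setTypes("article")
                .setQuery(bool)
                .get()
                .getHits();
        System.out.println("Total hits: " + hits.getTotalHits());
    }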

    // Paging and sorting (like LIMIT 0,10 in SQL)
    @Test
    public void pageSortQuery() {
        // 1. build the query, set the condition, execute
        SearchResponse searchResponse = transportClient
                .prepareSearch("blog03")
                .setTypes("article")
                .setFrom(3)     // offset: (page - 1) * rows
                .setSize(10)    // rows per page
                .addSort("id", SortOrder.DESC)
                .setQuery(QueryBuilders.termQuery("title", "爱情"))
                .get();
        SearchHits hits = searchResponse.getHits();
        System.out.println("Total hits: " + hits.getTotalHits());
        for (SearchHit hit : hits) {
            System.out.println("Hit: " + hit.getSourceAsString());
        }
    }

    // Highlighting at query time
    // Say the user searches for 爱情
    // The page then receives data like: {"title": "<em style=\"color:red\">爱情</em>"}
    // Internally: the query text is analyzed and matched; matched fragments are cut out
    // and wrapped with the configured prefix and suffix, then returned
    // You must set the highlight field plus the prefix and suffix,
    // and read the highlighted data out of the response to return it to the page
    @Test
    public void highlightQuery() throws IOException {
        // set the highlight field and its prefix/suffix tags
        HighlightBuilder highlightBuilder = new HighlightBuilder();
        highlightBuilder
                .field("content")
                .preTags("<span style=\"color:red\">")
                .postTags("</span>");

        // build the query, set the condition, execute
        SearchResponse searchResponse = transportClient
                .prepareSearch("blog03")
                .setTypes("article")
                // attach the highlight settings
                .highlighter(highlightBuilder)
                .setQuery(QueryBuilders.matchQuery("content", "王春婷"))
                .get();

        // hit.getSourceAsString() returns the original _source, without highlighting
        SearchHits hits = searchResponse.getHits();
        System.out.println("Total hits: " + hits.getTotalHits());
        for (SearchHit hit : hits) {
            // System.out.println("Hit: " + hit.getSourceAsString());

            // read the highlighted data:
            // key is the highlighted field, value holds that field's highlight fragments
            Map<String, HighlightField> highlightFields = hit.getHighlightFields();
            // e.g. [content], fragments[[我爱<span style="color:red">王</span><span style="color:red">春</span><span style="color:red">婷</span>forever2]]
            HighlightField highlightField = highlightFields.get("content");
            Text[] fragments = highlightField.getFragments();
            StringBuilder sb = new StringBuilder();
            for (Text fragment : fragments) {
                String highlight = fragment.string();
                sb.append(highlight);
            }
            // e.g. 我爱<span style="color:red">王</span><span style="color:red">春</span><span style="color:red">婷</span>forever2
            String highlighted = sb.toString();
            Article article = objectMapper.readValue(hit.getSourceAsString(), Article.class);
            // the highlighted field is content, so write the fragment back into content
            article.setContent(highlighted);
            System.out.println(article);
        }
    }
}

TestDataES.java

package com.lichun;

import com.lichun.dao.ArticleDao;
import com.lichun.pojo.Article;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.data.domain.Page;
import org.springframework.data.domain.PageRequest;
import org.springframework.data.domain.Pageable;
import org.springframework.data.domain.Sort;
import org.springframework.data.elasticsearch.core.ElasticsearchTemplate;
import org.springframework.test.context.junit4.SpringRunner;

import java.util.ArrayList;
import java.util.List;

@SpringBootTest
@RunWith(SpringRunner.class)
public class TestDataES {

    @Autowired
    private ArticleDao articleDao;

    @Autowired
    private ElasticsearchTemplate elasticsearchTemplate;


    // Create the index and mapping from the annotations on Article
    @Test
    public void createIndexAndMapping(){
        elasticsearchTemplate.createIndex(Article.class);
        elasticsearchTemplate.putMapping(Article.class);
    }

    // Create (index) a document
    @Test
    public void createDocument() {
        Article article = new Article(1L, "谢谢关注我的博客", "哔哔哔请点赞支持");
        articleDao.save(article);
    }

    // Batch-create documents
    @Test
    public void createDocuments() {
        List<Article> articles = new ArrayList<>();
        for (long i = 0; i < 100; i++) {
            Article article = new Article(i,"感谢关注我的blog!" + i,"枯木何日可逢春啊" + i);
            articles.add(article);
        }
        articleDao.saveAll(articles);
    }

    // Delete a document
    @Test
    public void deleteDocument() {
        articleDao.deleteById(1L);
    }

    // Query by ID
    @Test
    public void selectById() {
        Article article = articleDao.findById(1L).get();
        System.out.println(article);
    }

    // Query all documents
    @Test
    public void findAll() {
        Iterable<Article> all = articleDao.findAll();
        for (Article article : all) {
            System.out.println(article);
        }
    }

    // Query all, sorted by id ascending
    @Test
    public void findAllAndSort() {
        Iterable<Article> iterable = articleDao.findAll(Sort.by(Sort.Order.asc("id")));
        for (Article article : iterable) {
            System.out.println(article);
        }
    }

    // Paged query (the custom-finder sketch after this method builds on the same data)
    @Test
    public void findAllAndPage() {
        Iterable<Article> all = articleDao.findAll();
        // 参数1 指定排序的类型
        // 参数2 指定排序的字段
        Sort sort = new Sort(Sort.Direction.DESC,"id");
        // 参数1 指定当前的页码
        // 参数2 指定每页显示的行
        // 参数3 指定排序对象
        Pageable pageble = PageRequest.of(0, 10, sort);
        Page<Article> page = articleDao.findAll(pageble);

        long totalElements = page.getTotalElements();
        System.out.println("Total records: " + totalElements);
        int totalPages = page.getTotalPages();
        System.out.println("Total pages: " + totalPages);

        // records on the current page
        List<Article> content = page.getContent();
        for (Article article : content) {
            System.out.println(article.getTitle());
        }
    }
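
    // A minimal sketch exercising the custom finder methods declared in ArticleDao
    // (assumes the index holds the batch data created above; the keywords are arbitrary):
    @Test
    public void customFinders() {
        // derived query: fuzzy match on title
        List<Article> byTitle = articleDao.findByTitleLike("blog");
        byTitle.forEach(System.out::println);

        // derived query with sorting and paging: first page, 5 per page, id ascending
        Page<Article> page = articleDao.findByTitleLikeOrderByIdAsc("blog", PageRequest.of(0, 5));
        page.getContent().forEach(System.out::println);

        // @Query method: the ?0 placeholder is replaced with the argument
        List<Article> byDsl = articleDao.luanxiede("感谢");
        byDsl.forEach(System.out::println);
    }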

}