Integrating Spring Boot (2.2.4.RELEASE) with Elasticsearch 7.15.2

1. Install Elasticsearch 7.17.1 with Docker

A. Pull the Docker images
    docker pull elasticsearch:7.17.1  # stores and searches the data
    docker pull kibana:7.17.1   # visual interface for querying the data
B. Run the following commands on Linux:
    mkdir -p  /mydata/elasticsearch/config
    mkdir -p  /mydata/elasticsearch/plugins/ik
    mkdir -p  /mydata/elasticsearch/data
    echo "http.host: 0.0.0.0" >> /mydata/elasticsearch/config/elasticsearch.yml
    chmod -R 777 /mydata/elasticsearch/   # make sure the container can write to these directories
C. Run Elasticsearch 7.17.1 in Docker:
    docker run --name elasticsearch7.17.1  -p 9200:9200 -p 9300:9300  -e "discovery.type=single-node" -e ES_JAVA_OPTS="-Xms64m -Xmx256m" -v /mydata/elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml -v /mydata/elasticsearch/data:/usr/share/elasticsearch/data -v /mydata/elasticsearch/plugins:/usr/share/elasticsearch/plugins -d elasticsearch:7.17.1

    # Note: -e ES_JAVA_OPTS="-Xms64m -Xmx256m" sets the initial and maximum JVM heap for a test environment; without it Elasticsearch may claim too much memory and fail to start.
D. Install the Kibana 7.17.1 web UI:
    docker run --name kibana7.17.1 -e ELASTICSEARCH_HOSTS=http://192.168.236.133:9200 -p 5601:5601 -d kibana:7.17.1
    # replace the IP address with your own
    
E. Install the IK analyzer by running the following commands in order:
    cd /mydata/elasticsearch/plugins/ik
    Download it: wget https://github.com/medcl/elasticsearch-analysis-ik/releases/download/v7.17.1/elasticsearch-analysis-ik-7.17.1.zip
    Unzip the download: unzip elasticsearch-analysis-ik-7.17.1.zip
    Remove the archive: rm -rf *.zip
    Restart the container, then verify that the IK analyzer is installed:
        1. docker exec -it <ES container id> /bin/bash
        2. cd ../bin
        3. elasticsearch-plugin list   # lists the installed plugins
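
To double-check that the plugin actually tokenizes Chinese text, the _analyze API can be called from Kibana or from the Java client set up in section 3 below. A minimal sketch (not part of the original steps) that could be dropped into the test class from section 4; it needs imports for org.elasticsearch.client.indices.AnalyzeRequest, org.elasticsearch.client.indices.AnalyzeResponse and org.elasticsearch.client.RequestOptions:

    @Test
    public void checkIkAnalyzer() throws IOException {
        // analyze a sample phrase with the ik_max_word analyzer and print the resulting terms
        AnalyzeRequest analyzeRequest = AnalyzeRequest.withGlobalAnalyzer("ik_max_word", "中华人民共和国");
        AnalyzeResponse analyzeResponse = esClient.indices().analyze(analyzeRequest, RequestOptions.DEFAULT);
        for (AnalyzeResponse.AnalyzeToken token : analyzeResponse.getTokens()) {
            System.out.println(token.getTerm());   // each token produced by the IK analyzer
        }
    }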



2. Add the Elasticsearch dependencies

        <!-- https://mvnrepository.com/artifact/org.elasticsearch.client/elasticsearch-rest-high-level-client -->
        <dependency>
            <groupId>org.elasticsearch</groupId>
            <artifactId>elasticsearch</artifactId>
            <version>7.15.2</version>
        </dependency>
        <!-- There is no Spring Data Elasticsearch release that matches this ES version for this Spring Boot
        version, which triggers a warning at startup. So Spring Data Elasticsearch is dropped and
        elasticsearch-rest-high-level-client is used to talk to ES directly. -->
        <dependency>
            <groupId>org.elasticsearch.client</groupId>
            <artifactId>elasticsearch-rest-high-level-client</artifactId>
            <version>7.15.2</version>
        </dependency>
        <dependency>
            <groupId>org.elasticsearch.client</groupId>
            <artifactId>elasticsearch-rest-client</artifactId>
            <version>7.15.2</version>
        </dependency>

3. Create the configuration class

import org.apache.http.HttpHost;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
public class RestClientConfig {
    public static final RequestOptions COMMON_OPTIONS;
    static {
        RequestOptions.Builder builder = RequestOptions.DEFAULT.toBuilder();
        // builder.addHeader("Authorization", "Bearer " + TOKEN);
        // builder.setHttpAsyncResponseConsumerFactory(
        //         new HttpAsyncResponseConsumerFactory
        //                 .HeapBufferedResponseConsumerFactory(30 * 1024 * 1024 * 1024));
        COMMON_OPTIONS = builder.build();
    }
    @Bean
    public RestHighLevelClient esRestClient(){
        return new RestHighLevelClient(
                RestClient.builder(new HttpHost("192.168.236.133", 9200, "http")));
    }
}
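
As a variant (not from the original article), the hard-coded address can be moved into application.properties. A minimal sketch, assuming made-up property names elasticsearch.host and elasticsearch.port whose defaults fall back to the values used above; the COMMON_OPTIONS constant from the class above stays unchanged and is omitted here for brevity:

import org.apache.http.HttpHost;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
public class RestClientConfig {

    // hypothetical property names; the defaults match the hard-coded values used above
    @Value("${elasticsearch.host:192.168.236.133}")
    private String host;

    @Value("${elasticsearch.port:9200}")
    private int port;

    @Bean(destroyMethod = "close")   // close the underlying HTTP connections on shutdown
    public RestHighLevelClient esRestClient() {
        return new RestHighLevelClient(
                RestClient.builder(new HttpHost(host, port, "http")));
    }
}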

4. API tests

import com.fasterxml.jackson.databind.ObjectMapper;
import com.xufei.demo.config.RestClientConfig;
import com.xufei.demo.dao.User;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.indices.CreateIndexRequest;
import org.elasticsearch.client.indices.CreateIndexResponse;
import org.elasticsearch.client.indices.GetIndexRequest;
import org.elasticsearch.client.indices.GetIndexResponse;
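// imports used by the putMapping and getOne sketches added below (not in the original article)
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.client.indices.PutMappingRequest;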
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.SearchHits;
import org.elasticsearch.search.aggregations.AggregationBuilder;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder;
import org.junit.jupiter.api.Test;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import java.io.IOException;
import java.util.Arrays;
@SpringBootTest
public class ElasticSearchTest {
    @Autowired
    private RestHighLevelClient esClient;

    /**
     * Create an index
     * @throws IOException
     */
    @Test
    public void createIndexRequest() throws IOException {
        CreateIndexRequest request = new CreateIndexRequest("user");
        CreateIndexResponse createIndexResponse = esClient.indices().create(request, RestClientConfig.COMMON_OPTIONS);
        boolean acknowledged = createIndexResponse.isAcknowledged();
        System.out.println(acknowledged);
    }

    /**
     * Delete an index
     * @throws IOException
     */
    @Test
    public void deleteIndexRequest() throws IOException {
        DeleteIndexRequest request = new DeleteIndexRequest("user");
        AcknowledgedResponse delete = esClient.indices().delete(request, RestClientConfig.COMMON_OPTIONS);
        boolean acknowledged = delete.isAcknowledged();
        System.out.println(acknowledged);
    }

    /**
     * Query an index
     * @throws IOException
     */
    @Test
    public void findIndexRequest() throws IOException {
        // get the index information
        GetIndexRequest request = new GetIndexRequest("user");
        GetIndexResponse getIndexResponse = esClient.indices().get(request, RestClientConfig.COMMON_OPTIONS);
        // response details
        System.out.println(getIndexResponse.getAliases());
        System.out.println(getIndexResponse.getMappings());
        System.out.println(getIndexResponse.getSettings());
    }
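
    /**
     * Define the mapping of the "user" index.
     * A minimal sketch, not from the original article: the mapping can also be created with the
     * Java client instead of in Kibana. The field definitions here simply mirror the User entity.
     */
    @Test
    public void putMapping() throws IOException {
        PutMappingRequest request = new PutMappingRequest("user");
        request.source("{\"properties\": {"
                + "\"name\": {\"type\": \"keyword\"},"
                + "\"sex\": {\"type\": \"keyword\"},"
                + "\"age\": {\"type\": \"integer\"}}}", XContentType.JSON);
        AcknowledgedResponse response = esClient.indices().putMapping(request, RestClientConfig.COMMON_OPTIONS);
        System.out.println(response.isAcknowledged());
    }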

    /**
     * Single-document insert test
     * Note: after creating the index, define its mapping (in Kibana, or with the putMapping sketch above) before running the insert tests
     */
    @Test
    public void insertOne() throws IOException {
        // insert a document
        IndexRequest request = new IndexRequest();
        request.index("user").id("1009");
        User user = new User();
        user.setAge(14);
        user.setName("Jack");
        user.setSex("男");
        // documents must be converted to JSON before being indexed into ES
        ObjectMapper mapper = new ObjectMapper();
        String userJson = mapper.writeValueAsString(user);
        request.source(userJson, XContentType.JSON);
        IndexResponse response = esClient.index(request, RestClientConfig.COMMON_OPTIONS);
        System.out.println(response.getResult());
    }
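
    /**
     * Read back a single document by id.
     * A minimal sketch, not from the original article, useful for verifying the insert above.
     */
    @Test
    public void getOne() throws IOException {
        GetRequest request = new GetRequest("user", "1009");
        GetResponse response = esClient.get(request, RestClientConfig.COMMON_OPTIONS);
        System.out.println(response.isExists());
        System.out.println(response.getSourceAsString());
    }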

    /**
     * Bulk insert test
     * @throws IOException
     */
    @Test
    public void insertList() throws IOException {
        // bulk-insert documents
        BulkRequest request = new BulkRequest();
        User user = new User();
        user.setAge(17);
        user.setName("Mary");
        user.setSex("男");
        // documents must be converted to JSON before being indexed into ES
        ObjectMapper mapper = new ObjectMapper();
        String userJson = mapper.writeValueAsString(user);
        request.add(new IndexRequest().index("user").id("1010").source(userJson, XContentType.JSON));
        BulkResponse response = esClient.bulk(request, RestClientConfig.COMMON_OPTIONS);
        System.out.println(response.getTook());
        System.out.println(Arrays.toString(response.getItems()));
    }

    /**
     * Single-document delete test
     */
    @Test
    public void deleteOne() throws IOException {
        DeleteRequest request = new DeleteRequest();
        request.index("user").id("1009");
        DeleteResponse response = esClient.delete(request, RestClientConfig.COMMON_OPTIONS);
        System.out.println(response.toString());
    }

    /**
     * Bulk delete test
     * @throws IOException
     */
    @Test
    public void deleteList() throws IOException {
        BulkRequest request = new BulkRequest();
        request.add(new DeleteRequest().index("user").id("1001"));
        request.add(new DeleteRequest().index("user").id("1002"));
        request.add(new DeleteRequest().index("user").id("1003"));
        BulkResponse response = esClient.bulk(request, RestClientConfig.COMMON_OPTIONS);
        System.out.println(response.getTook());
        System.out.println(Arrays.toString(response.getItems()));
    }
    /**
     * Update test
     */
    @Test
    public void update() throws IOException {
        // update the document
        UpdateRequest request = new UpdateRequest();
        request.index("user").id("1009");
        request.doc(XContentType.JSON, "sex", "男", "name", "Jack01");
        UpdateResponse response = esClient.update(request, RestClientConfig.COMMON_OPTIONS);
        System.out.println(response.getResult());
    }

    /**
     * This query method is only an illustration of how a query is built and is not meant to be run as-is
     * @throws IOException
     */
    //@Test
    public void search() throws IOException {
        /**
         * Create the SearchRequest (and set the name of the index to query)
         */
        SearchRequest request = new SearchRequest();
        request.indices("user");// "user" is the index name
        /**
         * Create the SearchSourceBuilder (all query rules are configured through this object)
         */
        SearchSourceBuilder builder = new SearchSourceBuilder();

        HighlightBuilder highlightBuilder = new HighlightBuilder();
        AggregationBuilder aggregationBuilder = AggregationBuilders.terms("ageGroup").field("fieldName");
        BoolQueryBuilder boolQueryBuilder = QueryBuilders.boolQuery();

        /**
         * 1. Source filtering: fetchSource(include, exclude) sets the fields you want returned and the fields you don't
         */
        builder.fetchSource("name", "sex")
                /**
                 2. Set the query (pick any ONE of the options below; exactly one query is required)
                     A. Compound query (combines multiple conditions): BoolQueryBuilder boolQueryBuilder = QueryBuilders.boolQuery();
                         boolQueryBuilder.must(QueryBuilders.matchQuery("sex", "男")); // like "and"
                         boolQueryBuilder.mustNot(QueryBuilders.matchQuery("sex", "男")); // excludes matches
                         boolQueryBuilder.should(QueryBuilders.matchQuery("age", 30)); // like "or"
                     B. Single-condition query: TermQueryBuilder termQueryBuilder = QueryBuilders.termQuery("fieldName", "fieldValue");
                     C. Match-all query: MatchAllQueryBuilder matchAllQueryBuilder = QueryBuilders.matchAllQuery();
                     D. Range query: RangeQueryBuilder rangeQuery = QueryBuilders.rangeQuery("fieldName");
                         rangeQuery.gte(30);
                         rangeQuery.lt(50);
                     E. Fuzzy query: FuzzyQueryBuilder fuzzyQueryBuilder = QueryBuilders.fuzzyQuery("fieldName", "inputValue").fuzziness(Fuzziness.TWO);
                         Fuzziness.TWO means a match may differ from the input by up to 2 characters (the allowed edit distance must be set)
                     F. Filter query: boolQueryBuilder.filter(QueryBuilders.rangeQuery("age").gt(25).lt(55));
                     G. For non-"text" fields, prefer: TermQueryBuilder termQueryBuilder = QueryBuilders.termQuery("age", 30);
                         builder.query(termQueryBuilder);
                     H. Phrase match: MatchPhraseQueryBuilder matchPhraseQueryBuilder = QueryBuilders.matchPhraseQuery("fieldName", "fieldValue");
                         builder.query(matchPhraseQueryBuilder); // the query value is matched as a whole phrase, not split into terms
                 */
                .query(boolQueryBuilder)
                /**
                 * 3. Post filter: builder.postFilter(QueryBuilders.rangeQuery("age").gt(25).lt(35));
                 */
                .postFilter(QueryBuilders.rangeQuery("age").gt(25).lt(35))
                /**
                 * 4. Paging
                 *      size(int size) // number of records per page
                 *      from(int from) // (current page number - 1) * records per page
                 */
                .size(1).from(4)
                /**
                 * 5. Aggregations: aggregation(AggregationBuilder aggregation); two common cases:
                 *      1. Max / min / average, etc.: AggregationBuilder aggregationBuilder = AggregationBuilders.max("maxAge").field("fieldName");
                 *          "maxAge" names the result; field() picks the field whose maximum is taken
                 *      2. Group by (terms): AggregationBuilder aggregationBuilder = AggregationBuilders.terms("ageGroup").field("fieldName");
                 *          "ageGroup" names the result; field() picks the field to group by
                 */
                .aggregation(aggregationBuilder)
                /**
                 * 6. Sorting: sort(String name, SortOrder order);
                 */
                .sort("name")
                /**
                 * 7. Highlighting, for example:
                 *      SearchSourceBuilder builder = new SearchSourceBuilder();
                 *      HighlightBuilder highlightBuilder = new HighlightBuilder();
                 *      highlightBuilder.preTags("<font color='red'>");
                 *      highlightBuilder.postTags("</font>");
                 *      highlightBuilder.field("fieldName");
                 *      builder.highlighter(highlightBuilder);
                 */
                .highlighter(highlightBuilder);

        /**
         * Attach the SearchSourceBuilder to the SearchRequest
         */
        request.source(builder);
        /**
         * Execute the search via the client's "search" method
         */
        SearchResponse response = esClient.search(request, RestClientConfig.COMMON_OPTIONS);
        SearchHits hits = response.getHits();// the matching documents
        System.out.println(hits.getTotalHits());// total number of hits
        System.out.println(response.getTook());// time the search took
    }
}
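
The search() example above only prints the hit count and the elapsed time. Below is a minimal sketch (not from the original article) of how the hits, any highlight fragments, and the "ageGroup" terms aggregation registered above could be read from the SearchResponse:

import java.util.Map;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.aggregations.bucket.terms.Terms;
import org.elasticsearch.search.fetch.subphase.highlight.HighlightField;

public class SearchResultPrinter {
    public static void print(SearchResponse response) {
        // each hit carries the raw _source plus any highlight fragments that were requested
        for (SearchHit hit : response.getHits()) {
            System.out.println(hit.getSourceAsString());
            Map<String, HighlightField> highlights = hit.getHighlightFields();
            highlights.forEach((field, fragments) -> System.out.println(field + " -> " + fragments));
        }
        // assumes a terms aggregation was registered under the name "ageGroup", as in search() above
        Terms ageGroup = response.getAggregations().get("ageGroup");
        for (Terms.Bucket bucket : ageGroup.getBuckets()) {
            System.out.println(bucket.getKeyAsString() + " : " + bucket.getDocCount());
        }
    }
}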

5. Basic Kibana usage (start the container and open http://192.168.236.133:5601/)

 

Notes on mapping fields:

type: the field type. Elasticsearch supports a rich set of data types; the key ones are:
String types, of which there are two:
    text: analyzed (tokenized), so a query can match without containing the complete field value
    keyword: not analyzed; the value is matched as a whole, so a query must contain the complete field value to hit it
Numerical: numeric types, in two groups:
    basic types: long, integer, short, byte, double, float, half_float
    high-precision scaled floating point: scaled_float
Date: date type
Array: array type
Object: object type
index: whether the field is indexed; defaults to true, so without any configuration every field is indexed.
    true: the field is indexed and can be searched
    false: the field is not indexed and cannot be searched
store: whether the field is stored separately; defaults to false.
    The original document is kept in _source, and by default individual fields are not stored separately but are extracted from _source. You can store a field on its own by setting "store": true; reading a stored field is much faster than parsing it out of _source, but it takes extra space, so decide based on your actual needs.
analyzer: the analyzer to use; ik_max_word here means the IK analyzer, which will be covered in a dedicated section later (a minimal Java example of a mapping that uses it follows below).
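
Putting these attributes together, here is a minimal sketch (not from the original article) of creating an index with such a mapping from the Java client; the index name "article" and its fields are made up for illustration:

import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.indices.CreateIndexRequest;
import org.elasticsearch.client.indices.CreateIndexResponse;
import org.elasticsearch.common.xcontent.XContentType;

public class CreateMappedIndexExample {
    public static void createArticleIndex(RestHighLevelClient esClient) throws java.io.IOException {
        CreateIndexRequest request = new CreateIndexRequest("article");
        // title:  text analyzed with ik_max_word, so partial values can match
        // author: keyword, matched only as the complete value
        // draft:  not indexed, so it cannot be searched
        // views:  stored separately in addition to _source
        request.mapping("{\"properties\": {"
                + "\"title\":  {\"type\": \"text\", \"analyzer\": \"ik_max_word\"},"
                + "\"author\": {\"type\": \"keyword\"},"
                + "\"draft\":  {\"type\": \"keyword\", \"index\": false},"
                + "\"views\":  {\"type\": \"integer\", \"store\": true}}}", XContentType.JSON);
        CreateIndexResponse response = esClient.indices().create(request, RequestOptions.DEFAULT);
        System.out.println(response.isAcknowledged());
    }
}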
