spring cloud 整合elasticsearch 创建索引支持ik中文分词和拼音分词

 环境:jdk1.8、spring cloud Greenwich.SR6、spring boot 2.1.9、elasticsearch-7.5.0(整合ik,拼音分词)

下载 elasticsearch:

官网:下载 Elastic 产品 | Elastic

如果不想自己集成分词器或者官网下载太慢可通过这个地址下载:elasticsearch下载7.x-搜索引擎文档类资源-CSDN下载

话不多说直接上代码:

1.引入elasticsearch包,elasticsearch包和lucene包一定要找到相互对应版本,pom.xml添加

 <!-- start: Elasticsearch integration.
      The ES versions from the Spring Boot starter are excluded and pinned
      explicitly to 7.5.0 so client and server versions match. -->
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-data-elasticsearch</artifactId>
            <exclusions>
                <exclusion>
                    <groupId>org.elasticsearch</groupId>
                    <artifactId>elasticsearch</artifactId>
                </exclusion>
                <exclusion>
                    <groupId>org.elasticsearch.client</groupId>
                    <artifactId>elasticsearch-rest-client</artifactId>
                </exclusion>
                <exclusion>
                    <groupId>org.elasticsearch.client</groupId>
                    <artifactId>elasticsearch-rest-high-level-client</artifactId>
                </exclusion>
            </exclusions>
        </dependency>
        <dependency>
            <groupId>org.elasticsearch</groupId>
            <artifactId>elasticsearch</artifactId>
            <version>7.5.0</version>
        </dependency>
        <dependency>
            <groupId>org.elasticsearch.client</groupId>
            <artifactId>elasticsearch-rest-client</artifactId>
            <version>7.5.0</version>
        </dependency>
        <dependency>
            <groupId>org.elasticsearch.client</groupId>
            <artifactId>elasticsearch-rest-high-level-client</artifactId>
            <version>7.5.0</version>
        </dependency>
        <!-- end: Elasticsearch integration -->
        <!-- start: Lucene indexing.
             Lucene 8.3.0 is the version bundled with Elasticsearch 7.5.0 —
             the two version numbers must stay in sync. -->
        <dependency>
            <groupId>org.apache.lucene</groupId>
            <artifactId>lucene-core</artifactId>
            <version>8.3.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.lucene</groupId>
            <artifactId>lucene-analyzers-common</artifactId>
            <version>8.3.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.lucene</groupId>
            <artifactId>lucene-queryparser</artifactId>
            <version>8.3.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.lucene</groupId>
            <artifactId>lucene-analyzers-smartcn</artifactId>
            <version>8.3.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.lucene</groupId>
            <artifactId>lucene-highlighter</artifactId>
            <version>8.3.0</version>
        </dependency>
        <!-- end: Lucene indexing -->

 2.添加elasticsearch连接配置,application.yml添加

# Elasticsearch connection settings (bound via @Value in ElasticsearchConfig)
es:
    # NOTE(review): username/password are declared here but the visible client
    # code never applies them — confirm whether the cluster requires auth.
    username: test
    password: test
    # host:port pairs, comma separated for multiple nodes
    cluster-nodes: 127.0.0.1:9200
    cluster-name: elastic-test
    # all timeouts in milliseconds
    connectTimeOut: 500
    socketTimeOut: 1500
    connectionRequestTimeOut: 500
    # connection pool limits
    maxConnectNum: 200
    maxConnectPerRoute: 100

3.连接elasticsearch,创建ElasticsearchConfig 类

import lombok.Data;
import org.apache.http.HttpHost;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientBuilder;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.stereotype.Component;

/**
 * Elasticsearch client configuration.
 * <p>
 * Builds a {@link RestHighLevelClient} from the {@code es.*} properties in
 * application.yml.
 *
 * @author jiuwu
 * @className: ElasticsearchConfig
 * @description: Elasticsearch configuration
 * @create 2022-02-11 09:55:26
 **/
@Data
@Component
public class ElasticsearchConfig {

    /** Connection scheme; only plain HTTP is supported here. */
    private static final String SCHEME = "http";
    /**
     * Username.
     * NOTE(review): username/password are injected but never applied to the
     * client below (no CredentialsProvider is configured) — confirm whether
     * the cluster actually requires authentication.
     */
    @Value("${es.username}")
    private String username;
    /**
     * Password (see note on {@link #username}).
     */
    @Value("${es.password}")
    private String password;

    /**
     * Active environments, comma separated.
     */
    @Value("${spring.profiles.active}")
    private String active;
    /**
     * Cluster addresses as host:port pairs, comma separated.
     */
    @Value("${es.cluster-nodes}")
    private String clusterNodes;
    /**
     * TCP connect timeout (ms).
     */
    @Value("${es.connectTimeOut}")
    private int connectTimeOut;
    /**
     * Socket read timeout (ms).
     */
    @Value("${es.socketTimeOut}")
    private int socketTimeOut;
    /**
     * Timeout for leasing a connection from the pool (ms).
     */
    @Value("${es.connectionRequestTimeOut}")
    private int connectionRequestTimeOut;
    /**
     * Maximum total connections.
     */
    @Value("${es.maxConnectNum}")
    private int maxConnectNum;
    /**
     * Maximum connections per route.
     */
    @Value("${es.maxConnectPerRoute}")
    private int maxConnectPerRoute;

    /**
     * Parses {@link #clusterNodes} ("host:port,host:port,...") into hosts.
     */
    private HttpHost[] getHostList() {
        String[] nodes = clusterNodes.split(",");
        HttpHost[] httpHosts = new HttpHost[nodes.length];
        for (int i = 0; i < nodes.length; i++) {
            // split each node only once (original split the same string twice)
            String[] hostAndPort = nodes[i].split(":");
            // parseInt avoids the needless Integer boxing of Integer.valueOf
            httpHosts[i] = new HttpHost(hostAndPort[0], Integer.parseInt(hostAndPort[1]), SCHEME);
        }
        return httpHosts;
    }

    /**
     * Elasticsearch high-level REST client bean; Spring calls close() on
     * shutdown via destroyMethod.
     */
    @Bean(destroyMethod = "close")
    public RestHighLevelClient client() {
        RestClientBuilder builder = RestClient.builder(getHostList());
        // async http client timeout configuration
        builder.setRequestConfigCallback(requestConfigBuilder -> {
            requestConfigBuilder.setConnectTimeout(connectTimeOut);
            requestConfigBuilder.setSocketTimeout(socketTimeOut);
            requestConfigBuilder.setConnectionRequestTimeout(connectionRequestTimeOut);
            return requestConfigBuilder;
        });
        // async http client connection pool configuration
        builder.setHttpClientConfigCallback(httpClientBuilder -> {
            httpClientBuilder.setMaxConnTotal(maxConnectNum);
            httpClientBuilder.setMaxConnPerRoute(maxConnectPerRoute);
            return httpClientBuilder;
        });
        return new RestHighLevelClient(builder);
    }
}

4.创建数据model

import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;
import org.springframework.data.annotation.Id;
import org.springframework.data.elasticsearch.annotations.Document;
import org.springframework.data.elasticsearch.annotations.Field;
import org.springframework.data.elasticsearch.annotations.FieldType;


/**
 * Product index document model.
 *
 * @author jiuwu
 * @className: ProductIndexVO
 * @description: product index VO (original javadoc wrongly said "ProductIndex")
 * @create 2021-02-11 09:52:16
 **/
@Data
// NOTE(review): mapping types ("_doc") are removed in ES 7; the `type`
// attribute is only honored by older Spring Data Elasticsearch — confirm
// it is still required by the version in use here.
@Document(indexName = "product", type = "_doc", shards = 1, replicas = 0)
@ApiModel(value = "商品索引")
public class ProductIndexVO {

    // document id
    @Id
    @ApiModelProperty(value = "商品id")
    private Long productId;

    // product name: fine-grained ik_max_word at index time, coarse ik_smart at search time
    @Field(type = FieldType.Text, analyzer ="ik_max_word", searchAnalyzer = "ik_smart")
    @ApiModelProperty(value = "商品名称")
    private String productName;


    // brand name: same analyzer setup as productName
    @Field(type = FieldType.Text, analyzer ="ik_max_word", searchAnalyzer = "ik_smart")
    @ApiModelProperty(value = "品牌名称")
    private String brandName;
}

5.ElasticsearchConfig 类添加model->组装mapping方法,这里反射写成公共的方法方便后期使用

/**
     * Builds an index mapping by reflecting over the given model class.
     * <p>
     * Every field annotated with Spring Data's {@code @Field} becomes a mapping
     * property. When the annotation declares an analyzer, multi-field
     * sub-fields are added: {@code keyword_ik} (IK Chinese), {@code keyword_pinyin},
     * {@code keyword_english} and an exact-match {@code keyword}.
     *
     * @param clazz model class to reflect (wildcard keeps callers passing raw Class working)
     * @return the mapping builder, or null when building failed
     */
    public static XContentBuilder generateBuilder(Class<?> clazz) {
        try {
            java.lang.reflect.Field[] fields = clazz.getDeclaredFields();
            XContentBuilder builder = XContentFactory.jsonBuilder();
            builder.startObject();
            {
                builder.startObject("properties");
                {
                    for (java.lang.reflect.Field field : fields) {
                        if (!field.isAnnotationPresent(org.springframework.data.elasticsearch.annotations.Field.class)) {
                            continue;
                        }
                        org.springframework.data.elasticsearch.annotations.Field f =
                                field.getAnnotation(org.springframework.data.elasticsearch.annotations.Field.class);
                        // ES 7+ no longer uses mapping types, so properties sit directly here
                        builder.startObject(field.getName());
                        {
                            /** mapping data types must be lower case **/
                            String typeName = f.type().name().toLowerCase();
                            builder.field("type", typeName);
                            // BUGFIX: the original tested "null != f" AFTER already
                            // calling f.type() — the check was dead; isAnnotationPresent
                            // guarantees f is non-null.
                            if (null != f.analyzer() && !"".equals(f.analyzer())) {
                                builder.field("fielddata", true);
                                builder.field("term_vector", "with_positions_offsets");
                                builder.startObject("fields");
                                /** IK Chinese analysis sub-field **/
                                builder.startObject("keyword_ik");
                                builder.field("type", typeName);
                                /** fine-grained analysis at index time **/
                                builder.field("analyzer", f.analyzer());
                                /** coarse-grained analysis at search time **/
                                builder.field("search_analyzer", "ik_smart");
                                builder.endObject();
                                /** pinyin analysis sub-field **/
                                builder.startObject("keyword_pinyin");
                                builder.field("type", typeName);
                                builder.field("analyzer", "pinyin");
                                builder.field("search_analyzer", "pinyin");
                                builder.endObject();
                                /** English analysis sub-field (original comment wrongly said "pinyin") **/
                                builder.startObject("keyword_english");
                                builder.field("type", typeName);
                                builder.field("analyzer", "english");
                                builder.endObject();
                                /** exact-match keyword sub-field **/
                                builder.startObject("keyword");
                                builder.field("type", "keyword");
                                builder.field("ignore_above", 256);
                                builder.endObject();
                                builder.endObject();
                            }
                        }
                        builder.endObject();
                    }
                }
                builder.endObject();
            }
            builder.endObject();
            builder.close();
            return builder;
        } catch (Exception e) {
            // NOTE(review): swallowing the exception and returning null forces every
            // caller to null-check; consider rethrowing as an unchecked exception.
            e.printStackTrace();
            return null;
        }
    }

6.创建索引枚举类

/**
 * Index name enumeration: maps a logical index code (the ES index name) to a
 * human-readable description.
 *
 * @author jiuwu
 * @className: IndexEnum
 * @description: index code enum (original javadoc wrongly said "CodeEnum")
 * @create 2022-02-11 11:41:52
 **/
public enum IndexEnum {


    TEST("test", "测试索引")
    ;

    /** index name used against Elasticsearch */
    private String code;
    /** human-readable description */
    private String msg;

    IndexEnum(String code, String msg) {
        this.code = code;
        this.msg = msg;
    }


    public String getCode() {
        return this.code;
    }

    /**
     * @deprecated enum constants are shared singletons; mutating {@code code}
     * at runtime affects every user of the constant. Kept only for backward
     * compatibility — do not call.
     */
    @Deprecated
    public void setCode(String code) {
        this.code = code;
    }

    public String getMsg() {
        return this.msg;
    }

    /**
     * @deprecated see {@link #setCode(String)} — enum state should be immutable.
     */
    @Deprecated
    public void setMsg(String msg) {
        this.msg = msg;
    }
}

7.创建索引

    @Autowired
    private ElasticsearchConfig esConfig;

    // NOTE(review): this calls the @Bean factory method directly, so every call
    // builds a brand-new client instead of reusing the Spring-managed singleton
    // (and the methods below close it after each request). Prefer injecting
    // RestHighLevelClient itself.
    public RestHighLevelClient restHighLevelClient() {

        return  esConfig.client();
    }

    /**
     * Creates the index described by the enum, applying IK / pinyin / English
     * analyzer settings plus the supplied field mapping.
     *
     * @param indexEnum index info (supplies the index name)
     * @param xContentBuilder field mapping; may be null, in which case the
     *                        index is created with settings only
     * @return true when the index already exists or creation was acknowledged
     */
    public boolean createIndex(IndexEnum indexEnum, XContentBuilder xContentBuilder) {
        RestHighLevelClient client=this.restHighLevelClient();
        try {
            // already exists: nothing to do
            if (existsIndex(indexEnum)) {
                return true;
            }
            String index = indexEnum.getCode();
            CreateIndexRequest request = new CreateIndexRequest(index);
            request.settings(Settings.builder()
                    // shard and replica counts
                    .put("index.number_of_shards", 5)
                    .put("index.number_of_replicas", 2)
                    // NOTE(review): Integer.MAX_VALUE disables the result-window
                    // guard and risks OOM on deep pagination — confirm intent.
                    .put("index.max_result_window", Integer.MAX_VALUE)
                    // IK as the index default analyzer
                    // .put("index.analysis.analyzer.ik.tokenizer","ik_max_word")
                    .put("index.analysis.analyzer.default.type", "ik_smart")
                    // English analyzer
                    .put("index.analysis.analyzer.english.tokenizer", "standard")
                    // pinyin analyzer backed by the custom my_pinyin tokenizer
                    .put("index.analysis.analyzer.pinyin_analyzer.tokenizer", "my_pinyin")
                    .put("index.analysis.tokenizer.my_pinyin.type", "pinyin")
                    // true: support first-letter search
                    .put("index.analysis.tokenizer.my_pinyin.keep_first_letter", true)
                    // false: first-letter query only matches another first-letter term; full pinyin still matches
                    // true: both full pinyin and first letters always match
                    .put("index.analysis.tokenizer.my_pinyin.keep_separate_first_letter", false)
                    // true: keep full pinyin, e.g. 雅诗兰黛 -> [ya,shi,lan,dai]
                    .put("index.analysis.tokenizer.my_pinyin.keep_full_pinyin", true)
                    .put("index.analysis.tokenizer.my_pinyin.keep_original", true)
                    // maximum first-letter term length
                    .put("index.analysis.tokenizer.my_pinyin.limit_first_letter_length", 16)
                    .put("index.analysis.tokenizer.my_pinyin.lowercase", true)
                    // duplicate terms are dropped, e.g. 德的 -> de
                    .put("index.analysis.tokenizer.my_pinyin.remove_duplicated_term", true)

            );
            // BUGFIX: the original dereferenced xContentBuilder for logging BEFORE
            // its null check, so a null mapping threw a NullPointerException.
            if (null != xContentBuilder) {
                log.info("xContentBuilder:{}", JSON.toJSONString(xContentBuilder.getOutputStream().toString()));
                request.mapping(xContentBuilder);
            }
            request.setTimeout(TimeValue.timeValueMillis(50000));
            CreateIndexResponse createIndexResponse = client.indices().create(request, RequestOptions.DEFAULT);
            /** whether all nodes acknowledged the request **/
            boolean acknowledged = createIndexResponse.isAcknowledged();
            /** whether the required shard copies started before the timeout **/
            boolean shardsAcknowledged = createIndexResponse.isShardsAcknowledged();
            if (acknowledged || shardsAcknowledged) {
                log.info(" create lucene succeed !lucene_name:[{}]:result:{}", request, JSON.toJSONString(createIndexResponse.toString()));
                return true;
            }
            log.info(" create lucene defeated !lucene_name:[{}]:result:{}", index, JSON.toJSONString(createIndexResponse.toString()));
            return false;
        } catch (Exception e) {
            // NOTE(review): failures are swallowed and reported as a bare false
            e.printStackTrace();
            return false;
        } finally {
            // each call owns (and must close) its freshly-built client
            try {
                client.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }

    /**
     * Checks whether the index named by the enum already exists.
     * <p>
     * NOTE(review): the catch re-throws BizException built from the message
     * only, dropping the original stack trace — pass {@code e} as the cause if
     * BizException has such a constructor (confirm its signature).
     *
     * @param indexEnum index info enum (supplies the index name)
     * @return true if the index exists
     */
    public boolean existsIndex(IndexEnum indexEnum) {
        RestHighLevelClient client = this.restHighLevelClient();
        try {
            GetIndexRequest request = new GetIndexRequest(indexEnum.getCode());
            boolean exists = client.indices().exists(request, RequestOptions.DEFAULT);
            log.info("existsIndex: " + exists);
            return exists;
        } catch (Exception e) {
            throw new BizException("搜索引擎异常:"+e.getMessage());
        }finally {
            // client is created per call (see restHighLevelClient), so close it here
            try {
                client.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }

8.调用测试

 public void createIndex(){
        // BUGFIX: the original passed ProductIndex.class, which does not match
        // the ProductIndexVO model defined in step 4 of this article.
        XContentBuilder xContentBuilder = ElasticsearchConfig.generateBuilder(ProductIndexVO.class);
        this.esService.createIndex(IndexEnum.TEST, xContentBuilder);
    }

  • 1
    点赞
  • 2
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值