安装elasticsearch、kibana
1.创建网络 es和kibana要在一个网络下
docker network create es-net
2.安装es
docker run -d --name elasticsearch -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" -e ES_JAVA_OPTS="-Xms64m -Xmx256m" -v es-data:/usr/share/elasticsearch/data -v es-plugins:/usr/share/elasticsearch/plugins --network=es-net elasticsearch:7.12.1
3.安装kibana
docker run -d --name kibana -e ELASTICSEARCH_HOSTS=http://elasticsearch:9200 --network=es-net -p 5601:5601 kibana:7.12.1
分词器
#默认分词器不能区分汉语
POST /_analyze
{
"text":"今天学习es很难",
"analyzer":"english"
}
#使用ik分词器(两种方式)
#方式一直接下载
docker exec -it elasticsearch /bin/bash# 进入容器内部
./bin/elasticsearch-plugin install https://github.com/medcl/elasticsearch-analysis-ik/releases/download/v7.12.1/elasticsearch-analysis-ik-7.12.1.zip# 在线下载并安装
exit#退出
docker restart elasticsearch#重启容器
#方式二先下载好解压到挂载plugins对应的_data文件夹下重启es
POST /_analyze
{
"text":"今天学习es很简单",
"analyzer":"ik_smart"
}
何时使用分词器
1.将文档创建倒排索引的时候
2.用户在查询的时候
ik分词有两种(ik_smart,ik_max_word )
ik_smart最少切分 【今天,学习,es,很简单】
ik_max_word最细切分 【今天,学习,es,很简单,简单】
该词库有一些网络用语或者新的词汇可能不存在所以需要在ik的config文件夹的IKAnalyzer.cfg.xml文件中添加新的词库或者设置黑名单词库
索引库
mapping是对索引库中文档的约束,常见的mapping属性包括:
▪ type:字段数据类型,常见的简单类型有:
字符串:text(可分词的文本)、keyword(精确值,例如:品牌、国家、ip地址)
数值:long、integer、short、byte、double、float、
布尔:boolean
日期:date
对象:object
▪ index:是否创建索引,默认为true
▪ analyzer:使用哪种分词器
▪ properties:该字段的子字段
//创建索引库
PUT /firstput
{
"mappings": {
"properties": {
"name":{
"type": "text",
"analyzer": "ik_smart"
},
"des":{
"type": "text",
"analyzer": "ik_smart"
},
"score":{
"type": "object",
"properties": {
"math":{
"type": "integer",
"index": false
},
"chinese":{
"type": "integer",
"index": false
}
}
}
}
}
}
//查看索引库
GET /索引库名
//删除索引库
DELETE /索引库名
//修改索引库——只能新增字段,不能修改已有字段 (需要后面加上_mapping,注意:请求体直接写properties,不能再包一层mappings)
PUT /firstput/_mapping
{
"properties": {
"新字段名":{
"type": "text",
"analyzer": "ik_smart"
}
}
}
//实际搜索中可能存在用户从多个角度同时搜索 如姓名和描述同时搜索这是我们可以使用copy_to属性将当前字段拷贝到指定字段
"name": {
"type": "text",
"analyzer": "ik_smart",
"copy_to": "all"
},
"des": {
"type": "text",
"analyzer": "ik_smart",
"copy_to": "all"
},
"all": {
"type": "text",
"analyzer": "ik_smart"
}
文档操作
//新增
POST /索引库名/_doc/文档id
{
"字段1": "值1",
"字段2": "值2",
"字段3": {
"子属性1": "子属性值1",
"子属性2": "子属性值2",
}
}
//查看
GET /索引库名/_doc/文档id
//删除
DELETE /索引库名/_doc/文档id
//修改(方式一)
PUT /索引库名/_doc/文档id
{
"字段1": "值1",
"字段2": "值2",
"字段3": {
"子属性1": "子属性值1",
"子属性2": "子属性值2",
}
}
//修改(方式二)
POST /索引库名/_update/文档id
{
"doc":{
"字段名":"新的值"
}
}
//测试
POST /firstput/_doc/111
{
"name":"张三",
"des":"是个好学生",
"score":{
"math":99,
"chinese":98
}
}
POST /firstput/_doc/111
{
"name":"李四",
"des":"学习一般",
"score":{
"math":85,
"chinese":87
}
}
RestClient操作索引库
RestClient 实际上就是 es 官方提供的各种语言的客户端,他的作用就是帮助我们组装 DSL 语句,然后发送 http 请求给 es 服务器,而我们只需要通过 java 代码将请求发送给客户端,然后客户端就会帮我们来处理剩下的这些事情
官网: https://www.elastic.co/guide/en/elasticsearch/client/index.html
初始化JavaRestClient
- 引入 es 的 RestHighLevelClient 依赖
<dependency>
<groupId>org.elasticsearch.client</groupId>
<artifactId>elasticsearch-rest-high-level-client</artifactId>
</dependency>
- 因为SpringBoot默认的ES版本是7.6.2,所以我们需要覆盖默认的ES版本
<properties>
<java.version>1.8</java.version>
<elasticsearch.version>7.12.1</elasticsearch.version>
</properties>
- 初始化 RestHighLevelClient
package com.cn.config;
import org.apache.http.HttpHost;
import org.apache.http.auth.AuthScope;
import org.apache.http.auth.UsernamePasswordCredentials;
import org.apache.http.impl.client.BasicCredentialsProvider;
import org.apache.http.impl.nio.client.HttpAsyncClientBuilder;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientBuilder;
import org.elasticsearch.client.RestHighLevelClient;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
@Configuration
public class ElasticsearchConfig {

    /**
     * Comma-separated "host:port" list, e.g. "192.168.40.128:9200,node2:9200".
     * A default is supplied so the bean can still be created without extra config.
     */
    @Value("${elasticsearch.hosts:127.0.0.1:9200}")
    private String hosts;

//    /**
//     * Initialization with authentication (username/password).
//     *
//     * @return authenticated high-level client
//     */
//    @Bean(destroyMethod = "close")
//    public RestHighLevelClient restHighLevelClient1() {
//        // Register the credentials.
//        BasicCredentialsProvider provider = new BasicCredentialsProvider();
//        provider.setCredentials(AuthScope.ANY, new UsernamePasswordCredentials(username, password));
//        RestHighLevelClient restHighLevelClient = new RestHighLevelClient(
//                RestClient.builder(setHost()).setHttpClientConfigCallback(
//                        new RestClientBuilder.HttpClientConfigCallback() {
//                            @Override
//                            public HttpAsyncClientBuilder customizeHttpClient(HttpAsyncClientBuilder httpAsyncClientBuilder) {
//                                httpAsyncClientBuilder.disableAuthCaching();
//                                return httpAsyncClientBuilder.setDefaultCredentialsProvider(provider);
//                            }
//                        }
//                ));
//        return restHighLevelClient;
//    }

    /**
     * Initialization without authentication.
     * destroyMethod = "close" releases the client's HTTP connections on shutdown.
     *
     * @return high-level client pointed at the configured hosts
     */
    @Bean(destroyMethod = "close")
    public RestHighLevelClient restHighLevelClient() {
        return new RestHighLevelClient(RestClient.builder(setHost()));
    }

    /**
     * Parses the configured host list into HttpHost instances (plain HTTP).
     * FIX: this helper was commented out in the original although the active
     * bean method calls it, so the class did not compile.
     */
    private HttpHost[] setHost() {
        String[] split = hosts.split(",");
        HttpHost[] hhs = new HttpHost[split.length];
        for (int i = 0; i < split.length; i++) {
            String[] ht = split[i].split(":");
            hhs[i] = new HttpHost(ht[0], Integer.parseInt(ht[1]), "http");
        }
        return hhs;
    }
}
- 创建索引库
package com.cn.controller;
import com.alibaba.fastjson.JSONObject;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.indices.CreateIndexRequest;
import org.elasticsearch.client.indices.CreateIndexResponse;
import org.elasticsearch.client.indices.GetIndexRequest;
import org.elasticsearch.common.xcontent.XContentType;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RestController;
import java.io.IOException;
/**
 * Demo controller: creates the organization index in ES with an explicit mapping.
 *
 * @author wangyq
 * @create 2024/3/5
 */
@RestController
public class EScontroller {

    @Autowired
    private RestHighLevelClient restHighLevelClient;

    // Index name — the ES analogue of a database table.
    private static final String index ="es_org_index";

    // Mapping DSL sent when the index is created.
    private static final String source="{\n" +
" \"mappings\": {\n" +
" \"properties\": {\n" +
" \"ORGANIZATION_ID\":{\n" +
" \"type\": \"keyword\"\n" +
" },\n" +
" \"ORGANIZATION_NAME\":{\n" +
" \"type\": \"text\",\n" +
" \"analyzer\": \"ik_smart\"\n" +
" },\n" +
" \"ZONING_CODE\":{\n" +
" \"type\": \"keyword\"\n" +
" },\n" +
" \"ORGANIZATION_ADDRESS\":{\n" +
" \"type\": \"text\",\n" +
" \"analyzer\": \"ik_smart\"\n" +
" },\n" +
" \"ORGANIZATION_STATUS\":{\n" +
" \"type\": \"integer\"\n" +
" },\n" +
" \"ORGANIZATION_EMAIL\":{\n" +
" \"type\": \"text\"\n" +
" },\n" +
" \"ORGANIZATION_DESCRIPTION\":{\n" +
" \"type\": \"text\",\n" +
" \"index\": false\n" +
" },\n" +
" \"QUALIFICATION\":{\n" +
" \"type\": \"text\"\n" +
" }\n" +
" }\n" +
" }\n" +
"}";

    /**
     * Creates the index (with its mapping) if it does not already exist.
     * FIX: the original declared {@code throws IOException} yet also caught and
     * swallowed it with printStackTrace; the exception now propagates so a
     * failed creation is visible to the caller instead of silently logged.
     *
     * @throws IOException if the ES cluster cannot be reached
     */
    @GetMapping("addIndex")
    public void checkCreate() throws IOException {
        // Existence check first — creating an already-existing index is an error.
        GetIndexRequest getIndexRequest = new GetIndexRequest(index);
        boolean exists = restHighLevelClient.indices().exists(getIndexRequest, RequestOptions.DEFAULT);
        if (!exists) {
            CreateIndexRequest request = new CreateIndexRequest(index);
            // Attach the mapping DSL as the request body.
            request.source(source, XContentType.JSON);
            CreateIndexResponse createIndexResponse =
                    restHighLevelClient.indices().create(request, RequestOptions.DEFAULT);
            System.out.println(JSONObject.toJSONString(createIndexResponse));
        }
    }
}
RestClient操作文档
- 创建文档
/**
 * Indexes a single organization document, using the org id as the document id.
 * FIX: the original caught the IOException it already declared and could
 * return a null response on failure; the exception now propagates.
 *
 * @param org organization to index
 * @return the ES index response
 * @throws IOException if building the JSON or contacting ES fails
 */
@GetMapping("add3")
public IndexResponse add3(Org org) throws IOException {
    // Build the document body as JSON.
    XContentBuilder builder = XContentFactory.jsonBuilder();
    builder.startObject();
    {
        builder.field("ORGANIZATION_ID", org.getOrgid());
        builder.field("ORGANIZATION_NAME", org.getOrgname());
        builder.field("ZONING_CODE", org.getZoingcode());
        builder.field("ORGANIZATION_ADDRESS", org.getOrgadress());
        builder.field("ORGANIZATION_STATUS", org.getOrgstatus());
        builder.field("ORGANIZATION_EMAIL", org.getOrgemail());
        builder.field("ORGANIZATION_DESCRIPTION", org.getOrgdesc());
        builder.field("QUALIFICATION", org.getQua());
    }
    builder.endObject();
    // Document id doubles as the org id so re-indexing overwrites in place.
    IndexRequest request = new IndexRequest("es_org_index").id(org.getOrgid());
    request.source(builder);
    return restHighLevelClient.index(request, RequestOptions.DEFAULT);
}
- 查询文档
// Fetches a single document by id and returns the raw GET response as a string.
@GetMapping("getdoc")
public String getdoc (@RequestParam("id") String id) throws IOException {
    GetRequest getRequest = new GetRequest(index, id);
    GetResponse getResponse = restHighLevelClient.get(getRequest, RequestOptions.DEFAULT);
    return getResponse.toString();
}
- 修改文档
//两种方式
//(方式一:与新增相同 全局更新)
//(方式二:局部更新)
// Partial (in-place) update: only the listed fields are merged into the
// existing document; everything else is left untouched.
@GetMapping("updatedoc")
public String updatedoc(@RequestParam("id") String id) throws IOException {
    UpdateRequest updateRequest = new UpdateRequest(index, id);
    // Alternating field/value pairs to merge into the document.
    updateRequest.doc("ORGANIZATION_NAME","建始县红岩寺镇卫生院1","ORGANIZATION_ADDRESS","建始县居委会二组1");
    UpdateResponse updateResponse = restHighLevelClient.update(updateRequest, RequestOptions.DEFAULT);
    return updateResponse.toString();
}
- 删除文档
// Deletes the document with the given id and returns the delete response.
@GetMapping("deldoc")
public String deldoc(@RequestParam("id") String id) throws IOException {
    DeleteRequest deleteRequest = new DeleteRequest(index, id);
    DeleteResponse deleteResponse = restHighLevelClient.delete(deleteRequest, RequestOptions.DEFAULT);
    return deleteResponse.toString();
}
- 批量创建
/**
 * Bulk-indexes every organization row from the database into ES.
 * FIX: the original stream().forEach swallowed IOException per element and,
 * on failure, still added a request with a null source (later NPE). A plain
 * loop lets the checked exception propagate (method declares throws Exception)
 * and never enqueues a half-built request.
 *
 * @return the bulk response from ES
 * @throws Exception if building a document or contacting ES fails
 */
@GetMapping("addall")
public BulkResponse addall() throws Exception {
    List<Org> orgs = reportTplMapper.getallorg();
    BulkRequest bulkRequest = new BulkRequest();
    for (Org org : orgs) {
        // One JSON document per organization.
        XContentBuilder builder = XContentFactory.jsonBuilder();
        builder.startObject();
        builder.field("ORGANIZATION_ID", org.getOrgid());
        builder.field("ORGANIZATION_NAME", org.getOrgname());
        builder.field("ZONING_CODE", org.getZoingcode());
        builder.field("ORGANIZATION_ADDRESS", org.getOrgadress());
        builder.field("ORGANIZATION_STATUS", org.getOrgstatus());
        builder.field("ORGANIZATION_EMAIL", org.getOrgemail());
        builder.field("ORGANIZATION_DESCRIPTION", org.getOrgdesc());
        builder.field("QUALIFICATION", org.getQua());
        builder.endObject();
        bulkRequest.add(new IndexRequest(index).id(org.getOrgid()).source(builder));
    }
    return restHighLevelClient.bulk(bulkRequest, RequestOptions.DEFAULT);
}
Elasticsearch的DSL
DSL Query的分类
Elasticsearch提供了基于JSON的DSL来定义查询。常见的查询类型包括:
▪ 查询所有:查询出所有数据,一般测试用。例如:
▫ match_all:匹配所有文档并返回它们;
▪ 全文检索(full text)查询:利用分词器对用户输入内容分词,然后去倒排索引库中匹配。例如:
▫ match_query:针对单个字段执行全文本查询;
▫ multi_match_query:针对多个字段执行全文本查询的一种查询;
▪ 精确查询:根据精确词条值查找数据,一般是查找keyword、数值、日期、boolean等类型字段。例如:
▫ ids:查询指定的一组文档,基于它们的ID进行匹配;
▫ range:用于执行范围查询的查询,可以匹配一个数字或日期字段的范围;
▫ term:对查询字符串不进行分词,直接与指定的字段进行比较;
▪ 地理(geo)查询:根据经纬度查询。例如:
▫ geo_distance:查找与指定地理位置之间的距离在一定范围内的文档的查询;
▫ geo_bounding_box:查找落在指定矩形框中的文档的查询;
▪ 复合(compound)查询:复合查询可以将上述各种查询条件组合起来,合并查询条件。例如:
▫ function_score:在查询结果的排序过程中,根据指定的函数对文档进行打分的查询;
DSL基本语法
GET /索引名/_search
{
"query": {
"查询类型": {
"查询条件":"条件值"
}
}
}
查询所有 - match_all
//查询所有 - match_all
GET /es_org_index/_search
{
"query": {
"match_all": {
}
}
}
全文检索查询
match
//全文检索查询 -----------match 查询
GET /es_org_index/_search
{
"query": {
"match": {
"ORGANIZATION_NAME": "卫生院"
}
}
}
multi_match
//全文检索查询 -----------multi_match 查询(类似与copy_to)
//(建议使用copy_to因为参与搜索字段越多查询越慢)
GET /es_org_index/_search
{
"query": {
"multi_match": {
"query": "卫生院",
"fields": ["ORGANIZATION_NAME","ORGANIZATION_ADDRESS"]
}
}
}
精确查询
ids
//根据文档的id进行查询
GET /es_org_index/_search
{
"query": {
"ids": {
"values": ["N1001IJ9LEZ0F9D6A6P1","N1002ELIOQUMKVWZ81LY"]
}
}
}
term
//精确查询(一般是查找keyword、数值、日期、boolean等类型字段 不分词查询)必须与词条完全一致
GET /es_org_index/_search
{
"query": {
"term": {
"ZONING_CODE": {
"value": "610727101000"
}
}
}
}
range
//对某个字段范围查询 gt > gte>=
GET /es_org_index/_search
{
"query": {
"range": {
"QUALIFICATION": {
"gte": 0,
"lte": 2
}
}
}
}
地理坐标查询
geo_distance
//以点位中心距离为半径的圆形
{
"query": {
"geo_distance": {
"distance": "10km",
"location": {
"lat": 40.73,
"lon": -73.98
}
}
}
}
geo_bounding_box
//两点开展xy轴形成一个矩形
{
"query": {
"geo_bounding_box": {
"location": {
"top_left": {
"lat": 40.73,
"lon": -74.1
},
"bottom_right": {
"lat": 40.01,
"lon": -71.12
}
}
}
}
}
复合查询
function_score
//根据特定的方式进行算分排序
GET /es_org_index/_search
{
"query": {
"function_score": {
"query": {
"match": {
"ORGANIZATION_NAME": "卫生院" //原始条件,搜索文档并根据相关行算分(query_score)
}
},
"functions": [
{
"filter": {
"term": {
"ZONING_CODE": "422822102000" //符合条件的才会被重新算分
}
},"weight":10 //算分函数结果成为(function_score)
//weight:给一个常量作为结果
//field_value_factor:用文档某个字段的值作为结果
//random_score:随机生成一个值作为结果
//script_score:自定义公式的结果
}
],
"boost_mode": "multiply" //加权模式定义query_score和function_score的运算方式
//multiply:两者相乘
//replace:用function_score替换query_score
//其他:sum、avg、max、min
}
}
}
boolean query
//组合查询
//must:必须匹配给个子查询 类似于“与”
//should:选择性匹配 “或”
//must_not必须不匹配,不参与算分 “非”
//filter:必须匹配,不参与算分
GET /es_org_index/_search
{
"query": {
"bool": {
"must": [
{"term": {
"ORGANIZATION_NAME": {
"value": "卫生院"
}
}}
],
"should": [
{"term": {"ZONING_CODE":"440983120000"}},
{"term": {"ZONING_CODE":"440983120000"}}
],
"must_not": [
{"range": {
"FIELD": {
"gte": 10,
"lte": 20
}
}}
]
}
}
}
搜索结果处理
排序
默认的排序是根据相关度算分(_score)来排序的。可以排序的字段类型有:keyword、数值类型、地理坐标类型、日期类型等。如果使用了排序就不进行打分了
//keyword、数值类型、日期类型使用该方式排序
//sort是一个数组可以多字段排序
GET /es_org_index/_search
{
"query": {
"match": {
"ORGANIZATION_NAME": "卫生院"
}
},
"sort": [
{
"ZONING_CODE": {
"order": "asc"
}
}
]
}
//地理坐标排序
GET /es_org_index/_search
{
"query": {
"match": {
"ORGANIZATION_NAME": "卫生院"
}
},
"sort": [
{
"_geo_distance": {
"FIELD": {
"lat": 40,
"lon": -70
},
"order": "asc",
"unit": "km"
}
}
]
}
分页
elasticsearch 默认情况下只返回top10的数据。而如果要查询更多数据就需要修改分页参数了。
elasticsearch中通过修改from、size参数来控制要返回的分页结果:
//该方式适用于单点不适用于集群
//from分页开始位置 size希望获取文档总数 limit 20,10
GET /es_org_index/_search
{
"query": {
"match": {
"ORGANIZATION_NAME": "卫生院"
}
},
"from": 20,
"size": 10
}
集群分页
ES是分布式的,所以会面临深度分页问题。例如按 price 排序后,获取 from = 990,size =10 的数据:
- 首先在每个数据分片上都排序并查询前1000条文档。
- 然后将所有节点的结果聚合,在内存中重新排序选出前1000条文档。
- 最后从这1000条中,选取从990开始的10条文档。
如果搜索页数过深,或者结果集(from + size)越大,对内存和 CPU 的消耗也越高。因此 ES 设定结果集查询的上限是10000。
深度分页解决方案:
针对深度分页,ES提供了两种解决方案:
- search after:分页时需要排序,原理是从上一次的排序值开始,查询下一页数据。官方推荐使用的方式。
- scroll:原理将排序数据形成快照,保存在内存。官方已经不推荐使用。
高亮
高亮处理,就是在搜索结果中把搜索关键字突出显示。
原理:
- 将搜索结果中的关键字用标签标记出来。
- 在页面中给标签添加 CSS 样式。
GET /es_org_index/_search
{
"query": {
"match": {
"ORGANIZATION_ADDRESS": "卫生院"
}
},
"highlight": {
"fields": { //指定高亮的字段
"ORGANIZATION_NAME":{
"require_field_match": "false", //默认为搜索字段和高亮字段必须一致 解决方式加上该配置
"pre_tags": "<em>", //用来标记高亮字段的前置标签
"post_tags": "</em>" //用来标记高亮字段的后置标签
}
}
}
}
RestClient查询文档
matchAll
// match_all demo: returns every document in the index (mainly for testing).
@GetMapping("querymachall")
public String querymachall() throws IOException {
    // Target index.
    SearchRequest request = new SearchRequest(index);
    // No conditions — match everything.
    request.source().query(QueryBuilders.matchAllQuery());
    SearchResponse response = restHighLevelClient.search(request, RequestOptions.DEFAULT);
    // Unwrap the hits container.
    SearchHits searchHits = response.getHits();
    // Total number of matching documents.
    long total = searchHits.getTotalHits().value;
    // Print each hit's _source as raw JSON.
    for (SearchHit hit : searchHits.getHits()) {
        System.out.println(hit.getSourceAsString());
    }
    return response.toString();
}
match
//按条件查询
searchRequest.source().query(QueryBuilders.matchQuery("ORGANIZATION_ADDRESS","卫生院"));
multi_match
searchRequest.source().query(QueryBuilders.multiMatchQuery("卫生院", "ORGANIZATION_ADDRESS","ORGANIZATION_NAME"));
term
searchRequest.source().query(QueryBuilders.termQuery("ZONING_CODE", "610727101000"));
range
searchRequest.source().query(QueryBuilders.rangeQuery("QUALIFICATION").gte(0).lt(1));
组合查询
BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder();
boolQueryBuilder.must(QueryBuilders.matchQuery("ORGANIZATION_NAME","卫生院"));
boolQueryBuilder.should(QueryBuilders.termQuery("ZONING_CODE","440983120000"));
searchRequest.source().query(boolQueryBuilder);
RestClient搜索结果处理
/**
 * Search demo combining a match query with paging, sorting and highlighting.
 * FIX 1: postTags was set with DEFAULT_STYLED_PRE_TAG, producing opening tags
 * where closing tags belong — now uses DEFAULT_STYLED_POST_TAGS.
 * FIX 2: the highlight lookup NPE'd when a hit had no highlight for the
 * field — now guarded.
 *
 * @return matching organizations with the name field highlighted
 * @throws IOException if the search request fails
 */
@GetMapping("queryterm")
public List<Org1> queryterm() throws IOException {
    List<Org1> list = new ArrayList<>();
    SearchRequest searchRequest = new SearchRequest(index);
    // Query condition.
    searchRequest.source().query(QueryBuilders.matchQuery("ORGANIZATION_NAME","卫生院"));
    // Paging — equivalent of SQL "limit 0,20".
    searchRequest.source().from(0);
    searchRequest.source().size(20);
    // Field sort (using sort disables relevance scoring).
    searchRequest.source().sort("ZONING_CODE", SortOrder.ASC);
    // Highlighting; requireFieldMatch(false) allows highlighting a field other
    // than the one that was searched.
    HighlightBuilder highlightBuilder = new HighlightBuilder();
    highlightBuilder.field("ORGANIZATION_NAME").requireFieldMatch(false);
    highlightBuilder.preTags(HighlightBuilder.DEFAULT_STYLED_PRE_TAG);
    highlightBuilder.postTags(HighlightBuilder.DEFAULT_STYLED_POST_TAGS);
    searchRequest.source().highlighter(highlightBuilder);
    // Geo-distance sort.
    // NOTE(review): the mapping shown earlier defines no "location" field —
    // confirm the index actually has one, otherwise this sort fails at runtime.
    searchRequest.source().sort(SortBuilders.geoDistanceSort("location"
            ,new GeoPoint(31.12,121.21))
            .order(SortOrder.ASC)
            .unit(DistanceUnit.KILOMETERS));
    SearchResponse search = restHighLevelClient.search(searchRequest,RequestOptions.DEFAULT);
    SearchHits hits = search.getHits();
    for (SearchHit documentFields : hits.getHits()) {
        // Raw _source of the current hit.
        String json = documentFields.getSourceAsString();
        // Deserialize into the result object.
        Org1 org1 = JSON.parseObject(json, Org1.class);
        // Replace the name with its highlighted fragment when one exists.
        if (documentFields.getHighlightFields().containsKey("ORGANIZATION_NAME")) {
            org1.setORGANIZATION_NAME(
                    documentFields.getHighlightFields().get("ORGANIZATION_NAME")
                            .getFragments()[0].toString());
        }
        list.add(org1);
        System.out.println(org1);
    }
    return list;
}
算分
// Function-score demo: re-scores documents that match an extra filter.
SearchRequest searchRequest = new SearchRequest(index);
BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder( );
boolQueryBuilder.must(QueryBuilders.matchQuery("ORGANIZATION_NAME", "卫生院"));
boolQueryBuilder.filter(QueryBuilders.termQuery("ORGANIZATION_STATUS", "1"));
// Build the scoring function wrapper.
FunctionScoreQueryBuilder functionScoreQueryBuilder =
QueryBuilders.functionScoreQuery(
boolQueryBuilder// original query (produces the base relevance score)
// function-score part
, new FunctionScoreQueryBuilder.FilterFunctionBuilder[]{
new FunctionScoreQueryBuilder.FilterFunctionBuilder(
QueryBuilders.termQuery("ISad", true)// filter: only matching docs are re-scored
, ScoreFunctionBuilders.weightFactorFunction(5)// constant weight applied as the function score
)
});
// Attach the composed query to the request.
searchRequest.source( ).query(functionScoreQueryBuilder);