一、spring-data-elasticsearch 操作es
1.创建实体类数据entity
// Elasticsearch document entity mapped to the "employee" index.
@Document(indexName = "employee")
@Setting(
    shards = 1,            // default number of primary shards for the index
    replicas = 0,          // number of replica copies per shard
    refreshInterval = "-1" // "-1" disables automatic refresh — NOTE(review): confirm this is intended outside bulk loading
)
@Data
public class Employee {
    // document id
    private String id;
    // full-text field analyzed with the IK max-word analyzer
    @Field(analyzer = FieldAnalyzer.IK_MAX_WORD, type = FieldType.Text)
    private String userName;
    private String about;
    private String[] interests;
    // NOTE(review): fielddata is meant for text fields; numeric fields sort/aggregate
    // via doc_values by default — confirm this flag is actually needed on a Long
    @Field(fielddata = true)
    private Long age;
}
创建IK分词器工具类
/**
 * Constants naming the IK analyzers used in Elasticsearch field mappings.
 *
 * Fixes: the original snippet never closed the class body, and a constants
 * holder should not be instantiable.
 */
public final class FieldAnalyzer {

    /**
     * IK maximum-granularity analyzer.
     *
     * Splits text into the finest-grained tokens.
     */
    public static final String IK_MAX_WORD = "ik_max_word";

    /**
     * IK smart analyzer.
     *
     * Splits text into the coarsest-grained tokens.
     */
    public static final String IK_SMART = "ik_smart";

    // Utility holder — not instantiable.
    private FieldAnalyzer() {
    }
}
创建接口(与JPA功能相同)
public interface EmployeeRepository extends ElasticsearchRepository<Employee, Integer> {
#分页查询 根据名称like查询
Page<Employee> findByUserNameLike(String name, PageRequest pageable);
}
yml配置文件设置连接es地址
spring:
  elasticsearch:
    rest:
      # your own Elasticsearch address
      # NOTE(review): on newer Spring Boot versions the property moved to
      # spring.elasticsearch.uris — confirm the Boot version in use
      uris: http://192.168.2.233:9200
创建测试类
package com.es.test;
import com.es.Application;
import com.es.dataobject.Employee;
import com.es.dataobject.EmployeeRepository;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.data.domain.Page;
import org.springframework.data.domain.PageRequest;
import org.springframework.data.domain.Sort;
import org.springframework.data.elasticsearch.core.ElasticsearchRestTemplate;
import org.springframework.data.elasticsearch.core.SearchHit;
import org.springframework.data.elasticsearch.core.SearchHits;
import org.springframework.data.elasticsearch.core.mapping.IndexCoordinates;
import org.springframework.data.elasticsearch.core.query.*;
import org.springframework.test.context.junit4.SpringRunner;
import java.util.List;
import java.util.Optional;
/**
 * Integration tests exercising Spring Data Elasticsearch via both the
 * repository abstraction and {@link ElasticsearchRestTemplate}.
 */
@RunWith(SpringRunner.class)
@SpringBootTest(classes = Application.class)
public class EsTest {

    // Renamed from "productRepository" — it stores Employee documents.
    @Autowired
    private EmployeeRepository employeeRepository;

    @Autowired
    private ElasticsearchRestTemplate elasticsearchTemplate;

    /** Indexes a single Employee document through the repository. */
    @Test
    public void add() {
        Employee employee = new Employee();
        employee.setId("1");
        employee.setUserName("天天八天天龙天龙天龙");
        employee.setAbout("《天龙八部》");
        employee.setAge(1L);
        employeeRepository.save(employee);
    }

    /**
     * Looks a document up by id.
     * NOTE(review): the repository's ID type is Integer while Employee.id is a
     * String — confirm the intended ID type.
     */
    @Test
    public void testSelectById() {
        Optional<Employee> employee = employeeRepository.findById(1);
        System.out.println(employee.isPresent());
        // NOTE(review): get() without a presence guard throws if the document is missing
        System.out.println(employee.get());
    }

    /** Criteria query with paging and an ascending sort on age. */
    @Test
    public void queryPage() {
        Sort.TypedSort<Employee> typedSort = Sort.sort(Employee.class);
        Sort sort = typedSort.by(Employee::getAge).ascending();
        Query query = new CriteriaQuery(
                new Criteria("userName").contains("天龙八部"),
                PageRequest.of(0, 10));
        query.addSort(sort);
        SearchHits<Employee> search = elasticsearchTemplate.search(query, Employee.class,
                IndexCoordinates.of(Employee.class.getSimpleName().toLowerCase()));
        List<SearchHit<Employee>> searchHits = search.getSearchHits();
        for (SearchHit<Employee> searchHit : searchHits) {
            Employee content = searchHit.getContent();
            System.out.println(content.toString());
        }
    }

    /** Derived-query paging; page index is 0-based, so this requests the second page. */
    @Test
    public void queryPage2() {
        PageRequest pageable = PageRequest.of(1, 10);
        Page<Employee> page = employeeRepository.findByUserNameLike("天", pageable);
        System.out.println(page.getTotalElements());
        System.out.println(page.getTotalPages());
    }

    /** Prints the index coordinates resolved from the entity's @Document annotation. */
    @Test
    public void getIndex() {
        IndexCoordinates indexCoordinatesFor = elasticsearchTemplate.getIndexCoordinatesFor(Employee.class);
        System.out.println(indexCoordinatesFor);
    }

    /** Indexes a document through the template with an explicit version. */
    @Test
    public void addTemplate() {
        Employee employee = new Employee();
        employee.setId("1");
        employee.setUserName("123456");
        employee.setAbout("123456");
        employee.setAge(0L);
        IndexQuery query = new IndexQuery();
        query.setId("1");
        query.setObject(employee);
        // NOTE(review): re-running with the same fixed version triggers a version conflict
        query.setVersion(1L);
        elasticsearchTemplate.doIndex(query, IndexCoordinates.of(employee.getClass().getSimpleName().toLowerCase()));
    }
}
二、rest-high-level-client 操作es
1. 重新构建实体类
// Plain POJO for the rest-high-level-client examples — no Spring Data
// mapping annotations; serialized to JSON manually when indexing.
@Data
public class Employee {
    private String id;
    private String userName;
    private String about;
    private String[] interests;
    private Long age;
}
2.设置client配置类初始化bean
package com.es.config;
import org.apache.http.HttpHost;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
/**
 * Declares the {@link RestHighLevelClient} bean used to talk to Elasticsearch.
 */
@Configuration
public class ElasticSearchConfig {

    // NOTE(review): consider externalizing these to application.yml instead of hard-coding.
    private static final String ES_HOST = "192.168.2.233";
    private static final int ES_PORT = 9200;
    private static final String ES_SCHEME = "http";

    /** Builds a REST high-level client pointed at the single ES node. */
    @Bean
    public RestHighLevelClient restHighLevelClient() {
        return new RestHighLevelClient(
                RestClient.builder(new HttpHost(ES_HOST, ES_PORT, ES_SCHEME)));
    }
}
3.测试类
Document API
package com.es.test;
import com.alibaba.fastjson.JSON;
import com.es.RestHighLevelClientEsApplication;
import com.es.dataobject.Employee;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.get.MultiGetRequest;
import org.elasticsearch.action.get.MultiGetResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.core.GetSourceRequest;
import org.elasticsearch.client.core.GetSourceResponse;
import org.elasticsearch.client.core.TermVectorsRequest;
import org.elasticsearch.client.core.TermVectorsResponse;
import org.elasticsearch.client.indices.CreateIndexRequest;
import org.elasticsearch.client.indices.CreateIndexResponse;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.index.reindex.BulkByScrollResponse;
import org.elasticsearch.index.reindex.ReindexRequest;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.junit4.SpringRunner;
import java.io.IOException;
import java.util.Date;
import java.util.HashMap;
import java.util.Map;
import static java.util.Collections.singletonMap;
/**
 * Document API examples for the Elasticsearch rest-high-level-client:
 * index, get, exists, update, term vectors, bulk, multi-get and reindex.
 */
@RunWith(SpringRunner.class)
@SpringBootTest(classes = RestHighLevelClientEsApplication.class)
public class RestHighLevelClientEsDocumentApiTest {

    @Qualifier("restHighLevelClient")
    @Autowired
    private RestHighLevelClient client;

    // Index a document whose source is a Map.
    @Test
    public void createIndex1() throws IOException {
        Map<String, Object> jsonMap = new HashMap<>();
        jsonMap.put("user", "lisa");
        jsonMap.put("postDate", new Date());
        jsonMap.put("message", "lisa is dog");
        jsonMap.put("age", 18);
        IndexRequest indexRequest = new IndexRequest("posts").id("2").source(jsonMap);
        // indexRequest.opType(DocWriteRequest.OpType.INDEX);
        client.index(indexRequest, RequestOptions.DEFAULT);
    }

    // Index a document whose source is built with an XContentBuilder.
    @Test
    public void createIndex2() throws IOException {
        XContentBuilder builder = XContentFactory.jsonBuilder();
        builder.startObject();
        builder.field("user", "kimchy");
        builder.timeField("postDate", new Date());
        builder.field("message", "trying out Elasticsearch");
        builder.endObject();
        IndexRequest indexRequest = new IndexRequest("posts")
                .id("1").source(builder);
        client.index(indexRequest, RequestOptions.DEFAULT);
    }

    // Index a document from key/value varargs.
    @Test
    public void createIndex3() throws IOException {
        IndexRequest indexRequest = new IndexRequest("posts")
                .id("1")
                .source("user", "kimchy",
                        "postDate", new Date(),
                        "message", "trying out Elasticsearch");
        client.index(indexRequest, RequestOptions.DEFAULT);
    }

    // Create a bare index with default settings.
    @Test
    public void createIndex4() throws IOException {
        // build the create-index request
        CreateIndexRequest indexRequest = new CreateIndexRequest("abc");
        // execute through the indices client and receive the response
        CreateIndexResponse createIndexResponse = client.indices().create(indexRequest, RequestOptions.DEFAULT);
        System.out.println(createIndexResponse);
    }

    // Index an entity serialized to JSON.
    @Test
    public void createIndex6() throws IOException {
        // build the entity
        Employee employee = new Employee();
        employee.setId("1");
        employee.setUserName("张三测试");
        employee.setAbout("这是一条简单的简介");
        employee.setInterests(new String[]{"开发", "测试", "生产"});
        employee.setAge(24L);
        // build the index request; index name is the lower-cased class name ("employee")
        IndexRequest indexRequest = new IndexRequest(employee.getClass().getSimpleName().toLowerCase());
        // equivalent REST call: PUT /<index>/_doc/1
        indexRequest.id("1");
        indexRequest.timeout(TimeValue.timeValueSeconds(1));
        // put the entity JSON into the request body
        indexRequest.source(JSON.toJSONString(employee), XContentType.JSON);
        // send the request and capture the response
        IndexResponse index = client.index(indexRequest, RequestOptions.DEFAULT);
        System.out.println(index);
        System.out.println("status : " + index.status());
    }

    // Create an index with explicit settings and a mapping built via XContentBuilder.
    @Test
    public void createIndex7() throws IOException {
        CreateIndexRequest request = new CreateIndexRequest("posts");
        // Map<String, Object> properties = new HashMap<>();
        // Map<String, Object> message = new HashMap<>();
        // message.put("type", "text");
        // properties.put("message", message);
        // Map<String, Object> mapping = new HashMap<>();
        // mapping.put("properties", properties);
        // request.mapping(mapping);
        request.settings(Settings.builder()
                .put("index.number_of_shards", 1)
                .put("index.number_of_replicas", 0)
        );
        XContentBuilder builder = XContentFactory.jsonBuilder();
        builder.startObject();
        {
            builder.startObject("properties");
            {
                builder.startObject("user");
                {
                    builder.field("type", "text");
                }
                builder.endObject();
                builder.startObject("postDate");
                {
                    builder.field("type", "date");
                }
                builder.endObject();
                builder.startObject("message");
                {
                    // NOTE(review): "message" is mapped as "date" here, but the
                    // commented map version above uses "text" — likely a typo; confirm.
                    builder.field("type", "date");
                }
                builder.endObject();
            }
            builder.endObject();
        }
        builder.endObject();
        request.mapping(builder);
        CreateIndexResponse createIndexResponse = client.indices().create(request, RequestOptions.DEFAULT);
        System.out.println(JSON.toJSONString(createIndexResponse));
    }

    // Fetch a document by id with source filtering.
    @Test
    public void getTest1() throws Exception {
        GetRequest request = new GetRequest("posts", "1");
        // GetRequest request = new GetRequest("posts", "1").version(2);
        String[] includes = new String[]{"*"};
        // NOTE(review): "excludes" is never passed to the FetchSourceContext below
        String[] excludes = Strings.EMPTY_ARRAY;
        FetchSourceContext fetchSourceContext = new FetchSourceContext(true, includes, null);
        request.fetchSourceContext(fetchSourceContext);
        GetResponse documentFields = client.get(request, RequestOptions.DEFAULT);
        System.out.println(JSON.toJSONString(documentFields));
    }

    // Fetch only the _source of a document.
    // shard_num = hash(_routing) % num_primary_shards
    // Why use a custom routing value at all?
    // A normal search fans the request out to every shard (replicas aside),
    // waits for all of them, aggregates and returns the result. If we already
    // know which shards hold the data, routing avoids the unnecessary requests.
    @Test
    public void getTest2() throws Exception {
        GetSourceRequest getSourceRequest = new GetSourceRequest("posts", "1");
        String[] includes = new String[]{"*"};
        getSourceRequest.fetchSourceContext(new FetchSourceContext(true, includes, null));
        getSourceRequest.routing("routing");
        getSourceRequest.preference("preference");
        // realtime flag
        getSourceRequest.realtime(false);
        // refresh the shard before retrieval
        getSourceRequest.refresh(true);
        GetSourceResponse response = client.getSource(getSourceRequest, RequestOptions.DEFAULT);
        System.out.println(JSON.toJSONString(response));
    }

    // Check whether a document exists without fetching its source.
    @Test
    public void existsTest() throws IOException {
        GetRequest getRequest = new GetRequest(
                "posts",
                "1");
        // disable source fetching to make the request lighter
        getRequest.fetchSourceContext(new FetchSourceContext(false));
        getRequest.storedFields("_none_");
        boolean exists = client.exists(getRequest, RequestOptions.DEFAULT);
        System.out.println(exists);
    }

    // Update a document with an inline painless script.
    @Test
    public void updateTest1() throws IOException {
        UpdateRequest request = new UpdateRequest(
                "posts",
                "1");
        Map<String, Object> parameters = singletonMap("count", 1);
        Script inline = new Script(
                ScriptType.INLINE, "painless",
                "ctx._source.age += params.count", parameters);
        request.script(inline);
        UpdateResponse updateResponse = client.update(request, RequestOptions.DEFAULT);
        System.out.println(JSON.toJSONString(updateResponse));
    }

    // NOTE(review): identical to updateTest1 — likely a copy-paste duplicate.
    @Test
    public void updateTest2() throws IOException {
        UpdateRequest request = new UpdateRequest(
                "posts",
                "1");
        Map<String, Object> parameters = singletonMap("count", 1);
        Script inline = new Script(
                ScriptType.INLINE, "painless",
                "ctx._source.age += params.count", parameters);
        request.script(inline);
        UpdateResponse updateResponse = client.update(request, RequestOptions.DEFAULT);
        System.out.println(JSON.toJSONString(updateResponse));
    }

    /**
     * A partial-document update merges the given fields into the existing
     * document. The partial document can be provided in several forms.
     */
    @Test
    public void updateTest3() throws IOException {
        // UpdateRequest request = new UpdateRequest("posts", "1");
        // String jsonString = "{" +
        // "\"updated\":\"2017-01-01\"," +
        // "\"reason\":\"daily update\"" +
        // "}";
        // request.doc(jsonString, XContentType.JSON);
        Map<String, Object> jsonMap = new HashMap<>();
        jsonMap.put("updated", new Date());
        jsonMap.put("reason", "daily update");
        UpdateRequest request = new UpdateRequest("posts", "1").doc(jsonMap);
        UpdateResponse updateResponse = client.update(request, RequestOptions.DEFAULT);
        System.out.println(JSON.toJSONString(updateResponse));
    }

    // Upsert: insert the document if it does not exist yet.
    @Test
    public void updateTest4() throws IOException {
        UpdateRequest request = new UpdateRequest("posts", "1");
        Map<String, Object> jsonMap = new HashMap<>();
        jsonMap.put("updated", new Date());
        request.upsert(jsonMap);
        UpdateResponse updateResponse = client.update(request, RequestOptions.DEFAULT);
        System.out.println(JSON.toJSONString(updateResponse));
    }

    // The term vectors API returns information and statistics about terms in a
    // document field. The document may live in the index or be supplied inline.
    @Test
    public void TermVectorsTest1() throws IOException {
        TermVectorsRequest request = new TermVectorsRequest("posts", "1");
        request.setFields("user1");
        // fieldStatistics=false (default true) skips document count, sum of doc
        // frequencies and sum of term frequencies
        request.setFieldStatistics(false);
        // termStatistics=true (default false) reports total term frequency and doc frequency
        request.setTermStatistics(true);
        // positions=false (default true) omits position output
        request.setPositions(false);
        // offsets=false (default true) omits offset output
        request.setOffsets(false);
        // payloads=false (default true) omits payload output
        request.setPayloads(false);
        // filterSettings filters the returned terms by their tf-idf score
        Map<String, Integer> filterSettings = new HashMap<>();
        filterSettings.put("max_num_terms", 3);
        filterSettings.put("min_term_freq", 1);
        filterSettings.put("max_term_freq", 10);
        filterSettings.put("min_doc_freq", 1);
        filterSettings.put("max_doc_freq", 100);
        filterSettings.put("min_word_length", 1);
        filterSettings.put("max_word_length", 10);
        request.setFilterSettings(filterSettings);
        // perFieldAnalyzer overrides the analyzer configured on the field
        Map<String, String> perFieldAnalyzer = new HashMap<>();
        perFieldAnalyzer.put("user", "keyword");
        request.setPerFieldAnalyzer(perFieldAnalyzer);
        // realtime=false (default true) trades realtime reads for near-realtime
        request.setRealtime(false);
        // routing parameter
        request.setRouting("routing");
        TermVectorsResponse termVectorsResponse = client.termvectors(request, RequestOptions.DEFAULT);
        System.out.println(JSON.toJSONString(termVectorsResponse));
    }

    /**
     * Bulk operation: several index requests in one round trip.
     */
    @Test
    public void bulkTest1() throws IOException {
        BulkRequest request = new BulkRequest();
        request.add(new IndexRequest("posts").id("3")
                .source(XContentType.JSON, "user", "foo"));
        request.add(new IndexRequest("posts").id("4")
                .source(XContentType.JSON, "user", "bar"));
        request.add(new IndexRequest("posts").id("5")
                .source(XContentType.JSON, "user", "baz"));
        BulkResponse bulkItemResponses = client.bulk(request, RequestOptions.DEFAULT);
        System.out.println(JSON.toJSONString(bulkItemResponses));
    }

    // Bulk operation mixing delete, update and index requests.
    @Test
    public void bulkTest2() throws IOException {
        BulkRequest request = new BulkRequest();
        request.add(new DeleteRequest("posts", "3"));
        request.add(new UpdateRequest("posts", "2")
                .doc(XContentType.JSON, "other", "test"));
        request.add(new IndexRequest("posts").id("4")
                .source(XContentType.JSON, "field", "baz"));
        BulkResponse bulkItemResponses = client.bulk(request, RequestOptions.DEFAULT);
        System.out.println(JSON.toJSONString(bulkItemResponses));
    }

    // Multi-get: fetch several documents in one request.
    @Test
    public void mGetTest1() throws IOException {
        MultiGetRequest request = new MultiGetRequest();
        request.add(new MultiGetRequest.Item(
                "posts",
                "1").fetchSourceContext(FetchSourceContext.DO_NOT_FETCH_SOURCE)
        );
        request.add(new MultiGetRequest.Item(
                "posts",
                "2")
        );
        MultiGetResponse mget = client.mget(request, RequestOptions.DEFAULT);
        System.out.println(JSON.toJSONString(mget));
    }

    // Reindex: copy documents from one or more source indices into a destination index.
    // The source index must exist; the destination may or may not exist beforehand.
    // Reindex does NOT set up the destination index and does not copy the source's
    // settings — create the destination (mappings, shard count, replicas, ...) first.
    @Test
    public void reIndexTest() throws IOException {
        ReindexRequest request = new ReindexRequest();
        // request.setSourceIndices("posts", "employee");
        request.setSourceIndices("posts");
        request.setDestIndex("test1");
        BulkByScrollResponse reindex = client.reindex(request, RequestOptions.DEFAULT);
        System.out.println(JSON.toJSONString(reindex));
    }
}
Search API
package com.es.test;
import com.alibaba.fastjson.JSON;
import com.es.RestHighLevelClientEsApplication;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.sort.FieldSortBuilder;
import org.elasticsearch.search.sort.ScoreSortBuilder;
import org.elasticsearch.search.sort.SortOrder;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.junit4.SpringRunner;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
/**
 * Search API examples for the Elasticsearch rest-high-level-client:
 * match-all, term query, fuzzy match with sorting and source filtering.
 */
@RunWith(SpringRunner.class)
@SpringBootTest(classes = RestHighLevelClientEsApplication.class)
public class RestHighLevelClientEsSearchApiTest {

    @Qualifier("restHighLevelClient")
    @Autowired
    private RestHighLevelClient client;

    // Search all documents across all indices.
    @Test
    public void searchTest1() throws IOException {
        SearchRequest searchRequest = new SearchRequest();
        SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
        searchSourceBuilder.query(QueryBuilders.matchAllQuery());
        searchRequest.source(searchSourceBuilder);
        SearchResponse search = client.search(searchRequest, RequestOptions.DEFAULT);
        System.out.println(JSON.toJSONString(search));
    }

    // Term query with routing, paging and a timeout.
    @Test
    public void searchTest2() throws IOException {
        SearchRequest searchRequest = new SearchRequest("posts");
        searchRequest.routing("routing");
        // IndicesOptions controls how unavailable indices are resolved and how
        // wildcard expressions are expanded
        searchRequest.indicesOptions(IndicesOptions.lenientExpandOpen());
        // preference parameter, e.g. prefer local shards; the default randomizes
        // across shards
        searchRequest.preference("_local");
        SearchSourceBuilder sourceBuilder = new SearchSourceBuilder();
        // the query — any kind of QueryBuilder works here
        sourceBuilder.query(QueryBuilders.termQuery("user", "kimchy"));
        sourceBuilder.from(0);
        sourceBuilder.size(5);
        sourceBuilder.timeout(new TimeValue(60, TimeUnit.SECONDS));
        searchRequest.source(sourceBuilder);
        SearchResponse search = client.search(searchRequest, RequestOptions.DEFAULT);
        System.out.println(JSON.toJSONString(search));
    }

    // Fuzzy match query with explicit sorting and source filtering.
    @Test
    public void searchTest3() throws IOException {
        SearchRequest searchRequest = new SearchRequest("posts");
        searchRequest.routing("routing");
        // IndicesOptions controls how unavailable indices are resolved and how
        // wildcard expressions are expanded
        searchRequest.indicesOptions(IndicesOptions.lenientExpandOpen());
        // preference parameter, e.g. prefer local shards; the default randomizes
        // across shards
        searchRequest.preference("_local");
        SearchSourceBuilder sourceBuilder = new SearchSourceBuilder();
        // the query — any kind of QueryBuilder works here
        // MatchQueryBuilder matchQueryBuilder = new MatchQueryBuilder("user", "kimchy");
        // //设置模糊查询
        // matchQueryBuilder.fuzziness(Fuzziness.AUTO);
        // //在匹配查询上设置前缀长度选项
        // matchQueryBuilder.prefixLength(3);
        // //设置最大扩展选项以控制查询的模糊过程
        // matchQueryBuilder.maxExpansions(10);
        QueryBuilder matchQueryBuilder = QueryBuilders.matchQuery("user", "kimchy")
                .fuzziness(Fuzziness.AUTO)
                .prefixLength(3)
                .maxExpansions(10);
        sourceBuilder.query(matchQueryBuilder);
        sourceBuilder.from(0);
        sourceBuilder.size(5);
        // sort by score, descending (the default)
        sourceBuilder.sort(new ScoreSortBuilder().order(SortOrder.DESC));
        // sort by _id ascending; sorting on a field the index does not have
        // throws an exception
        sourceBuilder.sort(new FieldSortBuilder("_id").order(SortOrder.ASC));
        sourceBuilder.timeout(new TimeValue(60, TimeUnit.SECONDS));
        String[] includeFields = new String[] {"*"};
        sourceBuilder.fetchSource(includeFields, null);
        searchRequest.source(sourceBuilder);
        SearchResponse search = client.search(searchRequest, RequestOptions.DEFAULT);
        System.out.println(JSON.toJSONString(search));
    }
}
增删改查命令在源码包可以查看