1.使用自带standard类型分词器分词
POST _analyze
{
"analyzer": "standard",
"text": ["我是中国人"]
}
2.使用ik_smart类型分词器分词
POST _analyze
{
"analyzer": "ik_smart",
"text": ["我是中国人"]
}
3.使用ik_max_word类型分词器分词
POST _analyze
{
"analyzer": "ik_max_word",
"text": ["我是中国人"]
}
4.创建索引es_db
PUT /es_db
5.查询索引es_db
GET /es_db
6.在索引es_db中新增id=1,2,3,4,5的文档
POST /es_db/_doc/1
{
"uid" : "1234",
"phone" : "12345678909",
"message" : "我是",
"msgcode" : "1",
"sendtime" : "2022-03-15 09:00:00"
}
POST /es_db/_doc/2
{
"uid" : "1234",
"phone" : "12345678909",
"message" : "我是中国人",
"msgcode" : "1",
"sendtime" : "2022-03-15 09:00:00"
}
POST /es_db/_doc/3
{
"uid" : "1234",
"phone" : "12345678909",
"message" : "我",
"msgcode" : "1",
"sendtime" : "2022-03-15 09:00:00"
}
POST /es_db/_doc/4
{
"uid" : "1234",
"phone" : "12345678909",
"message" : "中国人",
"msgcode" : "1",
"sendtime" : "2022-03-15 09:00:00"
}
POST /es_db/_doc/5
{
"uid" : "1234",
"phone" : "12345678909",
"message" : "中国",
"msgcode" : "1",
"sendtime" : "2022-03-15 09:00:00"
}
7.查询索引es_db下id=1的文档
GET /es_db/_doc/1
8.删除es_db下id=1的文档
DELETE /es_db/_doc/1
9.删除索引es_db
DELETE /es_db
10.重新创建索引es_db,并将索引的默认分词器(analysis.analyzer.default.type)指定为ik_max_word
PUT /es_db
{
"settings":{
"index":{
"analysis.analyzer.default.type":"ik_max_word"
}
}
}
11.查询es_db索引下的所有数据
GET /es_db/_search
{
"query": {
"match_all": {}
}
}
12.使用wildcard通配符查询es_db中message字段含"中国人"的数据
GET /es_db/_search
{
"query": {
"wildcard": {
"message": "*中国人*"
}
}
}
13.match查询:先将"中国人"分词为"中国人"、"中国"、"国人"3个词条,再根据这些词条进行数据查询
GET /es_db/_search
{
"query": {
"match": {
"message": "中国人"
}
}
}