ElasticSearch

Lucene

Introduction

Lucene is a subproject of the Apache Software Foundation's Jakarta project: an open-source full-text retrieval engine toolkit. It is not a complete full-text search engine but a full-text search engine architecture, providing a complete query engine and index engine plus part of a text-analysis engine.
Lucene's goal is to give software developers a simple, easy-to-use toolkit for adding full-text search to a target system, or for building a complete full-text search engine on top of it. Lucene is an open-source library for full-text indexing and search, supported and provided by the Apache Software Foundation.
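
As a quick orientation, here is a minimal index-then-search round trip (a sketch against the Lucene 6.6 API using an in-memory directory; the full file-based example with IKAnalyzer follows in the source section below):

package cn.edu360.lucene;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

public class QuickStart {
    public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory();                     //in-memory index
        IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()));
        Document doc = new Document();
        doc.add(new TextField("content", "hello lucene", Field.Store.YES));
        writer.addDocument(doc);                                //index one document
        writer.close();

        IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(dir));
        TopDocs hits = searcher.search(new TermQuery(new Term("content", "lucene")), 10);
        System.out.println("hits: " + hits.totalHits);          //prints hits: 1
    }
}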

Workflow diagram

(figure: Lucene workflow diagram)

Using the Lucene API

Copy the IKAnalyzer source code into the application.
(screenshot)

Inspecting Lucene's on-disk storage

Lucene's local storage:
(screenshot)
Note: the files in the index directory use a special binary format and cannot be read directly; inspecting them requires a dedicated tool.
(screenshot)
luke-6.6.0 (or another version matching your Lucene version) is available on GitHub.
Run the tool by launching luke.bat (on Windows).
(screenshots)
With Luke you can clearly see how many documents the index contains and what data each document holds.

Source code
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>cn.edu360.lucene</groupId>
    <artifactId>LuceneDemo</artifactId>
    <version>1.0-SNAPSHOT</version>

    <!-- Define some build constants -->
    <properties>
        <maven.compiler.source>1.8</maven.compiler.source>
        <maven.compiler.target>1.8</maven.compiler.target>
        <encoding>UTF-8</encoding>
    </properties>


    <dependencies>

        <dependency>
            <groupId>org.apache.httpcomponents</groupId>
            <artifactId>httpclient</artifactId>
            <version>4.5.2</version>
        </dependency>

        <dependency>
            <groupId>org.apache.logging.log4j</groupId>
            <artifactId>log4j-api</artifactId>
            <version>2.3</version>
        </dependency>

        <dependency>
            <groupId>commons-logging</groupId>
            <artifactId>commons-logging</artifactId>
            <version>1.2</version>
        </dependency>

        <dependency>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-log4j12</artifactId>
            <version>1.7.25</version>
            <scope>test</scope>
        </dependency>


        <!-- Lucene core -->
        <dependency>
            <groupId>org.apache.lucene</groupId>
            <artifactId>lucene-core</artifactId>
            <version>6.6.0</version>
        </dependency>

        <!-- Lucene analyzers: ships standard English-oriented analyzers, but no Chinese one -->
        <dependency>
            <groupId>org.apache.lucene</groupId>
            <artifactId>lucene-analyzers-common</artifactId>
            <version>6.6.0</version>
        </dependency>

        <!-- Query parser -->
        <dependency>
            <groupId>org.apache.lucene</groupId>
            <artifactId>lucene-queryparser</artifactId>
            <version>6.6.0</version>
        </dependency>

        <!-- Various query types -->
        <dependency>
            <groupId>org.apache.lucene</groupId>
            <artifactId>lucene-queries</artifactId>
            <version>6.6.0</version>
        </dependency>

        <!-- Keyword highlighting -->
        <dependency>
            <groupId>org.apache.lucene</groupId>
            <artifactId>lucene-highlighter</artifactId>
            <version>6.6.0</version>
        </dependency>

        <dependency>
            <groupId>org.apache.lucene</groupId>
            <artifactId>lucene-demo</artifactId>
            <version>6.6.0</version>
        </dependency>

        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>4.12</version>
        </dependency>

        <dependency>
            <groupId>log4j</groupId>
            <artifactId>log4j</artifactId>
            <version>1.2.17</version>
        </dependency>
        <dependency>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-api</artifactId>
            <version>1.7.22</version>
        </dependency>

    </dependencies>

</project>
package cn.edu360.lucene;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.LongPoint;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryparser.classic.MultiFieldQueryParser;
import org.apache.lucene.queryparser.classic.ParseException;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.*;
import org.apache.lucene.store.FSDirectory;
import org.junit.Test;
import org.wltea.analyzer.lucene.IKAnalyzer;

import java.io.IOException;
import java.nio.file.Paths;


public class HelloWorld {


    /**
     * Write data into Lucene
     * @throws IOException
     */
    @Test
    public void testCreate() throws IOException {
        Article article = new Article();
        article.setId(111L);
        article.setAuthor("猪");
        article.setTitle("学习Java");
        article.setContent("学数据,迎娶丁老师!");
        article.setUrl("http://www.edu360.cn/a10011");

        String indexPath = "D:\\lucene\\index";
        FSDirectory fsDirectory = FSDirectory.open(Paths.get(indexPath));
        //The StandardAnalyzer splits Chinese one character at a time;
        //IKAnalyzer(true) uses IK's smart (coarse-grained) Chinese segmentation instead
        //Analyzer analyzer = new StandardAnalyzer();
        Analyzer analyzer = new IKAnalyzer(true);
        //Index-writer configuration carrying the analyzer
        IndexWriterConfig indexWriterConfig = new IndexWriterConfig(analyzer);
        //IndexWriter bound to the target directory and the configuration
        IndexWriter indexWriter = new IndexWriter(fsDirectory, indexWriterConfig);
        //Build a document object from the article
        Document document = article.toDocument();
        //Write it through the IndexWriter
        indexWriter.addDocument(document);
        indexWriter.close();
    }

    @Test
    public void testSearch() throws IOException, ParseException {

        String indexPath = "D:\\lucene\\index";
        //Analyzer analyzer = new StandardAnalyzer();
        Analyzer analyzer = new IKAnalyzer(true);
        DirectoryReader directoryReader = DirectoryReader.open(FSDirectory.open(Paths.get(indexPath)));
        //Searcher over the index
        IndexSearcher indexSearcher = new IndexSearcher(directoryReader);

        String queryStr = "数据";
        //Create a query parser for the "content" field
        QueryParser parser = new QueryParser("content", analyzer);
        //Parse the query string
        Query query = parser.parse(queryStr);

        //TermQuery treats the query string as one fixed term (no analysis)
        //Query query = new TermQuery(new Term("url", "http://www.edu360.cn/a10010"));
        //Search in the [index]
        TopDocs topDocs = indexSearcher.search(query, 10);

        //The hits: document IDs and scores
        ScoreDoc[] scoreDocs = topDocs.scoreDocs;
        for (ScoreDoc scoreDoc : scoreDocs) {
            //The document's internal ID found in the index
            int doc = scoreDoc.doc;
            //Then fetch the stored document content by ID
            Document document = indexSearcher.doc(doc);
            //Convert the document back into the entity class
            Article article = Article.parseArticle(document);
            System.out.println(article);
        }

        directoryReader.close();
    }

    @Test
    public void testDelete() throws IOException, ParseException {

        String indexPath = "D:\\lucene\\index";
        Analyzer analyzer = new IKAnalyzer(true);
        FSDirectory fsDirectory = FSDirectory.open(Paths.get(indexPath));
        IndexWriterConfig indexWriterConfig = new IndexWriterConfig(analyzer);
        IndexWriter indexWriter = new IndexWriter(fsDirectory, indexWriterConfig);

        //Term-based delete: the value must match the indexed term exactly; it is not analyzed
        //indexWriter.deleteDocuments(new Term("content", "学好"));

        //QueryParser parser = new QueryParser("title", analyzer);
        //Query query = parser.parse("大数据老师");

        //LongPoint fields are indexed, so they can be matched by exact value or by range
        //Query query = LongPoint.newRangeQuery("id", 99L, 120L);
        Query query = LongPoint.newExactQuery("id", 108L);

        indexWriter.deleteDocuments(query);

        indexWriter.commit();
        indexWriter.close();
    }

    /**
     * Update in Lucene is special: an in-place update would be too expensive,
     * so it deletes the old document first and then inserts the new one
     * @throws IOException
     * @throws ParseException
     */
    @Test
    public void testUpdate() throws IOException, ParseException {

        String indexPath = "D:\\lucene\\index";
        //StandardAnalyzer analyzer = new StandardAnalyzer();
        Analyzer analyzer = new IKAnalyzer(true);
        FSDirectory fsDirectory = FSDirectory.open(Paths.get(indexPath));
        IndexWriterConfig indexWriterConfig = new IndexWriterConfig(analyzer);
        IndexWriter indexWriter = new IndexWriter(fsDirectory, indexWriterConfig);


        Article article = new Article();
        article.setId(106L);
        article.setAuthor("老张");
        article.setTitle("学好大数据,要找赵老师");
        article.setContent("迎娶白富美,走上人生巅峰!!!");
        article.setUrl("http://www.edu360.cn/a111");
        Document document = article.toDocument();
        System.out.println(article.toString());

        indexWriter.updateDocument(new Term("author", "猪"), document);

        indexWriter.commit();
        indexWriter.close();
    }

    /**
     * Search across multiple fields
     * @throws IOException
     * @throws ParseException
     */
    @Test
    public void testMultiField() throws IOException, ParseException {

        String indexPath = "D:\\lucene\\index";
        Analyzer analyzer = new IKAnalyzer(true);
        DirectoryReader directoryReader = DirectoryReader.open(FSDirectory.open(Paths.get(indexPath)));
        IndexSearcher indexSearcher = new IndexSearcher(directoryReader);

        String[] fields = {"title", "content"};
        //Query parser across multiple fields
        MultiFieldQueryParser queryParser = new MultiFieldQueryParser(fields, analyzer);
        Query query = queryParser.parse("老师");

        TopDocs topDocs = indexSearcher.search(query, 10);
        ScoreDoc[] scoreDocs = topDocs.scoreDocs;
        for (ScoreDoc scoreDoc : scoreDocs) {
            int doc = scoreDoc.doc;
            Document document = indexSearcher.doc(doc);
            Article article = Article.parseArticle(document);
            System.out.println(article);
        }

        directoryReader.close();
    }

    /**
     * Fetch all documents
     * @throws IOException
     * @throws ParseException
     */
    @Test
    public void testMatchAll() throws IOException, ParseException {

        String indexPath = "D:\\lucene\\index";
        DirectoryReader directoryReader = DirectoryReader.open(FSDirectory.open(Paths.get(indexPath)));
        IndexSearcher indexSearcher = new IndexSearcher(directoryReader);

        Query query = new MatchAllDocsQuery();

        TopDocs topDocs = indexSearcher.search(query, 10);
        ScoreDoc[] scoreDocs = topDocs.scoreDocs;
        for (ScoreDoc scoreDoc : scoreDocs) {
            int doc = scoreDoc.doc;
            Document document = indexSearcher.doc(doc);
            Article article = Article.parseArticle(document);
            System.out.println(article);
        }

        directoryReader.close();
    }

    /**
     * Boolean query: combines multiple query conditions
     * @throws Exception
     */
    @Test
    public void testBooleanQuery() throws Exception {
        String indexPath = "D:\\lucene\\index";
        DirectoryReader directoryReader = DirectoryReader.open(FSDirectory.open(Paths.get(indexPath)));
        IndexSearcher indexSearcher = new IndexSearcher(directoryReader);

        Query query1 = new TermQuery(new Term("title", "老师"));
        Query query2 = new TermQuery(new Term("content", "丁"));
        BooleanClause bc1 = new BooleanClause(query1, BooleanClause.Occur.MUST);
        BooleanClause bc2 = new BooleanClause(query2, BooleanClause.Occur.MUST_NOT);
        BooleanQuery boolQuery = new BooleanQuery.Builder().add(bc1).add(bc2).build();
        System.out.println(boolQuery);

        TopDocs topDocs = indexSearcher.search(boolQuery, 10);
        ScoreDoc[] scoreDocs = topDocs.scoreDocs;
        for (ScoreDoc scoreDoc : scoreDocs) {
            int doc = scoreDoc.doc;
            Document document = indexSearcher.doc(doc);
            Article article = Article.parseArticle(document);
            System.out.println(article);
        }

        directoryReader.close();
    }

    @Test
    public void testQueryParser() throws Exception {
        String indexPath = "D:\\lucene\\index";
        DirectoryReader directoryReader = DirectoryReader.open(FSDirectory.open(Paths.get(indexPath)));
        IndexSearcher indexSearcher = new IndexSearcher(directoryReader);

        //Create a QueryParser. Arg 1: the default search field; arg 2: the analyzer.
        QueryParser queryParser = new QueryParser("title", new IKAnalyzer(true));

        //Query query = queryParser.parse("数据");
        Query query = queryParser.parse("title:学好 OR title:学习");
        System.out.println(query);

        TopDocs topDocs = indexSearcher.search(query, 10);
        ScoreDoc[] scoreDocs = topDocs.scoreDocs;
        for (ScoreDoc scoreDoc : scoreDocs) {
            int doc = scoreDoc.doc;
            Document document = indexSearcher.doc(doc);
            Article article = Article.parseArticle(document);
            System.out.println(article);
        }

        directoryReader.close();
    }


    @Test
    public void testRangeQuery() throws Exception {
        String indexPath = "D:\\lucene\\index";
        DirectoryReader directoryReader = DirectoryReader.open(FSDirectory.open(Paths.get(indexPath)));
        IndexSearcher indexSearcher = new IndexSearcher(directoryReader);


        Query query = LongPoint.newRangeQuery("id", 107L, 111L);

        System.out.println(query);

        TopDocs topDocs = indexSearcher.search(query, 10);
        ScoreDoc[] scoreDocs = topDocs.scoreDocs;
        for (ScoreDoc scoreDoc : scoreDocs) {
            int doc = scoreDoc.doc;
            Document document = indexSearcher.doc(doc);
            Article article = Article.parseArticle(document);
            System.out.println(article);
        }

        directoryReader.close();
    }
}

package cn.edu360.lucene;

import org.apache.lucene.document.*;
import org.apache.lucene.document.Field.Store;

public class Article {

	private Long id;
	
	private String title;
	
	private String content;
	
	private String author;
	
	private String url;

	public Article(){}
	
	public Article(Long id, String title, String content, String author,
			String url) {
		super();
		this.id = id;
		this.title = title;
		this.content = content;
		this.author = author;
		this.url = url;
	}

	public Long getId() {
		return id;
	}

	public void setId(Long id) {
		this.id = id;
	}

	public String getTitle() {
		return title;
	}

	public void setTitle(String title) {
		this.title = title;
	}

	public String getContent() {
		return content;
	}

	public void setContent(String content) {
		this.content = content;
	}

	public String getAuthor() {
		return author;
	}

	public void setAuthor(String author) {
		this.author = author;
	}

	public String getUrl() {
		return url;
	}

	public void setUrl(String url) {
		this.url = url;
	}
	
	public Document toDocument(){
		//Lucene's storage format: key/value pairs, like a Map
		Document doc = new Document();
		//Add a long field: a LongPoint is indexed (searchable by value or range) but not stored
		doc.add(new LongPoint("id", id));
		//Store the raw id value in the document as well
		doc.add(new StoredField("id", id));

		//TextField: the content is analyzed and indexed, and Store.YES also stores it in the document
		doc.add(new TextField("title", title, Store.YES));
		//Same for content; Store.NO would index it without storing the original text
		doc.add(new TextField("content", content, Store.YES));

		//StringField: not analyzed, but indexed and stored
		doc.add(new StringField("author", author, Store.YES));

		//StoredField: not analyzed, not indexed, only stored in the document
		doc.add(new StoredField("url", url));
		return doc;
	}
	
	public static Article parseArticle(Document doc){
		Long id = Long.parseLong(doc.get("id"));
		String title = doc.get("title");
		String content = doc.get("content");
		String author = doc.get("author");
	    String url = doc.get("url");
		Article article = new Article(id, title, content, author, url);
		return article;
	}

	@Override
	public String toString() {
		return "id : " + id + " , title : " + title + " , content : " + content + " , author : " + author + " , url : " + url;
	}
	
	
}

Summary of Lucene queries

1. Term query: no analysis, exact match on the indexed term.
2. Analyzed (parsed) query.
3. Multi-field query.
4. Boolean query.
5. Range query (numeric fields).
6. Exact-value query (numeric fields).
7. Match-all query.
Each of these is sketched below.
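
A compact recap of those seven query types in one sketch (Lucene 6.6 API, same imports and field names as the HelloWorld class above; the analyzer parameter stands for any analyzer, e.g. IKAnalyzer):

    //Sketch only: each Query below mirrors one item of the list above
    static void buildQueries(Analyzer analyzer) throws ParseException {
        Query term   = new TermQuery(new Term("author", "猪"));            //1. exact term, not analyzed
        Query parsed = new QueryParser("content", analyzer).parse("数据");  //2. analyzed query
        Query multi  = new MultiFieldQueryParser(
                new String[]{"title", "content"}, analyzer).parse("老师");  //3. multi-field query
        Query bool   = new BooleanQuery.Builder()
                .add(term, BooleanClause.Occur.MUST)
                .add(parsed, BooleanClause.Occur.MUST_NOT)
                .build();                                                   //4. boolean combination
        Query range  = LongPoint.newRangeQuery("id", 107L, 111L);           //5. numeric range
        Query exact  = LongPoint.newExactQuery("id", 108L);                 //6. numeric exact value
        Query all    = new MatchAllDocsQuery();                             //7. match all
    }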

ElasticSearch

Introduction

Elasticsearch is a search server based on Lucene. It provides a distributed, multi-tenant full-text search engine behind a RESTful web interface. Elasticsearch is developed in Java, released as open source under the Apache License, and is a popular enterprise search engine. It is used in cloud computing scenarios and offers real-time search while being stable, reliable, fast, and easy to install and use. Official clients are available for Java, .NET (C#), PHP, Python, Apache Groovy, Ruby, and many other languages. According to the DB-Engines ranking, Elasticsearch is the most popular enterprise search engine, followed by Apache Solr, which is also based on Lucene.

Core concepts

Elasticsearch is a distributed full-text search framework built on Lucene; data is stored in and queried from ES as JSON.

Index: comparable to a database in an RDBMS
Type: comparable to a table
id: comparable to a table's primary key
Storing data in ES therefore means storing JSON documents under a Type inside an Index (the curl examples in the RESTful API section below show exactly this).

Download and installation

Download

https://www.elastic.co/cn/downloads/

Installation

Single-node setup

1. Install the JDK (1.8)
2. Upload and extract Elasticsearch 5.4.3
3. Create a regular user, then change the owner and group of the ES directory to that user
Note: Elasticsearch cannot be started as root
4. Edit config/elasticsearch.yml
network.host: 192.168.145.201
5. Start ES; it fails
bin/elasticsearch
6. Check the error log
cat logs/elasticsearch.log
#Errors:
[1]: max file descriptors [4096] for elasticsearch process is too low, increase to at least [65536]
[2]: max virtual memory areas vm.max_map_count [65530] is too low, increase to at least [262144]
7. Fix the problems
#The per-user open-file limit is too low
sudo vi /etc/security/limits.conf
* soft nofile 65536
* hard nofile 65536
#Check the hard open-file limit
ulimit -Hn
#The maximum virtual memory is too low
sudo vi /etc/sysctl.conf
vm.max_map_count=262144
#Reload and print the kernel settings
sudo sysctl -p
8. Reboot Linux
shutdown -r now
9. Start ES in the background and access it from a browser
bin/elasticsearch -d
192.168.100.211:9200
Cluster setup

0. Create a new user
adduser es
passwd es
Note: perform every Elasticsearch operation as the es user, and watch out for directory permissions
1. Install the JDK (1.8 update 20 or later)
2. Upload the ES package
3. Extract it
tar -zxvf elasticsearch-5.4.3.tar.gz -C /appdata/
4. Edit the configuration
vi /appdata/elasticsearch-5.4.3/config/elasticsearch.yml

#Cluster name; nodes with the same name discover each other and form one cluster
cluster.name: my-es
#Node name; must be unique per node
node.name: es-1
#Data directory (make sure the es user has permission to create it)
path.data: /data/es/data
#Log directory (optional)
path.logs: /data/es/logs
#IP address ES binds to
network.host: 192.168.145.201
#Nodes eligible for master election at startup (in a large cluster, not every node needs to be listed)
discovery.zen.ping.unicast.hosts: ["n1", "n2", "n3"]

5. Start it: /appdata/elasticsearch-5.4.3/bin/elasticsearch -d
#Possible errors:
[1]: max file descriptors [4096] for elasticsearch process is too low, increase to at least [65536]
[2]: max virtual memory areas vm.max_map_count [65530] is too low, increase to at least [262144]

#The per-user open-file limit is too low
sudo vi /etc/security/limits.conf
* soft nofile 65536
* hard nofile 65536
#Check the hard open-file limit
ulimit -Hn
#The maximum virtual memory is too low
sudo vi /etc/sysctl.conf
vm.max_map_count=262144
#Reload and print the kernel settings
sudo sysctl -p

6. Copy the installation to the other nodes with scp
scp -r elasticsearch-5.4.3/ n2:$PWD
scp -r elasticsearch-5.4.3/ n3:$PWD

7. On the other nodes, adjust the ES configuration; node.name and network.host must be changed

8. Start ES (/appdata/elasticsearch-5.4.3/bin/elasticsearch -h shows the help)
/appdata/elasticsearch-5.4.3/bin/elasticsearch -d

9. Access port 9200 of each ES machine from a browser
http://n1:9200/
http://n2:9200/
http://n3:9200/
(screenshots)

RESTful API (request conventions)

Operations are sent to ES as HTTP requests:
Query: GET
Delete: DELETE
Add: PUT/POST
Update: PUT/POST

Format of a RESTful URL:
http://n1:9200/<index>/<type>/[<id>]
index and type are required.
id is optional; if it is omitted, ES generates one.
index and type organize the data into layers, which helps management.
Think of index as the database, type as the table, and id as a row's unique primary key.
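
Since the pom shown earlier already declares httpclient, here is a sketch of issuing one of these requests from Java (hypothetical class name; it assumes the store/books document created in the next section already exists):

package cn.edu360.es;

import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.util.EntityUtils;

public class RestDemo {
    public static void main(String[] args) throws Exception {
        CloseableHttpClient http = HttpClients.createDefault();
        //GET = query; the URL follows http://host:9200/<index>/<type>/<id>
        HttpGet get = new HttpGet("http://n1:9200/store/books/1");
        try (CloseableHttpResponse response = http.execute(get)) {
            //Print the raw JSON returned by Elasticsearch
            System.out.println(EntityUtils.toString(response.getEntity(), "UTF-8"));
        }
        http.close();
    }
}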

Add

#Add some books to the store index
curl -XPUT 'http://n1:9200/store/books/1' -d '{
"title": "Elasticsearch: The Definitive Guide",
"name" : {
"first" : "Zachary",
"last" : "Tong"
},
"publish_date":"2015-02-06",
"price":"49.99"
}'

#Add another book
curl -XPUT 'http://n1:9200/store/books/2' -d '{
"title": "Elasticsearch Blueprints",
"name" : {
"first" : "Vineeth",
"last" : "Mohan"
},
"publish_date":"2015-06-06",
"price":"35.99"
}'

Query

#Query via curl on Linux
curl -XGET 'http://n1:9200/store/books/1'
#Query from a browser
http://n1:9200/store/books/1
#Use _source to fetch only selected fields
curl -XGET 'http://n1:9200/store/books/1?_source=title'
curl -XGET 'http://n1:9200/store/books/1?_source=title,price'
curl -XGET 'http://n1:9200/store/books/1?_source'

Update

#Update by overwriting the whole document
curl -XPUT 'http://n1:9200/store/books/1' -d '{
"title": "Elasticsearch: The Definitive Guide",
"name" : {
"first" : "Zachary",
"last" : "Tong"
},
"publish_date":"2016-02-06",
"price":"99.99"
}'
#Or update only the fields you want via the _update API
curl -XPOST 'http://n1:9200/store/books/1/_update' -d '{
"doc": {
"price" : 88.88
}
}'

Delete

#Delete a document
curl -XDELETE 'http://n1:9200/store/books/1'

#Add a document used by the filter examples below
curl -XPUT 'http://n1:9200/store/books/4' -d '{
"title": "Elasticsearch: The Definitive Guide",
"author": "Guide",
"publish_date":"2016-02-06",
"price":"35.99"
}'

Filter queries

The simplest filter query

SELECT * FROM books WHERE price = 35.99
A filtered query for price = 35.99.
#With match_all plus a filter, the returned score is 1.0
curl -XGET 'http://n1:9200/store/books/_search' -d '{
"query": {
"bool": {
"must": {
"match_all": {}
},
"filter": {
"term": {
"price": 35.99
}
}
}
}
}'

#With constant_score, the returned score is also 1.0
curl -XGET 'http://n1:9200/store/books/_search' -d '{
"query": {
"constant_score": {
"filter": {
"term": {
"price": 35.99
}
}
}
}
}'
#With a bare bool filter, the returned score is 0.0
curl -XGET 'http://n1:9200/store/books/_search' -d '{
"query": {
"bool": {
"filter" : {
"term" : {
"price" : 35.99
}
}
}
}
}'

#Filter on multiple values
curl -XGET 'http://n1:9200/store/books/_search' -d '{
"query" : {
"bool" : {
"filter" : {
"terms" : {
"price" : [35.99, 99.99]
}
}
}
}
}'

curl -XGET 'http://n1:9200/store/books/_search' -d '{
"query" : {
"bool" : {
"must": {
"match_all": {}
},
"filter" : {
"terms" : {
"price" : [35.99, 99.99]
}
}
}
}
}'

SELECT * FROM books WHERE publish_date = "2015-02-06"
curl -XGET 'http://n1:9200/store/books/_search' -d '{
"query" : {
"bool" : {
"filter" : {
"term" : {
"publish_date" : "2015-02-06"
}
}
}
}
}'

bool filter queries can combine multiple filters

SELECT * FROM books WHERE (price = 35.99 OR price = 99.99) AND publish_date != "2016-02-06"
Similarly, Elasticsearch supports and/or/not-style combinations, in this format:
{
"bool" : {
"must" : [],
"should" : [],
"must_not" : []
}
}

must: every condition must match (AND)
should: conditions may match (OR)
must_not: conditions must not match (NOT)

curl -XGET 'http://n1:9200/store/books/_search' -d '{
"query" : {
"bool" : {
"should" : [
{ "term" : {"price" : 35.99}},
{ "term" : {"price" : 99.99}}
],
"must_not" : {
"term" : {"publish_date" : "2016-02-06"}
}
}
}
}'

Nested queries

SELECT * FROM books WHERE price = 35.99 OR ( publish_date = "2016-02-06" AND price = 99.99 )
curl -XGET 'http://n1:9200/store/books/_search' -d '{
"query": {
"bool": {
"should": [
{
"term": {
"price": 35.99
}
},
{
"bool": {
"must": [
{
"term": {
"publish_date": "2016-02-06"
}
},
{
"term": {
"price": 99.99
}
}
]
}
}
]
}
}
}'

range filters

SELECT * FROM books WHERE price >= 10 AND price < 99
gt : > greater than
lt : < less than
gte : >= greater than or equal to
lte : <= less than or equal to

curl -XGET 'http://n1:9200/store/books/_search' -d '{
"query": {
"range" : {
"price" : {
"gte" : 10,
"lt" : 99
}
}
}
}'
#name and author must both contain Guide, and the price must be 35.99 or 188.99
curl -XGET 'http://n1:9200/store/books/_search' -d '{
"query": {
"bool": {
"must": {
"multi_match": {
"operator": "and",
"fields": [
"name",
"author"
],
"query": "Guide"
}
},
"filter": {
"terms": {
"price": [
35.99,
188.99
]
}
}
}
}
}'
http://n1:9200/store/books/_search

Installing the elasticsearch-head plugin

Purpose: the elasticsearch-head UI gives a visual view of the data stored in Elasticsearch.
Update the yum repositories:
sudo yum update -y
rpm -ivh http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
rpm -ivh https://kojipkgs.fedoraproject.org//packages/http-parser/2.7.1/3.el7/x86_64/http-parser-2.7.1-3.el7.x86_64.rpm
yum install npm
yum install -y git
yum install -y bzip2
git clone git://github.com/mobz/elasticsearch-head.git

Move the cloned source into /appdata and change its owner and group:
sudo chown -R es:es /appdata/elasticsearch-head

Enter the elasticsearch-head directory:
cd elasticsearch-head
Build and install:
npm install
(screenshot: npm install stalls while downloading PhantomJS)
If that happens, copy a local phantomjs-2.1.1-linux-x86_64.tar.bz2 into /tmp/phantomjs/.
Open elasticsearch-head/Gruntfile.js, find the connect property shown below, and add hostname: '0.0.0.0',

		connect: {
                        server: {
                                options: {
                                        hostname: '0.0.0.0',
                                        port: 9100,
                                        base: '.',
                                        keepalive: true
                                }
                        }
                }

Edit elasticsearch-5.4.3/config/elasticsearch.yml and add:
http.cors.enabled: true
http.cors.allow-origin: "*"

Start Elasticsearch first:
cd /appdata/elasticsearch
bin/elasticsearch
Then run the head service:
npm run start
(screenshot)
It works.

Installing the IK analyzer

0. Download the plugin version matching your ES version
https://github.com/medcl/elasticsearch-analysis-ik/releases

1. Upload the analyzer plugin
(screenshot)
2. Unzip it into a dedicated directory
mkdir ik
cd ik
unzip ../elasticsearch-analysis-ik-5.4.3.zip
This unpacks all of elasticsearch-analysis-ik-5.4.3.zip into ik
3. Move the ik directory into /appdata/elasticsearch/plugins
mv -t /appdata/elasticsearch/plugins/ ik
4. Copy the ik directory to the other hosts
scp -r ik n3:$PWD
5. Restart the Elasticsearch service

#Create an index named news
curl -XPUT http://n1:9200/news

#Create the mapping (comparable to schema information in a database: table name, field names, field types)
curl -XPOST http://n1:9200/news/fulltext/_mapping -d'
{
"properties": {
"content": {
"type": "text",
"analyzer": "ik_max_word",
"search_analyzer": "ik_max_word"
}
}
}'

curl -XPOST http://n1:9200/news/fulltext/1 -d'
{"content":"美国留给伊拉克的是个烂摊子吗"}'

curl -XPOST http://n1:9200/news/fulltext/2 -d'
{"content":"公安部:各地校车将享最高路权"}'

curl -XPOST http://n1:9200/news/fulltext/3 -d'
{"content":"中韩渔警冲突调查:韩警平均每天扣1艘中国渔船"}'

curl -XPOST http://n1:9200/news/fulltext/4 -d'
{"content":"中国驻洛杉矶领事馆遭亚裔男子枪击 嫌犯已自首"}'

curl -XPOST http://n1:9200/news/fulltext/_search -d'
{
"query" : { "match" : { "content" : "中国" }},
"highlight" : {
"pre_tags" : ["<tag1>", "<tag2>"],
"post_tags" : ["</tag1>", "</tag2>"],
"fields" : {
"content" : {}
}
}
}'

curl -XGET 'http://n1:9200/_analyze?pretty&analyzer=ik_max_word' -d '联想是全球最大的笔记本厂商'
curl -XGET 'http://n1:9200/_analyze?pretty&analyzer=ik_smart' -d '联想是全球最大的笔记本厂商'
curl -XPUT 'http://n1:9200/iktest?pretty' -d '{
"settings" : {
"analysis" : {
"analyzer" : {
"ik" : {
"tokenizer" : "ik_max_word"
}
}
}
},
"mappings" : {
"article" : {
"dynamic" : true,
"properties" : {
"subject" : {
"type" : "string",
"analyzer" : "ik_max_word"
}
}
}
}
}'

curl -XGET 'http://n1:9200/_analyze?pretty&analyzer=ik_max_word' -d '中华人民共和国'

Java API

package cn.edu360.es;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.bulk.byscroll.BulkByScrollResponse;
import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.get.MultiGetItemResponse;
import org.elasticsearch.action.get.MultiGetResponse;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.reindex.DeleteByQueryAction;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHitField;
import org.elasticsearch.search.SearchHits;
import org.elasticsearch.search.aggregations.Aggregation;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.Aggregations;
import org.elasticsearch.search.aggregations.bucket.terms.DoubleTerms;
import org.elasticsearch.search.aggregations.bucket.terms.StringTerms;
import org.elasticsearch.search.aggregations.bucket.terms.Terms;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.avg.InternalAvg;
import org.elasticsearch.search.aggregations.metrics.max.InternalMax;
import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.sum.InternalSum;
import org.elasticsearch.search.aggregations.metrics.sum.SumAggregationBuilder;
import org.elasticsearch.transport.client.PreBuiltTransportClient;
import org.junit.Before;
import org.junit.Test;

import java.io.IOException;
import java.net.InetAddress;
import java.util.Date;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;

import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.query.QueryBuilders.rangeQuery;


/**
 * Created by zx on 2017/9/5.
 * https://www.elastic.co/guide/en/elasticsearch/client/java-api/5.4/index.html
 */
public class EsCRUD {

    private TransportClient client = null;

    @Before
    public void init() throws Exception {
        //Set the cluster name
        Settings settings = Settings.builder()
                .put("cluster.name", "my-es")
                //Sniffing: discover the rest of the cluster's nodes from the ones listed here
                .put("client.transport.sniff", true)
                .build();
        //Create the client
        client = new PreBuiltTransportClient(settings).addTransportAddresses(
                new InetSocketTransportAddress(InetAddress.getByName("192.168.100.211"), 9300),
                new InetSocketTransportAddress(InetAddress.getByName("192.168.100.212"), 9300),
                new InetSocketTransportAddress(InetAddress.getByName("192.168.100.213"), 9300));

    }


    @Test
    public void testCreate() throws IOException {

        IndexResponse response = client.prepareIndex("gamelog", "users", "1")
                .setSource(
                        jsonBuilder()
                                .startObject()
                                    .field("username", "老赵")
                                    .field("gender", "male")
                                    .field("birthday", new Date())
                                    .field("fv", 9999)
                                    .field("message", "trying out Elasticsearch")
                                .endObject()
                ).get();


    }

    //Get a single document
    @Test
    public void testGet() throws IOException {
        GetResponse response = client.prepareGet("gamelog", "users", "1").get();
        System.out.println(response.getSourceAsString());
    }


    //Get multiple documents
    @Test
    public void testMultiGet() throws IOException {
        MultiGetResponse multiGetItemResponses = client.prepareMultiGet()
                .add("gamelog", "users", "1")
                .add("gamelog", "users", "2", "3")
                .add("news", "fulltext", "1")
                .get();

        for (MultiGetItemResponse itemResponse : multiGetItemResponses) {
            GetResponse response = itemResponse.getResponse();
            if (response.isExists()) {
                String json = response.getSourceAsString();
                System.out.println(json);
            }
        }
    }

    @Test
    public void testUpdate() throws Exception {
        UpdateRequest updateRequest = new UpdateRequest();
        updateRequest.index("gamelog");
        updateRequest.type("users");
        updateRequest.id("2");
        updateRequest.doc(
                jsonBuilder()
                    .startObject()
                        .field("fv", 999.9)
                    .endObject());
        client.update(updateRequest).get();
    }

    @Test
    public void testDelete() {
        DeleteResponse response = client.prepareDelete("gamelog", "users", "2").get();
        System.out.println(response);
    }

    @Test
    public void testDeleteByQuery() {
        BulkByScrollResponse response =
                DeleteByQueryAction.INSTANCE.newRequestBuilder(client)
                        //Match condition for the documents to delete
                        .filter(QueryBuilders.matchQuery("username", "老段"))
                        //Index to delete from
                        .source("gamelog")
                        .get();

        long deleted = response.getDeleted();
        System.out.println(deleted);


    }

    //Asynchronous delete
    @Test
    public void testDeleteByQueryAsync() {
        DeleteByQueryAction.INSTANCE.newRequestBuilder(client)
                .filter(QueryBuilders.matchQuery("gender", "male"))
                .source("gamelog")
                .execute(new ActionListener<BulkByScrollResponse>() {
                    @Override
                    public void onResponse(BulkByScrollResponse response) {
                        long deleted = response.getDeleted();
                        System.out.println("数据删除了");
                        System.out.println(deleted);
                    }

                    @Override
                    public void onFailure(Exception e) {
                        e.printStackTrace();
                    }
                });
        try {
            System.out.println("异步删除");
            Thread.sleep(10000);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    @Test
    public void testRange() {

        QueryBuilder qb = rangeQuery("fv")
                // [88.99, 10000)
                .from(88.99)
                .to(10000)
                .includeLower(true)
                .includeUpper(false);

        SearchResponse response = client.prepareSearch("gamelog").setQuery(qb).get();

        System.out.println(response);
    }


    /**
     * curl -XPUT 'http://192.168.5.251:9200/player_info/player/1' -d '{ "name": "curry", "age": 29, "salary": 3500,"team": "war", "position": "pg"}'
     * curl -XPUT 'http://192.168.5.251:9200/player_info/player/2' -d '{ "name": "thompson", "age": 26, "salary": 2000,"team": "war", "position": "pg"}'
     * curl -XPUT 'http://192.168.5.251:9200/player_info/player/3' -d '{ "name": "irving", "age": 25, "salary": 2000,"team": "cav", "position": "pg"}'
     * curl -XPUT 'http://192.168.5.251:9200/player_info/player/4' -d '{ "name": "green", "age": 26, "salary": 2000,"team": "war", "position": "pf"}'
     * curl -XPUT 'http://192.168.5.251:9200/player_info/player/5' -d '{ "name": "james", "age": 33, "salary": 4000,"team": "cav", "position": "sf"}'
     */
    @Test
    public void testAddPlayer() throws IOException {

        IndexResponse response = client.prepareIndex("player_info", "player", "1")
                .setSource(
                        jsonBuilder()
                                .startObject()
                                .field("name", "James")
                                .field("age", 33)
                                .field("salary", 3000)
                                .field("team", "cav")
                                .field("position", "sf")
                                .endObject()
                ).get();


    }

    /**
     * https://elasticsearch.cn/article/102
     *
     * select team, count(*) as player_count from player group by team;
     */
    @Test
    public void testAgg1() {

        //Specify the index and type
        SearchRequestBuilder builder = client.prepareSearch("player_info").setTypes("player");
        //Bucket by team; no metric function is specified, so we only get doc counts
        TermsAggregationBuilder teamAgg = AggregationBuilders.terms("player_count").field("team");
        //Register the aggregation
        builder.addAggregation(teamAgg);
        //Execute
        SearchResponse response = builder.execute().actionGet();
        //System.out.println(response);
        //Put the returned aggregations into a map, keyed by alias
        Map<String, Aggregation> aggMap = response.getAggregations().getAsMap();
//        Set<String> keys = aggMap.keySet();
//
//        for (String key: keys) {
//            System.out.println(key);
//        }

        //Take out the aggregation by its alias
        StringTerms terms = (StringTerms) aggMap.get("player_count");

        //Iterate over the team buckets
//        for (Terms.Bucket bucket : terms.getBuckets()) {
//            //bucket key: the team name
//            String team = (String) bucket.getKey();
//            //doc count in this bucket
//            long count = bucket.getDocCount();
//            System.out.println(team + " " + count);
//        }

        Iterator<Terms.Bucket> teamBucketIt = terms.getBuckets().iterator();
        while (teamBucketIt.hasNext()) {
            Terms.Bucket bucket = teamBucketIt.next();
            String team = (String) bucket.getKey();

            long count = bucket.getDocCount();

            System.out.println(team + " " + count);
        }

    }

    /**
     * select team, position, count(*) as pos_count from player group by team, position;
     */
    @Test
    public void testAgg2() {
        SearchRequestBuilder builder = client.prepareSearch("player_info").setTypes("player");
        //Aggregation aliases and the fields to group by
        TermsAggregationBuilder teamAgg = AggregationBuilders.terms("team_name").field("team");
        TermsAggregationBuilder posAgg = AggregationBuilders.terms("pos_count").field("position");
        //Nest the position aggregation inside the team aggregation
        builder.addAggregation(teamAgg.subAggregation(posAgg));
        //Execute the search
        SearchResponse response = builder.execute().actionGet();
        //Put the results into a map
        Map<String, Aggregation> aggMap = response.getAggregations().getAsMap();
        //Look up the team aggregation by its alias
        StringTerms teams = (StringTerms) aggMap.get("team_name");
        //Iterate over the buckets
        for (Terms.Bucket teamBucket : teams.getBuckets()) {
            //First level: the team
            String team = (String) teamBucket.getKey();
            Map<String, Aggregation> subAggMap = teamBucket.getAggregations().getAsMap();
            StringTerms positions = (StringTerms) subAggMap.get("pos_count");
            //A team has several positions, so iterate the nested position buckets
            for (Terms.Bucket posBucket : positions.getBuckets()) {
                //Position name
                String pos = (String) posBucket.getKey();
                //Player count at this position
                long docCount = posBucket.getDocCount();
                //Print team, position, count
                System.out.println(team + " " + pos + " " + docCount);
            }


        }
    }


    /**
     * select team, max(age) as max_age from player group by team;
     */
    @Test
    public void testAgg3() {
        SearchRequestBuilder builder = client.prepareSearch("player_info").setTypes("player");
        //Group by team
        TermsAggregationBuilder teamAgg = AggregationBuilders.terms("team_name").field("team");
        //Metric: max age per group
        MaxAggregationBuilder maxAgg = AggregationBuilders.max("max_age").field("age");
        //Nest the metric inside the grouping
        builder.addAggregation(teamAgg.subAggregation(maxAgg));
        //Execute
        SearchResponse response = builder.execute().actionGet();
        Map<String, Aggregation> aggMap = response.getAggregations().getAsMap();
        //Look up the team aggregation by its alias
        StringTerms teams = (StringTerms) aggMap.get("team_name");
        for (Terms.Bucket teamBucket : teams.getBuckets()) {
            //The grouping key: the team name
            String team = (String) teamBucket.getKey();
            //Take the nested aggregation results of this bucket
            Map<String, Aggregation> subAggMap = teamBucket.getAggregations().getAsMap();
            //The max value for this group
            InternalMax ages = (InternalMax) subAggMap.get("max_age");
            double max = ages.getValue();
            System.out.println(team + " " + max);
        }
    }

    /**
     * select team, avg(age) as avg_age, sum(salary) as total_salary from player group by team;
     */
    @Test
    public void testAgg4() {
        SearchRequestBuilder builder = client.prepareSearch("player_info").setTypes("player");
        //Field to group by
        TermsAggregationBuilder termsAgg = AggregationBuilders.terms("team_name").field("team");
        //Metric 1: average age
        AvgAggregationBuilder avgAgg = AggregationBuilders.avg("avg_age").field("age");
        //Metric 2: salary sum
        SumAggregationBuilder sumAgg = AggregationBuilders.sum("total_salary").field("salary");
        //Attach both metrics to the grouping aggregation
        builder.addAggregation(termsAgg.subAggregation(avgAgg).subAggregation(sumAgg));
        SearchResponse response = builder.execute().actionGet();
        Map<String, Aggregation> aggMap = response.getAggregations().getAsMap();
        //Take the grouping aggregation by its alias
        StringTerms teams = (StringTerms) aggMap.get("team_name");
        for (Terms.Bucket teamBucket : teams.getBuckets()) {
            //Team name
            String team = (String) teamBucket.getKey();
            Map<String, Aggregation> subAggMap = teamBucket.getAggregations().getAsMap();
            //Average age, looked up by alias
            InternalAvg avgAge = (InternalAvg) subAggMap.get("avg_age");
            //Salary sum, looked up by alias
            InternalSum totalSalary = (InternalSum) subAggMap.get("total_salary");
            double avgAgeValue = avgAge.getValue();
            double totalSalaryValue = totalSalary.getValue();
            System.out.println(team + " " + avgAgeValue + " " + totalSalaryValue);
        }
    }


    /**
     * select team, sum(salary) as total_salary from player group by team order by total_salary desc;
     */
    @Test
    public void testAgg5() {
        SearchRequestBuilder builder = client.prepareSearch("player_info").setTypes("player");
        //Group by team, ordered by the total_salary sub-aggregation, descending
        //(the alias must match exactly, with no trailing space; false = descending)
        TermsAggregationBuilder termsAgg = AggregationBuilders.terms("team_name").field("team").order(Terms.Order.aggregation("total_salary", false));
        SumAggregationBuilder sumAgg = AggregationBuilders.sum("total_salary").field("salary");
        builder.addAggregation(termsAgg.subAggregation(sumAgg));
        SearchResponse response = builder.execute().actionGet();
        Map<String, Aggregation> aggMap = response.getAggregations().getAsMap();
        StringTerms teams = (StringTerms) aggMap.get("team_name");
        for (Terms.Bucket teamBucket : teams.getBuckets()) {
            String team = (String) teamBucket.getKey();
            Map<String, Aggregation> subAggMap = teamBucket.getAggregations().getAsMap();
            InternalSum totalSalary = (InternalSum) subAggMap.get("total_salary");
            double totalSalaryValue = totalSalary.getValue();
            System.out.println(team + " " + totalSalaryValue);
        }
    }

}

package cn.edu360.es;

import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.transport.client.PreBuiltTransportClient;

import java.net.InetAddress;

/**
 * Created by zx on 2017/8/15.
 */
public class HelloWorld {

    public static void main(String[] args) {

        try {

            //Set the cluster name
            Settings settings = Settings.builder()
                    .put("cluster.name", "my-es")
                    .build();
            //Create the client
            TransportClient client = new PreBuiltTransportClient(settings).addTransportAddresses(
                    //The Java transport client talks to ES on port 9300
                    new InetSocketTransportAddress(InetAddress.getByName("192.168.100.211"), 9300),
                    new InetSocketTransportAddress(InetAddress.getByName("192.168.100.212"), 9300),
                    new InetSocketTransportAddress(InetAddress.getByName("192.168.100.213"), 9300));
            //Fetch data (.actionGet() is synchronous: it blocks until the response arrives)
            GetResponse response = client.prepareGet("news", "fulltext", "1").execute().actionGet();
            //Print the result
            System.out.println(response);
            //Close the client
            client.close();

        } catch (Exception e) {
            e.printStackTrace();
        }

    }
}

package cn.edu360.es;

import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
import org.elasticsearch.client.AdminClient;
import org.elasticsearch.client.IndicesAdminClient;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.transport.client.PreBuiltTransportClient;
import org.junit.Before;
import org.junit.Test;

import java.io.IOException;
import java.net.InetAddress;
import java.util.HashMap;

/**
 * Created by zx on 2017/9/6.
 */
public class AdminAPI {

    private TransportClient client = null;

    //Runs before every test method
    @Before
    public void init() throws Exception {
        //Set the cluster name
        Settings settings = Settings.builder().put("cluster.name", "my-es").build();
        //Create the client
        client = new PreBuiltTransportClient(settings).addTransportAddresses(
                new InetSocketTransportAddress(InetAddress.getByName("192.168.100.211"), 9300),
                new InetSocketTransportAddress(InetAddress.getByName("192.168.100.212"), 9300),
                new InetSocketTransportAddress(InetAddress.getByName("192.168.100.213"), 9300));

    }

    //Create an index and configure some of its settings
    @Test
    public void createIndexWithSettings() {
        //Get the admin API
        AdminClient admin = client.admin();
        //Use the admin API to operate on indices
        IndicesAdminClient indices = admin.indices();
        //Prepare the index creation
        indices.prepareCreate("gamelog")
                //Configure index settings
                .setSettings(
                        //Settings builder: number of primary shards
                        Settings.builder()
                        .put("index.number_of_shards", 4)
                                //Number of replicas (not counting the primary: 2 replicas means 3 copies of the data)
                        .put("index.number_of_replicas", 2)
                )
                //Actually execute
                .get();
    }

    //Add mapping information to an index (like adding a schema to a table)
    @Test
    public void putMapping() {
        //Create the index
        client.admin().indices().prepareCreate("twitter")
                //Create a type and declare its field names and types
                .addMapping("tweet",
                        "{\n" +
                                "    \"tweet\": {\n" +
                                "      \"properties\": {\n" +
                                "        \"message\": {\n" +
                                "          \"type\": \"string\"\n" +
                                "        }\n" +
                                "      }\n" +
                                "    }\n" +
                                "  }")
                .get();
    }

    /**
     * The dynamic setting controls how unknown fields are handled. It accepts:
     * true: the default; new fields are added to the mapping dynamically
     * false: new fields are ignored
     * strict: an exception is thrown when an unknown field is encountered
     * (see the strict-mapping sketch after this method)
     * @throws IOException
     */
    @Test
    public void testSettingsMappings() throws IOException {
        //1: settings
        HashMap<String, Object> settings_map = new HashMap<String, Object>(2);
        settings_map.put("number_of_shards", 3);
        settings_map.put("number_of_replicas", 2);

        //2: mappings (the schema)
        XContentBuilder builder = XContentFactory.jsonBuilder()
                .startObject()
                    .field("dynamic", "true")
                    //Fields of the type
                    .startObject("properties")
                        //num field
                        .startObject("num")
                            //integer type
                            .field("type", "integer")
                            //indexed but not analyzed
                            .field("index", "not_analyzed")
                            //stored in the document
                            .field("store", "yes")
                        .endObject()
                        //name field
                        .startObject("name")
                            //string type
                            .field("type", "string")
                            //stored in the document
                            .field("store", "yes")
                            //analyzed and indexed
                            .field("index", "analyzed")
                            //segmented with ik_smart
                            .field("analyzer", "ik_smart")
                        .endObject()
                    .endObject()
                .endObject();

        CreateIndexRequestBuilder prepareCreate = client.admin().indices().prepareCreate("user_info");
        //Create the index (user_info) and attach the type (user) with its mapping
        prepareCreate.setSettings(settings_map).addMapping("user", builder).get();
    }
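
    /**
     * Hypothetical companion sketch (not from the original article): the same
     * kind of mapping with dynamic set to "strict", so indexing a document
     * that contains an unknown field fails instead of extending the mapping.
     */
    @Test
    public void testStrictMapping() throws IOException {
        XContentBuilder strict = XContentFactory.jsonBuilder()
                .startObject()
                    //Unknown fields now raise an exception instead of being added
                    .field("dynamic", "strict")
                    .startObject("properties")
                        .startObject("num")
                            .field("type", "integer")
                        .endObject()
                    .endObject()
                .endObject();
        //user_info_strict is a throwaway index name for this demo
        client.admin().indices().prepareCreate("user_info_strict")
                .addMapping("user", strict).get();
    }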

    /**
     * XContentBuilder mapping = jsonBuilder()
     .startObject()
     .startObject("productIndex")
     .startObject("properties")
     .startObject("title").field("type", "string").field("store", "yes").endObject()
     .startObject("description").field("type", "string").field("index", "not_analyzed").endObject()
     .startObject("price").field("type", "double").endObject()
     .startObject("onSale").field("type", "boolean").endObject()
     .startObject("type").field("type", "integer").endObject()
     .startObject("createDate").field("type", "date").endObject()
     .endObject()
     .endObject()
     .endObject();
     PutMappingRequest mappingRequest = Requests.putMappingRequest("productIndex").type("productIndex").source(mapping);
     client.admin().indices().putMapping(mappingRequest).actionGet();
     */


    /**
     * The index property of a field:
     * no: not indexed at all
     * not_analyzed: indexed but not analyzed
     * analyzed: analyzed and indexed
     * expected [no], [not_analyzed] or [analyzed]
     * @throws IOException
     */

    @Test
    public void testSettingsPlayerMappings() throws IOException {
        //1:settings
        HashMap<String, Object> settings_map = new HashMap<String, Object>(2);
        settings_map.put("number_of_shards", 3);
        settings_map.put("number_of_replicas", 1);

        //2:mappings
        XContentBuilder builder = XContentFactory.jsonBuilder()
                .startObject()//
                    .field("dynamic", "true")
                    .startObject("properties")
                        .startObject("id")
                            .field("type", "integer")
                            .field("store", "yes")
                        .endObject()
                        .startObject("name")
                            .field("type", "string")
                            .field("index", "not_analyzed")
                        .endObject()
                        .startObject("age")
                            .field("type", "integer")
                        .endObject()
                        .startObject("salary")
                            .field("type", "integer")
                        .endObject()
                        .startObject("team")
                            .field("type", "string")
                            .field("index", "not_analyzed")
                        .endObject()
                        .startObject("position")
                            .field("type", "string")
                            .field("index", "not_analyzed")
                        .endObject()
                        .startObject("description")
                            .field("type", "string")
                            .field("store", "no")
                            .field("index", "analyzed")
                            .field("analyzer", "ik_smart")
                        .endObject()
                        .startObject("addr")
                            .field("type", "string")
                            .field("store", "yes")
                            .field("index", "analyzed")
                            .field("analyzer", "ik_smart")
                        .endObject()
                    .endObject()
                .endObject();

        CreateIndexRequestBuilder prepareCreate = client.admin().indices().prepareCreate("player_info");
        prepareCreate.setSettings(settings_map).addMapping("player", builder).get();

    }
}
