Linux 下 Elastic Search 8.2.3 权限认证模式环境搭建与IK分词器安装
Elastic Search 版本 :
- elasticsearch-8.2.3-linux-x86_64.tar.gz
Linux版本:
-
系统版本:CentOS Linux release 7.5.1804 (Core)
-
内核版本:Linux version 3.10.0-862.el7.x86_64 (Red Hat 4.8.5-28)
参考博客:
- ElasticSearch8集群的安装部署_不要再说了哇的博客-CSDN博客
- Linux关闭防火墙命令_魔道不误砍柴功的博客-CSDN博客_linux关闭防火墙命令
- memory locking requested for elasticsearch process but memory is not locked_璐程风的博客-CSDN博客
下载ES
下载地址:
- Download Elasticsearch | Elastic
https://www.elastic.co/cn/downloads/elasticsearch
上传压缩包并解压
##### 此处我先上传到了 /home/software/ 目录
[root@test-dc-server ~]# cd /home/software/
[root@test-dc-server software]# tar -xzf elasticsearch-8.2.3-linux-x86_64.tar.gz
[root@test-dc-server software]# mv elasticsearch-8.2.3 /usr/local/
[root@test-dc-server software]# cd /usr/local/elasticsearch-8.2.3/
[root@test-dc-server elasticsearch-8.2.3]# ll
total 876
drwxr-xr-x. 2 elastic elastic 4096 Jun 8 18:27 bin
drwxr-xr-x. 4 elastic elastic 223 Jul 19 22:56 config
drwxr-xr-x. 2 elastic elastic 6 Jul 17 23:18 data
drwxr-xr-x. 8 elastic elastic 96 Jun 8 18:27 jdk
drwxr-xr-x. 4 elastic elastic 4096 Jun 8 18:27 lib
-rw-r--r--. 1 elastic elastic 3860 Jun 8 18:21 LICENSE.txt
drwxr-xr-x. 2 elastic elastic 6 Jun 8 18:25 logs
drwxr-xr-x. 65 elastic elastic 4096 Jun 8 18:28 modules
-rw-r--r--. 1 elastic elastic 873453 Jun 8 18:25 NOTICE.txt
drwxr-xr-x. 3 elastic elastic 45 Jul 18 21:05 plugins
-rw-r--r--. 1 elastic elastic 2710 Jun 8 18:21 README.asciidoc
# 创建一个数据目录,生产环境的data目录最好不要放在es安装目录下
[root@test-dc-server elasticsearch-8.2.3]# mkdir data
# 查看配置目录
/usr/local/elasticsearch-8.2.3/config
[root@test-dc-server config]# ll
total 40
-rw-rw----. 1 elastic elastic 1042 Jun 8 18:21 elasticsearch-plugins.example.yml
-rw-rw----. 1 elastic elastic 1475 Jul 19 23:01 elasticsearch.yml
-rw-rw----. 1 elastic elastic 2992 Jun 8 18:21 jvm.options
drwxr-x---. 2 elastic elastic 6 Jun 8 18:25 jvm.options.d
-rw-rw----. 1 elastic elastic 16644 Jun 8 18:24 log4j2.properties
-rw-rw----. 1 elastic elastic 473 Jun 8 18:23 role_mapping.yml
-rw-rw----. 1 elastic elastic 197 Jun 8 18:23 roles.yml
-rw-rw----. 1 elastic elastic 0 Jun 8 18:23 users
-rw-rw----. 1 elastic elastic 0 Jun 8 18:23 users_roles
配置文件修改
elasticsearch.yml 就是 es 的配置文件了,下面来修改配置文件
# ---------------------------------- Cluster -----------------------------------
# 集群名称,单节点也是一个集群
cluster.name: test-cluster
# ------------------------------------ Node ------------------------------------
# 节点名称,当前节点名称
node.name: node-1
# node.roles: [master,data] # 注意生产集群建议至少有三个具有选举master资格的节点(奇数个),由于我这里只是单节点集群,所以注释这个配置
# ----------------------------------- Paths ------------------------------------
# 数据目录
path.data: /usr/local/elasticsearch-8.2.3/data
# 日志文件
path.logs: /usr/local/elasticsearch-8.2.3/logs
# ---------------------------------- Network -----------------------------------
# 绑定的 IP,和 redis 一样,0 允许所有 IP 链接
network.host: 0.0.0.0
# 设置 es 服务端口,默认就是 9200
http.port: 9200
# --------------------------------- Discovery ----------------------------------
# 节点发现
cluster.initial_master_nodes: ["node-1"]
# 允许http访问,这样eshead就能用了
http.cors.allow-origin: "*"
http.cors.enabled: true
http.cors.allow-headers: Authorization,X-Requested-With,Content-Length,Content-Type
http.max_content_length: 500mb
jvm.options 更改 jvm 所需内存
# 根据虚拟机内存大小来分配(例如内存充足时可改为 -Xms2g / -Xmx2g)
# 注意:jvm.options 中每行只能是一个完整的 JVM 参数,不能在参数后面追加行内注释
-Xms128m
-Xmx128m
更改优化服务器配置
# vim /etc/security/limits.conf
添加以下内容:
* soft nofile 65536
* hard nofile 131072
* soft nproc 2048
* hard nproc 4096
在以下配置文件中添加参数:
# vim /etc/sysctl.conf
vm.max_map_count=655360
# sysctl -p
调整文件打开数限制:
# ulimit -HSn 65535
es 不允许 root 用户启动,需要新增一个用户来启动 es
# 查看当前用户
[root@test-dc-server elasticsearch-8.2.3]# whoami
root
# 增加一个用户
[root@test-dc-server elasticsearch-8.2.3]# useradd elastic
# 将 es 的目录包括所有文件都变更为该用户
[root@test-dc-server elasticsearch-8.2.3]# chown -R elastic:elastic /usr/local/elasticsearch-8.2.3
# 可以看到拥有者和所属组都已经变更为 elastic 了
[root@test-dc-server elasticsearch-8.2.3]# ll
total 876
drwxr-xr-x. 2 elastic elastic 4096 Jun 8 18:27 bin
drwxr-xr-x. 4 elastic elastic 223 Jul 19 22:56 config
...
配置集群间安全访问证书密钥:
仅在集群的第一台服务器 node-1 执行
# 签发ca证书
[root@test-dc-server /]# cd /usr/local/elasticsearch-8.2.3/bin
[root@test-dc-server bin]# ./elasticsearch-certutil ca
`【ENTER】` 什么也不用输入直接回车
`【ENTER】` 什么也不用输入直接回车
# 用ca证书签发节点证书
[root@test-dc-server bin]# ./elasticsearch-certutil cert --ca elastic-stack-ca.p12
`【ENTER】` 什么也不用输入直接回车
`【ENTER】` 什么也不用输入直接回车
`【ENTER】` 什么也不用输入直接回车
# 将证书放到certs目录(手动创建)
[root@test-dc-server bin]# cd ..
[root@test-dc-server elasticsearch-8.2.3]# mkdir config/certs
[root@test-dc-server elasticsearch-8.2.3]# mv elastic-certificates.p12 elastic-stack-ca.p12 config/certs
配置集群间安全访问证书密钥的操作流程演示如下:
[root@test-dc-server bin]# ./elasticsearch-certutil ca
warning: ignoring JAVA_HOME=/opt/software/jdk1.8.0_11; using bundled JDK
This tool assists you in the generation of X.509 certificates and certificate
signing requests for use with SSL/TLS in the Elastic stack.
The 'ca' mode generates a new 'certificate authority'
This will create a new X.509 certificate and private key that can be used
to sign certificate when running in 'cert' mode.
Use the 'ca-dn' option if you wish to configure the 'distinguished name'
of the certificate authority
By default the 'ca' mode produces a single PKCS#12 output file which holds:
* The CA certificate
* The CA's private key
If you elect to generate PEM format certificates (the -pem option), then the output will
be a zip file containing individual files for the CA certificate and private key
Please enter the desired output file [elastic-stack-ca.p12]: ####直接回车
Enter password for elastic-stack-ca.p12: ####直接回车
[root@test-dc-server bin]#
[root@test-dc-server bin]#
[root@test-dc-server bin]#
[root@test-dc-server bin]# ./elasticsearch-certutil cert --ca elastic-stack-ca.p12
warning: ignoring JAVA_HOME=/opt/software/jdk1.8.0_11; using bundled JDK
This tool assists you in the generation of X.509 certificates and certificate
signing requests for use with SSL/TLS in the Elastic stack.
The 'cert' mode generates X.509 certificate and private keys.
* By default, this generates a single certificate and key for use
on a single instance.
* The '-multiple' option will prompt you to enter details for multiple
instances and will generate a certificate and key for each one
* The '-in' option allows for the certificate generation to be automated by describing
the details of each instance in a YAML file
* An instance is any piece of the Elastic Stack that requires an SSL certificate.
Depending on your configuration, Elasticsearch, Logstash, Kibana, and Beats
may all require a certificate and private key.
* The minimum required value for each instance is a name. This can simply be the
hostname, which will be used as the Common Name of the certificate. A full
distinguished name may also be used.
* A filename value may be required for each instance. This is necessary when the
name would result in an invalid file or directory name. The name provided here
is used as the directory name (within the zip) and the prefix for the key and
certificate files. The filename is required if you are prompted and the name
is not displayed in the prompt.
* IP addresses and DNS names are optional. Multiple values can be specified as a
comma separated string. If no IP addresses or DNS names are provided, you may
disable hostname verification in your SSL configuration.
* All certificates generated by this tool will be signed by a certificate authority (CA)
unless the --self-signed command line option is specified.
The tool can automatically generate a new CA for you, or you can provide your own with the --ca or --ca-cert command line options.
By default the 'cert' mode produces a single PKCS#12 output file which holds:
* The instance certificate
* The private key for the instance certificate
* The CA certificate
If you specify any of the following options:
* -pem (PEM formatted output)
* -multiple (generate multiple certificates)
* -in (generate certificates from an input file)
then the output will be a zip file containing individual certificate/key files
Enter password for CA (elastic-stack-ca.p12) : ####直接回车
Please enter the desired output file [elastic-certificates.p12]: ####直接回车
Enter password for elastic-certificates.p12 : ####直接回车
########################### 证书秘钥生成位置 ##########################
Certificates written to /usr/local/elasticsearch-8.2.3/elastic-certificates.p12
This file should be properly secured as it contains the private key for your instance.
This file is a self contained file and can be copied and used 'as is'
For each Elastic product that you wish to configure, you should copy
this '.p12' file to the relevant configuration directory
and then follow the SSL configuration instructions in the product guide.
For client applications, you may only need to copy the CA certificate and configure the client to trust this certificate.
[root@test-dc-server bin]#
[root@test-dc-server bin]#
[root@test-dc-server bin]#
[root@test-dc-server bin]#
[root@test-dc-server bin]# cd ..
# 将证书放到config/certs目录(手动创建)
[root@test-dc-server elasticsearch-8.2.3]# mkdir config/certs
[root@test-dc-server elasticsearch-8.2.3]# mv elastic-certificates.p12 elastic-stack-ca.p12 config/certs
配置文件elasticsearch.yml中添加证书密钥
[root@test-dc-server elasticsearch-8.2.3]# vi config/elasticsearch.yml
# 添加以下参数:
xpack.security.enabled: true
xpack.security.transport.ssl:
  enabled: true
  verification_mode: none
  keystore.path: /usr/local/elasticsearch-8.2.3/config/certs/elastic-certificates.p12
  truststore.path: /usr/local/elasticsearch-8.2.3/config/certs/elastic-certificates.p12
ingest.geoip.downloader.enabled: false
创建剩余集群节点
如果是虚拟机的话,可以直接克隆刚刚做好的机器(或者不克隆,把上面步骤重新执行、证书文件从节点1拷贝至相同目录)
有关ES的配置只需要更改 elasticsearch.yml 文件,其他配置保持一致
修改集群节点名称:node-1、node-2、node-3
修改节点角色(按需求更改):
node.roles: [master,data]
node.roles: [master,data]
node.roles: [master,data]
安装IK分词插件(全部节点执行)
下载对应版本的IK安装包:https://github.com/medcl/elasticsearch-analysis-ik/releases,
IK安装包版本需要与ElasticSearch版本一致
进入ElasticSearch的安装目录,将下载好的对应版本的ik分词器压缩包进行解压,将解压后的文件夹拷贝到plugins插件目录,并修改文件夹属组为elastic。
我这里是直接将压缩包传到了plugins文件夹,解压之后再将压缩包移出来或者删除,也可以在本地解压之后将文件夹上传到plugins目录
[root@test-dc-server plugins]# ll
total 3192
-rw-r--r--. 1 root root 3266177 Jul 20 02:28 elasticsearch-analysis-ik-8.2.3.zip
[root@test-dc-server plugins]# unzip elasticsearch-analysis-ik-8.2.3.zip
[root@test-dc-server plugins]# ll
total 3192
drwxrwxr-x. 6 root root 150 Jan 18 22:59 elasticsearch-analysis-ik-8.2.3
-rw-r--r--. 1 root root 3266177 Jul 20 02:28 elasticsearch-analysis-ik-8.2.3.zip
[root@test-dc-server plugins]# chown -R elastic:elastic elasticsearch-analysis-ik-8.2.3
[root@test-dc-server plugins]# ll
total 3192
drwxrwxr-x. 6 elastic elastic 150 Jan 18 22:59 elasticsearch-analysis-ik-8.2.3
-rw-r--r--. 1 root root 3266177 Jul 20 02:28 elasticsearch-analysis-ik-8.2.3.zip
####### plugins目录下不能放文件,只能放文件夹,所以需要将压缩包移出去
[root@test-dc-server plugins]# mv elasticsearch-analysis-ik-8.2.3.zip ..
[root@test-dc-server plugins]# ll
total 0
drwxrwxr-x. 6 elastic elastic 150 Jan 18 22:59 elasticsearch-analysis-ik-8.2.3
[root@test-dc-server plugins]#
启动 es
# 进入到 bin 下
cd /usr/local/elasticsearch-8.2.3/bin
# 启动 es,发现报错了,这里目前还是用 root 用户启动的,所以直接报错,不能使用 root 运行 es
[root@test-dc-server bin]# ./elasticsearch -d
OpenJDK 64-Bit Server VM warning: Option UseConcMarkSweepGC was deprecated in version 9.0 and will likely be removed in a future release.
[2021-08-08T19:30:25,383][WARN ][o.e.b.ElasticsearchUncaughtExceptionHandler] [node-1] uncaught exception in thread [main]
org.elasticsearch.bootstrap.StartupException: java.lang.RuntimeException: can not run elasticsearch as root
# 切换到 elastic 用户
[root@test-dc-server bin]# su elastic
[elastic@test-dc-server bin]$ ./elasticsearch -d
...
###### 以下是启动失败时可能出现的错误,解决办法在最后 #######
ERROR: [2] bootstrap checks failed
# 错误 1:es 能打开的文件句柄太少了 需要提升至 65535
[1]: max file descriptors [4096] for elasticsearch process is too low, increase to at least [65535]
# 错误 2:这个参数设置太少了,也需要提升
[2]: max virtual memory areas vm.max_map_count [65530] is too low, increase to at least [262144]
# 还有可能会出现 max number of threads [3756]...,这个是能打开的线程数量太少了
[2021-08-08T19:34:42,127][INFO ][o.e.n.Node ] [node-1] stopping ...
[2021-08-08T19:34:42,154][INFO ][o.e.n.Node ] [node-1] stopped
[2021-08-08T19:34:42,155][INFO ][o.e.n.Node ] [node-1] closing ...
[2021-08-08T19:34:42,196][INFO ][o.e.n.Node ] [node-1] closed
启动成功后,更改各用户的密码(如果集群搭建成功只需要在一个节点上更改即可),密码都设置为了 elastic
[elastic@test-dc-server bin]$ ./elasticsearch-setup-passwords interactive
warning: ignoring JAVA_HOME=/opt/software/jdk1.8.0_11; using bundled JDK
******************************************************************************
Note: The 'elasticsearch-setup-passwords' tool has been deprecated. This command will be removed in a future release.
******************************************************************************
Initiating the setup of passwords for reserved users elastic,apm_system,kibana,kibana_system,logstash_system,beats_system,remote_monitoring_user.
You will be prompted to enter passwords as the process progresses.
Please confirm that you would like to continue [y/N]y
### 以下密码我都设置的是elastic
Enter password for [elastic]:
Reenter password for [elastic]:
Enter password for [apm_system]:
Reenter password for [apm_system]:
Enter password for [kibana_system]:
Reenter password for [kibana_system]:
Enter password for [logstash_system]:
Reenter password for [logstash_system]:
Enter password for [beats_system]:
Reenter password for [beats_system]:
Enter password for [remote_monitoring_user]:
Reenter password for [remote_monitoring_user]:
Changed password for user [apm_system]
Changed password for user [kibana_system]
Changed password for user [kibana]
Changed password for user [logstash_system]
Changed password for user [beats_system]
Changed password for user [remote_monitoring_user]
Changed password for user [elastic]
可能遇到的错误与解决办法
错误“memory locking requested for elasticsearch process but memory is not locked”的解决
-
解决方法一(关闭bootstrap.memory_lock,会影响性能):
# vi /usr/local/elasticsearch-8.2.3/config/elasticsearch.yml
bootstrap.memory_lock: false  # 设置成 false 就可以正常运行了
-
解决方法二(开启bootstrap.memory_lock):
1. 修改文件 /usr/local/elasticsearch-8.2.3/config/elasticsearch.yml
如果开启还要修改其它系统配置文件
# vi /usr/local/elasticsearch-8.2.3/config/elasticsearch.yml
bootstrap.memory_lock: true
2. 修改文件/etc/security/limits.conf(错误1只需执行这一步)
# vi /etc/security/limits.conf
最后添加以下内容
* soft nofile 65536
* hard nofile 65536
* soft nproc 32000
* hard nproc 32000
* hard memlock unlimited
* soft memlock unlimited
3. 修改文件 /etc/systemd/system.conf
# vi /etc/systemd/system.conf
分别修改以下内容
DefaultLimitNOFILE=65536
DefaultLimitNPROC=32000
DefaultLimitMEMLOCK=infinity
错误1:max file descriptors [4096] for elasticsearch process is too low, increase to at least [65535] 的解决
-
修改文件/etc/security/limits.conf
# vi /etc/security/limits.conf
-
最后添加以下内容
* soft nofile 65536
* hard nofile 65536
* soft nproc 32000
* hard nproc 32000
* hard memlock unlimited
* soft memlock unlimited
错误2:max virtual memory areas vm.max_map_count [65530] is too low, increase to at least [262144] 的解决
-
先使用命令su -l root将用户切换为root,
执行下面命令:
# sysctl -w vm.max_map_count=262144
执行下面命令查看:
# sysctl -a|grep vm.max_map_count
然后在 /etc/sysctl.conf文件最后添加一行
# vi /etc/sysctl.conf
vm.max_map_count=262144
-
输入命令:
# sysctl -p
立即生效
访问是否生效
启动成功会有以下几个我们需要了解的日志信息
[2021-08-08T20:03:45,441][INFO ][o.e.t.TransportService ] [node-1] publish_address {10.0.2.106:9300}, bound_addresses {[::]:9300}
[2021-08-08T20:03:45,961][INFO ][o.e.h.AbstractHttpServerTransport] [node-1] publish_address {10.0.2.106:9200}, bound_addresses {[::]:9200}
- 9200:是对外服务的
- 9300:是 es 内部使用的,比如集群内部通信
访问 http://127.0.0.1:9200(IP请换成自己机器的),账号:elastic,密码:elastic
{
"name" : "node-1", # 节点名称
"cluster_name" : "test-cluster", # 集群名称
"cluster_uuid" : "2LFyILVGQbeoGd69R2nQhw",
"version" : {
"number" : "8.2.3", # 版本号
"build_flavor" : "default",
"build_type" : "tar",
"build_hash" : "9905bfb62a3f0b044948376b4f607f70a8a151b4",
"build_date" : "2022-06-08T22:21:36.455508792Z",
"build_snapshot" : false,
"lucene_version" : "9.1.0", # lucene 的版本
"minimum_wire_compatibility_version" : "7.17.0", # 节点间通信(wire)协议最低兼容的 es 版本为 7.17.0
"minimum_index_compatibility_version" : "7.0.0" # 兼容 index 读的 es 版本为 7.0.0
},
"tagline" : "You Know, for Search"
}
如果启动成功但是发现访问9200端口没响应,则可能是防火墙没关,此时需要关闭防火墙,或者开放9200端口
关闭防火墙命令
# 1:查看防火状态
# systemctl status firewalld
# service iptables status
# 2:暂时关闭防火墙
# systemctl stop firewalld
# service iptables stop
# 3:永久关闭防火墙
# systemctl disable firewalld
# chkconfig iptables off
# 4:重新启用防火墙(systemctl enable 是设置开机自启,并非重启)
# systemctl enable firewalld
# service iptables restart
# 5:永久关闭后重新设置开机自启
# chkconfig iptables on
开放端口命令
1.方式一:
# 1.开启防火墙
# systemctl start firewalld
# 2.开放指定端口(比如9200端口)
# firewall-cmd --zone=public --add-port=9200/tcp --permanent
命令含义:
--zone #作用域
--add-port=1935/tcp #添加端口,格式为:端口/通讯协议
--permanent #永久生效,没有此参数重启后失效
# 3.重启防火墙
# firewall-cmd --reload
# 4.查看端口号
# netstat -ntlp #查看当前所有tcp端口
# netstat -ntulp |grep 9200 #查看所有9200端口使用情况
2.方式二:
# 开放端口 9200
# /sbin/iptables -I INPUT -p tcp --dport 9200 -j ACCEPT
3.方式三:
# 在 /etc/sysconfig/iptables 中添加以下规则,然后重启 iptables 服务
# -A INPUT -m state --state NEW -m tcp -p tcp --dport 9200 -j ACCEPT
# service iptables restart