# Download and install Elasticsearch 7.17.19, then create the data/log
# directories used by elasticsearch.yml and hand them to the service user.
cd /opt || exit 1
wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.17.19-x86_64.rpm
# Install the RPM we just downloaded instead of fetching it a second time
# from the URL (the original downloaded the same ~300MB artifact twice).
yum -y install /opt/elasticsearch-7.17.19-x86_64.rpm
mkdir -p /data/es/data /data/es/logs
# "elasticsearch." = owner elasticsearch, group = that user's login group
chown -R elasticsearch. /data/es/
chown -R elasticsearch. /etc/elasticsearch/
# Install the systemd unit for Elasticsearch (overwrites the one shipped by
# the RPM).  Type=notify pairs with ES_SD_NOTIFY=true and the
# systemd-entrypoint wrapper; LimitMEMLOCK=infinity is required because
# elasticsearch.yml sets bootstrap.memory_lock: true.
# NOTE(review): this looks like the stock 7.x unit — confirm it matches the
# unit shipped by the 7.17.19 RPM before replacing it.
cat >/usr/lib/systemd/system/elasticsearch.service<<"EOF"
[Unit]
Description=Elasticsearch
Documentation=https://www.elastic.co
Wants=network-online.target
After=network-online.target
[Service]
Type=notify
RuntimeDirectory=elasticsearch
PrivateTmp=true
Environment=ES_HOME=/usr/share/elasticsearch
Environment=ES_PATH_CONF=/etc/elasticsearch
Environment=PID_DIR=/var/run/elasticsearch
Environment=ES_SD_NOTIFY=true
EnvironmentFile=-/etc/sysconfig/elasticsearch
WorkingDirectory=/usr/share/elasticsearch
User=elasticsearch
Group=elasticsearch
ExecStart=/usr/share/elasticsearch/bin/systemd-entrypoint -p ${PID_DIR}/elasticsearch.pid --quiet
StandardOutput=journal
StandardError=inherit
LimitNOFILE=65535
LimitMEMLOCK=infinity
LimitNPROC=4096
LimitAS=infinity
LimitFSIZE=infinity
TimeoutStopSec=0
KillSignal=SIGTERM
KillMode=process
SendSIGKILL=no
SuccessExitStatus=143
TimeoutStartSec=75
[Install]
WantedBy=multi-user.target
EOF
# Write the Elasticsearch node configuration.  This host is dpzr-es20;
# adjust node.name / node.attr.rack_id / network.publish_host per node.
cat > /etc/elasticsearch/elasticsearch.yml<<"EOF"
cluster.name: dpzr-es
node.name: dpzr-es20
node.master: true
# Fixed: was "node.data : ture" — the typo is not a valid boolean and the
# stray space before ":" is non-standard YAML.
node.data: true
node.ingest: false
node.attr.rack_id: rack_dpzr-es20
cluster.routing.allocation.awareness.attributes: rack_id
cluster.routing.allocation.same_shard.host: true
path.data: /data/es/data
path.logs: /data/es/logs
bootstrap.memory_lock: true
indices.memory.index_buffer_size: 20%
indices.recovery.max_bytes_per_sec: 1g
network.host: 0.0.0.0
# Fixed: 10.111.101.333 is not a valid IPv4 address (octet > 255).  This
# node is dpzr-es20 and the seed list below contains 10.111.101.20, so that
# is presumably the intended address — TODO confirm.
network.publish_host: 10.111.101.20
http.port: 9200
transport.tcp.port: 9300
discovery.seed_hosts: ["10.111.101.18:9300","10.111.101.19:9300","10.111.101.20:9300"]
cluster.initial_master_nodes: ["10.111.101.18","10.111.101.19","10.111.101.20"]
cluster.fault_detection.leader_check.interval: 20s
discovery.cluster_formation_warning_timeout: 30s
cluster.join.timeout: 120s
cluster.publish.timeout: 90s
# Removed "client.transport.ping_timeout: 60s": that is a TransportClient
# setting, not a node setting — ES 7.x refuses to start on unknown settings.
action.destructive_requires_name: true
xpack.ml.enabled: false
xpack.security.enabled: true
xpack.security.transport.ssl.enabled: true
xpack.security.transport.ssl.keystore.type: PKCS12
xpack.security.transport.ssl.verification_mode: certificate
xpack.security.transport.ssl.keystore.path: elastic-certificates.p12
xpack.security.transport.ssl.truststore.path: elastic-certificates.p12
xpack.security.transport.ssl.truststore.type: PKCS12
xpack.security.audit.enabled: true
# Fixed: the original flat "thread_pool:" / "write:" / "size: 7" lines had
# no indentation, so they did not nest — use the dotted form instead.
thread_pool.write.size: 7
indices.fielddata.cache.size: 20%
indices.breaker.fielddata.limit: 30%
EOF
#标记: 如果是生产环境, 可以将上面 thread_pool 的 write size 调整为 17, 即:
#   size: 17
# Overwrite jvm.options.  The "N:" / "N-:" / "N-M:" prefixes apply a line
# only on those JVM major versions: CMS flags on Java 8-13, G1 on 14+,
# legacy GC-log flags on 8, unified -Xlog GC logging on 9+.
# NOTE(review): no -Xms/-Xmx here, so heap sizing is left to defaults —
# see the production-sizing note below.
cat >/etc/elasticsearch/jvm.options<<"EOF"
8-13:-XX:+UseConcMarkSweepGC
8-13:-XX:CMSInitiatingOccupancyFraction=75
8-13:-XX:+UseCMSInitiatingOccupancyOnly
14-:-XX:+UseG1GC
-Djava.io.tmpdir=${ES_TMPDIR}
-XX:+HeapDumpOnOutOfMemoryError
-XX:HeapDumpPath=/var/lib/elasticsearch
-XX:ErrorFile=/var/log/elasticsearch/hs_err_pid%p.log
8:-XX:+PrintGCDetails
8:-XX:+PrintGCDateStamps
8:-XX:+PrintTenuringDistribution
8:-XX:+PrintGCApplicationStoppedTime
8:-Xloggc:/var/log/elasticsearch/gc.log
8:-XX:+UseGCLogFileRotation
8:-XX:NumberOfGCLogFiles=32
8:-XX:GCLogFileSize=64m
9-:-Xlog:gc*,gc+age=trace,safepoint:file=/var/log/elasticsearch/gc.log:utctime,pid,tags:filecount=32,filesize=64m
EOF
# 注释: 如果是生产环境, 可以根据服务器内存大小加上这两个堆参数
# (原稿第二行误写为 -Xms30g, 最大堆应为 -Xmx30g):
#   -Xms30g
#   -Xmx30g
# Generate a CA and a node certificate for transport TLS (interactive
# prompts; produces elastic-stack-ca.p12 and elastic-certificates.p12,
# matching the keystore/truststore paths in elasticsearch.yml).
# NOTE(review): certutil runs before the "cd /usr/share/elasticsearch"
# below — confirm its output files actually land where the mv expects.
/usr/share/elasticsearch/bin/elasticsearch-certutil ca
/usr/share/elasticsearch/bin/elasticsearch-certutil cert --ca elastic-stack-ca.p12
cd /usr/share/elasticsearch
mv elastic-* /etc/elasticsearch/
cd /etc/elasticsearch/
# Make the certs (and everything else in the config dir) readable by the
# elasticsearch user.
chown elasticsearch. *
systemctl daemon-reload
systemctl stop elasticsearch.service
systemctl start elasticsearch.service
systemctl status elasticsearch.service
# After all three servers succeed, run this final step on the FIRST server only.
/usr/share/elasticsearch/bin/elasticsearch-setup-passwords auto
# Finally, check ES cluster health.
# NOTE(review): 172.31.28.209 does not match the 10.111.101.x addresses
# used elsewhere in this runbook — confirm the intended host.
curl -u elastic http://172.31.28.209:9200/_cat/health
#rpm install kibana
# NOTE: the Kibana version must match the Elasticsearch version.
# NOTE(review): 7.17.21 here vs Elasticsearch 7.17.19 above — confirm
# the intended patch versions actually match.
cd /opt
# Fixed: the URL scheme was missing its colon ("https//...").
wget https://artifacts.elastic.co/downloads/kibana/kibana-7.17.21-x86_64.rpm
yum -y localinstall kibana-7.17.21-x86_64.rpm
# Expected non-comment content of /etc/kibana/kibana.yml.  The original
# lines were missing the space YAML requires after ":":
#   server.port: 56010
#   server.host: "0.0.0.0"
#   elasticsearch.hosts: "http://192.168.28.206:9200"
cat /etc/kibana/kibana.yml | grep -v '^#'| grep -v '^$'
# rpm install logstash
# NOTE(review): 7.4.2 does not match the 7.17.x Elasticsearch/Kibana
# versions above — confirm the intended Logstash version.
wget https://artifacts.elastic.co/downloads/logstash/logstash-7.4.2.rpm
yum -y localinstall logstash-7.4.2.rpm
# Logstash layout: configuration in /etc/logstash, install root in
# /usr/share/logstash.
#安装 redis 代理 predixy
# predixy 是高性能的、适用于 redis 集群和哨兵的代理
# predixy github 地址: https://github.com/joyield/predixy
# 只需要在其中一台服务器上执行即可:
# Fixed: the release URL's GitHub user was misspelled ("joyieldlnc");
# the project lives under github.com/joyield.
wget https://github.com/joyield/predixy/releases/download/1.0.5/predixy-1.0.5-bin-amd64-linux.tar.gz
tar zxvf predixy-1.0.5-bin-amd64-linux.tar.gz -C /usr/local/
# Fixed: the original created the relative symlink "predixy" in whatever
# the current directory happened to be; it must live at /usr/local/predixy
# for the paths used below to work.
ln -snf /usr/local/predixy-1.0.5 /usr/local/predixy
cd /usr/local/predixy/conf
#修改里面的配置文件: predixy.conf 和 cluster.conf
### predixy 支持多种架构, 由于使用的是 redis cluster, 只需要配置 redis cluster 对应的配置文件 cluster.conf 即可
#配置 cluster.conf
cd /usr/local/predixy/conf
# Write cluster.conf.  The original said "vim cluster.conf" followed by the
# pasted file content (which would have executed as shell commands); a
# heredoc keeps the runbook reproducible.  Typos fixed:
# "MasterReadPriorrity" -> MasterReadPriority, "Refreshlnterval" (lowercase
# L) -> RefreshInterval — predixy would reject the misspelled directives.
cat > cluster.conf <<"EOF"
ClusterServerPool {
    Password guess_guess_101
    MasterReadPriority 60
    StaticSlaveReadPriority 50
    DynamicSlaveReadPriority 50
    RefreshInterval 1
    ServerTimeout 1
    ServerFailureLimit 10
    ServerRetryTimeout 1
    KeepAlive 120
    Servers {
        + 172.30.0.11:8001
        + 172.30.0.12:8001
        + 172.30.0.13:8001
        + 172.30.0.11:8002
        + 172.30.0.12:8002
        + 172.30.0.13:8002
    }
}
EOF
#配置 predixy.conf
# Expected non-comment content of predixy.conf, shown as a commented
# reference so these lines do not execute as shell commands (as they would
# have in the original).  NOTE(review): the original listed both
# "MaxMemory 100M" and a typo line "MaxMemoryy 1G"; keep a single MaxMemory
# directive and confirm the intended value.
cat predixy.conf | grep -v '^#' | grep -v '^$'
#   Name Predixy
#   Bind 0.0.0.0:7003
#   WorkerThreads 1
#   MaxMemory 1G
#   ClientTimeout 300
#   Log ./predixy.log
#   LogVerbSample 0
#   LogDebugSample 0
#   LogInfoSample 10000
#   LogNoticeSample 1
#   LogWarnSample 1
#   LogErrorSample 1
#   Include auth.conf
#   Include cluster.conf
##注意 Include 部分 只需要包括 cluster.conf 和 auth.conf. 其他的则注释掉, 否则可能会影响到 predixy 的使用
## 如果 cluster.conf 里面配置了密码 Password guess_guess_101 那 auth.conf 也可以注释掉, 最终只保留 cluster.conf 即可
#配置 auth.conf
# Expected content of auth.conf (commented reference):
cat auth.conf | grep -v '^#' | grep -v '^$'
#   Authority {
#       Auth guess_guess_101 {
#           Mode admin
#       }
#   }
# Start predixy in the background (it logs to ./predixy.log per its
# config; nohup's own output lands in nohup.out).
nohup /usr/local/predixy/bin/predixy /usr/local/predixy/conf/predixy.conf &
# Removed a stray trailing "}" left over in the original notes.