集群架构部署图
1. 服务器配置信息
服务 | 服务器ip | 启动端口 |
---|---|---|
ES01、logstash | 192.168.159.130 | 9200、9300、9600 |
logstash01 | 192.168.159.131 | 9600 |
kibana+logstash | 192.168.159.132 | 9600、5601 |
redis+logstash02 | 192.168.159.133 | 9600、6379 |
ES02(master) | 192.168.159.134 | 9200、9300 |
2. 配置elasticsearch集群
node1:192.168.159.130(jdk环境)
node2:192.168.159.131(jdk环境)
node3:192.168.159.132(jdk环境)
1.首先安装node1 master节点
rpm -ivh elasticsearch-6.8.1.rpm
#创建elasticsearch用户和组
groupadd elasticsearch
useradd elasticsearch -s /sbin/nologin
#目录权限更改
chown -R elasticsearch:elasticsearch /var/log/elasticsearch /var/lib/elasticsearch
#elasticsearch node1主配置文件
grep '^[a-zA-Z]' /etc/elasticsearch/elasticsearch.yml
cluster.name: ELK-Cluster
node.name: es-node1
node.master: true
node.data: true
path.data: /var/lib/elasticsearch
path.logs: /var/log/elasticsearch
bootstrap.memory_lock: true
network.host: 0.0.0.0
http.port: 9200
discovery.zen.minimum_master_nodes: 2 #3个master候选节点时应设为 (N/2)+1 = 2,避免脑裂
discovery.zen.ping.unicast.hosts: ["192.168.159.130","192.168.159.131","192.168.159.132"]
#修改内存限制
[root@node1 ~]# vim /usr/lib/systemd/system/elasticsearch.service
LimitNOFILE=65535
LimitNPROC=4096
LimitMEMLOCK=infinity #添加这条信息
vim /etc/elasticsearch/jvm.options
-Xms1g
-Xmx1g
#修改系统文件配置
vim /etc/security/limits.conf
* soft nproc 10000
* hard nproc 10000
* soft nofile 65536
* hard nofile 65536
* soft memlock 32000
* hard memlock 32000
#elasticsearch服务启动
systemctl start elasticsearch.service
systemctl enable elasticsearch.service
#node2和node3复制node1的配置文件,操作步骤一致,仅需要修改node.name: 节点名称
scp /etc/elasticsearch/elasticsearch.yml root@192.168.159.132:/etc/elasticsearch/
scp /etc/elasticsearch/elasticsearch.yml root@192.168.159.131:/etc/elasticsearch/
vim /etc/elasticsearch/elasticsearch.yml
cluster.name: ELK-Cluster
node.name: es-node2 #必须修改节点名称
node.master: true
node.data: true
3. logstash与kibana配置
1.logstash配置
#安装logstash
rpm -ivh logstash-6.8.1.rpm
#修改启动脚本内容
ExecStart=/usr/share/logstash/bin/logstash "--path.settings" "/etc/logstash/" "--path.config" "/etc/logstash/conf.d"
#添加logstash用户及授权目录
groupadd logstash
useradd logstash -s /sbin/nologin
chown -R logstash:logstash /usr/share/logstash/data/
#配置文件
grep '^[a-zA-Z]' /etc/logstash/logstash.yml
path.data: /var/lib/logstash
path.config: /etc/logstash/conf.d/*.conf
path.logs: /var/log/logstash
#服务启动
systemctl start logstash.service
systemctl enable logstash.service
2.kibana配置
#安装kibana
rpm -ivh kibana-6.8.1.rpm
#配置
grep '^[a-zA-Z]' /etc/kibana/kibana.yml
server.port: 5601
server.host: "0.0.0.0"
elasticsearch.hosts: ["http://192.168.159.130:9200"]
i18n.locale: "zh-CN"
#kibana服务启动
systemctl start kibana.service
systemctl enable kibana.service
4. redis安装配置
#安装redis
yum install epel-release #安装redis源
yum install -y redis
#修改配置文件
vim /etc/redis.conf
bind 0.0.0.0
protected-mode yes
port 6379
tcp-backlog 511
timeout 0
tcp-keepalive 300
daemonize yes
supervised no
pidfile /var/run/redis_6379.pid
loglevel notice
logfile /var/log/redis/redis.log
databases 16
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression no
rdbchecksum no
requirepass 123456 #添加登录密码
#启动
systemctl start redis.service
#登录redis
redis-cli
登录成功以后通过命令授权
192.168.159.133:6379> auth 123456
OK
5. 将日志传递给redis存储
logstash负责收集日志,通过配置conf文件
vim /etc/logstash/conf.d/message_logs.conf
input {
file {
path => '/var/log/messages' #文件路径
start_position => 'beginning' #文件采集的初始位置,beginning(开头),end(结尾)
type => 'messagelogs' # 文本标记
}
}
output {
#elasticsearch {
#hosts => ["http://192.168.159.134:9200"]
#index => 'system-node6-log-%{+YYYY.MM.dd}'
#}
redis {
data_type => "list" #redis数据类型
key => "message-test-logs-133" #key名称
host => "192.168.159.132" #redis地址
port => "6379" #redis端口
db => 0 #redis存放到数据库
password => "123456" #密码
}
}
启动logstash,查看redis中db 0 的数据库中是否有message-test-logs-133 的key,里面的内容如下:
6. 将redis中的数据取出,存放到ES
需要另一个logstash配置conf文件
vim /etc/logstash/conf.d/redis-to-els.conf
input {
redis {
data_type => "list"
key => "message-test-logs-133"
host => "192.168.159.132"
port => "6379"
db => 0
password => "123456"
codec => "json" #转换成json格式
}
}
output {
elasticsearch {
hosts => ["192.168.159.134:9200"] # es地址
index => "message-133-logs-%{+YYYY.MM.dd}" #创建索引
}
}
启动logstash后,查看redis->db 0 -> key:message-test-logs-133 是否存在,不存在说明已经存到es中
7. es中查看是否有索引生成,并在kibana中查看日志
curl '192.168.159.134:9200/_cat/indices?v' #查看生成索引
green open message-133-logs-2020.11.13 DNi1fk7WRCO2FB7iDcPrpg 5 1 31 0 97.2kb 48.6kb
green open system-node6-log-2020.11.13 ffJd5bBPS6G5h22md6EgTQ 5 1 28 0 174.7kb 96.7kb