ELK consists of three components
1. Elasticsearch
Written in Java, Elasticsearch is an open-source distributed storage and search engine. Key traits: distributed, near-zero configuration, automatic node discovery, automatic index sharding, index replica mechanism, RESTful interface (see the curl sketch after this overview), multiple data sources, and automatic search load balancing. Official site: elastic.co/cn/
2. Logstash
Deployed on the client (log-producing) side. It collects, parses, and filters logs (fields), then forwards them to Elasticsearch.
3. Kibana
A web UI that provides a friendly front end for data in Elasticsearch. It connects to Elasticsearch and has no direct interaction with Logstash.
ELK solves two problems of traditional logging:
1. Centralized log collection
2. Fast log analysis, filtering, and statistics
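As a quick illustration of the RESTful interface mentioned above, here is a minimal sketch: index one document and search for it with curl. The index name test-index and the document body are made up for illustration; the host assumes the Elasticsearch node installed later in this guide (192.168.1.151).

# index a sample document (index "test-index" is created automatically)
curl -X POST "http://192.168.1.151:9200/test-index/_doc" \
     -H 'Content-Type: application/json' \
     -d '{"message": "hello elk", "level": "info"}'

# search it back
curl "http://192.168.1.151:9200/test-index/_search?q=message:hello&pretty"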
I. Installing ELK
1. Environment
[root@5723e1da4335 ~]# java --version
openjdk 11.0.1 2018-10-16
OpenJDK Runtime Environment 18.9 (build 11.0.1+13)
OpenJDK 64-Bit Server VM 18.9 (build 11.0.1+13, mixed mode)
[root@5723e1da4335 ~]# cat /etc/redhat-release
CentOS Linux release 7.3.1611 (Core)
192.168.1.151: elasticsearch
192.168.1.152: Kibana
192.168.1.153: Logstash
ELFK binary packages (download links):
wget -c https://mirrors.huaweicloud.com/elasticsearch/7.8.0/elasticsearch-7.8.0-linux-x86_64.tar.gz
wget -c https://mirrors.huaweicloud.com/kibana/7.8.0/kibana-7.8.0-linux-x86_64.tar.gz
wget -c https://mirrors.huaweicloud.com/logstash/7.8.0/logstash-7.8.0.tar.gz
wget -c https://mirrors.huaweicloud.com/filebeat/7.8.1/filebeat-7.8.1-linux-x86_64.tar.gz
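If you want to verify a download, the upstream artifacts site (artifacts.elastic.co) publishes .sha512 checksum files; this is a sketch for the Elasticsearch tarball, and the same pattern applies to the other packages (assumes outbound access to artifacts.elastic.co).

wget -c https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.8.0-linux-x86_64.tar.gz.sha512
sha512sum -c elasticsearch-7.8.0-linux-x86_64.tar.gz.sha512   # should print "OK" if the tarball is intact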
2. Install Elasticsearch on 192.168.1.151
[root@5723e1da4335 ~]# tar -zxvf elasticsearch-7.8.0-linux-x86_64.tar.gz
[root@5723e1da4335 ~]# mv elasticsearch-7.8.0 /usr/local/
[root@5723e1da4335 ~]# cd /usr/local/elasticsearch-7.8.0/
[root@5723e1da4335 ~]# useradd elk
[root@5723e1da4335 ~]# chown -R elk:elk /usr/local/elasticsearch-7.8.0
[root@5723e1da4335 elasticsearch-7.8.0]# vim config/elasticsearch.yml
network.host: 0.0.0.0
http.port: 9200
node.name: node-1
http.cors.enabled: true
http.cors.allow-origin: "*"
cluster.initial_master_nodes: ["node-1"]
[root@5723e1da4335 elasticsearch-7.8.0]# vim config/jvm.options
-Xms1g
-Xmx1g
Change 8-13:-XX:+UseConcMarkSweepGC to 8-13:-XX:+UseG1GC (CMS is deprecated on newer JDKs; use G1 instead)
vim /etc/security/limits.conf
* soft nofile 65536
* hard nofile 65536
vim /etc/security/limits.d/20-nproc.conf    on CentOS 7 the nproc limits file is 20-nproc.conf
* soft nproc 4096    Elasticsearch 7 requires at least 4096 threads for the elk user
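After logging in again as the elk user, the new limits can be sanity-checked with ulimit; the expected values match the settings above.

su - elk
ulimit -n    # max open files, should print 65536
ulimit -u    # max user processes, should print 4096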
Start Elasticsearch as the elk user (created above; Elasticsearch refuses to run as root)
[elk@5723e1da4335 root]$ /usr/local/elasticsearch-7.8.0/bin/elasticsearch -d
Check the logs
[elk@5723e1da4335 root]$ tail -fn 30 /usr/local/elasticsearch-7.8.0/logs/elasticsearch.log
...
[2] bootstrap checks failed
[1]: max virtual memory areas vm.max_map_count [65530] is too low, increase to at least [262144]
[2]: the default discovery settings are unsuitable for production use; at least one of [discovery.seed_hosts, discovery.seed_providers, cluster.initial_master_nodes] must be configured
[2020-08-30T10:04:14,007][INFO ][o.e.n.Node ] [5723e1da4335] stopping ...
[2020-08-30T10:04:14,194][INFO ][o.e.n.Node ] [5723e1da4335] stopped
[2020-08-30T10:04:14,194][INFO ][o.e.n.Node ] [5723e1da4335] closing ...
[2020-08-30T10:04:14,226][INFO ][o.e.n.Node ] [5723e1da4335] closed
[2020-08-30T10:04:14,231][INFO ][o.e.x.m.p.NativeController] [5723e1da4335] Native controller process has stopped - no new native processes can be started
The startup failed with the bootstrap check errors above; adjust the kernel parameter
vim /etc/sysctl.conf
vm.max_map_count=655360
[root@5723e1da4335 ~]# vim /usr/local/elasticsearch-7.8.0/config/elasticsearch.yml
node.name: node-1
Uncomment and change to:
cluster.initial_master_nodes: ["node-1"]
[root@5723e1da4335 ~]# sysctl -p
Start again
[elk@5723e1da4335 root]$ /usr/local/elasticsearch-7.8.0/bin/elasticsearch -d
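Once the node starts cleanly, it can be verified over the REST interface from any host that can reach it:

curl "http://192.168.1.151:9200"                 # basic node and cluster info as JSON
curl "http://192.168.1.151:9200/_cat/health?v"   # single-node cluster health, status should be green or yellow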
3. Install and configure the Kibana web UI on 192.168.1.152
[root@568ede5292de ~]# tar -zxvf kibana-7.8.0-linux-x86_64.tar.gz
[root@568ede5292de ~]# mv kibana-7.8.0-linux-x86_64 /usr/local/
[root@568ede5292de kibana-7.8.0-linux-x86_64]# vim config/kibana.yml
server.host: "0.0.0.0"
i18n.locale: "zh-CN"    switches the UI to Chinese
server.port: 5601    can be changed to 80
elasticsearch.hosts: ["http://192.168.1.151:9200"]
Start Kibana
[root@568ede5292de kibana-7.8.0-linux-x86_64]# nohup /usr/local/kibana-7.8.0-linux-x86_64/bin/kibana --allow-root &
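Kibana can take a minute to initialize; a quick check against its status API (or simply opening port 5601 in a browser) confirms it is serving and can reach Elasticsearch.

curl -s "http://192.168.1.152:5601/api/status"    # the JSON should report an overall state of "green" once Kibana can reach Elasticsearch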
4. Install and configure Logstash on 192.168.1.153
[root@e00c1dd0f3eb ~]# tar -zxvf logstash-7.8.0.tar.gz
[root@e00c1dd0f3eb ~]# mv logstash-7.8.0 /usr/local/
[root@e00c1dd0f3eb ~]# cd /usr/local/logstash-7.8.0/
[root@e00c1dd0f3eb logstash-7.8.0]#
[root@e00c1dd0f3eb logstash-7.8.0]# vim config/logstash.yml
http.host: "0.0.0.0"    optional, works either way
[root@e00c1dd0f3eb logstash-7.8.0]# vim config/nginx_access.log.conf
input {
  file {
    type => "nginx-logs"
    path => "/usr/local/nginx/logs/access.log"
  }
}
output {
  elasticsearch {
    hosts => ["192.168.1.151:9200"]
  }
}
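The minimal pipeline above ships each access-log line to Elasticsearch as a raw message field. If the nginx log uses the default combined format, a filter block like the following sketch would parse the fields and write to a dated index before indexing; the COMBINEDAPACHELOG grok pattern and the nginx-logs-* index name are illustrative additions, not part of the original setup.

input {
  file {
    type => "nginx-logs"
    path => "/usr/local/nginx/logs/access.log"
  }
}
filter {
  grok {
    # nginx's default "combined" log format matches the stock Apache combined pattern
    match => { "message" => "%{COMBINEDAPACHELOG}" }
  }
  date {
    # use the request time from the log line as the event timestamp
    match => [ "timestamp", "dd/MMM/yyyy:HH:mm:ss Z" ]
  }
}
output {
  elasticsearch {
    hosts => ["192.168.1.151:9200"]
    index => "nginx-logs-%{+YYYY.MM.dd}"
  }
}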
Start Logstash
[root@e00c1dd0f3eb logstash-7.8.0]# nohup /usr/local/logstash-7.8.0/bin/logstash -f /usr/local/logstash-7.8.0/config/nginx_access.log.conf &
[root@e00c1dd0f3eb logstash-7.8.0]# netstat -tnlp|grep java
tcp6 0 0 :::9600 :::* LISTEN 203/java
[root@e00c1dd0f3eb logstash-7.8.0]# tail -fn 5 nohup.out
[2020-08-30T11:57:19,554][INFO ][logstash.inputs.file ][main] No sincedb_path set, generating one based on the "path" setting {:sincedb_path=>"/usr/local/logstash-7.8.0/data/plugins/inputs/file/.sincedb_d2343edad78a7252d2ea9cba15bbff6d", :path=>["/usr/local/nginx/logs/access.log"]}
[2020-08-30T11:57:19,600][INFO ][logstash.javapipeline ][main] Pipeline started {"pipeline.id"=>"main"}
[2020-08-30T11:57:19,753][INFO ][filewatch.observingtail ][main][236cd606a61d908835bf81af492eeafa8ab3a522a531d7ce340f99e7fd577bfb] START, creating Discoverer, Watch with file and sincedb collections
[2020-08-30T11:57:19,769][INFO ][logstash.agent ] Pipelines running {:count=>1, :running_pipelines=>[:main], :non_running_pipelines=>[]}
[2020-08-30T11:57:20,395][INFO ][logstash.agent ] Successfully started Logstash API endpoint {:port=>9600}
II. Create an index pattern for the Logstash data in the Kibana web UI
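The minimal nginx_access.log.conf pipeline writes to Logstash's default indices, whose names start with logstash- (the exact suffix depends on whether index lifecycle management is active), so the index pattern to create under Kibana's Stack Management → Index Patterns is logstash-*, with @timestamp as the time field. To confirm that data has arrived before creating the pattern:

curl "http://192.168.1.151:9200/_cat/indices?v"    # look for indices whose names begin with logstash-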
III. Elasticsearch cluster
for i in `seq 1 3`;do echo "";echo es_node$i; cat /usr/local/docker_es_node$i/config/elasticsearch.yml ;done
es_node1
cluster.name: es-cluster
node.name: es-node1
path.data: /usr/local/elasticsearch/data
path.logs: /usr/local/elasticsearch/logs
network.host: 0.0.0.0
network.publish_host: 192.168.0.83
http.port: 9200
transport.tcp.port: 9300
http.cors.enabled: true
http.cors.allow-origin: "*"
node.master: true
node.data: true
discovery.seed_hosts: ["192.168.0.83:9300","192.168.0.83:9301","192.168.0.83:9302"]
cluster.initial_master_nodes: ["es-node1"]
discovery.zen.fd.ping_timeout: 1m
discovery.zen.fd.ping_retries: 5
es_node2
cluster.name: es-cluster
node.name: es-node2
path.data: /usr/local/elasticsearch/data
path.logs: /usr/local/elasticsearch/logs
network.host: 0.0.0.0
network.publish_host: 192.168.0.83
http.port: 9201
transport.tcp.port: 9301
http.cors.enabled: true
http.cors.allow-origin: "*"
node.master: false
node.data: true
discovery.seed_hosts: ["192.168.0.83:9300","192.168.0.83:9301","192.168.0.83:9302"]
discovery.zen.fd.ping_timeout: 1m
discovery.zen.fd.ping_retries: 5
cluster.initial_master_nodes: ["es-node1"]
es_node3
cluster.name: es-cluster
node.name: es-node3
path.data: /usr/local/elasticsearch/data
path.logs: /usr/local/elasticsearch/logs
network.host: 0.0.0.0
network.publish_host: 192.168.0.83
http.port: 9202
transport.tcp.port: 9302
http.cors.enabled: true
http.cors.allow-origin: "*"
node.master: false
node.data: true
discovery.seed_hosts: ["192.168.0.83:9300","192.168.0.83:9301","192.168.0.83:9302"]
cluster.initial_master_nodes: ["es-node1"]
discovery.zen.fd.ping_timeout: 1m
discovery.zen.fd.ping_retries: 5
Verify the cluster
[root@localhost local]# curl 192.168.0.83:9200/_cat/nodes
192.168.0.83 55 96 25 1.00 1.01 1.05 dilmrt * es-node1
192.168.0.83 42 96 25 1.00 1.01 1.05 dilrt - es-node2
192.168.0.83 56 96 25 1.00 1.01 1.05 dilrt - es-node3
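The asterisk in the _cat/nodes output marks the elected master (es-node1). Overall cluster state can also be checked via the cluster health API:

curl "http://192.168.0.83:9200/_cluster/health?pretty"    # expect "status" : "green" and "number_of_nodes" : 3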
-----------------end