ELK
ELK是Elasticsearch、Logstash、Kibana的简称,这三者是核心套件,但并非全部。
Elasticsearch是实时全文搜索和分析引擎,提供搜集、分析、存储数据三大功能;它通过开放的REST和Java API等接口提供高效的搜索功能,是一个可扩展的分布式系统。它构建于Apache Lucene搜索引擎库之上。
Logstash是一个用来搜集、分析、过滤日志的工具。它支持几乎任何类型的日志,包括系统日志、错误日志和自定义应用程序日志。它可以从许多来源接收日志,这些来源包括 syslog、消息传递(例如 RabbitMQ)和JMX,它能够以多种方式输出数据,包括电子邮件、websockets和Elasticsearch。
Kibana是一个基于Web的图形界面,用于搜索、分析和可视化存储在 Elasticsearch索引中的日志数据。它利用Elasticsearch的REST接口来检索数据,不仅允许用户创建自己的数据的定制仪表板视图,还允许用户以特殊的方式查询和过滤数据。
原理介绍
本次部署的是FileBeat(客户端),ElasticSearch + LogStash + Kibana(服务端)组成的架构
业务请求到达Nginx客户端机器上的Nginx,Nginx响应请求,并在access.log文件中增加
访问记录;FileBeat搜集新增的日志,通过LogStash的5044端口上传日志;LogStash将日志
信息通过本机的9200端口传入到ElasticSearch;搜集日志的用户通过浏览器访问Kibana,服务器
端口是5601;Kibana通过9200端口访问ElasticSearch;
实验环境
本次部署的是单点ELK用了两台机器(CentOS-7.6)
ELK服务端:192.168.10.92
Nginx客户端:192.168.10.91
所需软件包
链接:https://pan.baidu.com/s/1DyEXKowhW5Ryw0yEhZr3xA
提取码:obms
配置网络yum源
[root@localhost ~]# cd /etc/yum.repos.d/
[root@localhost yum.repos.d]# wget http://mirrors.aliyun.com/repo/Centos-7.repo
[root@localhost yum.repos.d]# wget http://mirrors.aliyun.com/repo/epel-7.repo
[root@localhost yum.repos.d]# ls
Centos-7.repo epel-7.repo
[root@localhost yum.repos.d]# yum clean all
[root@localhost yum.repos.d]# yum makecache
关闭防火墙和SELinux
[root@localhost yum.repos.d]# systemctl stop firewalld && systemctl disable firewalld
[root@localhost yum.repos.d]# iptables -F
[root@localhost yum.repos.d]# vim /etc/selinux/config
SELINUX=disabled
[root@localhost yum.repos.d]# setenforce 0
setenforce: SELinux is disabled
下载所需软件包
[root@localhost yum.repos.d]# cd /usr/local/
[root@localhost local]# ls
bin etc games include lib lib64 libexec man sbin share src
[root@localhost local]# wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-6.2.3.tar.gz
[root@localhost local]# wget https://artifacts.elastic.co/downloads/logstash/logstash-6.2.3.tar.gz
[root@localhost local]# wget https://artifacts.elastic.co/downloads/kibana/kibana-6.2.3-linux-x86_64.tar.gz
[root@localhost local]# yum install -y java-1.8*
[root@localhost local]# java -version
openjdk version "1.8.0_232"
OpenJDK Runtime Environment (build 1.8.0_232-b09)
OpenJDK 64-Bit Server VM (build 25.232-b09, mixed mode)
配置ElasticSearch
[root@localhost local]# tar -xvf elasticsearch-6.2.3.tar.gz
[root@localhost local]# useradd elasticsearch
[root@localhost local]# chown -R elasticsearch.elasticsearch /usr/local/elasticsearch-6.2.3
启动ElasticSearch
[root@localhost local]# su - elasticsearch
[elasticsearch@localhost ~]$ cd /usr/local/elasticsearch-6.2.3
[elasticsearch@localhost elasticsearch-6.2.3]$ ./bin/elasticsearch -d
OpenJDK 64-Bit Server VM warning: If the number of processors is expected to increase from one, then you should configure the number of parallel GC threads appropriately using -XX:ParallelGCThreads=N
[elasticsearch@localhost elasticsearch-6.2.3]$ netstat -pant
(Not all processes could be identified, non-owned process info
will not be shown, you would have to be root to see it all.)
Active Internet connections (servers and established)
Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name
tcp 0 0 0.0.0.0:22 0.0.0.0:* LISTEN -
tcp 0 0 127.0.0.1:25 0.0.0.0:* LISTEN -
tcp 0 0 192.168.10.92:43054 151.101.110.222:443 ESTABLISHED -
tcp 0 0 192.168.10.92:43056 151.101.110.222:443 ESTABLISHED -
tcp 0 0 192.168.10.92:22 192.168.10.1:4351 ESTABLISHED -
tcp 0 0 192.168.10.92:22 192.168.10.1:4210 ESTABLISHED -
tcp 0 0 192.168.10.92:22 192.168.10.1:4337 ESTABLISHED -
tcp6 0 0 127.0.0.1:9200 :::* LISTEN 8128/java
tcp6 0 0 ::1:9200 :::* LISTEN 8128/java
tcp6 0 0 127.0.0.1:9300 :::* LISTEN 8128/java
tcp6 0 0 ::1:9300 :::* LISTEN 8128/java
tcp6 0 0 :::22 :::* LISTEN -
tcp6 0 0 ::1:25 :::* LISTEN -
[elasticsearch@localhost elasticsearch-6.2.3]$ curl localhost:9200
{
"name" : "WA_vN1j",
"cluster_name" : "elasticsearch",
"cluster_uuid" : "mUzdeVDvTHq3vqYgr47IoA",
"version" : {
"number" : "6.2.3",
"build_hash" : "c59ff00",
"build_date" : "2018-03-13T10:06:29.741383Z",
"build_snapshot" : false,
"lucene_version" : "7.2.1",
"minimum_wire_compatibility_version" : "5.6.0",
"minimum_index_compatibility_version" : "5.0.0"
},
"tagline" : "You Know, for Search"
}
#若出现错误可以查看日志
[elasticsearch@localhost elasticsearch-6.2.3]$ cat /usr/local/elasticsearch-6.2.3/logs/elasticsearch.log
配置LogStash
[root@localhost local]# tar xvf logstash-6.2.3.tar.gz
[root@localhost local]# cd logstash-6.2.3
[root@localhost logstash-6.2.3]# ls
bin CONTRIBUTORS Gemfile lib logstash-core modules tools
config data Gemfile.lock LICENSE logstash-core-plugin-api NOTICE.TXT vendor
[root@localhost logstash-6.2.3]# vim vendor/bundle/jruby/2.3.0/gems/logstash-patterns-core-4.1.2/patterns/grok-patterns
#添加以下配置
# Nginx log
WZ ([^ ]*)
NGINXACCESS %{IP:remote_ip} \- \- \[%{HTTPDATE:timestamp}\] "%{WORD:method} %{WZ:request} HTTP/%{NUMBER:httpversion}" %{NUMBER:status} %{NUMBER:bytes} %{QS:referer} %{QS:agent} %{QS:xforward}
创建LogStash配置文件
[root@localhost logstash-6.2.3]# vim default.conf
input {
beats {
port => "5044"
}
}
#数据过滤
filter {
grok {
match => {"message" => "%{NGINXACCESS}"}
}
geoip {
#source必须是日志事件中存放客户端IP的字段名(由上面grok模式解析出的remote_ip),而不是IP字面量
source => "remote_ip"
}
}
#输出配置为本机的9200端口,这是ElasticSerach服务器的监听端口
output {
elasticsearch {
hosts => ["127.0.0.1:9200"]
}
}
~
~
"default.conf" [New] 24L, 377C written
启动LogStash
[root@localhost logstash-6.2.3]# nohup bin/logstash -f default.conf &
[1] 8602
[root@localhost logstash-6.2.3]# nohup: ignoring input and appending output to ‘nohup.out’
[root@localhost logstash-6.2.3]# tailf nohup.out
[root@localhost logstash-6.2.3]# netstat -pant | grep 5044
tcp6 0 0 :::5044 :::* LISTEN 8602/java
配置Kibana
[root@localhost local]# tar xvf kibana-6.2.3-linux-x86_64.tar.gz
[root@localhost local]# cd kibana-6.2.3-linux-x86_64
[root@localhost kibana-6.2.3-linux-x86_64]# ls
bin LICENSE.txt NOTICE.txt plugins ui_framework
config node optimize README.txt webpackShims
data node_modules package.json src
[root@localhost kibana-6.2.3-linux-x86_64]# cd config/
[root@localhost config]# ls
kibana.yml
[root@localhost config]# vim kibana.yml
server.host: "192.168.10.92"
启动Kibana
[root@localhost config]# cd ..
[root@localhost kibana-6.2.3-linux-x86_64]# nohup bin/kibana &
[1] 8319
[root@localhost kibana-6.2.3-linux-x86_64]# nohup: ignoring input and appending output to ‘nohup.out’
[root@localhost kibana-6.2.3-linux-x86_64]# tail -f nohup.out
[root@localhost kibana-6.2.3-linux-x86_64]# netstat -pant | grep 5601
tcp 0 0 192.168.10.92:5601 0.0.0.0:* LISTEN 8319/bin/../node/bi
通过http://192.168.10.92:5601/访问Kibana
配置Nginx客户端
安装Nginx
[root@localhost yum.repos.d]# cd /usr/local/
[root@localhost local]# yum install -y nginx
[root@localhost local]# systemctl start nginx
配置FileBeat
[root@localhost local]# tar xvf filebeat-6.2.3-linux-x86_64.tar.gz
[root@localhost local]# ls
bin filebeat-6.2.3-linux-x86_64 games lib libexec sbin src
etc filebeat-6.2.3-linux-x86_64.tar.gz include lib64 man share
[root@localhost local]# cd filebeat-6.2.3-linux-x86_64
[root@localhost filebeat-6.2.3-linux-x86_64]# ls
fields.yml filebeat.reference.yml kibana module NOTICE.txt
filebeat filebeat.yml LICENSE.txt modules.d README.md
[root@localhost filebeat-6.2.3-linux-x86_64]# vim filebeat.yml
- type: log
# Change to true to enable this prospector configuration.
enabled: true #将false修改为true
# Paths that should be crawled and fetched. Glob based paths.
paths:
- /var/log/nginx/*.log #将/var/log/*.log修改为/var/log/nginx/*.log
#output.elasticsearch: #将此行注释掉
# Array of hosts to connect to.
# hosts: ["localhost:9200"] #将此行注释掉
output.logstash: #取消此行注释
# The Logstash hosts
hosts: ["192.168.10.92:5044"] #取消此行注释并修改IP地址为ELK服务器地址
启动FileBeat
[root@localhost filebeat-6.2.3-linux-x86_64]# cd /usr/local/filebeat-6.2.3-linux-x86_64
[root@localhost filebeat-6.2.3-linux-x86_64]# nohup ./filebeat -e -c filebeat.yml &
[1] 8250
[root@localhost filebeat-6.2.3-linux-x86_64]# nohup: ignoring input and appending output to ‘nohup.out’
[root@localhost filebeat-6.2.3-linux-x86_64]# tailf nohup.out
收集日志
通过http://192.168.10.91/多访问几次Nginx