ELK日志分析
实验环境
ELK服务器:192.168.10.103
Nginx服务器:192.168.10.104
1、准备工作
配置好网络yum源
# wget -O /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
#wget -O /etc/yum.repos.d/epel.repo https://mirrors.aliyun.com/repo/epel-7.repo
# cd /etc/yum.repos.d/
# yum -y install tree
关闭防火墙(stop 立即停止,disable 禁止开机自启)
# systemctl stop firewalld
# systemctl disable firewalld
关闭SELINUX(编辑 /etc/selinux/config,重启后生效;临时生效可执行 setenforce 0)
SELINUX=disabled
2、下载并安装软件包
# mkdir /elk;cd /elk
# wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-6.2.3.tar.gz
# wget https://artifacts.elastic.co/downloads/logstash/logstash-6.2.3.tar.gz
# wget https://artifacts.elastic.co/downloads/kibana/kibana-6.2.3-linux-x86_64.tar.gz
全部解压缩
#ls -l
# chown -R root.root kibana-6.2.3-linux-x86_64
# cp -a elasticsearch-6.2.3 /usr/local/
# cp -a kibana-6.2.3-linux-x86_64 /usr/local/
# cp -a logstash-6.2.3 /usr/local/
3、安装JDK环境工具
# yum -y install java-1.8*
4、配置elasticsearch:
创建用户
# useradd elasticsearch
添加所有者所有组
# chown -R elasticsearch.elasticsearch /usr/local/elasticsearch-6.2.3/
切换到用户
# su - elasticsearch
用户进入
$ cd /usr/local/elasticsearch-6.2.3/
启动
$ ./bin/elasticsearch -d
查看是否启动成功(等待一下)
$ netstat -antp
测试是否可以正常访问
$ curl localhost:9200
5、配置 logstash
logstash中 grok 的正则配置
[root@192 elasticsearch-6.2.3]# cd /usr/local/logstash-6.2.3/
[root@192 logstash-6.2.3]# vim vendor/bundle/jruby/2.3.0/gems/logstash-patterns-core-4.1.2/patterns/grok-patterns
# Nginx log
WZ ([^ ]*)
NGINXACCESS %{IP:remote_ip} \- \- \[%{HTTPDATE:timestamp}\] "%{WORD:method} %{WZ:request} HTTP/%{NUMBER:httpversion}" %{NUMBER:status} %{NUMBER:bytes} %{QS:referer} %{QS:agent} %{QS:xforward}
创建 logstash
# vim /usr/local/logstash-6.2.3/default.conf
input {
beats {
port => "5044"
}
}
#数据过滤
filter {
grok {
match => { "message" => "%{NGINXACCESS}"}
}
geoip {
# source 填的是 grok 解析出来的客户端 IP 字段名,而不是 IP 地址本身
source => "remote_ip"
}
}
#输出配置为本机的9200端口,这是 ElasticSerch 服务的监听端口
output {
elasticsearch {
hosts => ["127.0.0.1:9200"]
}
}
进入/usr/local/logstash-6.2.3/ 目录下启动
后台启动logstash:# nohup bin/logstash -f default.conf &
[root@192 logstash-6.2.3]# nohup bin/logstash -f default.conf &
[1] 3283
[root@192 logstash-6.2.3]# nohup: 忽略输入并把输出追加到"nohup.out"
查看启动日志:# tailf nohup.out
查看端口是否启动:# netstat -antp | grep 5044
#netstat -antp
6、配置 kibana
打开kibana配置文件 /usr/local/kibana-6.2.3-linux-x86_64/config/kibana.yml
# vim /usr/local/kibana-6.2.3-linux-x86_64/config/kibana.yml
#server.host: "localhost"
修改为
server.host: "192.168.10.103"
进入kibana目录下/usr/local/kibana-6.2.3-linux-x86_64/
执行启动命令:# nohup bin/kibana &
[1] 3480
[root@192 kibana-6.2.3-linux-x86_64]# nohup: 忽略输入并把输出追加到"nohup.out"
查看启动日志:
查看端口是否启动:# netstat -napt|grep 5601
tcp 0 0 192.168.10.103:5601 0.0.0.0:* LISTEN 3480/bin/../node/bi
测试: 浏览器访问:192.168.10.103:5601
7、nginx 客户端配置
yum 安装二进制 nginx 软件包
# yum -y install nginx
# systemctl start nginx
# systemctl enable nginx 开机自启
下载filebeat 解压到 /usr/local/
# wget https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-6.2.3-linux-x86_64.tar.gz
# tar -xf filebeat-6.2.3-linux-x86_64.tar.gz
打开/usr/local/filebeat-6.2.3-linux-x86_64/filebeat.yml
# vim /usr/local/filebeat-6.2.3-linux-x86_64/filebeat.yml
enabled: false 修改为 true
paths:
- /var/log/*.log 修改为/var/log/nginx/*.log
#output.elasticsearch:
# Array of hosts to connect to.
# hosts: ["localhost:9200"] 注释掉
output.logstash:
# The Logstash hosts
hosts: ["192.168.10.103:5044"] 取消注释(指向 ELK 服务器上的 logstash,而不是本机)
切换到/usr/local/filebeat-6.2.3-linux-x86_64
#cd /usr/local/filebeat-6.2.3-linux-x86_64
后台启动filebeat:# nohup ./filebeat -e -c filebeat.yml &
查看日志:tailf nohup.out
多访问几次nginx
访问 Kibana ,点击左上角Discover,就可以看到已被ELK收集了
输入 logstash-*,点击"Next step"
选择 Time Filter,再点击"Create index pattern"
后台启动filebeat:# nohup ./filebeat -e -c filebeat.yml &
查看日志:tailf nohup.out
多访问几次nginx
访问 Kibana ,点击左上角Discover,就可以看到已被ELK收集了
输入 logstash-*,点击"Next step"
选择 Time Filter,再点击"Create index pattern"
自行创建日志