配置Nginx正则
[root@localhost patterns]# pwd
/usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/logstash-patterns-core-4.1.2/patterns
[root@localhost patterns]# cat nginx_access
URIPARAM1 [A-Za-z0-9$.+!*'|(){},~@#%&/=:;_?\-\[\]]*
NGINXACCESS %{IPORHOST:client_ip} (%{USER:ident}|-) (%{USER:auth}|-) \[%{HTTPDATE:timestamp}\] "(?:%{WORD:verb} (%{NOTSPACE:request}|-)(?: HTTP/%{NUMBER:http_version})?|-)" %{NUMBER:status} (?:%{NUMBER:bytes}|-) "(?:%{URI:referrer}|-)" "%{GREEDYDATA:agent}"
[root@localhost kafka]# ./bin/kafka-topics.sh --create --zookeeper 192.168.88.8:2181 --replication-factor 2 --partitions 3 --topic nginx #创建nginx的topic
Created topic nginx.
[root@localhost kafka]# ./bin/kafka-console-consumer.sh --bootstrap-server 192.168.88.8:9092 --topic msg --from-beginning #消费
[root@localhost ~]# yum -y install nginx #安装nginx
[root@localhost ~]# systemctl start nginx #开启
[root@localhost ~]# tailf /var/log/nginx/access.log #查看日志
[root@localhost ~]# ab -n 100 -c 100 http://192.168.88.88/index.html #压测
配置filebeat文件
[root@localhost ~]# cat /etc/filebeat/filebeat.yml
#=========================== Filebeat inputs =============================
filebeat.inputs:
- type: log
enabled: true
paths:
- /var/log/messages
fields:
log_topics: msg
- type: log
enabled: true
paths:
- /var/log/nginx/access.log
fields:
log_topics: nginx
#================================ Outputs =====================================
output.kafka:
enabled: true
hosts: ["192.168.88.8:9092","192.168.88.88:9092","192.168.88.99:9092"]
topic: '%{[fields][log_topics]}'
[root@localhost ~]# systemctl restart filebeat
[root@localhost ~]# tailf /var/log/filebeat/filebeat
消费
配置logstash
[root@localhost ~]# cat /etc/logstash/conf.d/nginx.conf
input {
kafka {
bootstrap_servers => ["192.168.88.8:9092,192.168.88.88:9092,192.168.88.99:9092"]
group_id => "logstash"
topics => ["nginx"]
consumer_threads => 5
}
}
filter {
json {
source => "message"
}
mutate {
remove_field => ["@version","fields","prospector","source","host","beat","input","offset","log"]
}
grok {
match => { "message" => "%{NGINXACCESS}" }
}
}
output {
elasticsearch {
hosts => "192.168.88.8:9200"
index => "nginx-%{+YYYY.MM.dd}"
}
}
[root@localhost patterns]# cat /etc/logstash/pipelines.yml | grep -v "^#" | sed '/^$/d'
- pipeline.id: msg
path.config: "/etc/logstash/conf.d/messages.conf"
- pipeline.id: nginx
path.config: "/etc/logstash/conf.d/nginx.conf"
[root@localhost ~]# chmod 777 /var/log -R
[root@localhost logstash]# systemctl restart logstash
[root@localhost logstash]# tailf /var/log/logstash/logstash-plain.log
[root@localhost logstash]# netstat -ntlp | grep 9600
[root@localhost ~]# ab -n 100 -c 100 http://192.168.88.88/index.html
[root@localhost kafka]# curl -X GET http://192.168.88.8:9200/_cat/indices?v #查看索引