Collecting Nginx Logs with EFK

Configure the Nginx grok pattern

[root@localhost patterns]# pwd
/usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/logstash-patterns-core-4.1.2/patterns
[root@localhost patterns]# cat nginx_access
URIPARAM1 [A-Za-z0-9$.+!*'|(){},~@#%&/=:;_?\-\[\]]*
NGINXACCESS %{IPORHOST:client_ip} (%{USER:ident}|- ) (%{USER:auth}|-) \[%{HTTPDATE:timestamp}\] "(?:%{WORD:verb} (%{NOTSPACE:request}|-)(?: HTTP/%{NUMBER:http_version})?|-)" %{NUMBER:status} (?:%{NUMBER:bytes}|-) "(?:%{URI:referrer}|-)" "%{GREEDYDATA:agent}"
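For reference, a typical line from Nginx's default combined access log that NGINXACCESS is meant to parse (the values below are illustrative only):

192.168.88.1 - - [23/Aug/2019:10:15:32 +0800] "GET /index.html HTTP/1.1" 200 612 "http://192.168.88.88/" "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"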
[root@localhost kafka]# ./bin/kafka-topics.sh --create --zookeeper 192.168.88.8:2181 --replication-factor 2 --partitions 3 --topic nginx   # create the nginx topic
Created topic nginx.
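To confirm the partition and replica layout, the topic can be described against the same ZooKeeper address, for example:

[root@localhost kafka]# ./bin/kafka-topics.sh --describe --zookeeper 192.168.88.8:2181 --topic nginx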
[root@localhost kafka]# ./bin/kafka-console-consumer.sh --bootstrap-server 192.168.88.8:9092 --topic msg --from-beginning   # consume the topic
[root@localhost ~]# yum -y install nginx   # install nginx
[root@localhost ~]# systemctl start nginx   # start nginx
[root@localhost ~]# tailf /var/log/nginx/access.log   # watch the access log
[root@localhost ~]# ab -n 100 -c 100 http://192.168.88.88/index.html   # generate test traffic

Configure Filebeat

[root@localhost ~]# cat /etc/filebeat/filebeat.yml
#=========================== Filebeat inputs =============================
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /var/log/messages
  fields:
    log_topics: msg
- type: log
  enabled: true
  paths:
    - /var/log/nginx/access.log
  fields:
    log_topics: nginx
#================================ Outputs =====================================
output.kafka:
  enabled: true
  hosts: ["192.168.88.8:9092","192.168.88.88:9092","192.168.88.99:9092"]
  topic: '%{[fields][log_topics]}'
[root@localhost ~]# systemctl restart filebeat
[root@localhost ~]# tailf /var/log/filebeat/filebeat
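If events do not reach Kafka, Filebeat's built-in checks can validate the YAML and the output connection, for example:

[root@localhost ~]# filebeat test config -c /etc/filebeat/filebeat.yml
[root@localhost ~]# filebeat test output -c /etc/filebeat/filebeat.yml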

Consume and verify
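For example, a console consumer pointed at the nginx topic should print the JSON events Filebeat produces once traffic hits Nginx:

[root@localhost kafka]# ./bin/kafka-console-consumer.sh --bootstrap-server 192.168.88.8:9092 --topic nginx --from-beginning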


Configure Logstash


[root@localhost ~]# cat /etc/logstash/conf.d/nginx.conf
input {
        kafka {
                bootstrap_servers => "192.168.88.8:9092,192.168.88.88:9092,192.168.88.99:9092"
                group_id => "logstash"
                topics => ["nginx"]
                consumer_threads => 5
        }
}
filter {
        json {
                source => "message"
        }
        mutate {
                remove_field => ["@version","fields","prospector","source","host","beat","input","offset","log"]
        }
        grok {
                match => { "message" => "%{NGINXACCESS}" }
        }
}
output {
        elasticsearch {
                hosts => "192.168.88.8:9200"
                index => "nginx-%{+YYYY.MM.dd}"
        }
}
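A quick syntax check before restarting can catch pipeline errors early; for example, assuming the default install path:

[root@localhost logstash]# /usr/share/logstash/bin/logstash --config.test_and_exit -f /etc/logstash/conf.d/nginx.conf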

[root@localhost patterns]# cat /etc/logstash/pipelines.yml | grep -v "^#" | sed '/^$/d'
- pipeline.id: msg
  path.config: "/etc/logstash/conf.d/messages.conf"
- pipeline.id: nginx
  path.config: "/etc/logstash/conf.d/nginx.conf"
[root@localhost ~]# chmod 777 /var/log -R
[root@localhost logstash]# systemctl restart logstash
[root@localhost logstash]# tailf /var/log/logstash/logstash-plain.log
[root@localhost logstash]# netstat -ntlp | grep 9600
[root@localhost ~]# ab -n 100 -c 100 http://192.168.88.88/index.html
[root@localhost kafka]# curl -X GET http://192.168.88.8:9200/_cat/indices?v    # list the indices
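To confirm the documents are parsed into fields (client_ip, verb, status and so on) rather than stored as raw strings, one document can be pulled from the index, for example:

[root@localhost kafka]# curl -X GET 'http://192.168.88.8:9200/nginx-*/_search?pretty&size=1'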