EFK实验架构图:
3台 es+filebeat(采集端),3台 kafka
192.168.233.10 es+file
192.168.233.41 es+file
192.168.233.42 es+file
192.168.233.71 kafka
192.168.233.72 kafka
192.168.233.73 kafka
在 es1 主机上解压 filebeat,并放到 /usr/local/filebeat(与下文 cd 路径保持一致)
安装nginx服务
cd /usr/local/filebeat
vim filebeat.yml
# Filebeat inputs: tail both nginx logs and tag each stream so Logstash can route them.
# NOTE: the inputs must live under the `filebeat.inputs:` key — the original snippet omitted it.
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /usr/local/nginx/logs/access.log
  tags: ["access"]            # routed to the nginx_access-* index by Logstash

- type: log
  enabled: true
  paths:
    # nginx is installed under /usr/local/nginx (see access.log above);
    # the original pointed at /var/log/nginx/error_log, which belongs to a package-managed nginx.
    - /usr/local/nginx/logs/error.log
  tags: ["error"]             # routed to the nginx_error-* index by Logstash

# Output to the Kafka cluster
output.kafka:
  enabled: true
  # Kafka brokers are 192.168.233.71-73 per the architecture diagram;
  # the original list included 192.168.233.10, which is an ES node, and omitted .73.
  hosts: ["192.168.233.71:9092","192.168.233.72:9092","192.168.233.73:9092"]
  topic: "nginx"              # Kafka topic to publish log events to
#启动 filebeat
./filebeat -e -c filebeat.yml
3.部署 ELK,在 Logstash 组件所在节点上新建一个 Logstash 配置文件
cd /etc/logstash/conf.d/
vim kafka.conf
input {
    kafka {
        # Kafka cluster is 192.168.233.71-73 per the architecture diagram;
        # the original listed .10 (an ES node) and omitted broker .73.
        bootstrap_servers => "192.168.233.71:9092,192.168.233.72:9092,192.168.233.73:9092"
        topics => "nginx"               # topic published by filebeat
        type => "nginx_kafka"           # type field attached to every event
        codec => "json"                 # filebeat ships JSON; decode it into fields
        auto_offset_reset => "latest"   # only new data; use "earliest" to replay from the beginning
        decorate_events => true         # add Kafka metadata (topic/partition/offset) to each event
    }
}
output {
    # Route by the tag filebeat attached to each input stream.
    if "access" in [tags] {
        elasticsearch {
            # ES nodes are 192.168.233.10/.41/.42 per the architecture diagram;
            # the original used .71/.72, which are Kafka brokers.
            hosts => ["192.168.233.10:9200","192.168.233.41:9200","192.168.233.42:9200"]
            index => "nginx_access-%{+YYYY.MM.dd}"
        }
    }
    if "error" in [tags] {
        elasticsearch {
            # the original listed 192.168.233.12/.13, which exist nowhere in this architecture
            hosts => ["192.168.233.10:9200","192.168.233.41:9200","192.168.233.42:9200"]
            index => "nginx_error-%{+YYYY.MM.dd}"
        }
    }
    stdout { codec => rubydebug }       # echo events to the console for debugging
}
#启动 logstash
logstash -f kafka.conf
# NOTE(review): this fragment duplicates the "error" output block and closing brace
# already shown in the kafka.conf listing above — it looks like an accidental paste
# and should not appear a second time inside the config file.
if "error" in [tags] {
    elasticsearch {
        # ES nodes are 192.168.233.10/.41/.42 per the architecture diagram;
        # the original used .71/.72, which are Kafka brokers.
        hosts => ["192.168.233.10:9200","192.168.233.41:9200","192.168.233.42:9200"]
        index => "nginx_error-%{+YYYY.MM.dd}"
    }
}
stdout { codec => rubydebug }           # echo events to the console for debugging
}
#启动 logstash
logstash -f kafka.conf
4.浏览器访问 http://192.168.233.10:9100(9100 为 elasticsearch-head 端口,应访问 es 节点;192.168.233.71 是 kafka 节点)