部署ES
docker pull elasticsearch:7.8.0
docker network create elk
# -p: idempotent and creates missing parents (/root/data may not exist yet)
mkdir -p /root/data/es
# the elasticsearch image runs as uid 1000; open the data dir so it can write
# (777 is a quick-start shortcut — tighten with chown 1000:1000 in production)
chmod 777 /root/data/es
# Write the config file /root/data/es/elasticsearch.yml
# ('eof' is quoted so the shell expands nothing inside the heredoc)
cat <<'eof' >/root/data/es/elasticsearch.yml
cluster.name: "docker-cluster"
network.host: 0.0.0.0
xpack.security.enabled: true
eof
# Start the container (single-node discovery, data dir and config mounted in)
docker run -d --name es -p 9200:9200 -p 9300:9300 --network elk --network-alias es -e discovery.type=single-node -v /root/data/es:/usr/share/elasticsearch/data -v /root/data/es/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml --privileged elasticsearch:7.8.0
# Enter the container and set the built-in users' passwords interactively
docker exec -it es bash
bin/elasticsearch-setup-passwords interactive
部署Kibana
# Create the config file /root/kibana.yml
# ('eof' is quoted so the shell expands nothing inside the heredoc;
#  the absolute path matches the -v mount in the docker run below —
#  the original wrote to the current directory, which only worked from /root)
cat <<'eof' >/root/kibana.yml
#
# ** THIS IS AN AUTO-GENERATED FILE **
#
# Default Kibana configuration for docker target
server.name: kibana
server.host: "0"
elasticsearch.hosts: [ "http://es:9200" ]
monitoring.ui.container.elasticsearch.enabled: true
elasticsearch.username: "elastic"
elasticsearch.password: "#####"
eof
# Start kibana on the elk network; it reaches ES via the "es" network alias
docker run -d -p 5601:5601 --name kibana --network elk --network-alias kibana -v /root/kibana.yml:/usr/share/kibana/config/kibana.yml kibana:7.8.0
部署logstash,启动gelf日志驱动
# Write the pipeline config /root/logstash.conf
# ('eof' quoted: no shell expansion; absolute path matches the -v mount below)
cat <<'eof' >/root/logstash.conf
input {
  gelf {
    # docker's gelf log driver sends UDP to port 12201 (see the test container),
    # so listen on UDP 12201 — the original set port_tcp => 12202, which is
    # ignored when use_udp is true and matches nothing the driver sends to
    use_udp => true
    port_udp => 12201
  }
}
output {
  elasticsearch {
    hosts => ["es:9200"]
    index => "log-%{+YYYY.MM.dd}"
    #user => "elastic"
    #password => "######"
  }
}
eof
# Start logstash.
# -p 12201:12201/udp publishes the gelf port so containers logging to
# udp://127.0.0.1:12201 on the host can actually reach it (the original
# published no port, so the gelf driver had nothing to talk to).
# NOTE(review): the original also mounted /root/logstash.yml, a file this
# tutorial never creates — docker would create a directory at that path and
# break logstash's settings load; the image's default logstash.yml is fine.
docker run -d --name logstash -p 12201:12201/udp --network elk --network-alias logstash -v /root/logstash.conf:/usr/share/logstash/config/logstash.conf --privileged logstash:7.8.0 logstash -f /usr/share/logstash/config/logstash.conf
测试容器日志连接Logstash,由Logstash将日志推送到es
# Launch a throwaway httpd container whose stdout/stderr is shipped by the
# gelf log driver to logstash at udp://127.0.0.1:12201, tagged "app_httpd".
docker run --detach --name test --publish 80:80 --log-driver gelf --log-opt gelf-address=udp://127.0.0.1:12201 --log-opt tag=app_httpd httpd
# Hit the web server once so it emits an access-log line to verify the pipeline.
curl 127.0.0.1
登录KIBANA,使用ES的用户名与密码认证
127.0.0.1:5601
在DISCOVER里创建新的index pattern。然后即可看到测试容器上报来的日志信息。
完工!
附docker-compose.yml脚本,方便部署swarm集群环境
# docker-compose stack for the same ELK setup on a swarm cluster.
# (indentation reconstructed — the pasted original had lost all nesting
#  and was not valid YAML)
version: '3.6'
services:
  es:
    hostname: es
    image: 10.41.10.81:5000/elasticsearch:7.8.0
    volumes:
      - /home/data/es:/usr/share/elasticsearch/data
      - /home/data/es/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
    environment:
      - discovery.type=single-node
    ports:
      - "9200:9200"
    deploy:
      placement:
        constraints:
          - node.labels.role == data
      replicas: 1
    networks:
      - middleware

  kibana:
    hostname: kibana
    # NOTE(review): es and logstash pull from the 10.41.10.81:5000 registry but
    # kibana pulls from Docker Hub — confirm whether this is intentional
    image: kibana:7.8.0
    volumes:
      - /home/data/kibana/kibana.yml:/usr/share/kibana/config/kibana.yml
    ports:
      - "5601:5601"
    deploy:
      placement:
        constraints:
          - node.labels.role == data
      replicas: 1
    networks:
      - middleware

  logstash:
    hostname: logstash
    image: 10.41.10.81:5000/logstash:7.8.0
    privileged: true
    volumes:
      - /home/data/logstash/logstash.conf:/usr/share/logstash/config/logstash.conf
      - /home/data/logstash/logstash.yml:/usr/share/logstash/config/logstash.yml
    command: logstash -f /usr/share/logstash/config/logstash.conf
    # host-mode publishing: with deploy.mode global, every node runs a logstash
    # task and local containers log to udp://127.0.0.1:12201 — the default
    # ingress-mesh publishing would route that UDP traffic through the mesh
    # instead of the node-local task
    ports:
      - target: 12201
        published: 12201
        protocol: udp
        mode: host
    deploy:
      mode: global
    networks:
      - middleware

  test:
    hostname: test
    image: httpd
    ports:
      - "8888:80"
    deploy:
      mode: global
    networks:
      - middleware
    # ship this container's stdout/stderr to the node-local logstash via gelf
    logging:
      driver: gelf
      options:
        gelf-address: "udp://127.0.0.1:12201"
        tag: test

networks:
  middleware:
    external: true