Deploying ELK with docker-compose
Pull the required images with docker
kibana 7.5.0 docker pull kibana:7.5.0
elasticsearch 7.5.0 docker pull elasticsearch:7.5.0
logstash 7.5.0 docker pull logstash:7.5.0
elasticsearch-head:5 docker pull mobz/elasticsearch-head:5
filebeat 7.5.0 docker pull registry.cn-hongkong.aliyuncs.com/hyn-beats/filebeat:7.5.0
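Once the pulls finish, you can confirm that all of the images are present locally:
docker images | grep -E 'kibana|elasticsearch|logstash|filebeat'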
vim docker-compose.yml
version: '3'
services:
  elasticsearch:
    image: elasticsearch:7.5.0
    container_name: elasticsearch
    environment:
      - "cluster.name=elasticsearch" # set the cluster name to elasticsearch
      - "discovery.type=single-node" # start in single-node mode
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m" # JVM heap size
      - TZ=Asia/Shanghai
    volumes:
      - /work/elk/elasticsearch/plugins:/usr/share/elasticsearch/plugins # plugin directory mount
      - /work/elk/elasticsearch/data:/usr/share/elasticsearch/data # data directory mount
    ports:
      - 9200:9200
  kibana:
    image: kibana:7.5.0
    container_name: kibana
    links:
      - elasticsearch:elasticsearch # lets kibana reach elasticsearch under the hostname "elasticsearch"
    depends_on:
      - elasticsearch # start kibana only after elasticsearch
    environment:
      - "ELASTICSEARCH_HOSTS=http://elasticsearch:9200" # address kibana uses to reach elasticsearch
      - TZ=Asia/Shanghai
      - I18N_LOCALE=zh-CN # Chinese Kibana UI
    ports:
      - 5601:5601
  logstash:
    image: logstash:7.5.0
    container_name: logstash
    volumes:
      - /work/elk/logstash/logstash-springboot.conf:/usr/share/logstash/pipeline/logstash.conf # pipeline config mount
      - /work/elk/logstash/logstash.yml:/usr/share/logstash/config/logstash.yml
    environment:
      - TZ=Asia/Shanghai # timezone
    depends_on:
      - elasticsearch # start logstash only after elasticsearch
    links:
      - elasticsearch:elasticsearch # lets logstash reach elasticsearch under the hostname "elasticsearch"
    ports:
      - 5044:5044
Create the host directories:
mkdir -p /work/elk/elasticsearch/data
mkdir -p /work/elk/elasticsearch/plugins
Open up the directory permissions so the container user can write to the mounts:
chmod -R 777 /work/elk/elasticsearch/data
chmod -R 777 /work/elk/elasticsearch/plugins
vim /work/elk/logstash/logstash-springboot.conf
## logstash.conf
input {
  beats {
    port => "5044"
    codec => "json"
  }
}
output {
  stdout { codec => rubydebug }
  elasticsearch {
    hosts => [ "{host IP}:9200" ]
  }
}
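Because the beats input declares codec => "json", every shipped log line is expected to be a single JSON object; plain-text lines will not parse cleanly. Purely for illustration (the field names here are just an example), a Spring Boot service writing logs through a JSON encoder would produce lines like:
{"@timestamp":"2023-01-05T12:00:00.000+08:00","level":"INFO","logger":"com.example.DemoService","message":"order created"}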
vim /work/elk/logstash/logstash.yml
http.host: "0.0.0.0"
Note: if es-head cannot connect to es, that is because CORS is not enabled on es. However, http.cors.enabled and http.cors.allow-origin are Elasticsearch settings, not Logstash ones, so they belong in the elasticsearch service's configuration rather than in logstash.yml (skip this entirely if you do not install es-head).
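A minimal sketch: since the elasticsearch image accepts dotted setting names as environment variables (the same mechanism cluster.name and discovery.type use above), add the two CORS settings to the elasticsearch service in docker-compose.yml:
environment:
  - "http.cors.enabled=true"
  - "http.cors.allow-origin=*"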
Run:
docker-compose up -d
Check the result:
docker ps
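If all three containers show as Up, a quick sanity check from the host:
curl http://localhost:9200    # should return the cluster info JSON
curl -I http://localhost:5601 # Kibana; it can take a minute or two to come up
If a container is missing from docker ps, inspect it with docker logs elasticsearch (and likewise for the others).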
Write a docker-compose.yml for filebeat:
version: '3'
services:
  filebeat:
    image: store/elastic/filebeat:7.5.0 # the image you pulled; retag if the name differs (see the note after this file)
    container_name: filebeat
    restart: always
    volumes:
      # map in the log files you want to ship
      - /work/ggfw/pw/:/work/ggfw/pw/
      - /work/ggfw/cw/:/work/ggfw/cw/
      - /opt/nginx/logs/:/opt/nginx/logs/
      # ........ add as many log mounts as you need
      # the three mounts below are always required
      - /work/elk/filebeat/filebeat.docker.yml:/usr/share/filebeat/filebeat.yml:ro
      - /var/lib/docker/containers:/var/lib/docker/containers:ro
      - /var/run/docker.sock:/var/run/docker.sock:ro
    network_mode: "host" # optional; used here because the project runs dubbo and consumers were otherwise picking up filebeat's container IP
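Note that this compose file references store/elastic/filebeat:7.5.0, while the pull command at the top used an Aliyun mirror. If the image name on your machine differs, retag the mirror image so compose can resolve it:
docker tag registry.cn-hongkong.aliyuncs.com/hyn-beats/filebeat:7.5.0 store/elastic/filebeat:7.5.0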
Write the local filebeat config:
vim /work/elk/filebeat/filebeat.docker.yml
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /work/ggfw/pw/app.log
  fields:
    service: pw
  fields_under_root: true
- type: log
  enabled: true
  paths:
    - /work/ggfw/cw/app.log
  fields:
    service: cw
  fields_under_root: true
# ...... add more inputs the same way; if a directory holds several log files you can also glob with *.log
# output configuration: ship to logstash, which listens on port 5044
output.logstash:
  hosts: ['{host IP}:5044']
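With the logstash output left at its defaults, every event lands in the daily logstash-* indices. If you would rather have one index per microservice, a minimal sketch (keyed on the service field filebeat attaches, with the same {host IP} placeholder as before) is to extend the elasticsearch block in logstash-springboot.conf:
output {
  stdout { codec => rubydebug }
  elasticsearch {
    hosts => [ "{host IP}:9200" ]
    index => "%{service}-%{+YYYY.MM.dd}" # e.g. pw-2023.01.05 and cw-2023.01.05
  }
}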
Run:
docker-compose up -d
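Because the logstash pipeline also prints every event to stdout via the rubydebug codec, you can verify the whole chain end to end by appending a JSON line to one of the watched log files (the line below is just test data) and tailing the logstash container:
echo '{"level":"INFO","message":"elk smoke test"}' >> /work/ggfw/pw/app.log
docker logs -f logstash # after a few seconds the event should show up, with service: pw attached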
That completes the server-side setup. Open Kibana at:
http://{host IP}:5601/
Under Index Management you can now see the indices created for the configured log files.
After creating the matching index patterns, the log entries show up in Discover; filter there by service to view each microservice's logs, and save the search so it is easy to reopen next time.