准备
下载镜像
# Pull the ELK-stack images, all pinned to the same 7.16.1 version.
docker pull elasticsearch:7.16.1
docker pull kibana:7.16.1
docker pull logstash:7.16.1
docker pull elastic/filebeat:7.16.1 # filebeat is installed on the machines whose logs are collected
创建网络
# Dedicated bridge network so the containers can reach each other by name.
docker network create elknet
安装Elasticsearch
创建挂载目录
# Host directory that will hold the Elasticsearch config and data mounts.
mkdir -p /opt/docker/elasticsearch
临时启动
# Throw-away container: started only to obtain the default config/data files.
docker run -d --name elasticsearch --net elknet -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" elasticsearch:7.16.1
从容器内复制配置文件到宿主机,然后删除原来的容器
# Copy the defaults out of the container onto the host, then discard it.
docker cp elasticsearch:/usr/share/elasticsearch/config/ /opt/docker/elasticsearch/config/
docker cp elasticsearch:/usr/share/elasticsearch/data/ /opt/docker/elasticsearch/data/
docker rm -f elasticsearch
正式启动
根据自己服务器情况添加 ES_JAVA_OPTS
内存分配
# Permanent container: bind-mount the config/data copied above and cap the
# JVM heap (tune ES_JAVA_OPTS to the host's available memory).
docker run -d --name elasticsearch --net elknet -p 9200:9200 -p 9300:9300 \
-e ES_JAVA_OPTS="-Xms2g -Xmx2g" \
-e "discovery.type=single-node" \
-v /opt/docker/elasticsearch/config:/usr/share/elasticsearch/config \
-v /opt/docker/elasticsearch/data:/usr/share/elasticsearch/data \
elasticsearch:7.16.1
# Follow the container log (last 500 lines) to watch the startup.
docker logs -f --tail=500 elasticsearch
启动成功后访问 ip:9200
返回信息如下就成功了:
{
"name" : "8367bcc5aae5",
"cluster_name" : "docker-cluster",
"cluster_uuid" : "eFOEiwETTxaHm_2DFnr3oQ",
"version" : {
"number" : "7.16.1",
"build_flavor" : "default",
"build_type" : "docker",
"build_hash" : "5b38441b16b1ebb16a27c107a4c3865776e20c53",
"build_date" : "2021-12-11T00:29:38.865893768Z",
"build_snapshot" : false,
"lucene_version" : "8.10.1",
"minimum_wire_compatibility_version" : "6.8.0",
"minimum_index_compatibility_version" : "6.0.0-beta1"
},
"tagline" : "You Know, for Search"
}
设置用户密码
打开配置文件
# Edit the Elasticsearch configuration that was copied to the host.
vim /opt/docker/elasticsearch/config/elasticsearch.yml
添加配置
# Allow cross-origin requests and enable X-Pack security (basic license),
# which is required before passwords can be set for the built-in users.
http.cors.enabled: true
http.cors.allow-origin: "*"
xpack.security.enabled: true
xpack.license.self_generated.type: basic
重启后进入容器执行设置用户名和密码的命令
docker restart elasticsearch
docker exec -it elasticsearch /bin/bash
elasticsearch-setup-passwords auto #generate random passwords for the built-in users; note them down
安装Kibana
创建挂载目录
# Host directory that will hold the Kibana config mount.
mkdir -p /opt/docker/kibana
临时启动
# Throw-away container: started only to obtain the default config files.
docker run -d --name kibana --net elknet -p 5601:5601 kibana:7.16.1
从容器内复制配置文件到宿主机,然后删除原来的容器
docker cp kibana:/usr/share/kibana/config/ /opt/docker/kibana/config/
docker rm -f kibana
汉化, 配置elasticsearch密码
vim /opt/docker/kibana/config/kibana.yml
追加
# Chinese UI locale, plus the credentials Kibana uses to reach Elasticsearch.
# NOTE(review): "123456" is a placeholder — use the password generated by
# elasticsearch-setup-passwords for the elastic user.
i18n.locale: "zh-CN"
elasticsearch.username: "elastic"
elasticsearch.password: "123456"
正式启动
# Permanent container with the edited config bind-mounted in.
docker run -d --name kibana --net elknet -p 5601:5601 \
-v /opt/docker/kibana/config:/usr/share/kibana/config \
kibana:7.16.1
启动成功后访问 ip:5601
输入elastic对应的密码就可以访问kibana的页面了
也可以在kibana中修改密码
安装Logstash
创建挂载目录
# Host directory that will hold the Logstash config mount.
mkdir -p /opt/docker/logstash
临时启动,复制文件后再删除
docker run -it -d -p 5044:5044 --name logstash --net elknet logstash:7.16.1
docker cp logstash:/usr/share/logstash/config/ /opt/docker/logstash/config/
docker rm -f logstash
配置
vim /opt/docker/logstash/config/logstash.yml
覆盖配置
# Listen on all interfaces; point Logstash at the pipeline file created below.
# The path is as seen from inside the container, where the config dir is mounted.
http.host: "0.0.0.0"
path.config: /usr/share/logstash/config/my.conf
vim /opt/docker/logstash/config/my.conf
添加输入、输出、过滤器
input {
  beats {
    port => 5044
    # Keep idle beats connections open (10 h) to avoid frequent reconnects.
    client_inactivity_timeout => 36000
  }
}
filter {
  # Parse "<ISO8601 timestamp> [<thread>] <LEVEL> <java.class>-" log lines.
  grok {
    match => {"message" => "%{TIMESTAMP_ISO8601:logDatetime}\s*\[%{DATA:logThread}\]\s*%{LOGLEVEL:logLevel}\s*%{JAVACLASS:logJavaClass}-"}
  }
  # Overwrite the event's @timestamp with the time parsed from the log line.
  # BUG FIX: the grok pattern above captures the field as "logDatetime",
  # but the original config matched on "logDate", so the date filter never
  # fired and @timestamp stayed at the ingest time.
  date {
    match => ["logDatetime", "yyyy-MM-dd HH:mm:ss,SSS"]
    target => "@timestamp"
  }
  # Drop noisy beats metadata so these fields are not indexed in ES.
  mutate {
    remove_field => ["agent", "@version", "input", "ecs", "host"]
  }
}
output {
  # Route each application's logs to its own monthly index. Every branch
  # keys off [fields][appName], which must match the fields.appName value
  # configured in filebeat.yml on the shipping host.
  if "client-api-prod" == [fields][appName] {
    elasticsearch {
      hosts => ["http://elasticsearch:9200"]
      index => "client_api_prod-%{+YYYY.MM}"
      user => "elastic"
      password => "xxx"
    }
  } else if "admin-api-prod" == [fields][appName] {
    elasticsearch {
      hosts => ["http://elasticsearch:9200"]
      index => "admin_api_prod-%{+YYYY.MM}"
      user => "elastic"
      password => "xxx"
    }
  } else if "client-api-dev" == [fields][appName] {
    elasticsearch {
      hosts => ["http://elasticsearch:9200"]
      index => "client_api_dev-%{+YYYY.MM}"
      user => "elastic"
      password => "xxx"
    }
  } else if "admin-api-dev" == [fields][appName] {
    elasticsearch {
      hosts => ["http://elasticsearch:9200"]
      index => "admin_api_dev-%{+YYYY.MM}"
      user => "elastic"
      password => "xxx"
    }
  } else if "admin-api-test" == [fields][appName] {
    elasticsearch {
      hosts => ["http://elasticsearch:9200"]
      index => "admin_api_test-%{+YYYY.MM}"
      user => "elastic"
      password => "xxx"
    }
  } else if "client-api-test" == [fields][appName] {
    # BUG FIX: this branch originally repeated the "admin-api-test" condition
    # above, so it was unreachable and client test logs were silently dropped.
    # Completing the client/admin x prod/dev/test matrix, it now routes
    # client-api-test events to their own index.
    elasticsearch {
      hosts => ["http://elasticsearch:9200"]
      index => "client_api_test-%{+YYYY.MM}"
      user => "elastic"
      password => "xxx"
    }
  }
}
filter 中的 grok 可以根据自己的需要配置日志正则;
date 配置的是时间覆盖;
mutate 配置要去除的字段,使其不在 es 中创建字段。
output 中的判断 [fields][appName] 需要和 filebeat 中设置的字段名一致。
正式启动
根据自己服务器情况添加 ES_JAVA_OPTS
内存分配
# Permanent container: bind-mount the edited config and cap the JVM heap
# (tune ES_JAVA_OPTS to the host's available memory).
docker run -it -d -p 5044:5044 --name logstash --net elknet \
-e ES_JAVA_OPTS="-Xms1g -Xmx1g" \
-v /opt/docker/logstash/config/:/usr/share/logstash/config/ \
logstash:7.16.1
安装Filebeat
启动
# Run filebeat as root so it can read the application log directories
# bind-mounted from the host.
docker run -d -u root --name filebeat \
-v /app/logs/client-api:/var/log/client-api:rw \
-v /app/logs/admin-api:/var/log/admin-api:rw \
elastic/filebeat:7.16.1
filebeat 需要启动在需要收集日志的机器上
# Inside the container, replace the default filebeat.yml with the config below.
docker exec -it filebeat /bin/bash
rm -rf filebeat.yml
vi filebeat.yml
加入下面配置
# BUG FIX: the original snippet had lost all YAML indentation, which would
# make keys like "enabled:"/"paths:" top-level and the config invalid;
# structure restored below.
filebeat.inputs:
  # client-api logs, with multiline handling so continuation lines
  # (e.g. stack traces) are folded into the preceding event.
  - type: log
    enabled: true
    paths:
      - /var/log/client-api/*.log
    multiline.pattern: '^20'    # a new event starts with the year, e.g. "2021-..."
    multiline.negate: true      # lines that do NOT match the pattern ...
    multiline.match: after      # ... are appended after the preceding matching line
    fields:                     # custom fields shipped with every event; appName
      appName: "client-api-dev" # must equal the [fields][appName] tested in Logstash
  # admin-api logs.
  - type: log
    enabled: true
    paths:
      - /var/log/admin-api/*.log
    fields:
      appName: "admin-api-dev"

# Ship events to Logstash (replace the host with the real Logstash address).
output.logstash:
  hosts: ["xx.1xx.x2.x3:5044"]
  enabled: true
重启
# Leave the container shell, restart filebeat to load the new config,
# then tail its logs to confirm events are being shipped.
exit
docker restart filebeat
docker logs -f --tail=500 filebeat