针对 docker的安装 ElasticSearch Kibana Logstash 的版本均为7.6.0
安装 ElasticSearch
# Pull the Elasticsearch 7.6.0 image
docker pull elasticsearch:7.6.0
# Run a single-node Elasticsearch container.
# FIX: with --net host the container shares the host network stack, so the
# original "-p 9200:9200 -p 9300:9300" flags were ignored by Docker (it prints
# a warning); they are removed. Ports 9200/9300 are reachable on the host
# directly. Host networking is kept because Kibana/Logstash below connect
# via 127.0.0.1.
docker run -d --user root --name elasticsearch \
--net host -e "discovery.type=single-node" \
--restart=always \
-v /home/elasticsearch/data:/usr/share/elasticsearch/data \
-v /home/elasticsearch/config:/usr/share/elasticsearch/config \
-v /home/elasticsearch/logs:/usr/share/elasticsearch/logs \
-v /home/elasticsearch/plugins:/usr/share/elasticsearch/plugins \
-v /etc/localtime:/etc/localtime:ro \
-v /etc/timezone:/etc/timezone:ro \
elasticsearch:7.6.0
# NOTE(review): if /home/elasticsearch/config is an empty host directory the
# container will fail to start (no elasticsearch.yml inside); copy the default
# config out of the image first — confirm for your setup.
# 绑定 data config logs plugins 到宿主主机中,其中 /home/elasticsearch 目录为宿主主机中的目录可以更改
# 配置elasticsearch 密码(x-pack 的基础安全功能自 6.8/7.1 起免费,仅高级功能收费)
vim /home/elasticsearch/config/elasticsearch.yml
# 添加如下配置:
# http.cors.enabled: true
# http.cors.allow-origin: "*"
# http.cors.allow-headers: Authorization
# xpack.security.enabled: true
# xpack.security.transport.ssl.enabled: true
保存进入elasticsearch 容器内部:
# Open a root shell inside the running elasticsearch container
docker exec -it -u root elasticsearch /bin/sh
# FIX: elasticsearch-setup-passwords lives in ./bin, not ./config
cd ./bin && ./elasticsearch-setup-passwords interactive
# The tool asks whether to continue; answer:
y
# Then enter a password for each built-in user in turn:
# elastic, kibana, logstash_system, beats_system (and the other built-in users)
安装 Kibana
# Pull the Kibana 7.6.0 image
docker pull kibana:7.6.0
# Run Kibana.
# FIX: --net host shares the host network, so the original "-p 5601:5601"
# was ignored by Docker and is removed; Kibana is reachable on host port 5601.
docker run -d --user root \
--name kibana --restart=always \
--net host \
-v /home/kibana/config:/usr/share/kibana/config \
-v /home/kibana/data:/usr/share/kibana/data \
-v /etc/localtime:/etc/localtime:ro \
-v /etc/timezone:/etc/timezone:ro \
kibana:7.6.0 /usr/local/bin/kibana-docker --allow-root
# 绑定 config data 到宿主主机中
# 修改elasticsearch 连接地址,密码
vim /home/kibana/config/kibana.yml
# 添加如下配置
# Kibana instance name (shown in monitoring)
server.name: kibana
# "0" binds to all interfaces (shorthand for 0.0.0.0)
server.host: "0"
# Elasticsearch endpoint; 127.0.0.1 works because both containers use host networking
elasticsearch.hosts: [ "http://127.0.0.1:9200" ]
# Uncomment and set these after enabling x-pack security on Elasticsearch
# elasticsearch.username: "elastic"
# elasticsearch.password: "123456"
# Let the monitoring UI talk to the containerised Elasticsearch
xpack.monitoring.ui.container.elasticsearch.enabled: true
# Use the Simplified Chinese UI translation
i18n.locale: "zh-CN"
# 注意,在配置了elastic 账号密码过后,访问 127.0.0.1:5601 时需要输入账号密码,这儿的账号密码也是 elastic
安装 Logstash
# Pull the Logstash 7.6.0 image
docker pull logstash:7.6.0
# Run Logstash.
# FIX: --net host shares the host network stack, so the original
# "-p 5044:5044 -p 9600:9600" flags were ignored by Docker and are removed.
# Every port Logstash listens on — including the tcp input on 9601, which the
# original -p list did not even cover — is exposed on the host automatically.
docker run -d -it --user root --restart=always \
--name logstash --net host \
-v /home/logstash/config:/usr/share/logstash/config \
-v /home/logstash/data:/usr/share/logstash/data \
-v /home/logstash/pipeline:/usr/share/logstash/pipeline \
-v /etc/localtime:/etc/localtime:ro \
-v /etc/timezone:/etc/timezone:ro \
logstash:7.6.0
# 绑定 config data pipeline 这三个目录
# 设置elasticsearch 连接
vim /home/logstash/pipeline/logstash.conf
# 注意:下面的 input/filter/output 属于管道配置,必须放在挂载的 pipeline 目录下的 .conf 文件中,不能写进 config/logstash.yml。logstash.conf 内容如下:
# Input section: how events enter the pipeline
input {
# Receive events over TCP
tcp {
port => 9601 # port the tcp input listens on (reachable on the host because the container uses --net host)
codec => "json" # events are transmitted as JSON
}
}
# Filter section: transform each event
filter {
ruby {
code => "event.set('timestamp', (event.get('@timestamp').time.localtime + 8*60*60).strftime('%Y-%m-%d %H:%M:%S'))" # format @timestamp as a string; NOTE(review): the +8h offset is hard-coded (CST/UTC+8)
}
}
output {
# Ship events to Elasticsearch
elasticsearch {
hosts => "127.0.0.1:9200"
index => "%{[appname]}-%{+YYYY.MM.dd}" # one index per appname per day; appname must be lowercase (ES index names are lowercase-only)
}
stdout {
# Debug output: prints every event to the console; this stdout block is optional
codec => rubydebug { }
}
}
整合SpringBoot
整合 springboot 项目使用的是logback日志框架
导入需要的 logstash-logback jar包
<!-- logstash-logback-encoder: ships logback events to Logstash as JSON over TCP.
     NOTE(review): 4.11 is an old release — consider upgrading, but verify
     compatibility with your logback version first. -->
<dependency>
<groupId>net.logstash.logback</groupId>
<artifactId>logstash-logback-encoder</artifactId>
<version>4.11</version>
</dependency>
配置 logback xml文件
注意: appname 保持小写,es的index只支持小写
<!-- Logstash appender: sends log events as JSON over TCP to the Logstash tcp input -->
<appender name="LOGSTASH" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
<!-- host:port of the Logstash tcp input (9601 here); replace the IP with your Logstash host -->
<destination>192.168.0.102:9601</destination>
<!-- NOTE(review): queue size for the appender's buffer — confirm overflow/drop
     behavior against the logstash-logback-encoder documentation -->
<queueSize>1048576</queueSize>
<encoder charset="UTF-8" class="net.logstash.logback.encoder.LogstashEncoder">
<!-- adds an "appname" field to every event; it becomes the ES index name, so it must be lowercase -->
<customFields>{"appname":"${appname}"}</customFields>
</encoder>
</appender>