Docker安装ELK
为了让 es 和 kibana 容器互联。这里先创建一个网络:
docker network create es-net
ES
# 启动elasticsearch
# NOTE(review): the official elasticsearch image keeps its data and plugins
# under /usr/share/elasticsearch inside the container. Mounting the named
# volumes onto /D/DockerMount/... (a host-style Windows path) has no effect;
# the container-side paths are fixed below.
docker run -d --name my-es -e "ES_JAVA_OPTS=-Xms512m -Xmx512m" -e "discovery.type=single-node" -v es-data:/usr/share/elasticsearch/data -v es-plugins:/usr/share/elasticsearch/plugins --privileged --network es-net -p 9200:9200 -p 9300:9300 elasticsearch:7.6.2
# 启动成功访问localhost:9200
kibana
# 启动 kibana
docker run -d --name my-kibana -e ELASTICSEARCH_HOSTS=http://my-es:9200 --network=es-net -p 5601:5601 kibana:7.6.2
# 启动成功访问localhost:5601
logstash
# 启动logstash
docker run -d --name my-logstash -e ELASTICSEARCH_HOSTS=http://my-es:9200 --network=es-net -p 12501:12501 -p 9600:9600 -p 12502:12502 -p 12503:12503 -p 12504:12504 -p 12505:12505 -p 12506:12506 -p 12507:12507 -p 12508:12508 -p 12509:12509 logstash:7.6.2
# 修改logstash配置
# 修改logstash.yml配置 在config路径下
vi config/logstash.yml
# 把xpack.monitoring.elasticsearch.hosts修改为"http://my-es:9200"
# 注意前面有空格 因为是yml格式
# 修改logstash配置文件logstash.conf 在pipeline下面
vi pipeline/logstash.conf
input {
tcp {
# run as a TCP server and wait for connections from the applications
mode => "server"
host => "0.0.0.0"
# each port must match the <destination> configured in the corresponding
# service's logback-spring.xml
port => 12501
# set a type field to tell the input sources apart in the output section
type => "gateway"
codec => json_lines
}
# system service logs
tcp {
mode => "server"
host => "0.0.0.0"
port => 12502
type => "system"
codec => json_lines
}
# auth service logs
tcp {
mode => "server"
host => "0.0.0.0"
port => 12503
type => "auth"
codec => json_lines
}
# file service logs
tcp {
mode => "server"
host => "0.0.0.0"
port => 12504
type => "file"
codec => json_lines
}
# code-generation service logs
tcp {
mode => "server"
host => "0.0.0.0"
port => 12505
type => "gen"
codec => json_lines
}
# job service logs
tcp {
mode => "server"
host => "0.0.0.0"
port => 12506
type => "job"
codec => json_lines
}
# monitor service logs
tcp {
mode => "server"
host => "0.0.0.0"
port => 12507
type => "monitor"
codec => json_lines
}
# chat service logs
tcp {
mode => "server"
host => "0.0.0.0"
port => 12508
type => "chat"
codec => json_lines
}
}
output {
# route each event to its own index based on the "type" field
# assigned by the matching tcp input above
if [type]=="gateway"{
elasticsearch {
hosts => "my-es:9200"
index => "springboot-logstash-gateway"
}
}
if [type]=="system"{
elasticsearch {
hosts => "my-es:9200"
index => "springboot-logstash-system"
}
}
if [type]=="auth"{
elasticsearch {
hosts => "my-es:9200"
index => "springboot-logstash-auth"
}
}
if [type]=="file"{
elasticsearch {
hosts => "my-es:9200"
index => "springboot-logstash-file"
}
}
if [type]=="gen"{
elasticsearch {
hosts => "my-es:9200"
index => "springboot-logstash-gen"
}
}
if [type]=="job"{
elasticsearch {
hosts => "my-es:9200"
index => "springboot-logstash-job"
}
}
# fixed: this branch previously tested [type]=="job" a second time,
# so "monitor" events never reached springboot-logstash-monitor
if [type]=="monitor"{
elasticsearch {
hosts => "my-es:9200"
index => "springboot-logstash-monitor"
}
}
if [type]=="chat"{
elasticsearch {
hosts => "my-es:9200"
index => "springboot-logstash-chat"
}
}
# also echo every event to the container log for debugging
stdout {
codec => rubydebug { }
}
}
Docker安装ELK
为了让 es 和 kibana 容器互联。这里先创建一个网络:
docker network create es-net
ES
# 启动elasticsearch
# NOTE(review): the official elasticsearch image keeps its data and plugins
# under /usr/share/elasticsearch inside the container. Mounting the named
# volumes onto /D/DockerMount/... (a host-style Windows path) has no effect;
# the container-side paths are fixed below.
docker run -d --name my-es -e "ES_JAVA_OPTS=-Xms512m -Xmx512m" -e "discovery.type=single-node" -v es-data:/usr/share/elasticsearch/data -v es-plugins:/usr/share/elasticsearch/plugins --privileged --network es-net -p 9200:9200 -p 9300:9300 elasticsearch:7.6.2
# 启动成功访问localhost:9200
kibana
# 启动 kibana
docker run -d --name my-kibana -e ELASTICSEARCH_HOSTS=http://my-es:9200 --network=es-net -p 5601:5601 kibana:7.6.2
# 启动成功访问localhost:5601
logstash
# 启动logstash
docker run -d --name my-logstash -e ELASTICSEARCH_HOSTS=http://my-es:9200 --network=es-net -p 12501:12501 -p 9600:9600 -p 12502:12502 -p 12503:12503 -p 12504:12504 -p 12505:12505 -p 12506:12506 -p 12507:12507 -p 12508:12508 -p 12509:12509 logstash:7.6.2
# 修改logstash配置
# 修改logstash.yml配置 在config路径下
vi config/logstash.yml
# 把xpack.monitoring.elasticsearch.hosts修改为"http://my-es:9200"
# 注意前面有空格 因为是yml格式
# 修改logstash配置文件logstash.conf 在pipeline下面
vi pipeline/logstash.conf
input {
tcp {
# run as a TCP server and wait for connections from the applications
mode => "server"
host => "0.0.0.0"
# each port must match the <destination> configured in the corresponding
# service's logback-spring.xml
port => 12501
# set a type field to tell the input sources apart in the output section
type => "gateway"
codec => json_lines
}
# auth service logs
tcp {
mode => "server"
host => "0.0.0.0"
port => 12503
type => "auth"
codec => json_lines
}
# chat service logs
tcp {
mode => "server"
host => "0.0.0.0"
port => 12508
type => "chat"
codec => json_lines
}
}
output {
# route each event to its own index based on the "type" field
# assigned by the matching tcp input above
if [type]=="gateway"{
elasticsearch {
hosts => "my-es:9200"
index => "springboot-logstash-gateway"
}
}
if [type]=="auth"{
elasticsearch {
hosts => "my-es:9200"
index => "springboot-logstash-auth"
}
}
if [type]=="chat"{
elasticsearch {
hosts => "my-es:9200"
index => "springboot-logstash-chat"
}
}
# also echo every event to the container log for debugging
stdout {
codec => rubydebug { }
}
}
# 修改完后保存退出
# 检测配置文件是否正确
bin/logstash -f pipeline/logstash.conf -t
# 显示Configure OK即为正常
# 重新启动logstash
# 启动成功访问localhost:9600
SPRING - MAVEN依赖
<!--集成logstash-->
<dependency>
<groupId>net.logstash.logback</groupId>
<artifactId>logstash-logback-encoder</artifactId>
<version>5.3</version>
</dependency>
logback-spring.xml配置
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE configuration>
<configuration>
<include resource="org/springframework/boot/logging/logback/defaults.xml"/>
<include resource="org/springframework/boot/logging/logback/base.xml"/>
<!-- expose spring.application.name to the patterns below under the name "mall-admin" -->
<springProperty scope="context" name="mall-admin" source="spring.application.name"/>
<!-- where log files are written inside the project -->
<property name="LOG_FILE" value="${BUILD_FOLDER:-build}/${spring.application.name}"/>
<!-- console log pattern (fixed: removed a stray trailing '}' after %wEx) -->
<property name="CONSOLE_LOG_PATTERN" value="%clr(%d{yyyy-MM-dd HH:mm:ss.SSS}){faint} %clr(${LOG_LEVEL_PATTERN:-%5p}) %clr(${PID:- }){magenta} %clr(---){faint} %clr([%15.15t]){faint} %m%n${LOG_EXCEPTION_CONVERSION_WORD:-%wEx}"/>
<!-- console output -->
<appender name="console" class="ch.qos.logback.core.ConsoleAppender">
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>INFO</level>
</filter>
<!-- log output encoding -->
<encoder>
<pattern>${CONSOLE_LOG_PATTERN}</pattern>
<charset>utf8</charset>
</encoder>
</appender>
<!-- logstash appender: ships log events as JSON over TCP -->
<appender name="LOGSTASH" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
<!-- fixed: 5044 is not opened by the Logstash container; the tcp inputs listen
on 12501-12508. Use the port whose "type" matches this service
(12501 = gateway) — TODO confirm which service this config belongs to. -->
<destination>127.0.0.1:12501</destination>
<!-- log output encoding -->
<encoder charset="UTF-8" class="net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder">
<providers>
<timestamp>
<timeZone>UTC</timeZone>
</timestamp>
<pattern>
<pattern>
{
"logLevel": "%level",
"serviceName": "${mall-admin:-}",
"pid": "${PID:-}",
"thread": "%thread",
"class": "%logger{40}",
"rest": "%message"
}
</pattern>
</pattern>
</providers>
</encoder>
<!--<encoder charset="UTF-8" class="net.logstash.logback.encoder.LogstashEncoder"/>-->
</appender>
<root level="INFO">
<appender-ref ref="LOGSTASH"/>
<!-- fixed: the appender declared above is named "console" (lowercase);
ref="CONSOLE" resolved to Spring Boot's base.xml appender instead,
leaving the local appender unused -->
<appender-ref ref="console"/>
</root>
</configuration>
# 然后在yml配置文件上加上配置
logging:
config: classpath:logback-spring.xml
启动项目可以看到logstash的日志,这时候说明已经采集上去了
然后访问kibana查看localhost:5601
SPRING - MAVEN依赖
<!--集成logstash-->
<dependency>
<groupId>net.logstash.logback</groupId>
<artifactId>logstash-logback-encoder</artifactId>
<version>5.3</version>
</dependency>
logback-spring.xml配置
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE configuration>
<configuration>
<include resource="org/springframework/boot/logging/logback/defaults.xml"/>
<include resource="org/springframework/boot/logging/logback/base.xml"/>
<!-- expose spring.application.name to the patterns below under the name "mall-admin" -->
<springProperty scope="context" name="mall-admin" source="spring.application.name"/>
<!-- where log files are written inside the project -->
<property name="LOG_FILE" value="${BUILD_FOLDER:-build}/${spring.application.name}"/>
<!-- console log pattern (fixed: removed a stray trailing '}' after %wEx) -->
<property name="CONSOLE_LOG_PATTERN" value="%clr(%d{yyyy-MM-dd HH:mm:ss.SSS}){faint} %clr(${LOG_LEVEL_PATTERN:-%5p}) %clr(${PID:- }){magenta} %clr(---){faint} %clr([%15.15t]){faint} %m%n${LOG_EXCEPTION_CONVERSION_WORD:-%wEx}"/>
<!-- console output -->
<appender name="console" class="ch.qos.logback.core.ConsoleAppender">
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>INFO</level>
</filter>
<!-- log output encoding -->
<encoder>
<pattern>${CONSOLE_LOG_PATTERN}</pattern>
<charset>utf8</charset>
</encoder>
</appender>
<!-- logstash appender: ships log events as JSON over TCP -->
<appender name="LOGSTASH" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
<!-- fixed: 5044 is not opened by the Logstash container; the tcp inputs listen
on 12501/12503/12508. This config appears to be for the gateway service,
whose input listens on 12501 — TODO confirm. -->
<destination>127.0.0.1:12501</destination>
<!-- log output encoding -->
<encoder charset="UTF-8" class="net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder">
<providers>
<timestamp>
<timeZone>UTC</timeZone>
</timestamp>
<pattern>
<pattern>
{
"logLevel": "%level",
"serviceName": "${mall-admin:-}",
"pid": "${PID:-}",
"thread": "%thread",
"class": "%logger{40}",
"rest": "%message"
}
</pattern>
</pattern>
</providers>
</encoder>
<!--<encoder charset="UTF-8" class="net.logstash.logback.encoder.LogstashEncoder"/>-->
</appender>
<root level="INFO">
<appender-ref ref="LOGSTASH"/>
<!-- fixed: the appender declared above is named "console" (lowercase);
ref="CONSOLE" resolved to Spring Boot's base.xml appender instead,
leaving the local appender unused. Also fixed serviceName, which
referenced ${gateway:-} — a property that was never declared (the
springProperty above is named "mall-admin"), so it was always empty. -->
<appender-ref ref="console"/>
</root>
</configuration>
# 然后在yml配置文件上加上配置
logging:
config: classpath:logback-spring.xml
启动项目可以看到logstash的日志,这时候说明已经采集上去了
然后访问kibana查看localhost:5601