基于docker搭建ELK分布式日志系统
一、环境准备
1.安装docker
1.使用官方安装脚本自动安装
安装命令
curl -fsSL https://get.docker.com | bash -s docker --mirror Aliyun
也可以使用国内 daocloud 一键安装命令(注意:daocloud 镜像服务可能已停止维护,若执行失败请改用上面的官方脚本):
curl -sSL https://get.daocloud.io/docker | sh
2.手动安装
# 安装依赖包
sudo apt-get install apt-transport-https ca-certificates curl gnupg-agent software-properties-common
# 添加 Docker 的官方 GPG 密钥
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
# 验证您现在是否拥有带有指纹的密钥
sudo apt-key fingerprint 0EBFCD88
# 设置稳定版仓库
sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
# 更新
sudo apt-get update
# 安装最新的Docker-ce
sudo apt-get install docker-ce
# 启动
sudo service docker start
2.安装docker-compose
#下载运行文件
sudo curl -L "https://github.com/docker/compose/releases/download/1.28.6/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
#配置权限
sudo chmod +x /usr/local/bin/docker-compose
3. 下载Elasticsearch、Kibana、Logstash
docker search elasticsearch
docker pull elasticsearch:7.6.0
docker search kibana
docker pull kibana:7.6.0
docker search logstash
docker pull logstash:7.6.0
二、环境配置
1.创建目录
mkdir -p /elk/elk
注意:以下文件全都在/elk/elk路径下
2. logstash-springboot.conf配置文件
# Logstash pipeline: receive JSON log events over TCP and index them into Elasticsearch.
input {
# TCP server listening on all interfaces; Spring Boot apps push logs to port 4560.
tcp {
mode => "server"
host => "0.0.0.0"
port => 4560
# Decodes newline-delimited JSON (requires the logstash-codec-json_lines plugin,
# installed in step 四.2 of this guide).
codec => json_lines
}
}
output {
elasticsearch {
# "es" resolves through the docker-compose `links: elasticsearch:es` alias.
hosts => "es:9200"
# One index per day, e.g. springboot-logstash-2021.03.15.
index => "springboot-logstash-%{+YYYY.MM.dd}"
}
}
3.docker-compose配置文件
version: "2.2"

volumes:
  data:
  config:
  plugin:

networks:
  es:

services:
  elasticsearch:
    image: elasticsearch:7.6.0
    container_name: elk-es
    ports:
      - "9200:9200"
      - "9300:9300"
    networks:
      - "es"
    environment:
      - "discovery.type=single-node"
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    volumes:
      - data:/usr/share/elasticsearch/data
      - config:/usr/share/elasticsearch/config
      - /elk/elk/ik-7.6.0:/usr/share/elasticsearch/plugins/ik-7.6.0 # IK analyzer plugin
  kibana:
    image: kibana:7.6.0
    container_name: elk-kibana
    ports:
      - "5601:5601"
    networks:
      - "es"
    volumes:
      - /elk/elk/kibana.yml:/usr/share/kibana/config/kibana.yml
    depends_on:
      - elasticsearch # start Kibana only after Elasticsearch is up
  logstash:
    image: logstash:7.6.0
    container_name: logstash
    networks:
      - "es"
    volumes:
      - /elk/elk/logstash-springboot.conf:/usr/share/logstash/pipeline/logstash.conf # pipeline config
    depends_on:
      - elasticsearch # start Logstash only after Elasticsearch is up
    links:
      - elasticsearch:es # logstash.conf addresses Elasticsearch by the alias "es"
    ports:
      - "4560:4560" # quoted: unquoted HOST:CONTAINER mappings can hit YAML's sexagesimal number parsing
4. kibana.yml(注意:文件名大小写必须与 docker-compose 中挂载的 /elk/elk/kibana.yml 完全一致,Linux 文件系统区分大小写)
# Kibana configuration, mounted into the container at /usr/share/kibana/config/kibana.yml.
server.name: kibana
# "0" binds Kibana to all network interfaces inside the container.
server.host: "0"
# "elasticsearch" is the compose service name, resolvable on the shared "es" network.
elasticsearch.hosts: [ "http://elasticsearch:9200" ]
# Show container-level Elasticsearch metrics in Kibana's monitoring UI.
xpack.monitoring.ui.container.elasticsearch.enabled: true
三、SpringBoot项目配置
1.添加logstash依赖
<!-- Logstash integration: provides the Logback TCP appender/encoder used in spring-logback.xml -->
<dependency>
<groupId>net.logstash.logback</groupId>
<artifactId>logstash-logback-encoder</artifactId>
<version>5.3</version>
</dependency>
2.spring-logback.xml
<?xml version="1.0" encoding="UTF-8"?>
<configuration>
    <!-- Logstash destination (host:port), read from application properties key logstash.address -->
    <springProperty scope="context" name="LOGSTASH_ADDRESS" source="logstash.address"/>
    <springProperty scope="context" name="APPLICATION_NAME" source="spring.application.name"/>

    <!-- Base path/name of the rolling log file -->
    <property name="LOG_FILE" value="/opt/web-shell/logging"/>

    <!-- Converters required by the colored console pattern below -->
    <conversionRule conversionWord="clr" converterClass="org.springframework.boot.logging.logback.ColorConverter"/>
    <conversionRule conversionWord="wex"
                    converterClass="org.springframework.boot.logging.logback.WhitespaceThrowableProxyConverter"/>
    <conversionRule conversionWord="wEx"
                    converterClass="org.springframework.boot.logging.logback.ExtendedWhitespaceThrowableProxyConverter"/>

    <!-- Console pattern (fix: removed a stray trailing '}' after ${LOG_EXCEPTION_CONVERSION_WORD:-%wEx}
         that was printed literally at the end of every console line) -->
    <property name="CONSOLE_LOG_PATTERN"
              value="%clr(%d{yyyy-MM-dd HH:mm:ss.SSS}){faint} %clr(${LOG_LEVEL_PATTERN:-%5p}) %clr(${PID:- }){magenta} %clr(---){faint} %clr([%15.15t]){faint} %clr(%-40.40logger{39}){cyan} %clr(:){faint} %m%n${LOG_EXCEPTION_CONVERSION_WORD:-%wEx}"/>

    <!-- Console output, INFO and above -->
    <appender name="console" class="ch.qos.logback.core.ConsoleAppender">
        <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
            <level>INFO</level>
        </filter>
        <encoder>
            <pattern>${CONSOLE_LOG_PATTERN}</pattern>
            <charset>utf8</charset>
        </encoder>
    </appender>

    <!-- Daily rolling file output -->
    <appender name="fileAppender" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <!-- prudent mode (safe writes when multiple JVMs share one log file) is disabled -->
        <prudent>false</prudent>
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${LOG_FILE}.%d{yyyy-MM-dd}.log</fileNamePattern>
            <!-- keep at most 15 days of history (original comment said 10 but value was 15) -->
            <maxHistory>15</maxHistory>
        </rollingPolicy>
        <layout class="ch.qos.logback.classic.PatternLayout">
            <!-- fix: 'logger{39}' was missing its '%', so the literal text was printed
                 instead of the logger name -->
            <pattern>%d{yyyy-MM-dd HH:mm:ss} %-5level %logger{39} -%msg%n</pattern>
        </layout>
    </appender>

    <!-- Ship structured JSON log events to Logstash over TCP -->
    <appender name="LOGSTASH" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
        <!-- host:port of the reachable Logstash TCP input -->
        <destination>${LOGSTASH_ADDRESS}</destination>
        <encoder class="net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder">
            <providers>
                <timestamp>
                    <timeZone>Asia/Shanghai</timeZone>
                </timestamp>
                <pattern>
                    <pattern>
                        {
                        "app": "${APPLICATION_NAME}",
                        "level": "%level",
                        "thread": "%thread",
                        "logger": "%logger{50}",
                        "message": "%msg"
                        }
                    </pattern>
                </pattern>
                <stackTrace>
                    <throwableConverter class="net.logstash.logback.stacktrace.ShortenedThrowableConverter">
                        <maxDepthPerThrowable>100</maxDepthPerThrowable>
                        <rootCauseFirst>true</rootCauseFirst>
                        <inlineHash>true</inlineHash>
                    </throwableConverter>
                </stackTrace>
            </providers>
        </encoder>
    </appender>

    <root level="INFO">
        <appender-ref ref="console"/>
        <appender-ref ref="fileAppender"/>
        <appender-ref ref="LOGSTASH"/>
    </root>
</configuration>
3. 配置logStash地址
# 对应spring-logback.xml里面的logstash.address
logstash.address=172.18.70.210:4560
四、启动elk
1.启动命令
# 启动elk
docker-compose up -d
# 查看启动日志
docker-compose logs -f
2. 在logstash中安装json_lines插件
# 进入logstash容器
docker exec -it logstash /bin/bash
# 进入bin目录
cd /bin/
# 安装插件
logstash-plugin install logstash-codec-json_lines
# 退出容器
exit
# 重启logstash服务
docker restart logstash
3.访问
# 1.重新启动elk
# 2.查看ip
ifconfig
# 3.ip:5601 访问kibana