1.安装elasticsearch,最新版本可到官网查
docker run -d --name myelasticsearch -e "discovery.type=single-node" -p 9200:9200 -p 9300:9300 elasticsearch:7.6.2
（注：7.x 单机运行必须加 discovery.type=single-node，否则容器启动后会因集群引导检查失败而退出）
2.安装kibana
docker run -d --name mykibana -p 5601:5601 kibana:7.6.2
进入到容器内部:docker exec -it -u root kibana容器id /bin/bash
修改配置文件,绕过x-pack的安全检查,
vi /usr/share/kibana/config/kibana.yml
修改
//--------------------------------------kibana.yml---------------------------------------------------------------------
elasticsearch.hosts: ["http://elasticsearch的IP:9200"]
xpack.monitoring.ui.container.elasticsearch.enabled: false
（注：Kibana 7.x 已移除 elasticsearch.url，改用 elasticsearch.hosts；YAML 中每个配置项必须单独一行，不能用分号连写）
//-----------------------------------------------------------------------------------------------------------
重启kibana容器:docker restart kibana容器id
3.安装logstash
docker run -d --name mylogstash -p 5044:5044 logstash:7.6.2
进入容器: docker exec -it -u root logstash容器Id /bin/bash
修改配置文件: vi /usr/share/logstash/pipeline/logstash.conf
修改内容:
//-------------------------------------------------------logstash.conf----------------------------------------------------
# Pipeline: receive JSON-framed events over TCP from the application's
# LogstashTcpSocketAppender and index them into Elasticsearch.
input {
  tcp {
    # Must match the port published by the logstash container (-p 5044:5044)
    # and the <destination> configured in logback-spring.xml.
    port => 5044
    codec => json_lines
  }
}
output {
  elasticsearch {
    hosts => ["elasticsearch服务的Ip:9200"]
    action => "index"
    # Index name is taken from the "appname" field carried by each event.
    index => "%{[appname]}"
  }
  # Echo each event to the container log for debugging.
  stdout { codec => rubydebug }
}
# NOTE: the original snippet was missing the closing "}" of the output
# section, which makes Logstash fail to load the pipeline.
//-----------------------------------------------------------------------------------------------------------
修改好后重启logstash容器:docker restart logstash容器Id
4.spring cloud项目端:
1) resource下新建logback-spring.xml
//----------------------------------------------logback-spring.xml-------------------------------------------------------------
<?xml version="1.0" encoding="UTF-8"?>
<configuration>
    <!-- Root directory for rolled log files -->
    <property name="LOG_HOME" value="logs"/>

    <!-- Console output -->
    <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
        <encoder>
            <!-- %d date, %thread thread name, %-5level level padded to 5 chars,
                 %msg message, %n newline -->
            <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
            <charset>utf8</charset>
        </encoder>
    </appender>

    <!-- Ship events as JSON over TCP to Logstash.
         NOTE: the log4j-style <param name="Encoding"> element is not valid
         logback configuration and has been removed; the encoder charset is
         set via the nested <charset> element, which is how logback's Joran
         configurator injects properties. -->
    <appender name="LOGSTASH" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
        <!-- logstash host ip:tcp port -->
        <destination>100.101.102.143:5044</destination>
        <encoder class="net.logstash.logback.encoder.LogstashEncoder">
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <!-- Roll log files daily -->
    <appender name="FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <!-- Name pattern of the rolled log files -->
            <fileNamePattern>${LOG_HOME}/dscc.%d{yyyy-MM-dd}.log</fileNamePattern>
            <!-- Keep only the last 90 days of logs -->
            <maxHistory>90</maxHistory>
        </rollingPolicy>
        <encoder>
            <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
        </encoder>
    </appender>

    <!-- Asynchronous output wrapper -->
    <appender name="ASYNC" class="ch.qos.logback.classic.AsyncAppender">
        <!-- 0 = never drop events. By default, when the queue is 80% full,
             TRACE/DEBUG/INFO events are discarded. -->
        <discardingThreshold>0</discardingThreshold>
        <!-- Queue depth; affects throughput. Default is 256. -->
        <queueSize>512</queueSize>
        <!-- AsyncAppender accepts at most one attached appender -->
        <appender-ref ref="FILE"/>
    </appender>

    <logger name="org.apache.ibatis.cache.decorators.LoggingCache" level="DEBUG" additivity="false">
        <appender-ref ref="CONSOLE"/>
    </logger>
    <logger name="org.springframework.boot" level="ERROR"/>

    <root level="INFO">
        <!--<appender-ref ref="ASYNC"/>-->
        <appender-ref ref="FILE"/>
        <appender-ref ref="CONSOLE"/>
        <appender-ref ref="LOGSTASH"/>
    </root>
</configuration>
//-----------------------------------------------------------------------------------------------------------
2)pom.xml 引入jar
//----------------------------------------pom.xml-------------------------------------------------------------------
<!-- Provides LogstashTcpSocketAppender and LogstashEncoder used in
     logback-spring.xml to ship JSON log events to Logstash over TCP -->
<dependency>
<groupId>net.logstash.logback</groupId>
<artifactId>logstash-logback-encoder</artifactId>
<version>5.2</version>
</dependency>
//-----------------------------------------------------------------------------------------------------------
访问kibana地址 IP:5601即可