一、 下载Docker
mac机器下载地址: https://download.docker.com/mac/stable/Docker.dmg
安装完成后在mac控制台执行:
docker network create --driver bridge --subnet 192.168.0.0/24 --gateway 192.168.0.1 mynet
docker常用命令:
docker images
docker rm ******
docker rmi *****
docker ps -a
docker logs -f ****
本地docker镜像push到远程仓库的方法
https://www.cnblogs.com/yoyoketang/p/11923263.html
二, 安装elasticsearch
[root@node1 docker]# docker run -d --name elasticsearch --net mynet --ip 192.168.0.100 -p 9200:9200 -e "discovery.type=single-node" -e ES_JAVA_OPTS="-Xms64m -Xmx256m" docker.elastic.co/elasticsearch/elasticsearch:7.10.1
docker中查看是否启动:
浏览器查看是否启动正常:
三,安装kibana
[root@node1 docker]# docker run -d --name kibana --net mynet -e ELASTICSEARCH_HOSTS=http://192.168.0.100:9200 -p 5601:5601 docker.elastic.co/kibana/kibana:7.10.1
docker中查看是否启动正常:
浏览器验证是否能打开:
四,安装fluentd
构建fluentd镜像
https://github.com/jinzhengquan/fluentd-boot
[root@node1 docker]# docker build -t my-fluentd .
[root@node1 docker]# docker run -d --net mynet -p 24224:24224 --name="fluentd" my-fluentd:latest
配置fluentd到es
主要是添加es的host和端口
<source>
@type forward
@id input1
@label @mainstream
port 24224
</source>
<filter **>
@type stdout
</filter>
<label @mainstream>
<match docker.**>
@type file
@id output_docker1
path /fluentd/log/docker.*.log
symlink_path /fluentd/log/docker.log
append true
time_slice_format %Y%m%d
time_slice_wait 1m
time_format %Y%m%dT%H%M%S%z
</match>
<match **>
@type file
@id output1
path /fluentd/log/data.*.log
symlink_path /fluentd/log/data.log
append true
time_slice_format %Y%m%d
time_slice_wait 10m
time_format %Y%m%dT%H%M%S%z
</match>
<match **>
@type copy
<store>
@type elasticsearch
host 192.168.0.100
port 9200
logstash_format true
logstash_prefix celery
logstash_dateformat %Y%m%d
include_tag_key true
type_name access_log
tag_key @log_name
flush_interval 3s
suppress_type_name true
</store>
<store>
@type stdout
</store>
</match>
</label>
如果需要安装ruby2.5
https://blog.csdn.net/llwy1428/article/details/93037393
五,配置kibana
这里pattern和fluentd里面的logstash_prefix保持一致
创建完成:
查看日志:
六,springboot 配置
<?xml version="1.0" encoding="UTF-8"?>
<!--该日志将日志级别不同的log信息保存到不同的文件中 -->
<configuration>
<include resource="org/springframework/boot/logging/logback/defaults.xml"/>
<springProperty scope="context" name="springAppName" source="spring.application.name"/>
<springProperty name="logLevel" source="logback.level" defaultValue="INFO"/>
<!-- 日志在工程中的输出位置 -->
<property name="LOG_FILE" value="${BUILD_FOLDER:-build}/${springAppName}"/>
<!-- 控制台的日志输出样式 -->
<property name="CONSOLE_LOG_PATTERN"
value="%clr(%d{yyyy-MM-dd HH:mm:ss.SSS}){faint} %clr(${LOG_LEVEL_PATTERN:-%5p}) %clr(${PID:- }){magenta} %clr(---){faint} %clr([%15.15t]){faint} %m%n${LOG_EXCEPTION_CONVERSION_WORD:-%wEx}}"/>
<!-- 控制台输出 -->
<appender name="console" class="ch.qos.logback.core.ConsoleAppender">
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>INFO</level>
</filter>
<!-- 日志输出编码 -->
<encoder>
<pattern>${CONSOLE_LOG_PATTERN}</pattern>
<charset>utf8</charset>
</encoder>
</appender>
<!-- 为logstash输出的JSON格式的Appender -->
<appender name="logstash" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
<destination>127.0.0.1:24224</destination>
<encoder class="net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder">
<providers>
<timestamp>
<timeZone>UTC</timeZone>
</timestamp>
<pattern>
<pattern>
{
"tags": ["${springAppName}"],
"project": "service-${springAppName}",
"timestamp": "%date{\"yyyy-MM-dd'T'HH:mm:ss,SSSZ\"}",
"log_level": "%level",
"thread": "%thread",
"message": "%message",
"stack_trace": "%exception",
"request_uri": "%X{X-SERVICE-REQUEST-URI}",
}
</pattern>
</pattern>
</providers>
</encoder>
</appender>
<!-- 日志输出级别 -->
<root level="INFO">
<appender-ref ref="console"/>
<appender-ref ref="logstash"/>
</root>
</configuration>
参考资料:
https://www.cnblogs.com/orange2016/p/14352886.html
http://blog.321aiyi.com/article/392
https://www.cnblogs.com/tylercao/p/7803520.html
https://blog.csdn.net/anxianfeng55555/article/details/81325254