<?xml version="1.0" encoding="UTF-8"?>
<!--
  scan: when true, the configuration file is reloaded if it changes (default: true).
  scanPeriod: interval at which the file is checked for modification; the unit defaults
              to milliseconds if none is given; only effective when scan="true"
              (default interval: 1 minute).
  debug: when true, logback prints its internal status messages (default: false).
-->
<!-- <configuration scan="false" scanPeriod="60 seconds" debug="false"> -->
<configuration debug="false">

    <!-- Custom conversion word %ip: prints the host IP in log output -->
    <conversionRule conversionWord="ip" converterClass="com.zxj.elk.LogIpConfig"/>

    <!-- Pull the application name from the Spring environment -->
    <springProperty scope="context" name="springAppName" source="spring.application.name"/>

    <!-- Colored logs (requires IDE plugin to render) -->
    <!-- Renderer classes the colored pattern depends on -->
    <conversionRule conversionWord="clr" converterClass="org.springframework.boot.logging.logback.ColorConverter"/>
    <conversionRule conversionWord="wex" converterClass="org.springframework.boot.logging.logback.WhitespaceThrowableProxyConverter"/>
    <conversionRule conversionWord="wEx" converterClass="org.springframework.boot.logging.logback.ExtendedWhitespaceThrowableProxyConverter"/>

    <!-- Colored console log pattern -->
    <property name="CONSOLE_LOG_PATTERN" value="${CONSOLE_LOG_PATTERN:-%clr(%d{yyyy-MM-dd HH:mm:ss.SSS}){faint} %clr(${LOG_LEVEL_PATTERN:-%5p}) %clr(${PID:- }){magenta} %clr(---){faint} %clr([%15.15t]){faint} %clr(%-40.40logger{39}){cyan} %clr(:){faint} %m%n${LOG_EXCEPTION_CONVERSION_WORD:-%wEx}}"/>

    <!-- ConsoleAppender: writes to the console.
         Pattern conversion words: %d = date/time, %thread = thread name,
         %-5level = level left-padded to 5 chars, %logger{50} = logger name
         truncated to 50 chars (split on dots otherwise), %msg = message, %n = newline -->
    <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
        <encoder>
            <!-- NOTE: the pattern must stay on one line; whitespace inside <pattern> is
                 part of the layout and would otherwise leak into every log line -->
            <pattern>${CONSOLE_LOG_PATTERN}</pattern>
        </encoder>
    </appender>

    <!-- KafkaAppender: ships structured JSON log events to Kafka -->
    <appender name="kafkaAppender" class="com.github.danielwegener.logback.kafka.KafkaAppender">
        <encoder charset="UTF-8" class="net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder">
            <providers>
                <timestamp>
                    <timeZone>GMT+8</timeZone>
                </timestamp>
                <!-- Both nested <pattern> elements are required; with only one
                     the log entry comes out incomplete -->
                <pattern>
                    <pattern>
{
"ip":"%ip",
"severity": "%level",
"service": "${springAppName:-}",
"trace": "%X{X-B3-TraceId:-}",
"span": "%X{X-B3-SpanId:-}",
"parent": "%X{X-B3-ParentSpanId:-}",
"exportable": "%X{X-Span-Export:-}",
"pid": "${PID:-}",
"thread": "%thread",
"class": "%logger{40}",
"rest": "%message",
"stack_trace": "%exception{30}"
}
                    </pattern>
                </pattern>
            </providers>
        </encoder>
        <!-- Kafka topic name the log events are published to -->
        <topic>${springAppName:-}-log</topic>
        <!-- we don't care how the log messages will be partitioned -->
        <keyingStrategy class="com.github.danielwegener.logback.kafka.keying.NoKeyKeyingStrategy"/>
        <!-- use async delivery. the application threads are not blocked by logging -->
        <deliveryStrategy class="com.github.danielwegener.logback.kafka.delivery.AsynchronousDeliveryStrategy"/>
        <!-- each <producerConfig> translates to regular kafka-client config (format: key=value) -->
        <!-- producer configs are documented here: https://kafka.apache.org/documentation.html#newproducerconfigs -->
        <!-- bootstrap.servers is the only mandatory producerConfig -->
        <producerConfig>bootstrap.servers=localhost:9092</producerConfig>
        <!-- don't wait for a broker to ack the reception of a batch. -->
        <producerConfig>acks=0</producerConfig>
        <!-- wait up to 1000ms and collect log messages before sending them as a batch -->
        <producerConfig>linger.ms=1000</producerConfig>
        <!-- even if the producer buffer runs full, do not block the application but start to drop messages -->
        <producerConfig>max.block.ms=0</producerConfig>
        <!-- define a client-id that you use to identify yourself against the kafka broker -->
        <producerConfig>client.id=0</producerConfig>
        <!-- this is the fallback appender if kafka is not available. -->
        <appender-ref ref="CONSOLE"/>
    </appender>

    <root level="info">
        <appender-ref ref="CONSOLE"/>
        <appender-ref ref="kafkaAppender"/>
    </root>
</configuration>