<?xml version="1.0" encoding="UTF-8"?>
<!-- logback configuration file walkthrough (原标题: logback 配置文件讲解) -->
<configuration>
<!-- Spring-resolved properties with fallback defaults.
NOTE(review): <springProperty> only works when this file is loaded through Spring Boot
(conventionally named logback-spring.xml) — confirm how the file is wired in. -->
<springProperty scope="context" name="LOG_HOME" source="logging.path" defaultValue="logs"/>
<springProperty scope="context" name="LOG_ROOT_LEVEL" source="logging.level.root" defaultValue="INFO"/>
<springProperty scope="context" name="STDOUT" source="log.stdout" defaultValue="STDOUT"/>
<!-- Local property (variable) definitions -->
<property name="LOG_PREFIX" value="CI"/>
<property name="LOG_CHARSET" value="UTF-8"/>
<!-- Rolled files land in a per-day subdirectory of LOG_HOME -->
<property name="LOG_DIR" value="${LOG_HOME}/%d{yyyyMMdd}"/>
<!-- Shared log line layout: timestamp | level | host | thread | logger | message -->
<property name="LOG_MSG" value="[%d{yyyyMMdd HH:mm:ss.SSS}] | [%highlight(%-5level)] | [${HOSTNAME}] | [%thread] | [%logger{36}] | --> %msg|%n "/>
<!-- Size cap per rolled file and number of rolled periods to keep -->
<property name="MAX_FILE_SIZE" value="50MB"/>
<property name="MAX_HISTORY" value="20"/>
<!-- Console appender; attached to <root> indirectly through the STDOUT property. -->
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<!-- plumelog filter — presumably suppresses plumelog's own sync log events; TODO confirm -->
<filter class="com.plumelog.logback.util.FilterSyncLogger"/>
<encoder>
<!-- Format output using the LOG_MSG pattern defined above -->
<pattern>${LOG_MSG}</pattern>
<!-- FIX: use the shared LOG_CHARSET property (was a hard-coded "UTF-8", inconsistent
with every other encoder in this file; the effective value is unchanged) -->
<charset>${LOG_CHARSET}</charset>
</encoder>
</appender>
<!-- Rolling file appender that captures DEBUG-level events only. -->
<appender name="FILE_DEBUG" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!-- LevelFilter: filters on an exact log level. If the event's level equals the configured
<level>, the filter applies onMatch; otherwise onMismatch. Child elements:
<level>: the level to compare against
<onMatch>: action when the event's level matches
<onMismatch>: action when it does not
With level=DEBUG, only DEBUG events reach this appender; all others are dropped.
NOTE(review): LOG_ROOT_LEVEL defaults to INFO, in which case DEBUG events never reach
this appender at all — confirm the intended root level.
-->
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>DEBUG</level>
<OnMismatch>DENY</OnMismatch>
<OnMatch>ACCEPT</OnMatch>
</filter>
<!-- Active (currently written) log file path -->
<File>${LOG_HOME}/debug/debug_${LOG_PREFIX}.log</File>
<!-- rollingPolicy: controls how logs roll over. Once the trigger condition is met, the
current output is archived and new events go to a fresh file. The common choices are
time-based rolling (TimeBasedRollingPolicy) and index-based rolling
(FixedWindowRollingPolicy).
-->
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<!-- Daily rollover comes from the %d token inside LOG_DIR; %i is the size index -->
<FileNamePattern>${LOG_DIR}/debug_${LOG_PREFIX}%i.log</FileNamePattern>
<MaxHistory>${MAX_HISTORY}</MaxHistory>
<!-- Also roll within the day once the file exceeds MAX_FILE_SIZE -->
<timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
<maxFileSize>${MAX_FILE_SIZE}</maxFileSize>
</timeBasedFileNamingAndTriggeringPolicy>
</rollingPolicy>
<encoder>
<pattern>${LOG_MSG}</pattern>
<charset>${LOG_CHARSET}</charset>
</encoder>
</appender>
<!-- Rolling file appender for INFO-level events. -->
<appender name="FILE_INFO" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!-- plumelog filter — presumably suppresses plumelog's own sync log events; TODO confirm -->
<filter class="com.plumelog.logback.util.FilterSyncLogger"/>
<!-- FIX: match the sibling appenders (FILE_DEBUG/FILE_WARN/FILE_ERROR). Without a
LevelFilter this appender also received WARN and ERROR events, duplicating them
into the info_*.log file. Now only events at exactly INFO are accepted. -->
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>INFO</level>
<OnMismatch>DENY</OnMismatch>
<OnMatch>ACCEPT</OnMatch>
</filter>
<!-- Active (currently written) log file path -->
<File>${LOG_HOME}/info/info_${LOG_PREFIX}.log</File>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<!-- Daily rollover via the %d token in LOG_DIR; %i is the size index -->
<FileNamePattern>${LOG_DIR}/info_${LOG_PREFIX}%i.log</FileNamePattern>
<MaxHistory>${MAX_HISTORY}</MaxHistory>
<timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
<maxFileSize>${MAX_FILE_SIZE}</maxFileSize>
</timeBasedFileNamingAndTriggeringPolicy>
</rollingPolicy>
<encoder>
<pattern>${LOG_MSG}</pattern>
<charset>${LOG_CHARSET}</charset>
</encoder>
</appender>
<!-- Rolling file appender that captures WARN-level events only. -->
<appender name="FILE_WARN" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!-- Accept events whose level is exactly WARN; deny everything else -->
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>WARN</level>
<OnMismatch>DENY</OnMismatch>
<OnMatch>ACCEPT</OnMatch>
</filter>
<!-- Active (currently written) log file path -->
<File>${LOG_HOME}/warn/warn_${LOG_PREFIX}.log</File>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<!-- Daily rollover via the %d token in LOG_DIR; %i is the size index -->
<FileNamePattern>${LOG_DIR}/warn_${LOG_PREFIX}%i.log</FileNamePattern>
<MaxHistory>${MAX_HISTORY}</MaxHistory>
<timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
<maxFileSize>${MAX_FILE_SIZE}</maxFileSize>
</timeBasedFileNamingAndTriggeringPolicy>
</rollingPolicy>
<encoder>
<pattern>${LOG_MSG}</pattern>
<charset>${LOG_CHARSET}</charset>
</encoder>
</appender>
<!-- Rolling file appender that captures ERROR-level events only. -->
<appender name="FILE_ERROR" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!-- Accept events whose level is exactly ERROR; deny everything else -->
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>ERROR</level>
<OnMismatch>DENY</OnMismatch>
<OnMatch>ACCEPT</OnMatch>
</filter>
<!-- Active (currently written) log file path -->
<File>${LOG_HOME}/error/err_${LOG_PREFIX}.log</File>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<!-- Daily rollover via the %d token in LOG_DIR; %i is the size index -->
<FileNamePattern>${LOG_DIR}/err_${LOG_PREFIX}%i.log</FileNamePattern>
<MaxHistory>${MAX_HISTORY}</MaxHistory>
<timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
<maxFileSize>${MAX_FILE_SIZE}</maxFileSize>
</timeBasedFileNamingAndTriggeringPolicy>
</rollingPolicy>
<encoder>
<pattern>${LOG_MSG}</pattern>
<charset>${LOG_CHARSET}</charset>
</encoder>
</appender>
<!-- We can also define purpose-specific log files (here: request logging).
NOTE(review): REQUEST_LOG is not referenced by any <logger> or <root> in this file,
so it never receives events — confirm a logger elsewhere attaches it, or remove it. -->
<appender name="REQUEST_LOG" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!-- ThresholdFilter: accept events at DEBUG level or above -->
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>DEBUG</level>
</filter>
<File>${LOG_HOME}/info/info_request.log</File>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<!-- Daily rollover via the %d token in LOG_DIR; %i is the size index -->
<FileNamePattern>${LOG_DIR}/info_request.%i.log</FileNamePattern>
<MaxHistory>${MAX_HISTORY}</MaxHistory>
<timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
<maxFileSize>${MAX_FILE_SIZE}</maxFileSize>
</timeBasedFileNamingAndTriggeringPolicy>
</rollingPolicy>
<encoder>
<Pattern>${LOG_MSG}</Pattern>
<charset>${LOG_CHARSET}</charset>
</encoder>
</appender>
<!-- Purpose-specific file for SQL logging; wrapped by SQL_LOG_ASYNC below and routed
to the com.ralph.mapper logger. -->
<appender name="SQL_INFO_LOG" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!-- ThresholdFilter: accept events at DEBUG level or above -->
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>DEBUG</level>
</filter>
<File>${LOG_HOME}/info/info_sql.log</File>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<!-- Daily rollover via the %d token in LOG_DIR; %i is the size index -->
<FileNamePattern>${LOG_DIR}/info_sql.%i.log</FileNamePattern>
<MaxHistory>${MAX_HISTORY}</MaxHistory>
<timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
<maxFileSize>${MAX_FILE_SIZE}</maxFileSize>
</timeBasedFileNamingAndTriggeringPolicy>
</rollingPolicy>
<encoder>
<Pattern>${LOG_MSG}</Pattern>
<charset>${LOG_CHARSET}</charset>
</encoder>
</appender>
<!-- Purpose-specific application log file; wrapped by APP_LOG_ASYNC below. -->
<appender name="APP_LOG" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!-- ThresholdFilter: accept events at DEBUG level or above -->
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>DEBUG</level>
</filter>
<File>${LOG_HOME}/info/info_appLog.log</File>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<!-- Daily rollover via the %d token in LOG_DIR; %i is the size index -->
<FileNamePattern>${LOG_DIR}/info_appLog.%i.log</FileNamePattern>
<MaxHistory>${MAX_HISTORY}</MaxHistory>
<timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
<maxFileSize>${MAX_FILE_SIZE}</maxFileSize>
</timeBasedFileNamingAndTriggeringPolicy>
</rollingPolicy>
<encoder>
<Pattern>${LOG_MSG}</Pattern>
<charset>${LOG_CHARSET}</charset>
</encoder>
</appender>
<!-- Async wrapper around APP_LOG so file writes do not block application threads.
NOTE(review): not referenced by any <logger> or <root> in this file — confirm it is
attached elsewhere, otherwise it never receives events. -->
<appender name ="APP_LOG_ASYNC" class= "ch.qos.logback.classic.AsyncAppender">
<appender-ref ref="APP_LOG" />
<!-- Do not lose logs. By default, once the queue is 80% full, TRACE/DEBUG/INFO events are discarded -->
<!-- 0 means never discard -->
<discardingThreshold>0</discardingThreshold>
<!-- Queue depth; affects performance. The default is 256 -->
<queueSize>256</queueSize>
</appender>
<!-- Async wrapper around SQL_INFO_LOG (same queue settings as APP_LOG_ASYNC);
attached to the com.ralph.mapper logger below. -->
<appender name ="SQL_LOG_ASYNC" class= "ch.qos.logback.classic.AsyncAppender">
<appender-ref ref="SQL_INFO_LOG" />
<!-- 0 = never discard events, even when the queue is nearly full -->
<discardingThreshold>0</discardingThreshold>
<queueSize>256</queueSize>
</appender>
<!-- plumelog: non-intrusive distributed logging. Collects logs produced via
log4j/log4j2/logback and attaches a trace/link ID so related log lines can be
correlated across services. By default events are queued through redis. -->
<appender name="plumelog" class="com.plumelog.logback.appender.RedisAppender">
<appName>CI-SERVER</appName>
<!-- NOTE(review): "redis_ip" looks like a placeholder — replace with the real redis host -->
<redisHost>redis_ip</redisHost>
<!-- <redisAuth>123456</redisAuth>-->
<redisPort>6379</redisPort>
<!-- Expand mode: integrate with Spring Cloud Sleuth trace IDs -->
<expand>sleuth</expand>
</appender>
<!-- Per-package logger configuration:
name ==> the package this rule covers
level ==> log level for that package
additivity ==> whether events also propagate to ancestor loggers (false = they do not)
Child appender-ref elements attach appenders to this logger.
-->
<!-- Example: route mapper (SQL) logging to the async SQL appender only -->
<logger name="com.ralph.mapper" level="DEBUG" additivity="false">
<appender-ref ref="SQL_LOG_ASYNC" />
</logger>
<!-- Remember to declare the plumelog appender and reference it from <root>; that reference
is what pushes log events onto the redis queue. -->
<!--
<root> is mandatory and sets the base logging level; it has a single "level" attribute.
level: one of TRACE, DEBUG, INFO, WARN, ERROR, ALL or OFF (case-insensitive);
logback's default is DEBUG, but here it comes from LOG_ROOT_LEVEL (INFO unless overridden).
<root> may contain zero or more appender-ref elements.
-->
<root level="${LOG_ROOT_LEVEL}">
<appender-ref ref="${STDOUT}"/>
<appender-ref ref="plumelog"/>
<appender-ref ref="FILE_DEBUG"/>
<appender-ref ref="FILE_INFO"/>
<appender-ref ref="FILE_WARN"/>
<appender-ref ref="FILE_ERROR"/>
</root>