logback 配置(显示类名、方法名、代码行号问题)

概述

logback 日志配置了输出格式,但是日志输出中类名、方法名、行号均显示为“?.?(?)”,这给调试和问题定位造成了很大的影响。

配置

光有以下输出格式配置还不够:如果日志经由“AsyncAppender”异步输出,还必须把它的 includeCallerData 设置为 true,否则调用者信息(类名、方法名、行号)不会被采集,就会显示成“?”。

<?xml version="1.0" encoding="UTF-8"?>
<configuration>
<!-- Base directory / file name shared by all file appenders -->
<property name="rootPath" value="/opt/logs/netty/stockBus" />
<property name="baseFile" value="stockBus" />
<property name="log.root.level" value="INFO" />
<!-- Console appender -->
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<!-- Keep the pattern on a single line: a line break before the closing tag
     can leave stray whitespace inside the pattern text, which is then
     appended after %n on every log line -->
<pattern>[%date{yyyy-MM-dd HH:mm:ss.SSS}] %X{logthreadId} %-5level %logger{80} %line - %msg%n</pattern>
</encoder>
</appender>


<!-- 文件输出日志 (文件大小策略进行文件输出,每小时产生一个日志文件给异常监控平台进行分析) -->

<!-- Main log file: rolls over daily and additionally by size (100MB) -->
<appender name="FILE"
          class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${rootPath}/${baseFile}.log</file>
<!-- rolling policy: time-based, with a size trigger nested inside -->
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
    <!-- %i is required because SizeAndTimeBasedFNATP numbers same-day files.
         Kept on one line so no whitespace leaks into the rolled file name. -->
    <fileNamePattern>${rootPath}/${baseFile}_%d{yyyy-MM-dd}.%i.log</fileNamePattern>
    <maxHistory>30</maxHistory>
    <!-- roll by size as well as by time -->
    <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
        <maxFileSize>100MB</maxFileSize>
    </timeBasedFileNamingAndTriggeringPolicy>
</rollingPolicy>

<!-- Output format. %line only renders when caller data is available
     (see the includeCallerData setting on the ASYNC appender). -->
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
    <pattern>[%date{yyyy-MM-dd HH:mm:ss.SSS}] %X{logthreadId} %-5level %logger{80} %line - %msg%n</pattern>
</encoder>
</appender>

<!-- 文件输出日志 (文件大小策略进行文件输出,每小时产生一个日志文件给异常监控平台进行分析) -->
<!-- Error-only log file: daily rollover, gzip-compressed archives -->
<appender name="ERRORFILE"
          class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${rootPath}/${baseFile}_error.log</file>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
    <!-- single line: whitespace inside the pattern would corrupt file names -->
    <fileNamePattern>${rootPath}/${baseFile}_error_%d{yyyy-MM-dd}.log.gz</fileNamePattern>
    <maxHistory>30</maxHistory>
</rollingPolicy>
<!-- only ERROR events reach this file; everything else is denied -->
<filter class="ch.qos.logback.classic.filter.LevelFilter">
    <level>ERROR</level>
    <onMatch>ACCEPT</onMatch>
    <onMismatch>DENY</onMismatch>
</filter>
<!-- Output format -->
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
    <pattern>[%date{yyyy-MM-dd HH:mm:ss.SSS}] %X{logthreadId} %-5level %logger{80} %line - %msg%n</pattern>
</encoder>
</appender>

<!-- Async wrapper around FILE. includeCallerData=true is the fix for the
     "?.?(?)" problem described above: without it, AsyncAppender does not
     capture caller data before queueing, so %logger/%line render as "?". -->
<appender name="ASYNC" class="ch.qos.logback.classic.AsyncAppender">
<!-- 0 = never discard events, even when the queue is 80% full -->
<discardingThreshold>0</discardingThreshold>
     <queueSize>10000</queueSize>
     <appender-ref ref="FILE" />

              <!-- When true, caller data (class, method, line number) is extracted
                   before the event is placed on the queue; default is false.
                   Note: extracting caller data is relatively expensive. -->
     <includeCallerData>true</includeCallerData>
</appender>

<!-- Spring framework noise capped at WARN; additivity=false keeps these
     events from also flowing up to the root logger -->
<logger name="org.springframework" additivity="false">
<level value="WARN" />
<!-- <appender-ref ref="STDOUT" /> -->
<appender-ref ref="ASYNC" />
<appender-ref ref="ERRORFILE" />
</logger>

<!-- Quiet down persistence-layer loggers (additivity defaults to true,
     so these events still reach the root appenders) -->
<logger name="org.apache.ibatis" level="WARN" />
<logger name="java.sql.Connection" level="WARN" />
<logger name="java.sql.Statement" level="WARN" />
<logger name="java.sql.PreparedStatement" level="WARN" />
<logger name="org.mybatis.spring" level="WARN" />
<logger name="org.apache.commons" level="WARN">
<appender-ref ref="ERRORFILE" />
</logger>

<logger name="org.apache.kafka" level="ERROR">
<appender-ref ref="ERRORFILE" />
</logger>

<!-- Application loggers: INFO to the async file appender plus the error-only
     file; additivity=false prevents duplicate output via the root logger -->
<logger name="com.xxxx.product.intf.service.impl" additivity="false">
<level value="INFO" />
<!-- <appender-ref ref="STDOUT" /> -->
<appender-ref ref="ASYNC" />
<appender-ref ref="ERRORFILE" />
</logger>

<logger name="com.xxxx.core" additivity="false">
<level value="INFO" />
<!-- <appender-ref ref="STDOUT" /> -->
<appender-ref ref="ASYNC" />
<appender-ref ref="ERRORFILE" />
</logger>

<logger name="com.xxxx" additivity="false">
<level value="INFO" />
<!-- <appender-ref ref="STDOUT" /> -->
<appender-ref ref="ASYNC" />
<appender-ref ref="ERRORFILE" />
</logger>

<logger name="com.xxxx.netty.container.DispatcherServletHandler" additivity="false">
<level value="INFO" />
<!-- <appender-ref ref="STDOUT" /> -->
<appender-ref ref="ASYNC" />
<appender-ref ref="ERRORFILE" />
</logger>

<!-- Catch-all for any other logger under "com" not matched above -->
<logger name="com" additivity="false" level="WARN">
<!-- <appender-ref ref="STDOUT" /> -->
<appender-ref ref="ASYNC" />
<appender-ref ref="ERRORFILE" />
</logger>

<root level="${log.root.level}">
<!-- <appender-ref ref="STDOUT" /> -->
<appender-ref ref="ASYNC" />
<appender-ref ref="ERRORFILE" />
</root>

<!-- Kafka push log (fc): INFO events only, daily + size-based rollover -->
<appender name="fc-stock" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <!-- accept INFO only; all other levels are denied -->
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>INFO</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
        <File>${rootPath}/fc-stock.log</File>
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <!-- rollover daily -->
            <fileNamePattern>${rootPath}/fc-stock-%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <maxHistory>5</maxHistory>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <!-- or whenever the file size reaches 500MB -->
                <maxFileSize>500MB</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
        </rollingPolicy>

        <encoder>
            <pattern>[%d{HH:mm:ss.SSS}] %msg%n</pattern>
        </encoder>
    </appender>


<!-- Kafka push error log (fc): ERROR events only -->

<appender name="fc-stock-error" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>ERROR</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
        <File>${rootPath}/fc-stock_error.log</File>
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <!-- rollover daily -->
            <fileNamePattern>${rootPath}/fc-stock_error-%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <maxHistory>5</maxHistory>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <!-- or whenever the file size reaches 500MB -->
                <maxFileSize>500MB</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
        </rollingPolicy>

        <encoder>
            <pattern>[%d{HH:mm:ss.SSS}] %msg%n</pattern>
        </encoder>
    </appender>


    <!-- Named logger used by the fc kafka-push code; INFO goes to fc-stock,
         ERROR to fc-stock-error (the level filters split the streams) -->
    <logger name="fc-kafkaLog" additivity="false" level="INFO">
        <appender-ref ref="fc-stock"/>
        <appender-ref ref="fc-stock-error"/>
    </logger>


    <!-- Kafka push log (hs): INFO events only, daily + size-based rollover -->

<appender name="hs-stock" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <!-- accept INFO only; all other levels are denied -->
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>INFO</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
        <File>${rootPath}/hs-stock.log</File>
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <!-- rollover daily -->
            <fileNamePattern>${rootPath}/hs-stock-%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <maxHistory>5</maxHistory>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <!-- or whenever the file size reaches 500MB -->
                <maxFileSize>500MB</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
        </rollingPolicy>

        <encoder>
            <pattern>[%d{HH:mm:ss.SSS}] %msg%n</pattern>
        </encoder>
    </appender>


<!-- Kafka push error log (hs): ERROR events only -->

<appender name="hs-stock-error" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>ERROR</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
        <File>${rootPath}/hs-stock_error.log</File>
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <!-- rollover daily -->
            <fileNamePattern>${rootPath}/hs-stock_error-%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <maxHistory>5</maxHistory>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <!-- or whenever the file size reaches 500MB -->
                <maxFileSize>500MB</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
        </rollingPolicy>

        <encoder>
            <pattern>[%d{HH:mm:ss.SSS}] %msg%n</pattern>
        </encoder>
    </appender>


    <!-- Named logger used by the hs kafka-push code; INFO goes to hs-stock,
         ERROR to hs-stock-error (the level filters split the streams) -->
    <logger name="hs-kafkaLog" additivity="false" level="INFO">
        <appender-ref ref="hs-stock"/>
        <appender-ref ref="hs-stock-error"/>
    </logger>


      <!-- Kafka receipt (acknowledgement) log -->

<appender name="receipt-stock" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <!-- NOTE(review): this filter accepts INFO only, and the
             receipt-kafkaLog logger below has no error appender, so any
             WARN/ERROR events on that logger are silently dropped —
             confirm this is intended -->
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>INFO</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
        <File>${rootPath}/receipt-stock.log</File>
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <!-- rollover daily -->
            <fileNamePattern>${rootPath}/receipt-stock-%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <maxHistory>5</maxHistory>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <!-- or whenever the file size reaches 500MB -->
                <maxFileSize>500MB</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
        </rollingPolicy>

        <encoder>
            <pattern>[%d{HH:mm:ss.SSS}] %msg%n</pattern>
        </encoder>
    </appender>


    <!-- Named logger used by the kafka receipt-handling code -->
    <logger name="receipt-kafkaLog" additivity="false" level="INFO">
        <appender-ref ref="receipt-stock"/>
    </logger>
</configuration>

评论 2
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值