Integrating a Spring service with ELK

1. Add the following Gradle dependencies. janino is required by logback's conditional <if> processing used in the config below. (On Gradle 7+, use implementation instead of the deprecated compile configuration.)

    compile('com.github.danielwegener:logback-kafka-appender:0.2.0-RC2')
    compile('org.codehaus.janino:janino:3.1.2')

2. Add a logback-spring.xml file under the resources folder.

If the yml configuration file already specifies a custom log configuration via logging.config, weigh whether the log levels and output formats of other packages need to be preserved, then point that setting at this file: classpath:logback-spring.xml
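
For example, the standard Spring Boot property for this (assuming logback-spring.xml sits directly under resources) is:

    logging:
      config: classpath:logback-spring.xml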

Note: the pattern can be adjusted to whatever log format you want, or used as-is.

<?xml version="1.0" encoding="UTF-8"?>
<!-- Log levels from highest to lowest: OFF > FATAL > ERROR > WARN > INFO > DEBUG > TRACE > ALL -->
<!-- Output rule: an event is emitted only when its level is at or above the effective root level -->
<!-- The filter on each appender below keeps that appender to its intended levels, so higher-level log files do not also collect lower-level entries -->
<!-- Attribute notes: scan: when true (the default), the configuration file is reloaded if it changes. scanPeriod: interval for checking for changes; the unit defaults to milliseconds if omitted; only effective when scan is true; default 1 minute.
	debug: when true, logback prints its internal status messages for live troubleshooting; default false -->
<configuration scan="true" scanPeriod="60 seconds" debug="false">
    <!-- Log file output location -->
    <!-- Maximum rolling history: 30 (note: this property is not referenced below; the rolling policies set their own maxHistory) -->
    <property name="maxHistory" value="30"/>
    <springProperty scope="context" name="log.path" source="logging.path" defaultValue="./logs"/>
    <springProperty scope="context" name="maxFileSize" source="logging.file.max-size" defaultValue="50MB"/>
    <springProperty scope="context" name="sentryEnable" source="sentry.enable" defaultValue="false"/>
    <springProperty scope="context" name="sentryDsn" source="sentry.dsn" defaultValue="" />
    <springProperty scope="context" name="applicationName" source="spring.application.name"/>
    <springProperty scope="context" name="serverIp" source="spring.cloud.client.ip-address"/>
    <springProperty scope="context" name="env" source="spring.profiles.active" defaultValue="local"/>
    <springProperty scope="context" name="elkEnable" source="elk.enable" defaultValue="false" />
    <springProperty scope="context" name="elkServers" source="elk.kafka.servers" defaultValue="localhost:9092" />
    <!--    <springProperty scope="context" name="elkHost" source="elk.kafka.host" defaultValue="localhost" />-->
    <!--    <springProperty scope="context" name="elkPort" source="elk.kafka.port" defaultValue="19092" />-->
    <springProperty scope="context" name="elkTopic" source="elk.kafka.topic" defaultValue="logback" />
    <contextName>${applicationName}</contextName>
    <property name="pattern"
              value='%d{yyyy-MM-dd HH:mm:ss.SSS} [${env}] [${serverIp}] [%thread] {"SERVICE":"${applicationName}","X-B3-SpanId":"%X{X-B3-SpanId}","X-B3-TraceId":"%X{X-B3-TraceId}","X-B3-ParentSpanId":"%X{X-B3-ParentSpanId}"} [%level] %logger{30}.%method:%line - %msg%n'/>

    <!-- ConsoleAppender: write logs to the console -->
    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
        <!-- Format the log output -->
        <encoder>
            <pattern>${pattern}</pattern>
        </encoder>
    </appender>

    <if condition='property("sentryEnable").contains("true")'>
        <then>
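            <!-- Assumes the legacy Sentry raven-logback dependency (not listed in step 1) is on the classpath; it provides this appender class -->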
            <appender name="Sentry" class="com.getsentry.raven.logback.SentryAppender">
                <dsn>${sentryDsn}</dsn>
                <!-- ThresholdFilter: only forward WARN and above to Sentry -->
                <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
                    <level>WARN</level>
                </filter>
            </appender>
        </then>
    </if>

    <if condition='property("elkEnable").contains("true")'>
        <then>
            <appender name="kafkaAppender" class="com.github.danielwegener.logback.kafka.KafkaAppender">
                <encoder>
                    <pattern>${pattern}</pattern>
                </encoder>
                <topic>${elkTopic}</topic>


                <keyingStrategy class="com.github.danielwegener.logback.kafka.keying.NoKeyKeyingStrategy" />
                <!--
                <keyingStrategy class="com.github.danielwegener.logback.kafka.keying.ContextNameKeyingStrategy"/>
                -->
                <deliveryStrategy class="com.github.danielwegener.logback.kafka.delivery.AsynchronousDeliveryStrategy" />

                <!-- Optional parameter to use a fixed partition -->
                <!-- <partition>0</partition> -->

                <!-- Optional parameter to include log timestamps into the kafka message -->
                <!-- <appendTimestamp>true</appendTimestamp> -->

                <!-- each <producerConfig> translates to regular kafka-client config (format: key=value) -->
                <!-- producer configs are documented here: https://kafka.apache.org/documentation.html#newproducerconfigs -->
                <!-- bootstrap.servers is the only mandatory producerConfig -->
                <!--                <producerConfig>bootstrap.servers=${elkHost}:${elkPort}</producerConfig>-->
                <producerConfig>bootstrap.servers=${elkServers}</producerConfig>
                <!-- this is the fallback appender if kafka is not available. -->
                <appender-ref ref="STDOUT" />
            </appender>

            <appender name="kafkaAsync" class="ch.qos.logback.classic.AsyncAppender">
                <!-- Do not drop logs: by default, once the queue is 80% full, TRACE/DEBUG/INFO events are discarded; 0 disables this -->
                <discardingThreshold>0</discardingThreshold>
                <!-- Queue depth; affects performance. Default is 256 -->
                <queueSize>65536</queueSize>
                <!-- Capture caller data so %method and %line in the pattern resolve correctly -->
                <includeCallerData>true</includeCallerData>
                <neverBlock>true</neverBlock>
                <appender-ref ref="kafkaAppender" />
            </appender>
        </then>
    </if>

    <logger name="com" level="INFO" additivity="false">
        <if condition='property("sentryEnable").contains("true")'>
            <then>
                <appender-ref ref="Sentry"/>
            </then>
        </if>
        <if condition='property("elkEnable").contains("true")'>
            <then>
                <appender-ref ref="kafkaAsync" />
            </then>
        </if>
        <appender-ref ref="STDOUT"/>
    </logger>


    <logger name="org" level="INFO" additivity="false">
        <!--<appender-ref ref="ASYNC_INFO"/>
        <appender-ref ref="ERROR"/>-->
        <if condition='property("sentryEnable").contains("true")'>
            <then>
                <appender-ref ref="Sentry"/>
            </then>
        </if>
        <if condition='property("elkEnable").contains("true")'>
            <then>
                <appender-ref ref="kafkaAsync" />
            </then>
        </if>
        <appender-ref ref="STDOUT"/>
    </logger>

    <logger name="MONITOR-LOGGER" level="ERROR" additivity="false">
        <!--<appender-ref ref="MONITOR-APPENDER"/>-->
        <if condition='property("sentryEnable").contains("true")'>
            <then>
                <appender-ref ref="Sentry"/>
            </then>
        </if>
        <if condition='property("elkEnable").contains("true")'>
            <then>
                <appender-ref ref="kafkaAsync" />
            </then>
        </if>
    </logger>


    <!-- Unified file output -->

    <appender name="InfoFile" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <append>true</append>
        <file>${log.path}/${applicationName}-info.log</file>

        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <!-- the directory %d is marked aux so the hourly %d drives rollover (logback allows only one primary %d token) -->
            <fileNamePattern>${log.path}/%d{yyyy-MM-dd, aux}/${applicationName}-info.%d{yyyy-MM-dd.HH}-%i.log.gz</fileNamePattern>
            <maxHistory>30</maxHistory>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <maxFileSize>${maxFileSize}</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
        </rollingPolicy>
        <encoder>
            <pattern>${pattern}</pattern>
        </encoder>
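        <!-- DENY ERROR events so they only land in the error file; everything else is accepted -->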
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>ERROR</level>
            <onMatch>DENY</onMatch>
            <onMismatch>ACCEPT</onMismatch>
        </filter>
    </appender>

    <appender name="ErrorFile" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <append>true</append>
        <file>${log.path}/${applicationName}-error.log</file>
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${log.path}/%d{yyyy-MM-dd, aux}/${applicationName}-error.%d{yyyy-MM-dd.HH}-%i.log.gz</fileNamePattern>
            <maxHistory>30</maxHistory>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <maxFileSize>${maxFileSize}</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
        </rollingPolicy>
        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
            <pattern>${pattern}</pattern>
        </encoder>
        <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
            <level>ERROR</level>
        </filter>
    </appender>
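
    <!-- Async wrappers: hand file writes off to a background thread; discardingThreshold 0 keeps all events -->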

    <appender name="InfoFileAsync" class="ch.qos.logback.classic.AsyncAppender">
        <discardingThreshold>0</discardingThreshold>
        <queueSize>10000</queueSize>
        <appender-ref ref="InfoFile" />
    </appender>


    <appender name="ErrorFileAsync" class="ch.qos.logback.classic.AsyncAppender">
        <discardingThreshold>0</discardingThreshold>
        <queueSize>10000</queueSize>
        <appender-ref ref="ErrorFile" />
    </appender>
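
    <!-- Note: logger elements for "com" and "org" also appear earlier in this file; logback applies each element to the same logger, so the last level assignment wins and the appender-refs accumulate -->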



    <logger name="com" level="WARN">
        <appender-ref ref="InfoFileAsync"/>
        <appender-ref ref="ErrorFileAsync"/>
    </logger>
    <logger name="org" level="WARN">
        <appender-ref ref="InfoFileAsync"/>
        <appender-ref ref="ErrorFileAsync"/>
    </logger>
    <logger name="com.kunchi" level="INFO">
        <appender-ref ref="InfoFileAsync"/>
        <appender-ref ref="ErrorFileAsync"/>
    </logger>
    <logger name="org.hibernate" level="INFO">
        <appender-ref ref="InfoFileAsync"/>
        <appender-ref ref="ErrorFileAsync"/>
    </logger>

    <!-- Root level: INFO -->
    <root level="INFO">
        <appender-ref ref="STDOUT"/>
        <if condition='property("sentryEnable").contains("true")'>
            <then>
                <appender-ref ref="Sentry"/>
            </then>
        </if>
        <if condition='property("elkEnable").contains("true")'>
            <then>
                <appender-ref ref="kafkaAsync" />
                <!--<appender-ref ref="kafkaAppender"/>-->
            </then>
        </if>
    </root>
</configuration>
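
For reference, a line produced by the pattern above looks roughly like this (all values here are hypothetical; the X-B3-* fields are only populated when a tracer such as Spring Cloud Sleuth puts them into the MDC):

    2024-01-15 10:23:45.123 [local] [10.0.0.12] [http-nio-8080-exec-1] {"SERVICE":"demo-service","X-B3-SpanId":"b2c3d4","X-B3-TraceId":"a1b2c3d4e5f6","X-B3-ParentSpanId":""} [INFO] c.e.demo.UserController.getUser:42 - loading user 1001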

3. Add the following configuration at the bottom of the yml file:

elk:
  enable: true
  kafka:
    #local hosts-file mapping: 10.0.0.200 offline-k8s-master
    host: 10.0.0.200
    port: 41004
    topic: obc_logback
    servers: ${elk.kafka.host}:${elk.kafka.port}
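
For completeness, a minimal sketch of the Logstash side that would consume this topic; the addresses, index name, and Elasticsearch location are assumptions, not part of the original setup:

    input {
      kafka {
        # assumed to match elk.kafka.servers above
        bootstrap_servers => "10.0.0.200:41004"
        topics => ["obc_logback"]
      }
    }
    output {
      elasticsearch {
        # hypothetical Elasticsearch address and index pattern
        hosts => ["localhost:9200"]
        index => "logback-%{+YYYY.MM.dd}"
      }
    }

To verify that messages are reaching Kafka at all, you can consume the topic directly with kafka-console-consumer.sh --bootstrap-server 10.0.0.200:41004 --topic obc_logback --from-beginning.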