How a Spring Boot project can collect logs with logback-kafka-appender and write them to a designated file

1. Add the logback-kafka-appender Maven dependencies


<dependency>
    <groupId>com.github.danielwegener</groupId>
    <artifactId>logback-kafka-appender</artifactId>
    <version>0.2.0</version>
    <scope>runtime</scope>
</dependency>
<dependency>
    <groupId>ch.qos.logback</groupId>
    <artifactId>logback-classic</artifactId>
    <version>1.2.3</version>
    <scope>runtime</scope>
</dependency>
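
Note: the X-B3-TraceId / X-B3-SpanId / X-B3-ParentSpanId keys used in the log patterns below are MDC entries populated by Spring Cloud Sleuth (older Sleuth versions use these exact X-B3-* key names); without a tracing library on the classpath those fields simply render empty. logback-kafka-appender declares kafka-clients as a transitive dependency, so the producer side normally needs no extra Kafka dependency.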

2. Write the producer-side logback-spring.xml

<?xml version="1.0" encoding="UTF-8"?>
<configuration>

	<!-- Kafka appender, used in the production environment -->
	<appender name="kafkaAppender"
			  class="com.github.danielwegener.logback.kafka.KafkaAppender">
		<!-- <filter class="xx.xx.LogKafkaFilter"/> Optional log filter; add one if only selected log events should be shipped to Kafka -->
		<encoder>
			<pattern>%d!#@%p!#@%file:%line[%X{ip}][%thread]!#@[%X{X-B3-TraceId},%X{X-B3-SpanId},%X{X-B3-ParentSpanId}]!#@%m%n</pattern>
			<!-- Use UTF-8 (not GBK) here as well, otherwise Chinese characters will be garbled -->
			<charset>UTF-8</charset>
		</encoder>

		<topic>log-collection</topic>
		<!-- Custom keying strategy (see step 3) -->
		<keyingStrategy class="xx.xx.LogKeyStrategy"/>
		<!-- Send Kafka messages asynchronously -->
		<deliveryStrategy class="com.github.danielwegener.logback.kafka.delivery.AsynchronousDeliveryStrategy"/>

		<!-- Raw Kafka producer properties; note Kafka uses dotted keys (batch.size), not Spring-style hyphenated ones (batch-size) -->
		<producerConfig>bootstrap.servers=127.0.0.1:9092</producerConfig>
		<producerConfig>retries=1</producerConfig>
		<producerConfig>batch.size=16384</producerConfig>
		<producerConfig>buffer.memory=33554432</producerConfig>
		<producerConfig>max.request.size=2097152</producerConfig>

	</appender>



	<!-- %m message, %p level, %t thread name, %d date, %c fully qualified class name, %i index (increments from 0) -->
	<!-- appender is a child element of configuration and is the component that actually writes logs -->
	<!-- ConsoleAppender: writes logs to the console -->
	<appender name="console"
		class="ch.qos.logback.core.ConsoleAppender">
		<encoder>
			<pattern>%d %p (%file:%line\)-[%X{X-B3-TraceId},%X{X-B3-SpanId},%X{X-B3-ParentSpanId}]- %m%n</pattern>
			<!-- Use UTF-8 (not GBK) here as well, otherwise Chinese characters will be garbled -->
			<charset>UTF-8</charset>
		</encoder>
	</appender>
	
	<!-- Root logger level: ship everything at INFO and above to Kafka -->
	<!-- During development you can also add <appender-ref ref="console"/> here to see the same output locally -->
	<root level="INFO">
		<appender-ref ref="kafkaAppender" />
	</root>
	

</configuration>
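
The pattern above references %X{ip}, which logback does not populate on its own. Below is a minimal sketch of a servlet filter that fills it in; the class name and the "ip" MDC key are this example's own choices, and it assumes a Servlet 4.0+ container (on older containers also implement init/destroy):

import java.io.IOException;

import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;

import org.slf4j.MDC;
import org.springframework.stereotype.Component;

// Puts the caller's address into the MDC so that %X{ip} resolves in the kafkaAppender pattern
@Component
public class MdcIpFilter implements Filter {

    @Override
    public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
            throws IOException, ServletException {
        try {
            MDC.put("ip", request.getRemoteAddr());
            chain.doFilter(request, response);
        } finally {
            // Clean up so pooled threads do not leak the value into unrelated requests
            MDC.remove("ip");
        }
    }
}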

3. Implement the custom keying strategy LogKeyStrategy and set the keyingStrategy element above to your class's fully qualified name.

The goal is to give every log line of the same request the same message key, which makes request-level log tracing straightforward.

import java.nio.charset.StandardCharsets;
import java.util.Map;

import com.github.danielwegener.logback.kafka.keying.KeyingStrategy;

import ch.qos.logback.classic.spi.ILoggingEvent;

/**
 * Keys each Kafka record by the request's trace id so that all log lines
 * of a single request share the same key (and therefore the same partition).
 */
public class LogKeyStrategy implements KeyingStrategy<ILoggingEvent> {

	@Override
	public byte[] createKey(ILoggingEvent e) {
		Map<String, String> mdc = e.getMDCPropertyMap();
		String traceId = mdc.get("X-B3-TraceId");
		if (traceId != null && !traceId.isEmpty()) {
			return traceId.getBytes(StandardCharsets.UTF_8);
		}
		// With a null key the producer falls back to its default partitioning
		return null;
	}

}
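
Because Kafka routes records with the same key to the same partition, keying by trace id keeps all log lines of one request in a single partition, in order, which is what makes per-request tracing reliable on the consumer side.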

4. Create a separate microservice dedicated to consuming the logs from Kafka. This assumes the project already has a working Kafka (spring-kafka) setup.
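
If Kafka is not yet wired up in that project, a minimal sketch of the spring-kafka configuration could look like the following (the broker address and group id are placeholders for this example):

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;

// Minimal consumer-side Kafka configuration assumed by step 4
@EnableKafka
@Configuration
public class KafkaConsumerConfig {

    @Bean
    public ConsumerFactory<String, String> consumerFactory() {
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "127.0.0.1:9092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "log-collection-consumer");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        return new DefaultKafkaConsumerFactory<>(props);
    }

    // Bean name "kafkaListenerContainerFactory" is the default that @KafkaListener looks up
    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, String> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        return factory;
    }
}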

The consumer class in that project:


import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.annotation.TopicPartition;
import org.springframework.stereotype.Component;

/**
 * Kafka log collector: receives the log lines published by the producers' KafkaAppender.
 */
@Component
public class KafkaLogReceiver {

    private static final Logger logger = LoggerFactory.getLogger(KafkaLogReceiver.class);

    // The topic must match the <topic> element configured in the KafkaAppender (log-collection above).
    // All ten partitions are explicitly assigned to this single listener; the ConsumerRecord
    // parameter exposes the partition number that is logged in the P-<partition> prefix.
    @KafkaListener(topicPartitions = @TopicPartition(topic = "log-collection",
            partitions = {"0", "1", "2", "3", "4", "5", "6", "7", "8", "9"}))
    public void receive(ConsumerRecord<String, String> record) {
        logger.info("P-{}:{}", record.partition(), record.value());
    }
}
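
The listener above assumes the topic already exists with at least ten partitions. Purely as an illustration, it could be created programmatically with the Kafka AdminClient (broker address and replication factor are placeholders):

import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;

// One-off helper that creates the log topic with 10 partitions and replication factor 1
public class CreateLogTopic {

    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "127.0.0.1:9092");
        try (AdminClient admin = AdminClient.create(props)) {
            NewTopic topic = new NewTopic("log-collection", 10, (short) 1);
            admin.createTopics(Collections.singletonList(topic)).all().get();
        }
    }
}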

The logback-spring.xml of the log consumer project:

<?xml version="1.0" encoding="UTF-8"?>
<configuration>
	<!-- %m message, %p level, %t thread name, %d date, %c fully qualified class name, %i index (increments from 0) -->
	<!-- appender is a child element of configuration and is the component that actually writes logs -->
	<!-- ConsoleAppender: writes logs to the console -->
	<appender name="console"
		class="ch.qos.logback.core.ConsoleAppender">
		<encoder>
			<pattern>%d %p (%file:%line\)-[%X{X-B3-TraceId},%X{X-B3-SpanId},%X{X-B3-ParentSpanId}]- %m%n</pattern>
			<!-- Use UTF-8 (not GBK) here as well, otherwise Chinese characters will be garbled -->
			<charset>UTF-8</charset>
		</encoder>
	</appender>
	<!-- RollingFileAppender: writes to an active file and rolls it over to another file when a condition is met -->
	<!-- Roughly: 1. logs are stored per day; when the date changes, the previous day's file is renamed to include the date and an index, and new logs keep going to elec_wo.log -->
	<!-- 2. if the date has not changed but the active file exceeds maxFileSize (100MB below), the file is split and renamed -->
	<appender name="elecWoAppender"
		class="ch.qos.logback.core.rolling.RollingFileAppender">
		<File>D:/tmp2/elec_wo.log</File>
		<!-- rollingPolicy: decides how the RollingFileAppender behaves on rollover (file moves and renames) -->
		<!-- TimeBasedRollingPolicy: the most common rolling policy; time-based, and acts as both the rolling and the triggering policy -->
		<rollingPolicy
			class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
			<!-- The active file is rolled to a name derived from fileNamePattern. Only one %d token may be primary, so the directory token is marked aux -->
			<!-- Example rolled file: D:/tmp2/2017-12-05/elec_wo.2017-12-05.0.log -->
			<fileNamePattern>D:/tmp2/%d{yyyy-MM-dd,aux}/elec_wo.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
			<!-- Rolled files are kept for maxHistory days (360 here) before being deleted -->
			<maxHistory>360</maxHistory>
			<timeBasedFileNamingAndTriggeringPolicy
				class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
				<!-- maxFileSize: size limit of the active file, set to 100MB -->
				<maxFileSize>100MB</maxFileSize>
			</timeBasedFileNamingAndTriggeringPolicy>
		</rollingPolicy>
		<encoder>
			<!-- pattern: output format; %m%n writes each consumed message exactly as received -->
			<pattern>
				%m%n
			</pattern>
			<!-- Charset used when writing the log file -->
			<charset>UTF-8</charset> 
		</encoder>
	</appender>
	<!-- Root logger: console output at INFO level -->
	<root level="INFO">
		<appender-ref ref="console" />
	</root>
	<!-- Per-logger configuration: everything logged under the name below uses this level -->
	<!-- Levels from high to low: ERROR > WARN > INFO > DEBUG > TRACE -->
	<!-- additivity=false keeps these events from also propagating to the root logger (console); set it to true in development if you want console output as well -->
	<!-- Route the consumer class's output to the rolling file appender so the collected logs land in the target path -->
	<logger name="xxx.xx.KafkaLogReceiver" level="INFO" additivity="false">
		<appender-ref ref="elecWoAppender" />
	</logger>

</configuration>
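
Because the file encoder's pattern is just %m%n, every consumed message is written to elec_wo.log verbatim, so the !#@-delimited format produced by the kafkaAppender on the producer side is preserved for any downstream parsing.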

With that, each microservice ships its logs to Kafka, and the log consumer service collects them into files under the configured path according to the rolling rules.
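
A quick end-to-end check: start Kafka, start one producer service and the consumer service, trigger a request against the producer, and confirm that new lines show up in D:/tmp2/elec_wo.log with the expected trace ids.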
