Method 1
1. Kafka POM dependencies (version 0.10.1.1):
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>${kafka.client.version}</version>
    <scope>compile</scope>
    <exclusions>
        <exclusion>
            <artifactId>slf4j-api</artifactId>
            <groupId>org.slf4j</groupId>
        </exclusion>
    </exclusions>
</dependency>
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka_2.11</artifactId>
    <version>${kafka.client.version}</version>
    <scope>compile</scope>
    <exclusions>
        <exclusion>
            <artifactId>slf4j-api</artifactId>
            <groupId>org.slf4j</groupId>
        </exclusion>
    </exclusions>
</dependency>
<dependency>
    <groupId>org.slf4j</groupId>
    <artifactId>slf4j-log4j12</artifactId>
</dependency>
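The `${kafka.client.version}` property referenced above would be defined once in the POM's `<properties>` section; a minimal sketch using the 0.10.1.1 version mentioned in the heading:

<properties>
    <!-- assumed property definition matching the heading's version -->
    <kafka.client.version>0.10.1.1</kafka.client.version>
</properties>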
2. logback configuration

<?xml version="1.0" encoding="UTF-8"?>
<configuration>
    <!-- Rolling file appender for application (system) logs -->
    <appender name="APP_LOG" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${LOG_HOME}/${SYS_NAME}/${APP_LOGS_FILENAME}.%d{yyyy-MM-dd}.log</fileNamePattern>
            <maxHistory>10</maxHistory>
        </rollingPolicy>
        <encoder>
            <charset>UTF-8</charset>
            <pattern>[%d] [%-5level] [%thread] [%logger] - %msg%n</pattern>
        </encoder>
    </appender>
    <!-- Custom Kafka appender (class defined in step 3); only the raw message is sent -->
    <appender name="KAFKA_EVENT" class="com.demo.kafka.logs.KafkaAppender">
        <layout class="ch.qos.logback.classic.PatternLayout">
            <pattern>%msg</pattern>
        </layout>
    </appender>
    <!-- Rolling file appender for event logs, rolled daily and at 100MB -->
    <appender name="EVENT_LOG" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${LOG_HOME}/${DATA_NAME}/${EVENT_LOGS_FILENAME}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <maxHistory>10</maxHistory>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <maxFileSize>100MB</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
        </rollingPolicy>
        <encoder>
            <charset>UTF-8</charset>
            <pattern>[%d] [%-5level] [%thread] [%logger] - %msg%n</pattern>
        </encoder>
    </appender>
    <!-- Async wrappers: discardingThreshold 0, queueSize 2048 -->
    <appender name="ASYNC_APP_LOG" class="ch.qos.logback.classic.AsyncAppender">
        <discardingThreshold>0</discardingThreshold>
        <queueSize>2048</queueSize>
        <appender-ref ref="APP_LOG"/>
    </appender>
    <appender name="ASYNC_EVENT_LOG" class="ch.qos.logback.classic.AsyncAppender">
        <discardingThreshold>0</discardingThreshold>
        <queueSize>2048</queueSize>
        <appender-ref ref="EVENT_LOG"/>
    </appender>
    <!-- "local": fallback logger used by KafkaAppender when a send fails -->
    <logger name="local" additivity="false">
        <appender-ref ref="ASYNC_APP_LOG"/>
    </logger>
    <!-- "kafka-event": routes messages to Kafka -->
    <logger name="kafka-event" additivity="false">
        <appender-ref ref="KAFKA_EVENT"/>
        <appender-ref ref="ASYNC_EVENT_LOG"/>
    </logger>
</configuration>

3. Custom KafkaAppender

package com.demo.kafka.logs;

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.util.Assert;
import com.demo.kafka.KafkaConfigUtils;
import ch.qos.logback.core.AppenderBase;
import ch.qos.logback.core.Layout;
public class KafkaAppender<E> extends AppenderBase<E> {
//"local" is the logger name configured in logback.xml that writes to the local file
private static final Logger log = LoggerFactory.getLogger("local");
protected Layout<E> layout;
private Producer<String, String> producer; //Kafka producer
@Override
public void start() {
Assert.notNull(layout, "you don't set the layout of KafkaAppender");
super.start();
this.producer = KafkaConfigUtils.createProducer();
}
@Override
public void stop() {
super.stop();
producer.close();
System.out.println("[Stopping KafkaAppender !!!]");
}
@Override
protected void append(E event) {
String msg = layout.doLayout(event);
//assemble the message content
ProducerRecord<String, String> producerRecord = new ProducerRecord<String, String>(
KafkaConfigUtils.DEFAULT_TOPIC_NAME, msg);
System.out.println("[pushing message]:" + producerRecord);
//send the message to Kafka
producer.send(producerRecord, new Callback() {
@Override
public void onCompletion(RecordMetadata metadata, Exception exception) {
//handle the send result
if (exception != null) {
exception.printStackTrace();
log.info(msg);
} else {
System.out.println("[message pushed to Kafka successfully]:" + metadata);
}
}
});
}
public Layout<E> getLayout() {
return layout;
}
public void setLayout(Layout<E> layout) {
this.layout = layout;
}
}
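The appender relies on a KafkaConfigUtils helper that the original never shows. A minimal sketch, assuming a broker at localhost:9092 and a default topic name (both values are assumptions; adjust to your environment):

package com.demo.kafka;

import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;

public class KafkaConfigUtils {
//topic name is an assumption; the original helper is not shown
public static final String DEFAULT_TOPIC_NAME = "log-event";

public static Producer<String, String> createProducer() {
Properties props = new Properties();
props.put("bootstrap.servers", "localhost:9092"); //placeholder broker address
props.put("acks", "1");
props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
return new KafkaProducer<String, String>(props);
}
}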
4. Test snippet
//"kafka-event" is the logger name in logback.xml that routes to Kafka
private static final Logger log = LoggerFactory.getLogger("kafka-event");
@Override
public void produce(String msgContent) {
if (StringUtils.isEmpty(msgContent)) {
return;
}
//log the message
log.info(msgContent);
}
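Calling log.info(msgContent) on the "kafka-event" logger hands the event to the custom KafkaAppender, which pushes it to the configured topic; if the push fails, the appender falls back to the "local" file logger.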
Method 2
1. Install Kafka
https://blog.csdn.net/shenyanwei/article/details/90374859?utm_medium=distribute.pc_relevant_download.none-task-blog-baidujs-2.nonecase&depth_1-utm_source=distribute.pc_relevant_download.none-task-blog-baidujs-2.nonecase
2. Configure the jar dependency. Note: with Spring Boot 1.5.4 I ran into Kafka version incompatibilities, so check versions carefully.

<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
    <version>1.1.0.RELEASE</version>
</dependency>
3. Producer configuration: the @Bean("kafkaTemplate") annotation is the critical configuration item.
@Configuration
@EnableKafka
public class KafkaProducerConfig {
@Bean("kafkaTemplate")
public KafkaTemplate<String, String> kafkaTemplate() {
KafkaTemplate<String, String> kafkaTemplate = new KafkaTemplate<String, String>(producerFactory());
return kafkaTemplate;
}
public ProducerFactory<String, String> producerFactory() {
Map<String, Object> properties = new HashMap<String, Object>();
properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "10.208.25.294:9092");
properties.put(ProducerConfig.RETRIES_CONFIG, 0);
properties.put(ProducerConfig.BATCH_SIZE_CONFIG, 4096);
properties.put(ProducerConfig.LINGER_MS_CONFIG, 1);
properties.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 40960);
properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
return new DefaultKafkaProducerFactory<String, String>(properties);
}
}
Consumer:
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
import java.util.HashMap;
import java.util.Map;
/**
 * Created by yuanyirui839 on 2017-09-13.
 */
@Configuration
@EnableKafka
public class KafkaConsumerConfig {
@Bean
public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory() {
ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<String, String>();
factory.setConsumerFactory(consumerFactory());
factory.setConcurrency(4);
factory.getContainerProperties().setPollTimeout(4000);
return factory;
}
public ConsumerFactory<String, String> consumerFactory() {
Map<String, Object> properties = new HashMap<String, Object>();
properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "IP:PORT"); //replace with your actual Kafka address; this placeholder is only for the demo
properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
properties.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "100");
properties.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "15000");
properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
properties.put(ConsumerConfig.GROUP_ID_CONFIG, "test-group");
properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
return new DefaultKafkaConsumerFactory<String, String>(properties);
}
@Bean
public KafkaListeners kafkaListeners() {
return new KafkaListeners();
}
}
kafkaListeners:
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import java.util.Optional;
/**
 * Created by yuanyirui839 on 2017-09-13.
 */
public class KafkaListeners {
@KafkaListener(topics = {"test"})
public void listen(ConsumerRecord<?, ?> record) {
Optional<?> kafkaMessage = Optional.ofNullable(record.value());
if (kafkaMessage.isPresent()) {
Object message = kafkaMessage.get();
System.out.println("listen " + message);
// logService.insertMessage(message);
}
}
}
4. Write test code:
import com.alibaba.fastjson.JSON;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.ResponseBody;
import org.springframework.web.bind.annotation.RestController;
/**
*
*/
@RestController
public class KafkaController {
@Autowired
KafkaTemplate kafkaTemplate;
/* ########################### Kafka test ########################### */
@RequestMapping("/testKafka")
@ResponseBody
public void testkafka() {
String message = "{\"transNo\":\"kafka23456789\",\"idLogDs\":\"idLogDsretyuio\"}";
try {
kafkaTemplate.send("test", "hi", message);
System.out.println("ok");
//test the configuration of the dependent project
System.out.println(StrUtil.YUAN());
UpStreamLog log = new UpStreamLog();
log.setAppId("uuuuuuuuuuuuuuuuuuuuu");
System.out.println(log.getAppId());
} catch (Exception e) {
e.printStackTrace();
System.out.println(e.toString());
}
}
}
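Hitting GET /testKafka should print "ok" on the producer side, and the KafkaListeners consumer should print the received message as "listen ...".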
Method 3
1. Add the dependencies

<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-web</artifactId>
</dependency>
<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
</dependency>
<dependency>
    <groupId>org.projectlombok</groupId>
    <artifactId>lombok</artifactId>
    <optional>true</optional>
</dependency>
2. YAML configuration
spring:
  kafka: # Kafka broker address(es); multiple brokers may be listed
    bootstrap-servers: 192.168.211.137:9092,192.168.211.139:9092,192.168.211.140:9092
    template: # default topic id
      default-topic: producer
    listener: # number of threads in the listener container, to increase concurrency
      concurrency: 5
    consumer:
      group-id: myGroup # default consumer group id
      client-id: 200
      max-poll-records: 200
      auto-offset-reset: earliest # start from the earliest unconsumed offset
    producer:
      batch-size: 1000 # batch size in bytes (not a message count)
      retries: 3
      client-id: 200
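With these properties, Spring Boot's Kafka auto-configuration builds the KafkaTemplate and listener container factory for you, so this method needs no explicit @Configuration class.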
3. Code examples
Producer
package com.wyu.tt06kafkademo.demo;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.stereotype.Component;
import java.util.Date;
@Slf4j
@Component
public class KafkaProducer {
@Autowired
private KafkaTemplate<String, String> kafkaTemplate;
@Autowired
private ObjectMapper objectMapper;
public void send(String topic, Object body) {
Message message = new Message();
message.setId(System.currentTimeMillis());
message.setMessage(body.toString());
message.setTime(new Date());
String content = null;
try {
content = objectMapper.writeValueAsString(message);
} catch (JsonProcessingException e) {
e.printStackTrace();
}
kafkaTemplate.send(topic, content);
log.info("send {} to {} success!", message, topic);
System.out.println("send " + message + " to " + topic + " success!");
}
}
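The Message type used by this producer is not shown in the original. A minimal sketch of a matching POJO, with fields inferred from the setters called above (Lombok's @Data supplies the getters and setters):

package com.wyu.tt06kafkademo.demo;

import java.util.Date;

import lombok.Data;

@Data
public class Message {
private Long id; //set from System.currentTimeMillis()
private String message; //message body
private Date time; //send timestamp
}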
Consumer
package com.wyu.tt06kafkademo.demo;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.stereotype.Component;
import java.util.List;
import java.util.Optional;
@Slf4j
@Component
public class KafkaConsumer {
/**
 * Read each message as it arrives: topic, offset, key, value, etc.
 */
@KafkaListener(topics = {"kafka1"})
public void listen(ConsumerRecord<?, ?> record) {
Optional<?> kafkaMessage = Optional.ofNullable(record.value());
if (kafkaMessage.isPresent()) {
Object message = kafkaMessage.get();
log.info("detailed read -------------------->");
log.info("message:{} + record:{}", message, record);
}
}
/**
 * Batch read: consume messages in batches (requires batch listening to be
 * enabled on the listener container factory).
 */
@KafkaListener(topics = "kafka1")
public void onMessage(List<String> crs) {
for (String str : crs) {
log.info("batch read -------------------->");
log.info("kafka1:" + str);
}
}
/**
 * Read just the message payload.
 */
@KafkaListener(topics = {"kafka1"})
public void receiveMessage(String message){
log.info("read message -------------------->");
log.info("kafka1:" + message);
}
}
controller
package com.wyu.tt06kafkademo.controller;
import com.wyu.tt06kafkademo.demo.KafkaProducer;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;
@Slf4j
@RestController
public class KafkaController {
@Autowired
private KafkaProducer kafkaProducer;
@GetMapping("/kafka/{topic}")
public String send(@PathVariable("topic") String topic, @RequestParam String message) {
kafkaProducer.send(topic, message);
return “success”;
}
}
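For example, GET /kafka/kafka1?message=hello sends "hello" to the kafka1 topic, and the listeners above log it.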
Method 4
Environment setup: install and deploy Kafka in Docker containers
- Download the images
Kafka is coordinated by ZooKeeper, so pull the ZooKeeper image first: docker pull wurstmeister/zookeeper
Then pull the Kafka image: docker pull wurstmeister/kafka
- Start the ZooKeeper and Kafka containers
Start ZooKeeper: docker run -d --name zookeeper -p 2181:2181 -t wurstmeister/zookeeper
Start a Kafka container from the image:
docker run -d --name kafka -p 9092:9092 -e KAFKA_BROKER_ID=0 -e KAFKA_ZOOKEEPER_CONNECT=10.0.75.1:2181 -e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://10.0.75.1:9092 -e KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9092 -t wurstmeister/kafka
-e KAFKA_BROKER_ID=0: in a Kafka cluster, each broker is distinguished by its BROKER_ID
-e KAFKA_ZOOKEEPER_CONNECT=10.0.75.1:2181/kafka: the ZooKeeper path that manages Kafka, e.g. 192.168.155.56:2181/kafka (same VM IP here)
-e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://10.0.75.1:9092: registers Kafka's address and port with ZooKeeper (same VM IP here)
-e KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9092: the port Kafka listens on
- Verify the deployment
1. Enter the Kafka container shell: docker exec -it kafka /bin/bash
2. Change to the bin directory that holds the Kafka scripts: cd opt/kafka_x.xx-x.x.x/bin
3. Run the console producer to send a message: ./kafka-console-producer.sh --broker-list localhost:9092 --topic mykafka
{"datas":[{"channel":"","metric":"temperature","producer":"ijinus","sn":"IJA0101-00002245","time":"1543207156000","value":"80"}],"ver":"1.0"}
4. Run the console consumer to receive the message: kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic mykafka --from-beginning
At this point Kafka has been deployed in containers and started successfully.
Spring Boot integration with Kafka
Create a Kafka topic - Kafka ships a command-line utility named kafka-topics.sh for creating topics on the server. Open a new terminal and create a topic named test.
First change to the opt/kafka_x.xx-x.x.x/bin directory, then run:
./kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic test
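Note: the --zookeeper flag applies to older Kafka releases such as the one in this image; on Kafka 2.2 and later the same command takes --bootstrap-server localhost:9092 instead.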
- Import the dependencies:

<parent>
    <groupId>spring-boot-demo-base</groupId>
    <artifactId>spring-boot-demo-base</artifactId>
    <version>1.0-SNAPSHOT</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>spring-boot-demo-kafka</artifactId>
<version>1.0.0-SNAPSHOT</version>
<packaging>jar</packaging>

- Kafka configuration class KafkaConfig:
/**
 * @Description: Kafka configuration class
 * @Versions 1.0
 **/
@Configuration
@EnableConfigurationProperties({KafkaProperties.class})
@EnableKafka
@AllArgsConstructor
public class KafkaConfig {
private final KafkaProperties kafkaProperties;
@Bean
public KafkaTemplate<String, String> kafkaTemplate() {
return new KafkaTemplate<>(producerFactory());
}
@Bean
public ProducerFactory<String, String> producerFactory() {
return new DefaultKafkaProducerFactory<>(kafkaProperties.buildProducerProperties());
}
//------------------------------ consumer configuration below -----------------
@Bean
public ConcurrentKafkaListenerContainerFactory<String, String> kafkaListenerContainerFactory() {
ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
factory.setConsumerFactory(consumerFactory());
factory.setConcurrency(3);
factory.setBatchListener(true);
factory.getContainerProperties().setPollTimeout(3000);
return factory;
}
@Bean
public ConsumerFactory<String, String> consumerFactory() {
return new DefaultKafkaConsumerFactory<>(kafkaProperties.buildConsumerProperties());
}
@Bean("ackContainerFactory")
public ConcurrentKafkaListenerContainerFactory<String, String> ackContainerFactory() {
ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
factory.setConsumerFactory(consumerFactory());
factory.getContainerProperties().setAckMode(AbstractMessageListenerContainer.AckMode.MANUAL_IMMEDIATE);
factory.setConcurrency(3);
return factory;
}
}
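Note that a listener taking an Acknowledgment parameter must be bound to the ackContainerFactory above, since only that factory sets the MANUAL_IMMEDIATE ack mode.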
4. Write a consumer, KafkaMessageHandler, that listens for messages
/**
 * @Description: message consumer
 * @Versions 1.0
 **/
@Component
@Slf4j
public class KafkaMessageHandler {
/***
 * Manually acknowledge after receiving a message
 * @param record consumer record
 * @param acknowledgment manual acknowledgment
 * @return void
 */
@KafkaListener(topics = "test", containerFactory = "ackContainerFactory")
public void handlerMessage(ConsumerRecord record, Acknowledgment acknowledgment){
try {
//receive the message manually
String value = (String) record.value();
System.out.println("manual ack <<message received, consuming>>>" + value);
} catch (Exception e) {
log.error("manual ack <<consume exception>>>" + e.getMessage());
} finally {
//finally confirm receipt: manually commit the offset
acknowledgment.acknowledge();
}
}
// /***
// * Auto-commit after receiving a message; requires enable-auto-commit: true
// *
// * @param message the message
// * @return void
// */
// @KafkaListener(topics = "test", groupId = "test-consumer")
// public void handlerMessage(String message){
// System.out.println("received auto-acknowledged message" + message);
// }
}
- Write a test class, KafkaSendMessage, to send messages:
/**
 * @Description: test sending messages
 * @Versions 1.0
 **/
@SpringBootTest(classes = ApplicationKafka.class)
@RunWith(value = SpringRunner.class)
public class KafkaSendMessage {
@Autowired
private KafkaTemplate<String, String> kafkaTemplate;
/***
 * Simple send
 * @param message the message
 * @return
 */
public void testSend(String message){
//send the message to the test topic
kafkaTemplate.send("test", message);
}
/***
 * Send a message and learn whether it succeeded or failed
 * @param message the message
 * @return
 */
public void Send(String message){
//send the message to the test topic
ListenableFuture<SendResult<String, String>> future = kafkaTemplate.send("test", message);
future.addCallback(new ListenableFutureCallback<SendResult<String, String>>() {
@Override
public void onFailure(Throwable throwable) {
System.out.printf("message: %s failed to send, cause: %s%n", message, throwable.getMessage());
}
@Override
public void onSuccess(SendResult<String, String> stringStringSendResult) {
System.out.printf("successfully sent message: %s, offset=%d%n", message, stringStringSendResult.getRecordMetadata().offset());
}
});
}
@Test
public void test(){
this.testSend("this is a simple send test");
this.Send("this is a send-with-result test");
}
}
Test. Below is the accompanying logback-spring.xml that ships application logs to Kafka:
<?xml version="1.0" encoding="UTF-8"?>
<configuration>
<springProperty scope="context" name="logPath" source="logging.path" defaultValue="log"/>
<springProperty scope="context" name="maxHistory" source="logging.file.max-history" defaultValue="30"/>
<springProperty scope="context" name="maxFileSize" source="logging.file.max-size" defaultValue="100MB"/>
<springProperty scope="context" name="springFrameworkLogLevel" source="logging.level.org.springframework" defaultValue="ERROR"/>
<springProperty scope="context" name="rootLogLevel" source="logging.level.root" defaultValue="DEBUG"/>
<springProperty scope="context" name="appId" source="logging.appId" defaultValue="default-app"/>
<springProperty scope="context" name="kafkaLogTopic" source="logging.kafka.topic" defaultValue="ICS-BACKEND-LOG"/>
<springProperty scope="context" name="kafkaServer" source="logging.kafka.server" defaultValue="192.168.10.158:19092"/>
<!-- console output -->
<appender name="console" class="ch.qos.logback.core.ConsoleAppender">
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>DEBUG</level>
</filter>
<encoder>
<pattern>%d{HH:mm:ss.SSS} %contextName [%thread] %-5level %logger{36} Token:%X{Authorization} ReqId:%X{RequestId} TraceId:%X{TraceId} - %msg%n</pattern>
</encoder>
</appender>
<!-- file output (INFO) -->
<appender name="info_file" class="ch.qos.logback.core.rolling.RollingFileAppender">
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>ERROR</level>
<onMatch>DENY</onMatch><!-- drop the event if it matches ERROR -->
<onMismatch>ACCEPT</onMismatch><!-- accept everything else -->
</filter>
<file>${logPath}/${appId}_log_info.log</file>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${logPath}/info/logback-info.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
<maxFileSize>${maxFileSize}</maxFileSize>
<maxHistory>${maxHistory}</maxHistory>
</rollingPolicy>
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} %contextName [%thread] %-5level %logger{36} Token:%X{Authorization} ReqId:%X{RequestId} TraceId:%X{TraceId} - %msg%n</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<!-- file output (ERROR) -->
<appender name="error_file" class="ch.qos.logback.core.rolling.RollingFileAppender">
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>ERROR</level>
</filter>
<file>${logPath}/${appId}_log_error.log</file>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${logPath}/error/logback-error.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
<maxFileSize>${maxFileSize}</maxFileSize>
<maxHistory>${maxHistory}</maxHistory>
</rollingPolicy>
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} %contextName [%thread] %-5level %logger{36} Token:%X{Authorization} ReqId:%X{RequestId} TraceId:%X{TraceId} - %msg%n</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<!-- Sentry output -->
<appender name="Sentry" class="io.sentry.logback.SentryAppender">
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>ERROR</level>
</filter>
</appender>
<!-- Kafka output -->
<appender name="kafkaAppender" class="com.github.danielwegener.logback.kafka.KafkaAppender">
<encoder charset="UTF-8" class="net.logstash.logback.encoder.LogstashEncoder" >
<customFields>{"appname":"${appId}","server": "${HOSTNAME}"}</customFields>
<includeMdc>true</includeMdc>
<includeContext>false</includeContext>
<includeMdcKeyName>Authorization</includeMdcKeyName>
<includeMdcKeyName>RequestId</includeMdcKeyName>
<includeMdcKeyName>TraceId</includeMdcKeyName>
<throwableConverter class="net.logstash.logback.stacktrace.ShortenedThrowableConverter">
<maxDepthPerThrowable>30</maxDepthPerThrowable>
<rootCauseFirst>true</rootCauseFirst>
</throwableConverter>
</encoder>
<topic>${kafkaLogTopic}</topic>
<keyingStrategy class="com.github.danielwegener.logback.kafka.keying.NoKeyKeyingStrategy" />
<deliveryStrategy class="com.github.danielwegener.logback.kafka.delivery.AsynchronousDeliveryStrategy" />
<producerConfig>bootstrap.servers=${kafkaServer}</producerConfig>
</appender>
<logger name="springfox.documentation" level="${springFrameworkLogLevel}"/>
<logger name="org.springframework" level="${springFrameworkLogLevel}"/>
<logger name="org.apache.kafka" level="info"/>
<root level="${rootLogLevel}">
<appender-ref ref="console" />
<appender-ref ref="info_file" />
<appender-ref ref="error_file" />
<appender-ref ref="Sentry" />
<appender-ref ref="kafkaAppender" />
</root>
</configuration>
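The kafkaAppender above comes from the logback-kafka-appender project and the encoder from logstash-logback-encoder, neither of which is declared in this section. A sketch of the Maven coordinates (the version numbers are assumptions; check for current releases):

<dependency>
    <groupId>com.github.danielwegener</groupId>
    <artifactId>logback-kafka-appender</artifactId>
    <version>0.2.0-RC2</version> <!-- assumed version -->
</dependency>
<dependency>
    <groupId>net.logstash.logback</groupId>
    <artifactId>logstash-logback-encoder</artifactId>
    <version>6.6</version> <!-- assumed version -->
</dependency>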