项目实践-Spring Boot实现Kafka消息监听
前言
**书山有路勤为径,学海无涯苦作舟**
记录程序员生活点点滴滴,希望记录的内容能帮助到努力爬山的各位伙伴!
标签:Kafka/消息监听
一、Kafka概念
- Kafka是一个分布式的基于发布/订阅模式的消息队列(message queue)。
- 消息队列的模式:A.点对点模式 B.发布/订阅模式。
- 基础架构由broker(负责缓冲消息)、生产者(负责发布消息)、消费者组(负责处理消息)构成,当前还依赖zookeeper(负责集群元数据管理与协调)。
- Kafka数据保留时间默认是7天。
- Kafka中的消息是以topic进行分类的,生产者生成消息,消费者消费消息,都是面向topic的。
- Topic(partition/replication)是一个逻辑上的概念,而partition是物理上的概念。
- Kafka中partition分区的作用:提升并发、提高性能,因为读写都是以partition为单位进行的;
- kafka通过向生产者发送ack来保证数据可靠性,若生产者确认收到则进行下一轮发送,否则重新发送数据。
- 推荐一个博主:Kafka概念扫盲
二、SpringBoot集成Kafka
1.引入Maven依赖
<!--监听kafka-->
<dependency>
<groupId>org.springframework.kafka</groupId>
<artifactId>spring-kafka</artifactId>
<!-- NOTE(review): version omitted - presumably inherited from the Spring Boot parent BOM; confirm against the project's pom -->
</dependency>
2.Kafka基本配置
# Kafka connection settings (NOTE(review): assumed to live under the top-level
# `spring:` key, since the code reads `${spring.kafka.*}` - confirm in bootstrap.yml).
kafka:
  bootstrap-servers: ${spring.server.ip}:9092
  producer:
    key-serializer: org.apache.kafka.common.serialization.StringSerializer
    value-serializer: org.apache.kafka.common.serialization.StringSerializer
  consumer:
    group-id: kafka-group
    # Start from the earliest offset when the group has no committed offset yet.
    auto-offset-reset: earliest
    enable-auto-commit: true
    # Auto-commit interval in milliseconds.
    auto-commit-interval: 1000
    key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
    value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
    # Custom property (not a standard Spring Kafka key): the topic this listener subscribes to.
    topic-name: kafka_event
备注:
- 在我的项目中Kafka配置信息放在bootstrap.yml文件中
- topic-name: kafka_event是本次监听的目标队列
三、实现消息监听
1.创建Kafka配置类
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import java.util.Collections;
import java.util.Properties;
/**
 * Kafka configuration: builds a raw {@link KafkaConsumer} from the
 * {@code spring.kafka.*} properties, subscribes it to the configured topic,
 * and starts a background polling thread ({@link KafkaListenerJob}).
 *
 * @author Miracle
 * @title: KafkaConfig
 * @projectName proxy
 * @description: Kafka configuration class
 * @date 2021/6/15 11:20
 */
@Configuration
@Slf4j
public class KafkaConfig {
    @Value("${spring.kafka.bootstrap-servers}")
    public String kafka_server;
    @Value("${spring.kafka.consumer.key-deserializer}")
    public String kafka_consumer_key;
    @Value("${spring.kafka.consumer.value-deserializer}")
    public String kafka_consumer_value;
    @Value("${spring.kafka.consumer.enable-auto-commit}")
    public String kafka_auto_config;
    // NOTE(review): field name has a typo ("commmit"); kept to avoid breaking any external reference.
    @Value("${spring.kafka.consumer.auto-commit-interval}")
    public String kafka_commmit_interval;
    @Value("${spring.kafka.consumer.group-id}")
    public String kafka_group_id;
    @Value("${spring.kafka.consumer.topic-name}")
    public String kafka_topic_name;
    @Autowired
    private KafkaConsumerListener kafkaConsumerListener;
    // Shared consumer instance, read by KafkaListenerJob via KafkaConfig.kafkaConsumer.
    public static KafkaConsumer<String, String> kafkaConsumer;

    /**
     * Creates, subscribes and registers the consumer, then starts the listener thread.
     *
     * <p>Fix: a {@code @Bean} factory method must return the bean instance - the
     * original declared {@code void}, which Spring rejects. Returning the consumer
     * also exposes it as a managed bean.
     *
     * @return the subscribed Kafka consumer
     */
    @Bean
    public KafkaConsumer<String, String> loadKafkaConfig() {
        Properties p = new Properties();
        p.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka_server);
        p.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, kafka_consumer_key);
        p.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, kafka_consumer_value);
        p.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, kafka_auto_config);
        p.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, kafka_commmit_interval);
        p.put(ConsumerConfig.GROUP_ID_CONFIG, kafka_group_id);
        kafkaConsumer = new KafkaConsumer<>(p);
        // Subscribe to the configured topic.
        kafkaConsumer.subscribe(Collections.singletonList(kafka_topic_name));
        log.info("消息订阅成功!kafka配置:{}", p);
        // Start the polling loop on a dedicated, named thread.
        Thread t = new Thread(new KafkaListenerJob(kafkaConsumerListener), "kafka-listener");
        t.start();
        return kafkaConsumer;
    }
}
2.创建Kafka监听任务
import java.time.Duration;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.springframework.beans.factory.annotation.Autowired;
/**
 * Background job that continuously polls the shared consumer created by
 * {@link KafkaConfig} and dispatches every record to {@link KafkaConsumerListener}.
 *
 * @author Miracle
 * @title: KafkaListenerJob
 * @projectName proxy
 * @description: Kafka listening job
 * @date 2021/6/15 11:20
 */
@Slf4j
public class KafkaListenerJob implements Runnable {

    // Handler for each record. Fix: the original field carried @Autowired, but this
    // class is instantiated with `new`, so Spring never injects it - the dependency
    // arrives solely through the constructor; the annotation was dead and misleading.
    private final KafkaConsumerListener kafkaConsumerListener;

    /**
     * @param kafkaConsumerListener handler invoked for every consumed record
     */
    public KafkaListenerJob(KafkaConsumerListener kafkaConsumerListener) {
        this.kafkaConsumerListener = kafkaConsumerListener;
    }

    @Override
    public void run() {
        log.info("kafka消息监听任务已启动!");
        // Poll until the thread is interrupted (the original `while (true)` could
        // never be stopped cleanly). poll(Duration) replaces the deprecated poll(long).
        while (!Thread.currentThread().isInterrupted()) {
            ConsumerRecords<String, String> records =
                    KafkaConfig.kafkaConsumer.poll(Duration.ofMillis(100));
            for (ConsumerRecord<String, String> record : records) {
                try {
                    kafkaConsumerListener.listen(record);
                } catch (Exception e) {
                    // Log and continue: one bad message must not kill the listener loop.
                    log.error("消息消费异常!", e);
                }
            }
        }
    }
}
3.Kafka监听任务处理类
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
/**
 * Handles records consumed from Kafka; invoked per record by {@link KafkaListenerJob}.
 *
 * @author Miracle
 * @title: KafkaConsumerListener
 * @projectName proxy
 * @description: Kafka message handler
 * @date 2021/6/15 11:21
 */
@Slf4j
@Service
public class KafkaConsumerListener {

    @Autowired
    RabbitMessageProducer rabbitMessageProducer;

    /**
     * Processes a single Kafka record.
     *
     * @param consumerRecord record polled from the subscribed topic; its value is
     *     already a {@code String} (StringDeserializer), so no cast is needed
     * @throws Exception if processing fails; the original exception is chained as
     *     the cause (the original code flattened it into the message text, losing
     *     the stack trace)
     */
    public void listen(ConsumerRecord<String, String> consumerRecord) throws Exception {
        try {
            String value = consumerRecord.value();
            log.info("监听Kafka消息:{}", value);
            // Actual business logic goes here.
        } catch (Exception e) {
            // Preserve the cause so callers and logs keep the full stack trace.
            throw new Exception(this.getClass().getName() + "Kafka消息处理异常:" + e, e);
        }
    }
}
结言
*登山路上的慕码人,理解不透的地方还请各位指点!*