The previous examples all subscribed to messages from a main thread started in a main method. So how do we subscribe to messages in real time in a Spring Boot project? The approach below wraps a KafkaConsumer in a Spring @Component and starts a polling thread once the application context is up.
Reference blog:
https://blog.csdn.net/qq_37634156/article/details/126287394
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>0.10.0.0</version>
</dependency>
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.springframework.stereotype.Component;
import java.util.List;
import java.util.Map;
import java.util.Properties;
/**
 * @Author: zhe
 * @Date: 2023-07-05 11:12
 * @Description: Kafka message consumer wrapper
 */
@Component
public class KafkaMQConsumer {

    private KafkaConsumer<String, String> consumer;

    // Maximum wait time for a single poll request, in ms
    private final int waitTime = 1000;

    /**
     * Create the consumer
     */
    public KafkaMQConsumer() {
        Properties props = new Properties();
        // Broker address, e.g. 10.10.2.200 or localhost:9092
        props.put("bootstrap.servers", "localhost:9092");
        // Consumer group: within one group a message is consumed only once; different groups can each consume the same message
        props.put("group.id", "xxxx");
        // Whether the consumer commits offsets automatically; disabled here in favor of manual commits
        props.put("enable.auto.commit", "false");
        props.put("auto.commit.interval.ms", "1000");
        props.put("session.timeout.ms", "30000");
        // Path and password of the server.keystore.jks certificate, issued by the message server.
        // props.put("ssl.keystore.location", "/root/securityCA/server.keystore.jks");
        // props.put("ssl.keystore.password", "123456");
        // props.put("security.protocol", "SSL");
        // props.put("ssl.truststore.type", "JKS");
        // props.put("ssl.keystore.type", "JKS");
        // Path and password of the client.truststore.jks certificate, issued by the message server.
        // props.put("ssl.truststore.location", "/root/XXX.jks");
        // props.put("ssl.truststore.password", "123456");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        consumer = new KafkaConsumer<>(props);
    }
    /**
     * Subscribe to topics
     * @param topics the topics to subscribe to
     */
    public void subscribe(List<String> topics) {
        consumer.subscribe(topics);
    }

    /**
     * Consume messages
     * @return the records fetched by this poll
     */
    public ConsumerRecords<String, String> poll() {
        return consumer.poll(waitTime);
    }

    /**
     * Manual ack: commit the consumed offsets
     * @param map the partition-to-offset map to commit
     */
    public void commitSync(Map<TopicPartition, OffsetAndMetadata> map) {
        consumer.commitSync(map);
    }

    /**
     * Close the consumer
     */
    public void close() {
        consumer.close();
    }
}
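The broker address and group id above are hardcoded; in a Spring Boot project they are usually pulled from application.properties instead. A minimal sketch of that variant, assuming hypothetical kafka.bootstrap-servers and kafka.group-id keys (falling back to the values used above):

import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;

@Component
public class ConfigurableKafkaMQConsumer {

    private final KafkaConsumer<String, String> consumer;

    // kafka.bootstrap-servers and kafka.group-id are assumed keys in application.properties
    public ConfigurableKafkaMQConsumer(
            @Value("${kafka.bootstrap-servers:localhost:9092}") String bootstrapServers,
            @Value("${kafka.group-id:xxxx}") String groupId) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        this.consumer = new KafkaConsumer<>(props);
    }
}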
package com.xxxx.kafka;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import javax.annotation.PostConstruct;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
/**
 * @Author:
 * @Date: 2023-07-05 09:01
 * @Description: Thread bootstrap class that subscribes to Kafka messages
 */
@Component("kafkaListenerBPInfoJob")
public class kafkaListenerBPInfoJob extends Thread {

    private static final Logger logger = LoggerFactory.getLogger(kafkaListenerBPInfoJob.class);

    private KafkaMQConsumer kafkaConsumer;

    @Autowired
    private SubscribeManageService subscribeManageService;

    @Autowired
    private SubscribeInfoLogDao subscribeInfoLogDao;
    @PostConstruct
    public void receiveSubscribeMessage() {
        // Build the consumer from its configuration
        kafkaConsumer = new KafkaMQConsumer();
        // The topic is the orgId: 4419001250020000000
        List<String> topics = new ArrayList<>();
        topics.add("4419001250020000000");
        // Subscribe to the topics
        kafkaConsumer.subscribe(topics);
        // Kafka's consumption parallelism is bounded by the topic's partition count: with 10 partitions,
        // at most 10 consumers can consume in parallel (see the thread-pool sketch after this class)
        this.start();
    }
    @Override
    public void run() {
        // Polling is the core of the consumer: it requests data from the server in a loop
        // !Thread.currentThread().isInterrupted() could later be used here to shut the consumer thread down cleanly (see the shutdown sketch after this class)
        while (true) {
            // Poll for messages; KafkaConsumer detects concurrent access, so poll() may only be called from a single thread
            ConsumerRecords<String, String> records = kafkaConsumer.poll();
            for (TopicPartition partition : records.partitions()) {
                List<ConsumerRecord<String, String>> partitionRecords = records.records(partition);
                for (ConsumerRecord<String, String> record : partitionRecords) {
                    // Simply log the message
                    String info = String.format("kafka message: topic=%s, partition=%s, offset=%d, key=%s, value=%s", record.topic(), record.partition(), record.offset(), record.key(), record.value());
                    logger.info(info);
                    // Business logic such as writing to a database or cache omitted....
                }
                // Manually commit the offset of the last record consumed from this partition
                long lastOffset = partitionRecords.get(partitionRecords.size() - 1).offset();
                kafkaConsumer.commitSync(Collections.singletonMap(partition, new OffsetAndMetadata(lastOffset + 1)));
            }
        }
    }
}
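The run() loop above never exits, and its TODO comment notes that the consumer thread should eventually be shut down cleanly. A minimal sketch of one way to do that, as a hypothetical variant of the job class; it assumes KafkaMQConsumer gains a wakeup() pass-through (wakeup() is the only KafkaConsumer method that is safe to call from another thread):

package com.xxxx.kafka;

import java.util.Collections;
import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.errors.WakeupException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;

/**
 * Hypothetical variant of kafkaListenerBPInfoJob with a clean shutdown path.
 * Assumes KafkaMQConsumer gains a pass-through method:
 *     public void wakeup() { consumer.wakeup(); }
 */
@Component
public class KafkaListenerShutdownJob extends Thread {

    private static final Logger logger = LoggerFactory.getLogger(KafkaListenerShutdownJob.class);

    private KafkaMQConsumer kafkaConsumer;

    @PostConstruct
    public void init() {
        kafkaConsumer = new KafkaMQConsumer();
        kafkaConsumer.subscribe(Collections.singletonList("4419001250020000000"));
        this.start();
    }

    @Override
    public void run() {
        try {
            while (!Thread.currentThread().isInterrupted()) {
                ConsumerRecords<String, String> records = kafkaConsumer.poll();
                for (ConsumerRecord<String, String> record : records) {
                    logger.info("kafka message: offset={}, value={}", record.offset(), record.value());
                }
            }
        } catch (WakeupException e) {
            // Expected on shutdown: wakeup() makes a blocked poll() throw this
        } finally {
            // The consumer must be closed on the same thread that polls it
            kafkaConsumer.close();
        }
    }

    @PreDestroy
    public void shutdown() {
        this.interrupt();       // stop the loop at its next iteration
        kafkaConsumer.wakeup(); // break out of a blocked poll() immediately (assumed pass-through, see above)
    }
}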
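The comment in receiveSubscribeMessage() notes that consumption parallelism is capped by the topic's partition count. A minimal thread-pool sketch of that idea, using a hypothetical ParallelKafkaListener class: each worker thread owns its own KafkaMQConsumer (KafkaConsumer is single-threaded), all consumers share the same group, and the broker spreads the partitions across them. The thread count of 3 is an assumption; keep it at or below the topic's partition count, since extra threads would sit idle.

package com.xxxx.kafka;

import java.util.Collections;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;

/**
 * Hypothetical multi-threaded variant: one KafkaMQConsumer per thread.
 */
public class ParallelKafkaListener {

    private static final int THREAD_COUNT = 3; // assumed; keep <= the topic's partition count

    public void startWorkers() {
        ExecutorService pool = Executors.newFixedThreadPool(THREAD_COUNT);
        for (int i = 0; i < THREAD_COUNT; i++) {
            pool.execute(() -> {
                // Each worker owns its own consumer: KafkaConsumer is not thread-safe
                KafkaMQConsumer consumer = new KafkaMQConsumer();
                consumer.subscribe(Collections.singletonList("4419001250020000000"));
                while (true) {
                    ConsumerRecords<String, String> records = consumer.poll();
                    for (ConsumerRecord<String, String> record : records) {
                        System.out.printf("partition=%d, offset=%d, value=%s%n",
                                record.partition(), record.offset(), record.value());
                    }
                }
            });
        }
    }
}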