Integrating Kafka with Spring Boot
GitHub: link. Stars are welcome.
Note: the style of this post is "less talk, just code!!!"
1. Maven dependency
<!-- https://mvnrepository.com/artifact/org.springframework.kafka/spring-kafka -->
<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
    <version>2.7.8</version>
</dependency>
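For reference, the spring-kafka 2.7.x line is the one managed by the Spring Boot 2.5.x dependency management, so if your project imports a matching Boot BOM you can usually omit the explicit <version> tag.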
2. KafkaProducer.java
import lombok.extern.slf4j.Slf4j;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.stereotype.Component;

import javax.annotation.Resource;

/**
 * @author Andon
 * 2021/11/12
 * <p>
 * Producer
 */
@Slf4j
@Component
public class KafkaProducer {

    @Resource
    private KafkaTemplate<String, String> kafkaTemplate;

    public void send(String topic, String json) {
        kafkaTemplate.send(topic, json);
    }
}
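The send() above is fire-and-forget. If you also want to log whether the broker accepted the record, KafkaTemplate.send() returns a ListenableFuture that accepts callbacks. A minimal sketch of a method that could be added to the class above (not part of the original code; it assumes the same KafkaTemplate<String, String> bean):

    public void sendWithCallback(String topic, String json) {
        kafkaTemplate.send(topic, json).addCallback(
                result -> log.info("send ok, topic:{} partition:{} offset:{}",
                        topic,
                        result == null ? null : result.getRecordMetadata().partition(),
                        result == null ? null : result.getRecordMetadata().offset()),
                ex -> log.error("send failed, topic:{}", topic, ex));
    }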
3. KafkaConsumer.java
import com.alibaba.fastjson.JSONObject;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.annotation.PartitionOffset;
import org.springframework.kafka.annotation.TopicPartition;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Component;
import org.springframework.util.ObjectUtils;

import java.util.concurrent.CopyOnWriteArrayList;

/**
 * @author Andon
 * 2021/11/12
 * <p>
 * Consumer
 */
@Slf4j
@Component
public class KafkaConsumer {

    private static CopyOnWriteArrayList<JSONObject> messageList = new CopyOnWriteArrayList<>();

    @KafkaListener(
            id = "consume1", // id of the listener container
            topicPartitions = { // configure topics and partitions: listen to two topics, test02 and topic04
                    @TopicPartition(topic = "${kafka.topic.test02}", partitions = {"0", "2"}), // for test02, only consume partitions 0 and 2
                    @TopicPartition(topic = "topic04", partitions = {"0", "2"}, partitionOffsets = @PartitionOffset(partition = "4", initialOffset = "5")) // for topic04, consume partitions 0, 2 and 4, with partition 4 starting from offset 5
            })
    public void consume(ConsumerRecord<String, String> consumerRecord, Acknowledgment acknowledgment) {
        if (!ObjectUtils.isEmpty(consumerRecord)) {
            String message = consumerRecord.value();
            log.info("consume message:{}", message);
            messageList.add(JSONObject.parseObject(message));
        }
        if (messageList.size() >= 5) {
            log.info("messageList:{}", JSONObject.toJSONString(messageList));
            log.info("messageList.size:{}", messageList.size());
            acknowledgment.acknowledge(); // manually commit the offset
            messageList.clear();
        }
    }

    @KafkaListener(
            id = "consume2", // id of the listener container
            topicPartitions = { // configure topics and partitions: listen to two topics, test02 and topic04
                    @TopicPartition(topic = "${kafka.topic.test02}", partitions = {"1", "3"}), // for test02, only consume partitions 1 and 3
                    @TopicPartition(topic = "topic04", partitions = {"1", "3"}, partitionOffsets = @PartitionOffset(partition = "5", initialOffset = "5")) // for topic04, consume partitions 1, 3 and 5, with partition 5 starting from offset 5
            })
    public void consume2(ConsumerRecord<String, String> consumerRecord, Acknowledgment acknowledgment) {
        if (!ObjectUtils.isEmpty(consumerRecord)) {
            String message = consumerRecord.value();
            log.info("consume2 message:{}", message);
            messageList.add(JSONObject.parseObject(message));
        }
        if (messageList.size() >= 5) {
            log.info("messageList:{}", JSONObject.toJSONString(messageList));
            log.info("messageList.size:{}", messageList.size());
            acknowledgment.acknowledge(); // manually commit the offset
            messageList.clear();
        }
    }

    @KafkaListener(
            id = "consumeTopicTest01", // id of the listener container
            topics = {"${kafka.topic.test01}", "test03"} // topics to listen to: test01 and test03
    )
    public void consumeTopicTest01(ConsumerRecord<String, String> consumerRecord, Acknowledgment acknowledgment) {
        if (!ObjectUtils.isEmpty(consumerRecord)) {
            String message = consumerRecord.value();
            log.info("consumeTopicTest01 message:{}", message);
            messageList.add(JSONObject.parseObject(message));
        }
        if (messageList.size() >= 5) {
            log.info("messageList:{}", JSONObject.toJSONString(messageList));
            log.info("messageList.size:{}", messageList.size());
            acknowledgment.acknowledge(); // manually commit the offset
            messageList.clear();
        }
    }
}
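One thing to keep in mind: the first two listeners explicitly read partitions 0 through 5 of topic04, so that topic must exist with at least 6 partitions (and test02 with at least 4). If the topics are not created on the broker beforehand, they can be declared as beans and created by the auto-configured KafkaAdmin. A sketch (the class name KafkaTopicConfig and the partition/replica counts are illustrative, not from the original repo):

import org.apache.kafka.clients.admin.NewTopic;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.TopicBuilder;

@Configuration
public class KafkaTopicConfig {

    @Bean
    public NewTopic topic04() {
        // partitions 0-5 are referenced by the listeners, so at least 6 partitions are needed
        return TopicBuilder.name("topic04").partitions(6).replicas(1).build();
    }

    @Bean
    public NewTopic test02() {
        // partitions 0-3 are referenced by the listeners
        return TopicBuilder.name("test02").partitions(4).replicas(1).build();
    }
}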
4. application.yml
spring:
  #============== kafka ===================
  kafka:
    # Kafka broker address; multiple addresses can be listed, separated by commas
    bootstrap-servers: ####:9092
    #=============== producer =======================
    producer:
      # serializers for the message key and value
      key-serializer: org.apache.kafka.common.serialization.StringSerializer
      value-serializer: org.apache.kafka.common.serialization.StringSerializer
    #=============== consumer =======================
    # default consumer group id
    consumer:
      group-id: test
      # earliest: if a partition has a committed offset, resume from it; otherwise consume from the beginning
      # latest: if a partition has a committed offset, resume from it; otherwise consume only newly produced records
      # none: if every partition has a committed offset, resume from those offsets; if any partition has none, throw an exception
      auto-offset-reset: latest
      # deserializers for the message key and value
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      # disable auto-commit of offsets
      enable-auto-commit: false
    listener:
      # do not fail application startup if a listened-to topic does not exist yet
      missing-topics-fatal: false
      # ack mode for manually committing offsets
      ack-mode: manual_immediate
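Note that the listeners reference ${kafka.topic.test01} and ${kafka.topic.test02}, so those custom properties also need to be defined in the configuration. Presumably something like the following (the concrete values are an assumption; the actual ones are in the GitHub repo):

kafka:
  topic:
    test01: test01
    test02: test02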
5. DatabaseTest.java
import com.alibaba.fastjson.JSONObject;
import com.andon.springbootkafka.kafka.KafkaProducer;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.junit4.SpringRunner;

import javax.annotation.Resource;
import java.util.Date;

/**
 * @author Andon
 * 2021/11/12
 */
@RunWith(SpringRunner.class)
@SpringBootTest
public class DatabaseTest {

    @Resource
    private KafkaProducer kafkaProducer;

    @Test
    public void test02() {
        String topic = "test02";
        String message;
        for (int i = 0; i < 3; i++) {
            JSONObject jsonObject = new JSONObject();
            jsonObject.put(String.valueOf(i), new Date().toString());
            message = JSONObject.toJSONString(jsonObject);
            kafkaProducer.send(topic, message);
        }
    }

    @Test
    public void test01() {
        String topic = "test01";
        String message;
        for (int i = 0; i < 100; i++) {
            message = i + new Date().toString();
            kafkaProducer.send(topic, message);
        }
    }
}
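KafkaTemplate.send() is asynchronous, so these test methods return before the broker has acknowledged anything; delivery still usually succeeds because the producer is flushed when the Spring context shuts down. If you want a test to fail fast on delivery problems, you can block on the returned future. A sketch (injecting the KafkaTemplate directly into the test class is an assumption, not part of the original code):

    // requires imports of org.springframework.kafka.core.KafkaTemplate (TimeUnit is fully qualified below)
    @Resource
    private KafkaTemplate<String, String> kafkaTemplate;

    @Test
    public void test02Sync() throws Exception {
        JSONObject jsonObject = new JSONObject();
        jsonObject.put("0", new Date().toString());
        // get() blocks until the broker acknowledges the record, so a send failure surfaces as a test failure
        kafkaTemplate.send("test02", JSONObject.toJSONString(jsonObject)).get(10, java.util.concurrent.TimeUnit.SECONDS);
    }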
6. Testing
GitHub: link. Stars are welcome.