已经在docker上安装zookeeper和kafka
现在尝试在springBoot中集成kafka
父级目录
为了方便测试,将consumer和produce放在同一个父级项目中
pom.xml中配置
<parent>
<artifactId>spring-boot-starter-parent</artifactId>
<groupId>org.springframework.boot</groupId>
<version>2.2.5.RELEASE</version>
</parent>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
<java.version>1.8</java.version>
</properties>
<dependencies>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-web</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-test</artifactId>
</dependency>
<dependency>
<groupId>org.projectlombok</groupId>
<artifactId>lombok</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.kafka</groupId>
<artifactId>spring-kafka</artifactId>
</dependency>
</dependencies>
produce
application.yml文件内容
server:
port: 8001
servlet:
context-path: /producer
#Spring 整合kafka集群
spring:
kafka:
bootstrap-servers: 127.0.0.1:9092
producer:
retries: 0 #kafka producer 发送消息失败时候的重试次数
batch-size: 16384 # 批量发送数据的配置
buffer-memory: 33554432 #设置kafka生产者内存缓存去的大小 32m
key-serializer: org.apache.kafka.common.serialization.StringSerializer #kafka消息序列化配置
value-serializer: org.apache.kafka.common.serialization.StringSerializer #kafka消息序列化配置
# (1)acks=0 意味着生产者能够通过网络把消息发送出去,那么就认为消息已成功写入Kafka 一定会丢失一些数据
# (2) acks=1:这 意味着至少要等待leader 已经成功将数据写入本地log,但是并没有等 待所有follower 是否成功写入。这种情况下,
# 如果follower没有成功备份数据,而此时 leader又挂掉,则消息会丢失。↓
# (3) 如果acks=all / -1,只有在集群所有的跟随副本都接收到消息后,生产者才会收到一个来自服务器的成功响应。这种模式是最安全的,
# 它可以保证集群中不止一个服务器接收到消息,就算有服务器崩溃了,这个集群还是能够正常运行。
# 不过它比acks=1的延迟性更高,因为生产者要等待所有参与复制消息的节点接收到消息。
acks: all
代码。KafkaProducerService(注入到spring中。每次发送消息的时候使用这个发消息):
package com.item.kafka.producer;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.SendResult;
import org.springframework.stereotype.Component;
import org.springframework.util.concurrent.ListenableFuture;
import org.springframework.util.concurrent.ListenableFutureCallback;
import javax.annotation.Resource;
@Slf4j
@Component
public class KafkaProducerService {
@Resource
private KafkaTemplate<String, Object> kafkaTemplate;
public void sendMessage(String topic, Object object) {
ListenableFuture<SendResult<String, Object>> future = kafkaTemplate.send(topic, object);
future.addCallback(new ListenableFutureCallback<SendResult<String, Object>>() {
public void onFailure(Throwable throwable) {
log.error("发送消息失败:", throwable.getMessage());
}
public void onSuccess(SendResult<String, Object> result) {
log.info("发送消息成功:", result.toString());
}
});
}
}
在test中测试:
package com.item.kafka.producer.test;
import com.item.kafka.producer.KafkaProducerService;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.junit4.SpringRunner;

/**
 * Integration test that publishes 100 messages, alternating between two
 * topics: even indices go to topic01, odd indices to topic02.
 */
@RunWith(SpringRunner.class)
@SpringBootTest
public class ApplicationTest {

    private static final int MESSAGE_COUNT = 100;

    @Autowired
    private KafkaProducerService kafkaProducerService;

    final static String topic2 = "topic02";
    final static String topic = "topic01";

    @Test
    public void send() throws Exception {
        for (int i = 0; i < MESSAGE_COUNT; i++) {
            boolean even = (i % 2 == 0);
            String target = even ? topic : topic2;
            String payload = even ? "1hello kafka" + i : "2hello kafka" + i;
            kafkaProducerService.sendMessage(target, payload);
        }
        // NOTE(review): sends are asynchronous; uncomment to keep the JVM
        // alive while the producer flushes in-flight messages.
        // Thread.sleep(Integer.MAX_VALUE);
    }
}
consumer:
application.yml
server:
servlet:
context-path: /consumer
port: 8002
spring:
kafka:
bootstrap-servers: 127.0.0.1:9092
consumer:
enable-auto-commit: false #consumer消息签收机制:手工签收(接收到消息之后必须手工签收-不然下次还会接收相同消息)
# 该属性指定了消费者在读取一个没有偏移量的分区或者偏移量无效的情况下该作何处理:
# latest (默认值)在偏移量无效的情况下,消费者将从最新的记录开始读取数据(在消费者启动之后生成的记录)
# earliest :在偏移量无效的情况下,消费者将从起始位置读取分区的记录
auto-offset-reset: earliest
#反序列化配置(注意:consumer端的属性是 key-deserializer / value-deserializer,写成 key-serializer 不会生效)
key-deserializer: org.apache.kafka.common.serialization.StringDeserializer #kafka消息反序列化配置
value-deserializer: org.apache.kafka.common.serialization.StringDeserializer #kafka消息反序列化配置
# MANUAL-消息侦听器负责确认(Acknowledgment);之后,将应用与BATCH相同的语义。
# MANUAL_IMMEDIATE-侦听器调用Acknowledgment.acknowledge()方法时,立即提交偏移量。
listener:
ack-mode: manual
concurrency: 5 #并行度
接收消息代码:
package com.item.kafka.consumer.Service;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Component;

/**
 * Kafka consumer service. Both listeners belong to group "group01" and
 * acknowledge records manually, matching ack-mode: manual and
 * enable-auto-commit: false in application.yml — without the explicit
 * acknowledge() the same records would be redelivered.
 */
@Slf4j
@Component
public class KafkaConsumerService {

    /** Consumes topic01: logs the payload, then manually acknowledges the record. */
    @KafkaListener(groupId = "group01", topics = "topic01")
    public void onMessage(ConsumerRecord<String, Object> record, Acknowledgment acknowledgment, Consumer<?, ?> consumer) {
        // record.value() is the message payload (original had a discarded
        // bare record.value() statement here — removed as a no-op).
        log.info("1消费短消息:{}", record.value());
        // Manual acknowledgment — commits the offset for this record.
        acknowledgment.acknowledge();
    }

    /** Consumes topic02: logs the payload, then manually acknowledges the record. */
    @KafkaListener(groupId = "group01", topics = "topic02")
    public void onMessage2(ConsumerRecord<String, Object> record, Acknowledgment acknowledgment, Consumer<?, ?> consumer) {
        log.info("2消费短消息:{}", record.value());
        acknowledgment.acknowledge();
    }
}
接下来测试看看:
问题:
1:如果分组不同,则第一次只会获取最后一次创建的分组的数据
比如,下图,第一次启动ConsumerApplication,会先将group02-topic02的消息消费完毕,第二次启动才会消费group01-topic01数据