一、生产者(Producer)
1.配置
spring:
  kafka:
    bootstrap-servers: 192.168.4.1:9092,192.168.4.2:9092,192.168.4.3:9092
    producer:
      # Number of times a failed send is retried
      retries: 1
      # Batch size in bytes: records for the same partition are grouped into
      # batches of up to this size before being sent
      batch-size: 163840
      # Total bytes of memory the producer may use to buffer records waiting to be sent
      buffer-memory: 33554432
      # Key/value serializers
      key-serializer: org.apache.kafka.common.serialization.StringSerializer
      value-serializer: org.apache.kafka.common.serialization.StringSerializer
      # acks=0   : the producer does not wait for any response from the broker
      # acks=1   : the producer gets a success response once the partition leader has the record
      # acks=all : the producer gets a success response only after all replicas have the record
      acks: 1
2.发送消息
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;
@RestController
public class KafkaProducerController {

    private final KafkaTemplate<String, Object> kafkaTemplate;

    // Constructor injection instead of field @Autowired: allows the field to be
    // final and makes the controller testable without a Spring context.
    public KafkaProducerController(KafkaTemplate<String, Object> kafkaTemplate) {
        this.kafkaTemplate = kafkaTemplate;
    }

    /**
     * Publishes the given message to the hard-coded Kafka topic {@code "topic"}.
     * The send is asynchronous; this endpoint returns immediately without
     * waiting for broker acknowledgement.
     *
     * @param message the message payload, taken from the {@code message} query parameter
     */
    @GetMapping("/send-message")
    public void sendMessage(@RequestParam("message") String message) {
        kafkaTemplate.send("topic", message);
    }
}
3.发送结果回调类
@Component
public class KafkaSendResultHandler implements ProducerListener {

    private static final Logger log = LoggerFactory.getLogger(KafkaSendResultHandler.class);

    /** Invoked after the broker acknowledges a record. */
    @Override
    public void onSuccess(ProducerRecord producerRecord, RecordMetadata recordMetadata) {
        // Parameterized logging: the message is only built if INFO is enabled.
        log.info("Message send success : {}", producerRecord);
    }

    /**
     * Invoked when a send fails. Logged at ERROR (was INFO) and the exception is
     * passed as the last argument so the stack trace is preserved (the original
     * discarded it).
     */
    @Override
    public void onError(ProducerRecord producerRecord, Exception exception) {
        log.error("Message send error : {}", producerRecord, exception);
    }
}
二、消费者
1.配置
spring:
  kafka:
    bootstrap-servers: 192.168.4.1:9092,192.168.4.2:9092,192.168.4.3:9092
    consumer:
      # Consumer group id shared by all listeners in this application
      group-id: consumer-1
      # Whether offsets are committed automatically (default true). To reduce
      # duplicated or lost records, set this to false and commit manually
      # via listener.ack-mode (see below).
      enable-auto-commit: true
      # Interval between automatic offset commits
      auto-commit-interval: 10s
      # Maximum number of records returned by a single poll
      # (i.e. the batch size seen by a batch listener)
      max-poll-records: 10
      # latest (default): with no valid committed offset, start from the newest records
      # earliest: with no valid committed offset, start from the beginning of the partition
      auto-offset-reset: latest
      # Heartbeat interval to the group coordinator
      heartbeat-interval: 3s
      # Key/value deserializers
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
    listener:
      # Batch listening: listener methods receive a List of records per invocation
      type: batch
      # Number of consumer threads run by the listener container
      concurrency: 4
      # The listener acks manually; each ack commits the offset immediately.
      # Uncomment together with enable-auto-commit: false.
      #ack-mode: manual_immediate
2.消费消息
@Component
public class KafkaConsumerService implements ConsumerSeekAware {

    // Netty event loop group used as a worker pool so record processing does not
    // block the Kafka poll thread.
    @Resource
    private NioEventLoopGroup kafkaConsumerGroup;

    @Autowired
    private SdkMessageHandler sdkMessageHandler;

    /**
     * Batch listener for topic {@code "topic"}: dispatches each record's value to
     * the message handler on the worker event loop.
     *
     * NOTE(review): the original referenced an undeclared {@code messageHandler}
     * (the field is {@code sdkMessageHandler}) and called
     * {@code StringUtils.isNotEmpty} on a List, though that overload takes a
     * CharSequence; both fixed with a plain null/empty check.
     *
     * @param records the batch of records returned by one poll (may be empty)
     */
    @KafkaListener(topics = "topic")
    public void sdkRsuBatchConsumer(List<ConsumerRecord<String, String>> records) {
        if (records != null && !records.isEmpty()) {
            records.forEach(record ->
                    kafkaConsumerGroup.execute(() -> sdkMessageHandler.process(record.value())));
        }
    }

    /**
     * On partition assignment, seeks every assigned partition to its end so this
     * consumer only processes records produced after it started.
     */
    @Override
    public void onPartitionsAssigned(Map<TopicPartition, Long> assignments, ConsumerSeekCallback callback) {
        assignments.forEach((tp, offset) -> callback.seekToEnd(tp.topic(), tp.partition()));
    }
}