Using Kafka with Spring Boot

1. Kafka brokers depend on ZooKeeper, so set up a ZooKeeper environment first; a sketch follows below.
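
Step 2's server.properties points at a three-node ensemble on ports 2181-2183. The Kafka download already bundles ZooKeeper and its start scripts, so no separate install is needed. A minimal sketch of one node's config (the dataDir paths and peer ports here are assumptions, not from the original post; each node's dataDir must also contain a myid file holding that node's id):

# zookeeper1.properties (repeat for nodes 2 and 3 with their own dataDir and clientPort)
tickTime=2000
initLimit=10
syncLimit=5
dataDir=C:/kafka/zookeeper1
clientPort=2181
# ensemble members: server.<myid>=<host>:<peer-port>:<election-port>
server.1=localhost:2888:3888
server.2=localhost:2889:3889
server.3=localhost:2890:3890

Start each node with the bundled script:

bin\windows\zookeeper-server-start.bat config\zookeeper1.properties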

2. Download the Kafka server, configure it, and start it

After downloading, edit server.properties:

# ZooKeeper connection string (properties files use # comments; an inline // would
# be treated as part of the value)
zookeeper.connect=localhost:2181,localhost:2182,localhost:2183

# Kafka log (data) file path
log.dirs=C:/Users/Northking/Desktop/software/kafka/kafka_2.13-3.5.1/logs
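
The bootstrap-servers list in step 4 (ports 9092-9094) implies a three-broker cluster. As a sketch (the ids, ports, and paths are assumptions), each broker gets its own copy of server.properties with a unique broker.id, listener port, and log.dirs:

# server1.properties (repeat for brokers 2 and 3 with their own id, port, and log.dirs)
broker.id=1
listeners=PLAINTEXT://localhost:9092
log.dirs=C:/kafka/kafka-logs-1
zookeeper.connect=localhost:2181,localhost:2182,localhost:2183

Each broker is started with the bundled script:

bin\windows\kafka-server-start.bat config\server1.properties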

3. Add the spring-kafka dependency (no version needed; it is managed by Spring Boot)

<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
</dependency>

4. Add the configuration

spring:
  kafka:
    # broker addresses are comma-separated (not semicolons)
    bootstrap-servers: localhost:9092,localhost:9093,localhost:9094
    producer:
      key-serializer: org.apache.kafka.common.serialization.StringSerializer
      value-serializer: org.apache.kafka.common.serialization.StringSerializer
      acks: 1
      batch-size: 16384
      retries: 0
      buffer-memory: 33554432
    consumer:
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      # offsets are committed manually in the listeners, so auto-commit is off
      enable-auto-commit: false
      auto-commit-interval: 1s
      group-id: test
      auto-offset-reset: latest
    listener:
      # manual_immediate commits the offset as soon as ack.acknowledge() is called
      ack-mode: manual_immediate
      concurrency: 6
      missing-topics-fatal: false
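
The examples below use topics test2, test3, and test4. If broker-side auto topic creation is disabled, one option (not in the original post) is to declare them as NewTopic beans; Spring Boot's auto-configured KafkaAdmin creates them at startup. A sketch with illustrative partition and replica counts:

import org.apache.kafka.clients.admin.NewTopic;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.TopicBuilder;

@Configuration
public class KafkaTopicConfig {

    // Created automatically at startup by the auto-configured KafkaAdmin.
    @Bean
    public NewTopic test3Topic() {
        return TopicBuilder.name("test3")
                .partitions(6)   // matches the listener concurrency of 6
                .replicas(3)     // one replica per broker (illustrative)
                .build();
    }
}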

5. Implement the producer

import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

import javax.annotation.Resource;

import lombok.extern.slf4j.Slf4j;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.SendResult;
import org.springframework.util.concurrent.ListenableFuture;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;

@RestController
@RequestMapping("/kafka")
@Slf4j
public class KafkaController {
    @Resource
    private KafkaTemplate<String, Object> kafkaTemplate;

    // Synchronous send: block up to 3 seconds for the broker's acknowledgment.
    // The configured value serializer is StringSerializer, so send a String payload.
    @PostMapping("/send")
    public Object send(@RequestBody MessageTest messageTest) throws ExecutionException, InterruptedException, TimeoutException {
        ListenableFuture<SendResult<String, Object>> sendResult = kafkaTemplate.send("test2", messageTest.toString());
        SendResult<String, Object> result = sendResult.get(3, TimeUnit.SECONDS);
        return result.getProducerRecord().value();
    }

    // Asynchronous send: return immediately and handle the result in callbacks.
    @PostMapping("/sendAck")
    public void sendAck(@RequestBody MessageTest messageTest) {
        kafkaTemplate.send("test3", String.format("message payload: %s", messageTest))
                .addCallback(success -> {
                    String topic = success.getRecordMetadata().topic();
                    long offset = success.getRecordMetadata().offset();
                    int partition = success.getRecordMetadata().partition();
                    log.info("send succeeded: topic->{}, partition->{}, offset->{}", topic, partition, offset);
                }, failure -> log.info("send failed: {}", failure.getMessage()));
    }
}
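
Note that this controller targets spring-kafka 2.x; from spring-kafka 3.0 onward, KafkaTemplate.send() returns a CompletableFuture instead of a ListenableFuture, so the callback style changes to whenComplete(). The MessageTest payload class is not shown in the original; a minimal sketch (the field names are assumptions):

import lombok.Data;

// Hypothetical request body for the endpoints above; fields are illustrative.
@Data
public class MessageTest {
    private String id;
    private String message;
}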

6. Implement the consumers

import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.messaging.handler.annotation.Header;
import org.springframework.stereotype.Component;

@Component
@Slf4j
public class KafkaListenerConfig {

    // Commit via the Acknowledgment handle, as required by ack-mode: manual_immediate.
    @KafkaListener(topics = {"test3"}, groupId = "demo-g1")
    public void onMessage(ConsumerRecord<?, ?> record, Acknowledgment ack,
                          @Header(KafkaHeaders.RECEIVED_TOPIC) String topic) {
        try {
            log.info("topic: {}, partition: {}", record.topic(), record.partition());
            log.info("consumed message: {}", record.value());
        } finally {
            ack.acknowledge();
        }
    }

    // A second consumer group on the same topic (the original used demo-g1 here,
    // but its log message says "another consumer group"). This listener commits
    // offsets directly through the injected Consumer; use either commitSync()
    // or ack.acknowledge(), since committing the same offset twice is redundant.
    @KafkaListener(topics = {"test3", "test4"}, groupId = "demo-g2")
    public void onMessage2(ConsumerRecord<?, ?> record, Consumer<String, Object> consumer,
                           Acknowledgment ack,
                           @Header(KafkaHeaders.RECEIVED_TOPIC) String topic) {
        try {
            log.info("second group topic: {}, partition: {}", record.topic(), record.partition());
            log.info("consumed message: {}", record.value());
        } finally {
            consumer.commitSync();
        }
    }
}
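
To try it end to end (assuming the app runs on the default port 8080), post a message and watch both listeners log it:

curl -X POST http://localhost:8080/kafka/sendAck -H "Content-Type: application/json" -d "{\"id\":\"1\",\"message\":\"hello kafka\"}"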
