Spring Boot Kafka Integration and Configuration

Native mode (kafka-clients)

        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-clients</artifactId>
            <version>3.0.0</version>
        </dependency>

Custom partitioner

/**
 * Custom partitioner: routes each record to a partition derived from its key hash.
 *
 * @Author: chen yang
 * @Date: 2023/5/7 11:34
 */
public class CustomerPartitioner implements Partitioner {
    @Override
    public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {

        List<PartitionInfo> partitionInfos = cluster.partitionsForTopic(topic);
        // Wrap the key hash onto the topic's partition count; keyless records land on partition 0
        int numPartitions = partitionInfos.size();
        return key == null ? 0 : (key.hashCode() & Integer.MAX_VALUE) % numPartitions;
    }

    @Override
    public void close() {

    }

    @Override
    public void configure(Map<String, ?> configs) {

    }
}

Producer

/**
 * @Author: chen yang
 * @Date: 2023/5/6 21:59
 */
public class Producer {
    public static void main(String[] args) throws ExecutionException, InterruptedException {

        Properties properties = new Properties();
        // Connect to the cluster
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");

        // Key/value serializer types
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());

        // Register the custom partitioner
        properties.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, CustomerPartitioner.class.getName());


        // batch.size: producer batch size, default 16 KB
        properties.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
        // linger.ms: how long to wait for a batch to fill before sending, default 0
        properties.put(ProducerConfig.LINGER_MS_CONFIG, 1);
        // buffer.memory: RecordAccumulator buffer size, default 32 MB
        properties.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
        // compression.type: default none; supported values: gzip, snappy, lz4, zstd
        properties.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "snappy");


        // acks
        // 0: no broker acknowledgement; 1: the leader must persist the record; -1 (all): the leader and every follower in the ISR must persist it
        // type: String, valid values [all, -1, 0, 1], default: all
        properties.put(ProducerConfig.ACKS_CONFIG, "all");

        // Retry count; defaults to Integer.MAX_VALUE
        properties.put(ProducerConfig.RETRIES_CONFIG, 3);

        // Transactional id (required for transactional sends)
        properties.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "default_transactional_id_23");

        KafkaProducer<String, String> producer = new KafkaProducer<>(properties);

        // Initialize and begin the transaction
        producer.initTransactions();
        producer.beginTransaction();

        try {
            // Asynchronous send
            producer.send(new ProducerRecord<>("test", "this is async message"));

            producer.send(new ProducerRecord<>("test", "this is a async rollback message!"), new Callback() {
                @Override
                public void onCompletion(RecordMetadata recordMetadata, Exception e) {
                    // Failed sends are retried automatically; no manual retry is needed in the callback
                    if (Objects.isNull(e)){
                        System.out.println("result: " + recordMetadata.topic() + ", partitions: " + recordMetadata.partition());
                    }
                }
            });

            // Synchronous send: simply call get() on the Future returned by the asynchronous send
            producer.send(new ProducerRecord<>("test", 1,"","this is sync message")).get();
            producer.commitTransaction();

        } catch (Exception e) {
            producer.abortTransaction();
        } finally {
            producer.close();
        }
    }
}
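
The transactional producer above assumes the broker can create its internal transaction state topic. On a single-broker development setup the defaults (replication factor 3, min ISR 2) prevent that, so a common adjustment in the broker's server.properties is shown below; these values are for local testing only:

transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1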

Consumer

/**
 * @Author: chen yang
 * @Date: 2023/7/9 10:37
 */
public class Consumer {

    public static void main(String[] args) {

        Properties properties = new Properties();
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, "consumer_01");
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties)) {
            // Subscribe to topics
//            ArrayList<String> topics = new ArrayList<>();
//            consumer.subscribe(topics);

            // Or assign specific partitions of a topic
            ArrayList<TopicPartition> topicPartitions = new ArrayList<>();
            topicPartitions.add(new TopicPartition("night_topic", 1));
            consumer.assign(topicPartitions);

            // Optionally start consuming from a specific offset
//            consumer.seek(new TopicPartition("night_topic", 1), 3);

            ConsumerRecords<String, String> consumerRecords = consumer.poll(Duration.ofSeconds(2));
            consumerRecords.forEach(System.out::println);

            // Commit offsets manually
            consumer.commitAsync();
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
}
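
The example above polls only once and exits. In practice the consumer runs a poll loop and commits after each processed batch. A minimal sketch, reusing the same properties as above (the topic name is illustrative):

try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties)) {
    consumer.subscribe(Collections.singletonList("night_topic"));
    while (true) {
        // Block for up to 2 seconds waiting for records
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(2));
        records.forEach(System.out::println);
        // enable.auto.commit is false, so commit after each processed batch
        consumer.commitSync();
    }
}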

KafkaTemplate

        <dependency>
            <groupId>org.springframework.kafka</groupId>
            <artifactId>spring-kafka</artifactId>
        </dependency>

Configuration file

spring:
  kafka:
    bootstrap-servers: localhost:9092
    producer:
      batch-size: 16384
      acks: -1
      retries: 10
      transaction-id-prefix: transaction_05
      buffer-memory: 33554432
      key-serializer: org.apache.kafka.common.serialization.StringSerializer
      value-serializer: org.apache.kafka.common.serialization.StringSerializer
      properties:
        linger:
          ms: 2000
        partitioner:
          class: com.night.config.CustomerPartitionHandler
    consumer:
      group-id: g_01
      enable-auto-commit: false
      auto-offset-reset: latest
      max-poll-records: 500
#      auto-commit-interval: 2000  # only takes effect when enable-auto-commit is true
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      properties:
        session:
          timeout:
            ms: 120000 # session timeout: a rebalance is triggered if the consumer sends no heartbeat within this window
        request:
          timeout:
            ms: 18000

    listener:
      missing-topics-fatal: false # if true, startup fails when a listened-to topic does not exist
      type: batch
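
With listener.type: batch, listeners built from this configuration receive a whole poll's worth of records as a list. A minimal sketch using the default container factory (the topic name is illustrative; the group id comes from the YAML above):

@Component
public class BatchListener {

    // Receives up to max-poll-records messages per invocation
    @KafkaListener(topics = "night.topic")
    public void onBatch(List<ConsumerRecord<String, String>> records) {
        records.forEach(r -> System.out.println(r.partition() + ":" + r.offset() + " -> " + r.value()));
    }
}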

Custom partitioner

Note that partitioner.class is instantiated by the Kafka client itself via reflection, so the @Component annotation below is not what registers this class, and Spring beans cannot be injected into it.

/**
 * @Author: chen yang
 * @Date: 2023/7/8 11:02
 */
@Component
public class CustomerPartitionHandler implements Partitioner {

    @Override
    public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
        // Route by message content: "二" -> partition 2, "一" -> partition 1, everything else -> partition 0
        String payload = value == null ? "" : value.toString();
        if (payload.contains("二")) {
            return 2;
        } else if (payload.contains("一")) {
            return 1;
        } else {
            return 0;
        }
    }

    @Override
    public void close() {

    }

    @Override
    public void configure(Map<String, ?> configs) {

    }
}

Producer

@RestController
@RequiredArgsConstructor
public class HelloController {

    private final KafkaTemplate<String, String> kafkaTemplate;

    @GetMapping("/send")
    @Transactional // transaction-id-prefix is set in the configuration, so sends must run in a transaction: add this annotation or use KafkaTemplate#executeInTransaction
    public Boolean send(String msg){
        for (int i = 0; i < 10; i++) {
            kafkaTemplate.send("night.topic", null, "night key - " + i, msg + " - " + i).addCallback(success -> {
                // Success callback
                if (success == null || success.getRecordMetadata() == null){
                    return;
                }
                String topic = success.getRecordMetadata().topic();
                int partition = success.getRecordMetadata().partition();
                long offset = success.getRecordMetadata().offset();
                String key = success.getProducerRecord().key();
                System.out.println("send topic:" + topic +", partition: " + partition + ", key:" + key + ", offset: " + offset);
            }, failure -> {
                // Failure callback
                System.out.println("Failed to send message: " + failure.getMessage());
            });
        }
        return true;
    }
}
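
addCallback comes from the ListenableFuture returned by spring-kafka 2.x. spring-kafka 3.x changed send() to return a CompletableFuture, so on that version the equivalent callback looks roughly like this:

kafkaTemplate.send("night.topic", "night key - " + i, msg + " - " + i)
        .whenComplete((result, ex) -> {
            if (ex != null) {
                // Failure path
                System.out.println("Failed to send message: " + ex.getMessage());
            } else if (result != null && result.getRecordMetadata() != null) {
                // Success path
                System.out.println("send topic: " + result.getRecordMetadata().topic()
                        + ", partition: " + result.getRecordMetadata().partition()
                        + ", offset: " + result.getRecordMetadata().offset());
            }
        });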

Consumer configuration

Record filtering
/**
 * Record filter strategy: decides which consumed records to discard
 *
 * @Author: chen yang
 * @Date: 2023/7/8 12:20
 */
@Component
public class ConsumerFilterStrategy implements RecordFilterStrategy<String, String> {
    @Override
    public boolean filter(ConsumerRecord<String, String> consumerRecord) {
        // Returning true discards the record; here, anything containing "无效数据" ("invalid data") is dropped
        return consumerRecord.value().contains("无效数据");
    }
}

Consumer error handler
/**
 * Handles exceptions thrown from @KafkaListener methods
 *
 * @Author: chen yang
 * @Date: 2023/7/8 11:55
 */
@Component
public class ConsumerExceptionHandler implements ConsumerAwareListenerErrorHandler {

    @Override
    public Object handleError(Message<?> message, ListenerExecutionFailedException e, Consumer<?, ?> consumer) {
        System.out.println("消费异常:" + message.getPayload() + ", ex: " + e.getMessage());
        return null;
    }
}

Consumer container factory
/**
 * @Author: chen yang
 * @Date: 2023/7/8 12:22
 */
@Configuration
@RequiredArgsConstructor
public class KafkaConsumerConfig {

    private final KafkaTemplate<String, String> kafkaTemplate;

    private final ConsumerFilterStrategy consumerFilterStrategy;


    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, String> filterContainerFactory(){
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        // Needed for @SendTo replies; without it the console shows: "a KafkaTemplate is required to support replies"
        factory.setReplyTemplate(kafkaTemplate);

        factory.setConsumerFactory(initConsumerFactory());
        // Concurrency: keep it <= the topic's partition count; the per-poll record limit is set on the consumer factory
        factory.setConcurrency(1);

        // Enable batch listening
        factory.setBatchListener(true);

        // Works with RecordFilterStrategy: filtered records are acknowledged and discarded
        factory.setAckDiscarded(true);
        factory.setRecordFilterStrategy(consumerFilterStrategy);
        return factory;
    }



    @Bean
    public ConsumerFactory<String, String> initConsumerFactory(){
        HashMap<String, Object> configs = new HashMap<>();
        configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        configs.put(ConsumerConfig.GROUP_ID_CONFIG, "consumer_group_id_02");
        configs.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        configs.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, 10 * 1000); // ignored while auto-commit is disabled above
        configs.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
        configs.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 50);
        configs.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        configs.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        return new DefaultKafkaConsumerFactory<>(configs);
    }
}

@KafkaListener

@Component
public class HelloListener {

    @KafkaListener(topicPartitions = {
            @TopicPartition(topic = "night.topic", partitions = {"0"})
    }, errorHandler = "consumerExceptionHandler", containerFactory = "filterContainerFactory")
    public void consumer0(List<ConsumerRecord<String, String>> records, Consumer<String, String> consumer){
        System.out.println("first consumer receive list size: " + records.size());
        records.forEach(System.out::println);
        consumer.commitSync();
    }


    @KafkaListener(topicPartitions = {
            @TopicPartition(topic = "night.topic", partitions = {"1"})
    }, errorHandler = "consumerExceptionHandler")
    @SendTo("singleTopic")
    public String consumer1(List<ConsumerRecord<String, String>> records, Consumer<String, String> consumer){
        System.out.println("second consumer receive list size: " + records.size());
        records.forEach(System.out::println);
        consumer.commitSync();
        return "@SendTo annotation msg";
    }


    @KafkaListener(topicPartitions = {
            @TopicPartition(topic = "night.topic", partitions = {"2"})
    }, errorHandler = "consumerExceptionHandler")
    public void consumer2(List<ConsumerRecord<String, String>> records, Consumer<String, String> consumer){
        System.out.println("third consumer receive list size: " + records.size());
        records.forEach(System.out::println);
        consumer.commitSync();
    }
}
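
consumer1 forwards its return value to singleTopic via @SendTo, which requires the container factory to carry a reply template (see setReplyTemplate above). A minimal sketch of a listener that would receive those replies (the group id here is a made-up example):

@Component
public class ReplyListener {

    // Receives the strings that consumer1 returns and @SendTo forwards to singleTopic
    @KafkaListener(topics = "singleTopic", groupId = "reply_group")
    public void onReply(List<ConsumerRecord<String, String>> records) {
        records.forEach(r -> System.out.println("reply: " + r.value()));
    }
}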
