Spring Boot + Kafka: producing and consuming messages

Project pom

    <parent>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-parent</artifactId>
        <version>2.3.9.RELEASE</version>
        <relativePath/> <!-- lookup parent from repository -->
    </parent>
    <dependencies>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-web</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.kafka</groupId>
            <artifactId>spring-kafka</artifactId>
        </dependency>

        <dependency>
            <groupId>org.springframework.kafka</groupId>
            <artifactId>spring-kafka-test</artifactId>
        </dependency>
    </dependencies>
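
Not shown in the original pom section is the application entry point; a standard Boot main class is enough (the class name KafkaDemoApplication is arbitrary):

import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;

// Standard Spring Boot entry point; component scanning picks up the
// config, controller, and listener classes shown below.
@SpringBootApplication
public class KafkaDemoApplication {
    public static void main(String[] args) {
        SpringApplication.run(KafkaDemoApplication.class, args);
    }
}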

application.yml

spring:
  kafka:
    bootstrap-servers: 192.168.79.130:9092
    producer:
      # Number of retries after a send error.
      retries: 3
      # When several records are headed for the same partition, the producer puts them
      # in one batch. This sets the memory available to a single batch, in bytes.
      batch-size: 16384
      # Size of the producer's memory buffer.
      buffer-memory: 33554432
      # Key serializer
      key-serializer: org.apache.kafka.common.serialization.StringSerializer
      # Value serializer
      value-serializer: org.apache.kafka.common.serialization.StringSerializer
      # acks=0   : the producer does not wait for any response from the server before considering the write successful.
      # acks=1   : the producer gets a success response as soon as the partition leader has received the record.
      # acks=all : the producer gets a success response only after all replicating nodes have received the record.
      acks: all
    consumer:
      # Auto-commit interval. In Spring Boot 2.x this is a Duration and must follow the expected format, e.g. 1S, 1M, 2H, 5D.
      auto-commit-interval: 1S
      # What the consumer does when it reads a partition with no committed offset, or an invalid one:
      # latest (default): start from the newest records (those produced after the consumer started)
      # earliest: start from the beginning of the partition
      auto-offset-reset: latest
      # Whether to auto-commit offsets (default true). To avoid duplicates and data loss,
      # set it to false and commit offsets manually.
      enable-auto-commit: false
      # Key deserializer
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      # Value deserializer
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
    listener:
      # Number of threads running in the listener containers.
      concurrency: 5
      # The listener is responsible for acking; each acknowledge() call commits immediately.
      ack-mode: manual_immediate
      # Whether the container should fail to start when a topic is missing.
      missing-topics-fatal: false
      # Listeners consume messages in batches.
      type: batch
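
For reference, the producer keys above bind one-to-one to the native Kafka ProducerConfig constants; a minimal sketch of the equivalent raw properties (Spring Boot performs this binding for you, so this class is illustration only):

import java.util.Properties;
import org.apache.kafka.clients.producer.ProducerConfig;

public class ProducerPropsReference {
    // The raw producer properties that the yml above resolves to.
    public static Properties producerProps() {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.79.130:9092");
        props.put(ProducerConfig.RETRIES_CONFIG, 3);
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432L);
        props.put(ProducerConfig.ACKS_CONFIG, "all");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
                  "org.apache.kafka.common.serialization.StringSerializer");
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
                  "org.apache.kafka.common.serialization.StringSerializer");
        return props;
    }
}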

Code walkthrough

import lombok.extern.slf4j.Slf4j;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.listener.ConsumerAwareListenerErrorHandler;

@Configuration
@EnableKafka
@Slf4j
public class KafkaConfig {

    // Error handler that @KafkaListener methods can reference by bean name;
    // it receives the failed message, the exception, and the Consumer instance.
    @Bean
    public ConsumerAwareListenerErrorHandler consumerAwareErrorHandler() {
        return (message, exception, consumer) -> {
            log.error("--- consumerAwareErrorHandler ---");
            log.error("exception: {}, message payload: {}", exception, message.getPayload());
            return null;
        };
    }

}
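
Any @KafkaListener method that references this handler via errorHandler = "consumerAwareErrorHandler" routes its exceptions here instead of letting them propagate. A minimal sketch of such a listener (the group id topic.groupErr and the forced failure are made up for illustration; the parameter is a List because the yml sets the listener type to batch):

// Illustration only: a listener that always throws, so the failure is
// handed to the consumerAwareErrorHandler bean defined above.
@KafkaListener(topics = {"topic.test2"},
               groupId = "topic.groupErr",
               errorHandler = "consumerAwareErrorHandler")
public void alwaysFailing(List<String> records) {
    throw new IllegalStateException("simulated failure for " + records.size() + " records");
}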

KafkaAutoConfiguration source, trimmed to the parts used here.
When the developer has not defined these beans in a config class, Spring Boot creates them automatically from the kafka settings in application.yml.

@Configuration(
    proxyBeanMethods = false
)
@ConditionalOnClass({KafkaTemplate.class}) // active only when the kafka jar is on the classpath
@EnableConfigurationProperties({KafkaProperties.class}) // binds the kafka settings from application.yml
@Import({KafkaAnnotationDrivenConfiguration.class, KafkaStreamsAnnotationDrivenConfiguration.class}) // imports the annotation-driven Kafka configuration, covered below
public class KafkaAutoConfiguration {
    private final KafkaProperties properties;

    public KafkaAutoConfiguration(KafkaProperties properties) {
        this.properties = properties;
    }

    @Bean
    @ConditionalOnMissingBean({KafkaTemplate.class}) // created automatically when no custom KafkaTemplate bean exists
    public KafkaTemplate<?, ?> kafkaTemplate(ProducerFactory<Object, Object> kafkaProducerFactory,
            ProducerListener<Object, Object> kafkaProducerListener,
            ObjectProvider<RecordMessageConverter> messageConverter) {
        KafkaTemplate<Object, Object> kafkaTemplate = new KafkaTemplate(kafkaProducerFactory);
        messageConverter.ifUnique(kafkaTemplate::setMessageConverter);
        kafkaTemplate.setProducerListener(kafkaProducerListener);
        kafkaTemplate.setDefaultTopic(this.properties.getTemplate().getDefaultTopic());
        return kafkaTemplate;
    }

    @Bean
    @ConditionalOnMissingBean({ConsumerFactory.class}) // created automatically when no custom ConsumerFactory bean exists
    public ConsumerFactory<?, ?> kafkaConsumerFactory(ObjectProvider<DefaultKafkaConsumerFactoryCustomizer> customizers) {
        DefaultKafkaConsumerFactory<Object, Object> factory = new DefaultKafkaConsumerFactory(this.properties.buildConsumerProperties());
        customizers.orderedStream().forEach((customizer) -> {
            customizer.customize(factory);
        });
        return factory;
    }

    @Bean
    @ConditionalOnMissingBean({ProducerFactory.class}) // created automatically when no custom ProducerFactory bean exists
    public ProducerFactory<?, ?> kafkaProducerFactory(ObjectProvider<DefaultKafkaProducerFactoryCustomizer> customizers) {
        DefaultKafkaProducerFactory<?, ?> factory = new DefaultKafkaProducerFactory(this.properties.buildProducerProperties());
        String transactionIdPrefix = this.properties.getProducer().getTransactionIdPrefix();
        if (transactionIdPrefix != null) {
            factory.setTransactionIdPrefix(transactionIdPrefix);
        }

        customizers.orderedStream().forEach((customizer) -> {
            customizer.customize(factory);
        });
        return factory;
    }

    // ... other beans (ProducerListener, KafkaAdmin, etc.) omitted
}
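
Because of the @ConditionalOnMissingBean guards, overriding any of these beans only takes declaring your own. A sketch of replacing the auto-configured KafkaTemplate, placed in a @Configuration class such as the KafkaConfig above (the default-topic choice is just an example):

// Declaring our own KafkaTemplate makes the auto-configured one back off.
@Bean
public KafkaTemplate<String, Object> kafkaTemplate(ProducerFactory<String, Object> producerFactory) {
    KafkaTemplate<String, Object> template = new KafkaTemplate<>(producerFactory);
    template.setDefaultTopic("topic.test2"); // used by sendDefault(...)
    return template;
}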

Of the two imported configurations we only use plain Kafka, not Kafka Streams, so we only need to look at KafkaAnnotationDrivenConfiguration.

@Configuration(
    proxyBeanMethods = false
)
@ConditionalOnClass({EnableKafka.class})  // active only when the spring-kafka jar is on the classpath
class KafkaAnnotationDrivenConfiguration {
    private final KafkaProperties properties;
    private final RecordMessageConverter messageConverter;
    private final BatchMessageConverter batchMessageConverter;
    private final KafkaTemplate<Object, Object> kafkaTemplate;
    private final KafkaAwareTransactionManager<Object, Object> transactionManager;
    private final ConsumerAwareRebalanceListener rebalanceListener;
    private final ErrorHandler errorHandler;
    private final BatchErrorHandler batchErrorHandler;
    private final AfterRollbackProcessor<Object, Object> afterRollbackProcessor;
    private final RecordInterceptor<Object, Object> recordInterceptor;

    KafkaAnnotationDrivenConfiguration(KafkaProperties properties,
            ObjectProvider<RecordMessageConverter> messageConverter,
            ObjectProvider<BatchMessageConverter> batchMessageConverter,
            ObjectProvider<KafkaTemplate<Object, Object>> kafkaTemplate,
            ObjectProvider<KafkaAwareTransactionManager<Object, Object>> kafkaTransactionManager,
            ObjectProvider<ConsumerAwareRebalanceListener> rebalanceListener,
            ObjectProvider<ErrorHandler> errorHandler,
            ObjectProvider<BatchErrorHandler> batchErrorHandler,
            ObjectProvider<AfterRollbackProcessor<Object, Object>> afterRollbackProcessor,
            ObjectProvider<RecordInterceptor<Object, Object>> recordInterceptor) {
        this.properties = properties;
        this.messageConverter = (RecordMessageConverter)messageConverter.getIfUnique();
        this.batchMessageConverter = (BatchMessageConverter)batchMessageConverter.getIfUnique(() -> {
            return new BatchMessagingMessageConverter(this.messageConverter);
        });
        this.kafkaTemplate = (KafkaTemplate)kafkaTemplate.getIfUnique();
        this.transactionManager = (KafkaAwareTransactionManager)kafkaTransactionManager.getIfUnique();
        this.rebalanceListener = (ConsumerAwareRebalanceListener)rebalanceListener.getIfUnique();
        this.errorHandler = (ErrorHandler)errorHandler.getIfUnique();
        this.batchErrorHandler = (BatchErrorHandler)batchErrorHandler.getIfUnique();
        this.afterRollbackProcessor = (AfterRollbackProcessor)afterRollbackProcessor.getIfUnique();
        this.recordInterceptor = (RecordInterceptor)recordInterceptor.getIfUnique();
    }

    @Bean
    @ConditionalOnMissingBean // created when no custom bean is present
    ConcurrentKafkaListenerContainerFactoryConfigurer kafkaListenerContainerFactoryConfigurer() {
        ConcurrentKafkaListenerContainerFactoryConfigurer configurer = new ConcurrentKafkaListenerContainerFactoryConfigurer();
        configurer.setKafkaProperties(this.properties);
        MessageConverter messageConverterToUse = this.properties.getListener().getType().equals(Type.BATCH) ? this.batchMessageConverter : this.messageConverter;
        configurer.setMessageConverter((MessageConverter)messageConverterToUse);
        configurer.setReplyTemplate(this.kafkaTemplate);
        configurer.setTransactionManager(this.transactionManager);
        configurer.setRebalanceListener(this.rebalanceListener);
        configurer.setErrorHandler(this.errorHandler);
        configurer.setBatchErrorHandler(this.batchErrorHandler);
        configurer.setAfterRollbackProcessor(this.afterRollbackProcessor);
        configurer.setRecordInterceptor(this.recordInterceptor);
        return configurer;
    }

    @Bean
    @ConditionalOnMissingBean(
        name = {"kafkaListenerContainerFactory"}
    ) // created from the application.yml settings when no bean named kafkaListenerContainerFactory exists
    ConcurrentKafkaListenerContainerFactory<?, ?> kafkaListenerContainerFactory(
            ConcurrentKafkaListenerContainerFactoryConfigurer configurer,
            ObjectProvider<ConsumerFactory<Object, Object>> kafkaConsumerFactory) {
        ConcurrentKafkaListenerContainerFactory<Object, Object> factory = new ConcurrentKafkaListenerContainerFactory();
        configurer.configure(factory, (ConsumerFactory)kafkaConsumerFactory.getIfAvailable(() -> {
            return new DefaultKafkaConsumerFactory(this.properties.buildConsumerProperties());
        }));
        return factory;
    }

    @Configuration(
        proxyBeanMethods = false
    )
    @EnableKafka
    @ConditionalOnMissingBean(
        name = {"org.springframework.kafka.config.internalKafkaListenerAnnotationProcessor"}
    )
    static class EnableKafkaConfiguration {
        EnableKafkaConfiguration() {
        }
    }
}
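
The same back-off applies to kafkaListenerContainerFactory: it is only created when no bean with that exact name exists, and nothing stops you from registering additional factories under other names. A sketch of a hypothetical second factory, singleFactory, that serves single-record listeners while the default stays batch:

// Hypothetical: a non-batch factory for single-record listeners, selected
// per listener with @KafkaListener(containerFactory = "singleFactory").
@Bean
public ConcurrentKafkaListenerContainerFactory<Object, Object> singleFactory(
        ConsumerFactory<Object, Object> consumerFactory) {
    ConcurrentKafkaListenerContainerFactory<Object, Object> factory =
            new ConcurrentKafkaListenerContainerFactory<>();
    factory.setConsumerFactory(consumerFactory);
    factory.setBatchListener(false); // single-record, unlike the global type: batch
    factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
    return factory;
}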

Next, let's look at @EnableKafka.
@EnableKafka imports KafkaListenerConfigurationSelector, which in turn registers KafkaBootstrapConfiguration. At the end of the chain, two beans land in the container: KafkaListenerAnnotationBeanPostProcessor, the post-processor for the @KafkaListener annotation, and KafkaListenerEndpointRegistry, which manages the KafkaListener containers.

public class KafkaBootstrapConfiguration implements ImportBeanDefinitionRegistrar {
    public KafkaBootstrapConfiguration() {
    }

    public void registerBeanDefinitions(AnnotationMetadata importingClassMetadata, BeanDefinitionRegistry registry) {
        if (!registry.containsBeanDefinition("org.springframework.kafka.config.internalKafkaListenerAnnotationProcessor")) {
            registry.registerBeanDefinition("org.springframework.kafka.config.internalKafkaListenerAnnotationProcessor", new RootBeanDefinition(KafkaListenerAnnotationBeanPostProcessor.class));
        }

        if (!registry.containsBeanDefinition("org.springframework.kafka.config.internalKafkaListenerEndpointRegistry")) {
            registry.registerBeanDefinition("org.springframework.kafka.config.internalKafkaListenerEndpointRegistry", new RootBeanDefinition(KafkaListenerEndpointRegistry.class));
        }

    }
}

KafkaListenerAnnotationBeanPostProcessor processes the @KafkaListener and @KafkaListeners annotations.
KafkaListenerEndpointRegistry manages the Kafka listener containers; it can also be injected to control them at runtime, as sketched below.
Both classes contain quite a lot of code, so read through them on your own.
ConcurrentMessageListenerContainer also carries related logic and is worth a look.
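
A minimal sketch of that runtime control (the listener id "myListener" is hypothetical; it would be set via @KafkaListener(id = "myListener", ...)):

@Autowired
private KafkaListenerEndpointRegistry registry;

// Stops the container of the listener declared with id = "myListener".
public void stopMyListener() {
    MessageListenerContainer container = registry.getListenerContainer("myListener");
    if (container != null) {
        container.stop();
    }
}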

Producer logic

import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.SendResult;
import org.springframework.util.concurrent.ListenableFuture;
import org.springframework.util.concurrent.ListenableFutureCallback;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RestController;

@RestController
@Slf4j
public class KafkaController {

    @Autowired
    private KafkaTemplate<String, Object> kafkaTemplate;

    // custom topic
    public static final String TOPIC_TEST = "topic.test2";

    @GetMapping("sendstr/{message}")
    public void sendStrKafkaMessage(@PathVariable("message") String message) {
        // send the message
        ListenableFuture<SendResult<String, Object>> future = kafkaTemplate.send(TOPIC_TEST, message);
        future.addCallback(new ListenableFutureCallback<SendResult<String, Object>>() {
            @Override
            public void onFailure(Throwable throwable) {
                // handle a failed send
                log.info(TOPIC_TEST + " - producer failed to send message: " + throwable.getMessage());
            }

            @Override
            public void onSuccess(SendResult<String, Object> stringObjectSendResult) {
                // handle a successful send
                log.info(TOPIC_TEST + " - producer sent message successfully: " + stringObjectSendResult.toString());
            }
        });
    }

}
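
A hypothetical extra endpoint for the same controller, showing a keyed send; records that share a key are always routed to the same partition:

// Hypothetical: send with a record key to pin same-key records to one partition.
@GetMapping("sendkeyed/{key}/{message}")
public void sendKeyedKafkaMessage(@PathVariable("key") String key,
                                  @PathVariable("message") String message) {
    kafkaTemplate.send(TOPIC_TEST, key, message);
}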

Consumer logic

import java.util.List;

import lombok.extern.slf4j.Slf4j;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.messaging.handler.annotation.Header;
import org.springframework.stereotype.Component;

@Component
@Slf4j
public class KafkaListenerComponent {

    @KafkaListener(topics = {"topic.test2"},
                   groupId = "topic.group1")
    public void kafkaTest(String record, Acknowledgment ack,
                          @Header(KafkaHeaders.RECEIVED_TOPIC) String topic) {
        log.info("topic_test consumed: Topic:" + topic + ", Message:" + record);
        ack.acknowledge();
    }

    @KafkaListener(topics = {"topic.test2"},
                   groupId = "topic.group2",
                   errorHandler = "consumerAwareErrorHandler")
    public void listenBatchInfo(List<String> records, Acknowledgment acknowledgment) {
        log.info("-------listenBatchInfo-------, message size is {}", records.size());
        records.forEach(log::info);
        // manually acknowledge the batch
        acknowledgment.acknowledge();
    }
}
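
One caveat: application.yml sets spring.kafka.listener.type: batch, which makes every listener built by the default kafkaListenerContainerFactory a batch listener, so the single-record kafkaTest method above is likely to fail at runtime with a conversion error. One possible fix, using the hypothetical non-batch singleFactory sketched earlier:

// Hypothetical fix: bind the single-record listener to the non-batch factory.
@KafkaListener(topics = {"topic.test2"},
               groupId = "topic.group1",
               containerFactory = "singleFactory")
public void kafkaTest(String record, Acknowledgment ack,
                      @Header(KafkaHeaders.RECEIVED_TOPIC) String topic) {
    log.info("topic_test consumed: Topic:" + topic + ", Message:" + record);
    ack.acknowledge();
}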

Test results

Calling GET /sendstr/{message} should log the producer's success callback, followed by the consumed-message log lines from both consumer groups (topic.group1 and topic.group2).
