Spring Boot integration with Kafka: listening for and consuming messages

KafkaConfig


import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Bean;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
import org.springframework.kafka.listener.ContainerProperties;
import org.springframework.stereotype.Component;

import com.alibaba.fastjson.JSON;

import lombok.extern.slf4j.Slf4j;

@Slf4j
@Component
@ConditionalOnProperty(value = "middle.kafka.enabled", matchIfMissing = false)
public class KafkaConfig {

    @Value("${spring.kafka.bootstrap-servers}")
    private String bootstrapServer;

    @Value("${spring.kafka.consumer.enable-auto-commit:false}")
    private Boolean enableAutoCommit;

    @Value("${spring.kafka.consumer.auto-commit-interval:1000}")
    private String autoCommitInterval;

    @Value("${spring.kafka.consumer.auto-offset-reset:earliest}")
    private String autoOffsetReset;

    @Value("${spring.kafka.consumer.max-poll-records:100}")
    private String maxPollRecords;

    @Value("${spring.kafka.consumer.key-deserializer:org.apache.kafka.common.serialization.StringDeserializer}")
    private String keyDeserializer;

    @Value("${spring.kafka.consumer.value-deserializer:org.apache.kafka.common.serialization.StringDeserializer}")
    private String valueDeserializer;

    @Value("${spring.kafka.listener.concurrency:1}")
    private Integer concurrency;

    @Value("${spring.kafka.listener.ack-mode:MANUAL_IMMEDIATE}")
    private String ackMode;

    @Value("${middle.kafka.listener.missing-topics-fatal:false}")
    private Boolean missingTopicsFatal;

    @Bean(name = "middleGroundKafkaListenerContainerFactory")
    KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        factory.setBatchListener(true);
        // Concurrency: multiple service instances share the topic's partitions between them
        factory.setConcurrency(this.concurrency);
        factory.setMissingTopicsFatal(missingTopicsFatal);
        ContainerProperties containerProperties = factory.getContainerProperties();
        // Ack mode; MANUAL_IMMEDIATE commits the offset as soon as acknowledge() is called
        containerProperties.setAckMode(ackMode(this.ackMode));
        return factory;

    }

    private ContainerProperties.AckMode ackMode(String ackMode) {
        return ContainerProperties.AckMode.valueOf(ackMode);
    }

    private ConsumerFactory<String, String> consumerFactory() {
        Map<String, Object> consumerConfigs = consumerConfigs();
        log.info("消费者的配置信息:{}", JSON.toJSONString(consumerConfigs));
        return new DefaultKafkaConsumerFactory<>(consumerConfigs);
    }


    @Bean
    public Map<String, Object> consumerConfigs() {
        Map<String, Object> propsMap = new HashMap<>();
        // Broker addresses
        propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, this.bootstrapServer);
        // Whether offsets are committed automatically
        propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, this.enableAutoCommit);
        // Auto-commit interval
        //    propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, autoCommitInterval);
        // Session timeout
        //    propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, kafkaListenerProperties.getSessionTimeOut());
        // Key deserializer
        propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, this.keyDeserializer);
        // Value deserializer
        propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, this.valueDeserializer);
        // Heartbeat interval
        //    propsMap.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, kafkaListenerProperties.getHeartbeatInterval());

        // Consumer group id
        //    propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, kafkaListenerProperties.getGroupId());
        // Offset reset strategy (earliest/latest)
        propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, this.autoOffsetReset);
        // Maximum number of records returned per poll
        propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, this.maxPollRecords);
        // Maximum poll interval
        //    propsMap.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, kafkaListenerProperties.getMaxPollInterval());
        return propsMap;
    }

    @Bean
    public KafkaConsumer<String, String> initKafkaConsumer() {
        // A standalone Apache Kafka consumer built from the same config map
        return new KafkaConsumer<>(consumerConfigs());
    }

    @Bean
    public AdminClient initAdminClient() {
        // Admin client for topic management, reusing the bootstrap servers from the consumer config
        return AdminClient.create(consumerConfigs());
    }


}
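
The initAdminClient bean above can be used to sanity-check the brokers at startup, for example to verify that the configured topic actually exists. A minimal sketch, assuming the KafkaConfig class above is in the application context; the class name TopicCheckRunner and the 5-second timeout are illustrative, not part of the original code:

import java.util.Set;
import java.util.concurrent.TimeUnit;

import org.apache.kafka.clients.admin.AdminClient;
import org.springframework.boot.ApplicationArguments;
import org.springframework.boot.ApplicationRunner;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.stereotype.Component;

import lombok.extern.slf4j.Slf4j;

@Slf4j
@Component
@ConditionalOnProperty(value = "middle.kafka.enabled", matchIfMissing = false)
public class TopicCheckRunner implements ApplicationRunner {

    private final AdminClient adminClient;

    public TopicCheckRunner(AdminClient adminClient) {
        this.adminClient = adminClient;
    }

    @Override
    public void run(ApplicationArguments args) throws Exception {
        // List the topics visible on the configured brokers and log them
        Set<String> topics = adminClient.listTopics().names().get(5, TimeUnit.SECONDS);
        log.info("Topics available on the broker: {}", topics);
    }
}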


Kafka producer

    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;

    public void sendMsg(String msg) {
        kafkaTemplate.send("topic", msg);
    }
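
The snippet above relies on Spring Boot's auto-configured KafkaTemplate. A slightly fuller producer sketch, assuming Spring Kafka 2.x where send() returns a ListenableFuture; the class name DemoProducer and the topic demo-topic are placeholders:

import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.SendResult;
import org.springframework.stereotype.Component;

import lombok.extern.slf4j.Slf4j;

@Slf4j
@Component
public class DemoProducer {

    private final KafkaTemplate<String, String> kafkaTemplate;

    public DemoProducer(KafkaTemplate<String, String> kafkaTemplate) {
        this.kafkaTemplate = kafkaTemplate;
    }

    public void sendMsg(String msg) {
        // send() is asynchronous; register callbacks to log the broker's response
        kafkaTemplate.send("demo-topic", msg).addCallback(
                (SendResult<String, String> result) ->
                        log.info("Sent to partition {} offset {}",
                                result.getRecordMetadata().partition(),
                                result.getRecordMetadata().offset()),
                ex -> log.error("Failed to send message", ex));
    }
}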

KafkaConsumer (the consumer)

import java.util.List;
import java.util.Optional;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Component;

import lombok.extern.slf4j.Slf4j;

@Slf4j
@Component
@ConditionalOnProperty(value = "middle.kafka.enabled", matchIfMissing = false)
public class KafkaConsumer {

    @Value("${spring.kafka.enabled}")
    private boolean enabled;


    @KafkaListener(topics = "${middle.kafka.consumer.topic:}",  // topic(s) to subscribe to
            groupId = "${middle.kafka.consumer.group-id:group-1}",  // consumer group
            containerFactory = "middleGroundKafkaListenerContainerFactory")
    public void pgSqlConsumer(List<ConsumerRecord<String, String>> records, Acknowledgment ack) {
        if (!enabled) {
            // Runtime switch is off: skip processing (offsets are not committed while disabled)
            log.info("Kafka message consumption is switched off");
            return;
        }
        log.info("Start consuming Kafka messages");
        try {
            for (ConsumerRecord<String, String> record : records) {
                // Batch listener: records arrive as a list and are handled one by one
                Optional<String> message = Optional.ofNullable(record.value());
                if (message.isPresent()) {
                    String msg = message.get();
                    // TODO: business processing of msg goes here
                }
            }
        } catch (Exception e) {
            log.error("Failed to process Kafka messages, records={}", records, e);
        } finally {
            ack.acknowledge(); // manually commit the offset
        }
    }
}
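
One caveat with the listener above: the catch block only logs the error while the finally block still commits the offset, so a failed batch is effectively skipped. A minimal alternative sketch of the same listener method that commits only on success; handleMessage is a hypothetical placeholder for the business logic, and uncommitted batches are re-delivered only after a rebalance or restart:

    public void pgSqlConsumer(List<ConsumerRecord<String, String>> records, Acknowledgment ack) {
        try {
            for (ConsumerRecord<String, String> record : records) {
                handleMessage(record.value()); // hypothetical business method
            }
            // Commit only after the whole batch was processed successfully
            ack.acknowledge();
        } catch (Exception e) {
            // Do not acknowledge: the offset stays uncommitted, so the batch
            // is delivered again after a rebalance or restart
            log.error("Failed to process Kafka batch, records={}", records, e);
        }
    }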
