SpringBoot——》集成Kafka示例

74 篇文章 6 订阅
5 篇文章 0 订阅

推荐链接:
    总结——》【Java】
    总结——》【Mysql】
    总结——》【Redis】
    总结——》【Spring】
    总结——》【SpringBoot】
    总结——》【MyBatis、MyBatis-Plus】

一、pom

<properties>
  <!-- spring-kafka 2.4.x is built against kafka-clients 2.4.x; keep the two versions aligned -->
  <spring.kafka.version>2.4.3.RELEASE</spring.kafka.version>
  <kafka-client.version>2.4.1</kafka-client.version>
</properties>

<dependencyManagement>
  <dependencies>
    <!-- Pin the raw Kafka client so transitive versions cannot drift -->
    <dependency>
      <groupId>org.apache.kafka</groupId>
      <artifactId>kafka-clients</artifactId>
      <version>${kafka-client.version}</version>
    </dependency>
    <!-- Spring integration layer (listener containers, KafkaTemplate, annotations) -->
    <dependency>
      <groupId>org.springframework.kafka</groupId>
      <artifactId>spring-kafka</artifactId>
      <version>${spring.kafka.version}</version>
    </dependency>
  </dependencies>
</dependencyManagement>

二、配置参数

1、生产者

# Comma-separated Kafka broker list shared by producers and consumers.
spring.kafka.bootstrap-servers = kafka-s1:9092,kafka-s2:9092,kafka-s3:9092
# 0 retries = fail fast on transient send errors; messages may be lost on broker hiccups.
spring.kafka.producer.retries = 0
# acks=1: leader-only acknowledgement -- faster, but data loss possible on leader failover.
spring.kafka.producer.acks = 1
# Max bytes batched per partition before a send is triggered (16 KB).
spring.kafka.producer.batch-size = 16384
# Total memory the producer may use to buffer unsent records (32 MB).
spring.kafka.producer.buffer-memory = 33554432
# Keys and values are sent as plain strings.
spring.kafka.producer.key-serializer = org.apache.kafka.common.serialization.StringSerializer
spring.kafka.producer.value-serializer = org.apache.kafka.common.serialization.StringSerializer

2、消费者

# Comma-separated Kafka broker list shared by producers and consumers.
spring.kafka.bootstrap-servers = kafka-s1:9092,kafka-s2:9092,kafka-s3:9092
# How often offsets are auto-committed when enable-auto-commit is true.
spring.kafka.consumer.auto-commit-interval = 1S
# With no committed offset, start from the beginning of the partition.
spring.kafka.consumer.auto-offset-reset = earliest
# Offsets are committed automatically by the client (at-most-once-ish semantics).
spring.kafka.consumer.enable-auto-commit = true
# Keys and values are read as plain strings.
spring.kafka.consumer.key-deserializer = org.apache.kafka.common.serialization.StringDeserializer
spring.kafka.consumer.value-deserializer = org.apache.kafka.common.serialization.StringDeserializer
# FIX: listener concurrency lives under spring.kafka.listener.*, not spring.kafka.consumer.*;
# the original key (spring.kafka.consumer.listener.concurrency) is silently ignored by Spring Boot.
spring.kafka.listener.concurrency = 5
# Default consumer group id.
spring.kafka.consumer.group-id = g1

三、消费者配置类KafkaConsumerConfig.java

import com.eju.goodhouse.service.business.consumer.SyncEsfCommunityComsumer;
import jodd.util.StringUtil;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;

import java.util.HashMap;
import java.util.Map;


@Configuration
@EnableKafka
public class KafkaConsumerConfig {

    /** Comma-separated Kafka broker list, e.g. host1:9092,host2:9092. */
    @Value("${spring.kafka.bootstrap-servers}")
    private String broker;

    /** Default consumer group used when no explicit group id is supplied. */
    @Value("${spring.kafka.consumer.group-id}")
    private String groupId;

    /** Where to start without a committed offset: earliest / latest / none. */
    @Value("${spring.kafka.consumer.auto-offset-reset}")
    private String autoOffsetReset;

    /** "true" = client auto-commits offsets, "false" = manual commit. */
    @Value("${spring.kafka.consumer.enable-auto-commit}")
    private String enableAutoCommit;

    /**
     * Builds the Kafka consumer properties for one listener container.
     *
     * @param consumerGroupId group id to use; falls back to the configured default
     *                        ({@code spring.kafka.consumer.group-id}) when null or blank
     * @return mutable map of consumer settings keyed by {@link ConsumerConfig} constants
     */
    public Map<String, Object> consumerConfigs(String consumerGroupId) {
        Map<String, Object> propsMap = new HashMap<>();
        propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, broker);
        propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit);
        // NOTE(review): hard-coded 100 ms here; the properties file declares
        // spring.kafka.consumer.auto-commit-interval = 1S, which is NOT applied.
        // Confirm which commit interval is actually intended.
        propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "100");
        // Broker considers the consumer dead if no heartbeat arrives within this window.
        propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "15000");
        // Keys and values arrive as plain strings.
        propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        // Fall back to the configured default group when the caller supplies none.
        propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, isNotBlank(consumerGroupId) ? consumerGroupId : groupId);
        propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
        // Upper bound on records returned per poll; in batch mode this is the max batch size.
        propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 1000);
        return propsMap;
    }

    /**
     * True when the string is non-null and contains at least one non-whitespace
     * character. Replaces the former jodd {@code StringUtil.isNotBlank} so this
     * class needs no third-party utility for a trivial check.
     */
    private static boolean isNotBlank(String s) {
        return s != null && !s.trim().isEmpty();
    }

    /**
     * Creates a consumer factory whose consumers join the given group
     * (or the configured default group when blank).
     */
    public ConsumerFactory<String, String> consumerFactory(String consumerGroupId) {
        return new DefaultKafkaConsumerFactory<>(consumerConfigs(consumerGroupId));
    }

    /**
     * Batch listener container factory for group "g1": 4 concurrent consumers,
     * 3 s poll timeout. Batch size is bounded by MAX_POLL_RECORDS_CONFIG above.
     */
    @Bean("kafkaListenerContainerFactory")
    KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        // Group id fixed to "g1" for this container (a blank value would fall back
        // to the default group -- see consumerConfigs).
        factory.setConsumerFactory(consumerFactory("g1"));
        factory.setConcurrency(4);
        // Deliver records to the listener in batches rather than one at a time.
        factory.setBatchListener(true);
        factory.getContainerProperties().setPollTimeout(3000);
        return factory;
    }

    /**
     * Batch listener container factory for group "g2": single consumer,
     * 3 s poll timeout. AckCount/AckTime configure commit-after-10-records /
     * commit-after-10-seconds behaviour -- NOTE(review): these only take effect
     * with COUNT/TIME ack modes, not with client auto-commit; confirm intent.
     */
    @Bean("kafkaListenerContainerFactory2")
    KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory2() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory("g2"));
        factory.setConcurrency(1);
        // Deliver records to the listener in batches rather than one at a time.
        factory.setBatchListener(true);
        factory.getContainerProperties().setPollTimeout(3000);
        factory.getContainerProperties().setAckCount(10);
        factory.getContainerProperties().setAckTime(10000);
        return factory;
    }

    /** Registers the listener bean that consumes the community/region topics. */
    @Bean
    public SyncEsfCommunityComsumer listenerForSyncEsfCommunity() {
        return new SyncEsfCommunityComsumer();
    }
}

四、消费者SyncEsfCommunityComsumer.java

import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;

import java.util.List;
import java.util.Optional;

@Slf4j
public class SyncEsfCommunityComsumer {

    /**
     * Batch listener for the community topic. Records arrive in batches via
     * the "kafkaListenerContainerFactory" container; null payloads are skipped.
     */
    @KafkaListener(id = "listenerForSyncEsfCommunity", topics = "${monitor.house-asset-community.topic}", containerFactory = "kafkaListenerContainerFactory")
    public void listenerForSyncEsfCommunity(List<ConsumerRecord<?, ?>> records) throws Exception {
        log.info("【listenerForSyncEsfCommunity】records size:【{}】, Thread ID:【{}】", records.size(), Thread.currentThread().getId());
        for (ConsumerRecord<?, ?> item : records) {
            Object payload = item.value();
            if (payload == null) {
                continue; // tombstone or empty record -- nothing to process
            }
            // TODO business processing
        }
    }

    /**
     * Batch listener for the region topic. Records arrive in batches via
     * the "kafkaListenerContainerFactory2" container; null payloads are skipped.
     */
    @KafkaListener(id = "listenerForSyncEsfRegion", topics = "${monitor.house-asset-region.topic}", containerFactory = "kafkaListenerContainerFactory2")
    public void listenerForSyncEsfRegion(List<ConsumerRecord<?, ?>> records) throws Exception {
        log.info("【listenerForSyncEsfRegion】records size:【{}】, Thread ID:【{}】", records.size(), Thread.currentThread().getId());
        for (ConsumerRecord<?, ?> item : records) {
            Object payload = item.value();
            if (payload == null) {
                continue; // tombstone or empty record -- nothing to process
            }
            // TODO business processing
        }
    }

}

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值