Integrating Kafka with Spring Boot

Add the Kafka-related dependencies to pom.xml:

<dependencies>
    <dependency>
        <groupId>org.springframework.kafka</groupId>
        <artifactId>spring-kafka</artifactId>
    </dependency>

    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-test</artifactId>
        <scope>test</scope>
    </dependency>
    <dependency>
        <groupId>org.springframework.kafka</groupId>
        <artifactId>spring-kafka-test</artifactId>
        <scope>test</scope>
    </dependency>
</dependencies>
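
These dependencies declare no explicit versions; they are usually managed by the Spring Boot parent POM. A sketch of the parent declaration (the version shown is illustrative):

<parent>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-parent</artifactId>
    <version>2.0.5.RELEASE</version>
</parent>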

Application properties:

#============== kafka ===================
# Kafka broker address; for a cluster, list multiple addresses separated by commas
spring.kafka.bootstrap-servers=127.0.0.1:9092

#=============== producer =======================
# Number of retries on write failure. When the leader node fails, a replica takes over as leader,
# and writes may fail during the handover. With retries=0 the producer never resends; with a
# positive value it resends once the replica has fully become the leader, so the message is not lost.
spring.kafka.producer.retries=0
# Batch size in bytes: the producer accumulates records and sends them in one batch
spring.kafka.producer.batch-size=16384
# Total memory the producer may use to buffer records waiting to be sent; data is flushed
# when the buffer.memory limit is reached
spring.kafka.producer.buffer-memory=33554432

# acks is the number of acknowledgments the producer requires the leader to have received before
# considering a request complete; it controls the durability of records on the server side:
# acks=0   The producer does not wait for any acknowledgment from the server; the record is added
#          to the socket buffer and considered sent. There is no guarantee the server received it,
#          the retries setting has no effect (the client generally won't learn of failures), and
#          the offset returned for each record is always -1.
# acks=1   The leader writes the record to its local log and responds without waiting for full
#          acknowledgment from all replicas. If the leader fails right after acknowledging the
#          record but before the replicas have copied it, the record is lost.
# acks=all The leader waits for the full set of in-sync replicas to acknowledge the record. The
#          record is not lost as long as at least one in-sync replica stays alive. This is the
#          strongest guarantee and is equivalent to acks=-1.
# Valid values: all, -1, 0, 1
spring.kafka.producer.acks=1

# Serializers for message keys and values
spring.kafka.producer.key-serializer=org.apache.kafka.common.serialization.StringSerializer
spring.kafka.producer.value-serializer=org.apache.kafka.common.serialization.StringSerializer

#=============== consumer  =======================
# Default consumer group id. Within a group each message is delivered to only one consumer;
# group.id names the group.
spring.kafka.consumer.group-id=testGroup
# Where to start when there is no committed offset: earliest reads from the beginning of the log,
# latest reads from the end (these replace the old smallest/largest values); earliest is the usual choice
spring.kafka.consumer.auto-offset-reset=earliest
# enable.auto.commit=true: commit offsets automatically
spring.kafka.consumer.enable-auto-commit=true
# If 'enable.auto.commit' is true, the frequency in milliseconds at which offsets are
# auto-committed to Kafka; the default is 5000
spring.kafka.consumer.auto-commit-interval=100

# Deserializers for message keys and values
spring.kafka.consumer.key-deserializer=org.apache.kafka.common.serialization.StringDeserializer
spring.kafka.consumer.value-deserializer=org.apache.kafka.common.serialization.StringDeserializer
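
With only these properties, Spring Boot auto-configures the producer and consumer factories, so a minimal send/receive sketch (the topic name is illustrative) needs no further configuration:

@Component
public class QuickDemo {

    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;

    // producer side: the auto-configured KafkaTemplate picks up the spring.kafka.producer.* properties
    public void send(String msg) {
        kafkaTemplate.send("topic.quick.demo", msg);
    }

    // consumer side: uses spring.kafka.consumer.* (group id, deserializers, offset reset, ...)
    @KafkaListener(topics = "topic.quick.demo")
    public void receive(String msg) {
        System.out.println("received: " + msg);
    }
}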

Configuration class

With Spring Boot these beans do not have to be configured by hand, though Spring Boot's defaults can of course be overridden with your own configuration. Without Spring Boot, configuration like the following is needed. It covers both the producer and the consumer; in practice you often only need one of the two.
@Configuration
@EnableKafka
public class KafkaConfiguration {

    //ConcurrentKafkaListenerContainerFactory is the factory for creating Kafka listener containers; only the consumer is configured here
    @Bean
    public ConcurrentKafkaListenerContainerFactory<Integer, String> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<Integer, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        return factory;
    }

    //create the consumer factory from the parameters in consumerProps()
    @Bean
    public ConsumerFactory<Integer, String> consumerFactory() {
        return new DefaultKafkaConsumerFactory<>(consumerProps());
    }

    //create the producer factory from the parameters in senderProps()
    @Bean
    public ProducerFactory<Integer, String> producerFactory() {
        return new DefaultKafkaProducerFactory<>(senderProps());
    }

    //KafkaTemplate provides the send (and receive) operations
    @Bean
    public KafkaTemplate<Integer, String> kafkaTemplate() {
        KafkaTemplate<Integer, String> template = new KafkaTemplate<>(producerFactory());
        return template;
    }

    //consumer configuration parameters
    private Map<String, Object> consumerProps() {
        Map<String, Object> props = new HashMap<>();
        //broker address
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        //group id
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "bootKafka");
        //whether to auto-commit offsets
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true);
        //auto-commit interval
        props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "100");
        //session timeout
        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "15000");
        //key deserializer
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class);
        //value deserializer
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        return props;
    }

    //producer configuration parameters
    private Map<String, Object> senderProps() {
        Map<String, Object> props = new HashMap<>();
        //broker address
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        //retries; 0 disables the retry mechanism
        props.put(ProducerConfig.RETRIES_CONFIG, 1);
        //batch size, in bytes
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
        //linger for 1 ms before sending a batch; this reduces the number of send requests and raises throughput
        props.put(ProducerConfig.LINGER_MS_CONFIG, 1);
        //total memory in bytes the producer may use to buffer records waiting to be sent
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 1024000);
        //key serializer
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class);
        //value serializer
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        return props;
    }

}

 

Synchronous send demo (using KafkaTemplate directly)

@Component
@Slf4j
public class SendKafkaUtils {
    @Autowired
    private KafkaTemplate<String, Object> kafkaTemplate;

    @Value("${kafka.topic}")
    private String topic; // topic name

    /**
     * Send a standard message to a topic
     * @param topic      the target topic
     * @param messageDTO the standard message
     */
    public void sendToStandard(String topic, MessageDTO messageDTO) {
        try {
            ListenableFuture<SendResult<String, Object>> send = kafkaTemplate.send(topic, messageDTO.getKey(), messageDTO);
            final SendResult<String, Object> result = send.get();
            if (!result.getRecordMetadata().hasOffset()) {
                throw new SendKafkaException("send kafka topic fail, topic=" + topic);
            }
        } catch (Exception e) {
            throw new SendKafkaException("send kafka topic fail, topic=" + topic, e);
        }
    }
    
    /**
     * Send an arbitrary message to a topic
     * @param topic the target topic
     * @param o     the message
     */
    public void sendToOut(String topic, Object o) {
        try {
            ListenableFuture<SendResult<String, Object>> send = kafkaTemplate.send(topic, o);
            final SendResult<String, Object> result = send.get();
            if (!result.getRecordMetadata().hasOffset()) {
                throw new SendKafkaException(String.format("send kafka topic fail, topic=%s, message=%s", topic, JsonUtils.objectToJson(o)));
            }
        } catch (Exception e) {
            throw new SendKafkaException(String.format("send kafka topic fail, topic=%s, message=%s", topic, JsonUtils.objectToJson(o)), e);
        }
    }

}

KafkaTemplate send methods and message callbacks

The send-related method signatures in KafkaTemplate's source:

ListenableFuture<SendResult<K, V>> sendDefault(V data);

ListenableFuture<SendResult<K, V>> sendDefault(K key, V data);

ListenableFuture<SendResult<K, V>> sendDefault(Integer partition, K key, V data);

ListenableFuture<SendResult<K, V>> sendDefault(Integer partition, Long timestamp, K key, V data);

ListenableFuture<SendResult<K, V>> send(String topic, V data);

ListenableFuture<SendResult<K, V>> send(String topic, K key, V data);

ListenableFuture<SendResult<K, V>> send(String topic, Integer partition, K key, V data);

ListenableFuture<SendResult<K, V>> send(String topic, Integer partition, Long timestamp, K key, V data);

ListenableFuture<SendResult<K, V>> send(ProducerRecord<K, V> record);

ListenableFuture<SendResult<K, V>> send(Message<?> message);

The parameters are:

  • topic: the name of the topic
  • partition: the partition id, i.e. which partition to send to; ids start at 0
  • timestamp: the timestamp; defaults to the current time
  • key: the message key
  • data: the message payload
  • ProducerRecord: the wrapper class for a message, containing all of the fields above
  • Message<?>: Spring's own message wrapper, containing the payload plus headers
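
A quick sketch of how a few of these overloads are invoked (topic name and values are illustrative; assumes the KafkaTemplate<Integer, String> configured above, plus MessageBuilder from spring-messaging):

kafkaTemplate.send("topic.quick.demo", "hello");                          // topic + value
kafkaTemplate.send("topic.quick.demo", 1, "hello");                       // topic + key + value
kafkaTemplate.send("topic.quick.demo", 0, 1, "hello");                    // topic + partition + key + value
kafkaTemplate.send(new ProducerRecord<>("topic.quick.demo", 1, "hello")); // ProducerRecord wrapper
kafkaTemplate.send(MessageBuilder.withPayload("hello")
        .setHeader(KafkaHeaders.TOPIC, "topic.quick.demo")
        .build());                                                        // Spring Message with headers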

Below, one KafkaTemplate is configured with a default topic, while the other KafkaTemplate is marked @Primary. @Primary means that when several beans of the same type exist, this one is preferred, which keeps plain @Autowired injection working.

    @Bean
    @Primary
    public KafkaTemplate<Integer, String> kafkaTemplate() {
        KafkaTemplate<Integer, String> template = new KafkaTemplate<>(producerFactory());
        return template;
    }

    @Bean("defaultKafkaTemplate")
    public KafkaTemplate<Integer, String> defaultKafkaTemplate() {
        KafkaTemplate<Integer, String> template = new KafkaTemplate<>(producerFactory());
        template.setDefaultTopic("topic.quick.default");
        return template;
    }

    @Resource
    private KafkaTemplate defaultKafkaTemplate;

    @Test
    public void testDefaultKafkaTemplate() {
        defaultKafkaTemplate.sendDefault("I`m send msg to default topic");
    }

Send result callbacks

KafkaTemplate's send method is itself asynchronous. In the send demo above, calling get() on the returned ListenableFuture turns the send into a synchronous call and also yields the send result.

Usually we inspect KafkaTemplate's send result to decide whether the message went out successfully; as in the send demo above, you can write:

final SendResult<String, Object> result = send.get();
if (!result.getRecordMetadata().hasOffset()) {
    throw new SendKafkaException("send kafka topic fail, topic=" + topic);
}

Besides blocking on the future, you can register a producer listener on the KafkaTemplate to be notified of send successes and failures, as follows:

    @Test
    public void testProducerSend() throws InterruptedException {
        kafkaTemplate.send("topic.quick.demo", "test producer listen");
        Thread.sleep(1000);
    }    

    @Bean
    public KafkaTemplate<Integer, String> kafkaTemplate() {
        KafkaTemplate<Integer, String> template = new KafkaTemplate<>(producerFactory());
        template.setProducerListener(new ProducerListener() {
            @Override
            public void onSuccess(ProducerRecord producerRecord, RecordMetadata recordMetadata) {
                log.info("Message send success : " + producerRecord.toString());
            }

            @Override
            public void onSuccess(String topic, Integer partition, Object key, Object value, RecordMetadata recordMetadata) {

            }

            @Override
            public void onError(ProducerRecord producerRecord, Exception exception) {
                log.info("Message send error : " + producerRecord.toString());
            }

            @Override
            public void onError(String topic, Integer partition, Object key, Object value, Exception exception) {

            }
        });
        return template;
    }

The test sleeps for one second after sending; otherwise, when the send takes longer, the process can exit before the callback event fires. The reason is that KafkaTemplate sends messages asynchronously, as its source shows:

 public ListenableFuture<SendResult<K, V>> send(String topic, V data) {
        ProducerRecord<K, V> producerRecord = new ProducerRecord(topic, data);
        return this.doSend(producerRecord);
    }

protected ListenableFuture<SendResult<K, V>> doSend(final ProducerRecord<K, V> producerRecord) {
        if (this.transactional) {
            Assert.state(this.inTransaction(), "No transaction is in process; possible solutions: run the template operation within the scope of a template.executeInTransaction() operation, start a transaction with @Transactional before invoking the template method, run in a transaction started by a listener container when consuming a record");
        }

        final Producer<K, V> producer = this.getTheProducer();
        if (this.logger.isTraceEnabled()) {
            this.logger.trace("Sending: " + producerRecord);
        }

        final SettableListenableFuture<SendResult<K, V>> future = new SettableListenableFuture();
        producer.send(producerRecord, new Callback() {
            public void onCompletion(RecordMetadata metadata, Exception exception) {
                try {
                    if (exception == null) {
                        future.set(new SendResult(producerRecord, metadata));
                        if (KafkaTemplate.this.producerListener != null) {
                            KafkaTemplate.this.producerListener.onSuccess(producerRecord, metadata);
                        }

                        if (KafkaTemplate.this.logger.isTraceEnabled()) {
                            KafkaTemplate.this.logger.trace("Sent ok: " + producerRecord + ", metadata: " + metadata);
                        }
                    } else {
                        future.setException(new KafkaProducerException(producerRecord, "Failed to send", exception));
                        if (KafkaTemplate.this.producerListener != null) {
                            KafkaTemplate.this.producerListener.onError(producerRecord, exception);
                        }

                        if (KafkaTemplate.this.logger.isDebugEnabled()) {
                            KafkaTemplate.this.logger.debug("Failed to send: " + producerRecord, exception);
                        }
                    }
                } finally {
                    if (!KafkaTemplate.this.transactional) {
                        KafkaTemplate.this.closeProducer(producer, false);
                    }

                }

            }
        });
        if (this.autoFlush) {
            this.flush();
        }

        if (this.logger.isTraceEnabled()) {
            this.logger.trace("Sent: " + producerRecord);
        }

        return future;
    }


public class SettableListenableFuture<T> implements ListenableFuture<T>
public interface ListenableFuture<T> extends Future<T> 

In the send method, KafkaTemplate wraps the arguments into a ProducerRecord and calls doSend to deliver it to Kafka. SettableListenableFuture implements the ListenableFuture interface, which in turn extends Future, Java's standard interface for asynchronous computations with a result.
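
Because the returned future is a ListenableFuture, you can also register a callback instead of blocking with get(); a minimal sketch:

ListenableFuture<SendResult<Integer, String>> future =
        kafkaTemplate.send("topic.quick.demo", "async with callback");
future.addCallback(
        result -> log.info("sent ok, offset={}", result.getRecordMetadata().offset()),
        ex -> log.error("send failed", ex));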

 

Kafka transaction management

Two ways to use Kafka transactions

(1) Configure a Kafka transaction manager and use the @Transactional annotation
(2) Use KafkaTemplate's executeInTransaction method

 

Using the @Transactional annotation

First configure a KafkaTransactionManager, the transaction manager class that Kafka support provides; it is created from the producer factory. Note that transactions must be enabled on the producerFactory by setting a TransactionIdPrefix, the prefix used to generate the transactional.id. Code:

    @Bean
    public ProducerFactory<Integer, String> producerFactory() {
        DefaultKafkaProducerFactory<Integer, String> factory = new DefaultKafkaProducerFactory<>(senderProps());
        // setting a transaction id prefix is what enables transactions on the factory
        factory.setTransactionIdPrefix("tran-");
        return factory;
    }

    @Bean
    public KafkaTransactionManager transactionManager(ProducerFactory producerFactory) {
        KafkaTransactionManager manager = new KafkaTransactionManager(producerFactory);
        return manager;
    }
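
Once the transaction manager is in place, a transactional send in application code can look like this sketch (the service name and topic are illustrative):

@Service
public class TransactionalSender {

    @Autowired
    private KafkaTemplate<Integer, String> kafkaTemplate;

    // the two sends commit or abort together; throwing here aborts the Kafka transaction
    @Transactional
    public void sendPair(String msg) {
        kafkaTemplate.send("topic.quick.tran", msg + "-1");
        kafkaTemplate.send("topic.quick.tran", msg + "-2");
    }
}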

Configuring Kafka transactions is quite simple. To check that they work, create this method in the DemoTest class:

    @Test
    @Transactional
    public void testTransactionalAnnotation() throws InterruptedException {
        kafkaTemplate.send("topic.quick.tran", "test transactional annotation");
        throw new RuntimeException("fail");
    }

Running the test prints the following log, which shows the transaction was used and aborted as expected:

org.apache.kafka.common.KafkaException: Failing batch since transaction was aborted
    at org.apache.kafka.clients.producer.internals.Sender.maybeSendTransactionalRequest(Sender.java:317) [kafka-clients-1.0.2.jar:na]
    at org.apache.kafka.clients.producer.internals.Sender.run(Sender.java:214) [kafka-clients-1.0.2.jar:na]
    at org.apache.kafka.clients.producer.internals.Sender.run(Sender.java:163) [kafka-clients-1.0.2.jar:na]
    at java.lang.Thread.run(Thread.java:748) [na:1.8.0_161]

Opening a transaction with KafkaTemplate.executeInTransaction

This way of opening a transaction needs no transaction manager; it can be called a local transaction. Write the test method directly:

    @Test
    public void testExecuteInTransaction() throws InterruptedException {
        kafkaTemplate.executeInTransaction(new KafkaOperations.OperationsCallback() {
            @Override
            public Object doInOperations(KafkaOperations kafkaOperations) {
                kafkaOperations.send("topic.quick.tran", "test executeInTransaction");
                throw new RuntimeException("fail");
                //return true;
            }
        });
    }

Running the test likewise prints the transaction-aborted exception, showing that the transaction works here too:

org.apache.kafka.common.KafkaException: Failing batch since transaction was aborted
    at org.apache.kafka.clients.producer.internals.Sender.maybeSendTransactionalRequest(Sender.java:317) [kafka-clients-1.0.2.jar:na]
    at org.apache.kafka.clients.producer.internals.Sender.run(Sender.java:214) [kafka-clients-1.0.2.jar:na]
    at org.apache.kafka.clients.producer.internals.Sender.run(Sender.java:163) [kafka-clients-1.0.2.jar:na]
    at java.lang.Thread.run(Thread.java:748) [na:1.8.0_161]

 

Consuming messages

Without annotations: create a listener container factory, then use it to create a listener container that consumes messages. Code:

    /**
     * Initialize the consumer factory
     * @return
     */
    @Bean
    public ConsumerFactory<String, String> consumerFactory() {
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, consumerGroup);
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        return new DefaultKafkaConsumerFactory<>(props);
    }

    /**
     * Create the listener container factory
     * @return
     */
    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, String> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        return factory;
    }

    /**
     * Create a listener container through the container properties
     * @return
     */
    @Bean
    public ConcurrentMessageListenerContainer marketingListenerContainer() {
        ContainerProperties containerProperties = new ContainerProperties(marketingRequestTopic);
        containerProperties.setAckMode(AbstractMessageListenerContainer.AckMode.MANUAL_IMMEDIATE);
        containerProperties.setMessageListener(new AcknowledgingMessageListener<String, String>() {
            @Override
            public void onMessage(ConsumerRecord<String, String> consumerRecord, Acknowledgment acknowledgment) {
                logger.info("received market execution request: {}", consumerRecord);
                MarketingRequest request = JSON.parseObject(consumerRecord.value(), MarketingRequest.class);
                MarketingResult result = marketingExecutionService.execute(request);
                acknowledgment.acknowledge();
                logger.info("executed market execution result: {}", result);
            }
        });
        ConcurrentMessageListenerContainer container = new ConcurrentMessageListenerContainer(consumerFactory(), containerProperties);
        container.setConcurrency(1);
        return container;
    }

The consumer's commit modes: AckMode offers the following options:

  • RECORD
    commit after each record is processed
  • BATCH (default)
    commit once per poll; the frequency depends on how often poll is called
  • TIME
    commit at intervals of ackTime
  • COUNT
    commit once ackCount acknowledgments have accumulated
  • COUNT_TIME
    commit as soon as either the ackTime or the ackCount condition is met
  • MANUAL
    the listener is responsible for acking; behind the scenes commits are still batched
  • MANUAL_IMMEDIATE
    the listener is responsible for acking; every ack triggers an immediate commit
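
With Spring Boot auto-configuration, the mode can also be selected through properties; a sketch (the manual modes additionally require disabling auto-commit):

spring.kafka.consumer.enable-auto-commit=false
spring.kafka.listener.ack-mode=manual_immediate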

Kafka message listeners

Message listening in Spring-Kafka falls broadly into two types, single-record consumption and batch consumption; the only difference is how many messages the listener receives at a time. GenericMessageListener is the interface our message listeners implement, and it is extended by quite a few sub-interfaces, for example:

  • MessageListener for single-record consumption
  • BatchMessageListener for batch consumption
  • AcknowledgingMessageListener and BatchAcknowledgingMessageListener, which add the ack mechanism

GenericMessageListener is a functional interface with default onMessage implementations for three different parameter lists.

  • data is the payload to be received,
  • Consumer is the consumer client class,
  • Acknowledgment is the class used to implement the ack mechanism.

Note that the Consumer object is not thread-safe.

@FunctionalInterface
public interface GenericMessageListener<T> {
    void onMessage(T var1);

    default void onMessage(T data, Acknowledgment acknowledgment) {
        throw new UnsupportedOperationException("Container should never call this");
    }

    default void onMessage(T data, Consumer<?, ?> consumer) {
        throw new UnsupportedOperationException("Container should never call this");
    }

    default void onMessage(T data, Acknowledgment acknowledgment, Consumer<?, ?> consumer) {
        throw new UnsupportedOperationException("Container should never call this");
    }
}

The interfaces below extend GenericMessageListener; those prefixed with Batch are the batch-processing listener interfaces:

public interface MessageListener<K, V> {
    void onMessage(ConsumerRecord<K, V> data);
}

public interface AcknowledgingMessageListener<K, V> { 
    void onMessage(ConsumerRecord<K, V> data, Acknowledgment acknowledgment);
}

public interface ConsumerAwareMessageListener<K, V> extends MessageListener<K, V> {
    void onMessage(ConsumerRecord<K, V> data, Consumer<?, ?> consumer);
}

public interface AcknowledgingConsumerAwareMessageListener<K, V> extends MessageListener<K, V> { 
    void onMessage(ConsumerRecord<K, V> data, Acknowledgment acknowledgment, Consumer<?, ?> consumer);
}

public interface BatchMessageListener<K, V> { 
    void onMessage(List<ConsumerRecord<K, V>> data);
}

public interface BatchAcknowledgingMessageListener<K, V> {
    void onMessage(List<ConsumerRecord<K, V>> data, Acknowledgment acknowledgment);
}

public interface BatchConsumerAwareMessageListener<K, V> extends BatchMessageListener<K, V> { 
    void onMessage(List<ConsumerRecord<K, V>> data, Consumer<?, ?> consumer);
}

public interface BatchAcknowledgingConsumerAwareMessageListener<K, V> extends BatchMessageListener<K, V> { 
    void onMessage(List<ConsumerRecord<K, V>> data, Acknowledgment acknowledgment, Consumer<?, ?> consumer);
}

  • MessageListener: after the consumer pulls messages, offsets are committed automatically once consumption finishes; suitable when enable.auto.commit is true
  • AcknowledgingMessageListener: after consuming a message the offset is not committed automatically, and a manual ack is required; suitable when enable.auto.commit is false
  • BatchMessageListener and BatchAcknowledgingMessageListener: analogous to the two interfaces above, but for batch consumption, again differing in whether offsets are committed automatically

Consumer concurrency

At runtime spring-kafka starts two kinds of threads: Consumer threads and Listener threads. The former call the kafka-client poll() method directly to fetch messages; the latter invoke the methods annotated with @KafkaListener. If you used kafka-client directly, the normal pattern would be a while loop that calls poll() and then processes the messages; to the Kafka broker, that loop is one consumer. To get multiple consumers, besides starting several processes, you can run that while loop on several threads within one process, and this is exactly what spring-kafka does.

Setting concurrency=3 sets the number of consumers per @KafkaListener, but the effective thread count is actually determined by the topic's partitions. For example, with two listeners @KafkaListener1 and @KafkaListener2, concurrency=5 and 3 partitions: one listener starts two consumers (two threads) assigned two partitions, and the other starts one consumer (one thread) assigned one partition. The total thread count equals the partition count, 3, never 5. When @KafkaListener1 dies, a consumer rebalance is triggered and @KafkaListener2 starts three threads assigned all three partitions. Here you can think of @KafkaListener1 and @KafkaListener2 as two service nodes.

For every @KafkaListener, spring-kafka starts a Consumer thread to listen on the configured topics (the annotation can listen on several), and then starts spring.kafka.listener.concurrency threads to actually run your Listener (the effective number can never exceed the partition count). Note that the separate Listener threads are only started when enable-auto-commit is false, as the source shows:

// if the container is set to auto-commit, then execute in the
                        // same thread
                        // otherwise send to the buffering queue
                        if (this.autoCommit) {
                            invokeListener(records);
                        }
                        else {
                            if (sendToListener(records)) {
                                if (this.assignedPartitions != null) {
                                    // avoid group management rebalance due to a slow
                                    // consumer
                                    this.consumer.pause(this.assignedPartitions);
                                    this.paused = true;
                                    this.unsent = records;
                                }
                            }
                        }

Consuming messages with the @KafkaListener annotation

The attributes of the @KafkaListener annotation:

@Target({ElementType.TYPE, ElementType.METHOD, ElementType.ANNOTATION_TYPE})
@Retention(RetentionPolicy.RUNTIME)
@MessageMapping
@Documented
@Repeatable(KafkaListeners.class)
public @interface KafkaListener {
    String id() default "";

    String containerFactory() default "";

    String[] topics() default {};

    String topicPattern() default "";

    TopicPartition[] topicPartitions() default {};

    String containerGroup() default "";

    String errorHandler() default "";

    String groupId() default "";

    boolean idIsGroup() default true;

    String clientIdPrefix() default "";

    String beanRef() default "__listener";
}

What each attribute provides:

  • id: the consumer id; when groupId is not configured, the id is used as the group id by default
  • containerFactory: as mentioned above, whether a @KafkaListener consumes single records or batches is decided purely by this attribute; it holds the bean name of the listener container factory, i.e. a ConcurrentKafkaListenerContainerFactory
  • topics: the topics to listen to; several may be given
  • topicPartitions: more detailed listening configuration, such as listening only to specific partitions of a topic, or starting from a given offset (say, 200)
  • errorHandler: the bean name of the listener's error handler
  • groupId: the consumer group id
  • idIsGroup: whether the id doubles as the group id
  • clientIdPrefix: the prefix for the consumer client id
  • beanRef: the bean name of the actual listener container; prefix the bean name with "__"
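
A sketch combining several of these attributes (topic names are illustrative, and "listenErrorHandler" is a hypothetical KafkaListenerErrorHandler bean):

    @KafkaListener(id = "attrDemo", groupId = "attrGroup", clientIdPrefix = "attr",
            topics = {"topic.quick.demo", "topic.quick.other"},
            containerFactory = "kafkaListenerContainerFactory",
            errorHandler = "listenErrorHandler")
    public void listen(String data) {
        log.info("received : {}", data);
    }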

The @KafkaListener annotation itself is not tied to single-record or batch consumption; the distinction is made solely by the containerFactory attribute. A method annotated with @KafkaListener can accept the following parameters:

  • data: the payload; its type is not restricted and follows the configured key/value types. When data is a List, the method consumes in batches.
  • ConsumerRecord: the full record class, including headers, partition info, timestamp and so on
  • Acknowledgment: the interface used for the ack mechanism
  • Consumer: the consumer client, through which you can commit offsets manually, control the consumption rate, and more

All of the following are valid listener method signatures:

    public void listen1(String data) 

    public void listen2(ConsumerRecord<K,V> data) 

    public void listen3(ConsumerRecord<K,V> data, Acknowledgment acknowledgment) 

    public void listen4(ConsumerRecord<K,V> data, Acknowledgment acknowledgment, Consumer<K,V> consumer) 

    public void listen5(List<String> data) 

    public void listen6(List<ConsumerRecord<K,V>> data) 

    public void listen7(List<ConsumerRecord<K,V>> data, Acknowledgment acknowledgment) 

    public void listen8(List<ConsumerRecord<K,V>> data, Acknowledgment acknowledgment, Consumer<K,V> consumer) 

Basic usage of @KafkaListener:

@Component
@Slf4j
public class MessageListener {

    @Autowired
    DataService dataService;

    @KafkaListener(topics = "message_topic")
    public void onMessage(ConsumerRecord<String, String> record, Acknowledgment acknowledgment) {
        log.debug("received msg:{}", record.toString());
        final long start = System.currentTimeMillis();
        if(StringUtils.isEmpty(record.value())) {
            log.warn("message is empty, topic={}, partition={}, offset={}", record.topic(), record.partition(), record.offset());
        } else {
            try {
                dataService.handlerMessage(record.value());
            } catch (Exception e) {
                log.error(String.format("topic=%s, partition=%s, offset=%s, json=%s", record.topic(),
                        record.partition(), record.offset(), record.value()), e);
            }
        }
        acknowledgment.acknowledge();
        log.info("topic={}, partition={}, offset={}, cost={}", record.topic(),
                record.partition(), record.offset(), (System.currentTimeMillis() - start));

    }

}

The listener above consumes via ConsumerRecord, which carries the partition info, offset, headers, payload and more; when the business needs those fields, ConsumerRecord is a good choice. If all you need is the payload, receiving it with a concrete type, such as String, is more convenient. Sample output when consuming with ConsumerRecord:

received msg : ConsumerRecord(topic = topic.quick.consumer, partition = 0, offset = 0, CreateTime = 1536652333476, serialized key size = -1, serialized value size = 30, headers = RecordHeaders(headers = [], isReadOnly = false), key = null, value = test receive by consumerRecord)
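
For comparison, a minimal sketch that receives the value directly as a String:

    @KafkaListener(id = "single", topics = "topic.quick.consumer")
    public void listen(String data) {
        log.info("received msg : {}", data);
    }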

 

Batch consumption

  1. Create a new consumer configuration, set to pull 5 messages per poll
  2. Create a listener container factory, set it to batch mode with a concurrency of 5; the concurrency is bounded by the partition count and must be less than or equal to it, otherwise some threads stay idle
  3. Create a topic with 8 partitions
  4. Create the listener method: consumer id "batch", clientId prefix "batch", listening on topic.quick.batch, built by the batchContainerFactory

 

 private Map<String, Object> consumerProps() {
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true);
        props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "15000");
        //maximum number of records returned per poll
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "5");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        return props;
    }

    @Bean("batchContainerFactory")
    public ConcurrentKafkaListenerContainerFactory listenerContainer() {
        ConcurrentKafkaListenerContainerFactory container = new ConcurrentKafkaListenerContainerFactory();
        container.setConsumerFactory(new DefaultKafkaConsumerFactory(consumerProps()));
        //concurrency; must be less than or equal to the topic's partition count
        container.setConcurrency(5);
        //enable batch listening
        container.setBatchListener(true);
        return container;
    }

    @Bean
    public NewTopic batchTopic() {
        return new NewTopic("topic.quick.batch", 8, (short) 1);
    }


    @KafkaListener(id = "batch",clientIdPrefix = "batch",topics = {"topic.quick.batch"},containerFactory = "batchContainerFactory")
    public void batchListener(List<String> data) {
        log.info("topic.quick.batch  receive : ");
        for (String s : data) {
            log.info(  s);
        }
    }


    // send the test messages
    @Autowired
    private KafkaTemplate kafkaTemplate;

    @Test
    public void testBatch() {
        for (int i = 0; i < 12; i++) {
            kafkaTemplate.send("topic.quick.batch", "test batch listener,dataNum-" + i);
        }
    }

Batch consumption can also be enabled with the property spring.kafka.listener.type=batch. spring.kafka.consumer.max-poll-records sets how many records each poll fetches (default 500), and spring.kafka.listener.concurrency sets the number of concurrent listeners, which is best kept equal to the partition count, as sketched below.
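
As a properties sketch:

spring.kafka.listener.type=batch
spring.kafka.listener.concurrency=8
spring.kafka.consumer.max-poll-records=5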

After the project starts, the logs show the partition assignment of the listener container. With concurrency set to 5, five threads are started to consume, and the topic has 8 partitions, which means three threads are assigned 2 partitions each and two threads are assigned 1 partition each. The last five log lines show each thread's assignment.

2018-09-11 12:47:49.628  INFO 4708 --- [    batch-2-C-1] o.a.k.c.c.internals.AbstractCoordinator  : [Consumer clientId=batch-2, groupId=batch] Successfully joined group with generation 98
2018-09-11 12:47:49.628  INFO 4708 --- [    batch-2-C-1] o.a.k.c.c.internals.ConsumerCoordinator  : [Consumer clientId=batch-2, groupId=batch] Setting newly assigned partitions [topic.quick.batch-4, topic.quick.batch-5]
2018-09-11 12:47:49.630  INFO 4708 --- [    batch-3-C-1] o.a.k.c.c.internals.AbstractCoordinator  : [Consumer clientId=batch-3, groupId=batch] Successfully joined group with generation 98
2018-09-11 12:47:49.630  INFO 4708 --- [    batch-0-C-1] o.a.k.c.c.internals.AbstractCoordinator  : [Consumer clientId=batch-0, groupId=batch] Successfully joined group with generation 98
2018-09-11 12:47:49.630  INFO 4708 --- [    batch-4-C-1] o.a.k.c.c.internals.AbstractCoordinator  : [Consumer clientId=batch-4, groupId=batch] Successfully joined group with generation 98
2018-09-11 12:47:49.630  INFO 4708 --- [    batch-3-C-1] o.a.k.c.c.internals.ConsumerCoordinator  : [Consumer clientId=batch-3, groupId=batch] Setting newly assigned partitions [topic.quick.batch-6]
2018-09-11 12:47:49.630  INFO 4708 --- [    batch-0-C-1] o.a.k.c.c.internals.ConsumerCoordinator  : [Consumer clientId=batch-0, groupId=batch] Setting newly assigned partitions [topic.quick.batch-0, topic.quick.batch-1]
2018-09-11 12:47:49.630  INFO 4708 --- [    batch-4-C-1] o.a.k.c.c.internals.ConsumerCoordinator  : [Consumer clientId=batch-4, groupId=batch] Setting newly assigned partitions [topic.quick.batch-7]
2018-09-11 12:47:49.631  INFO 4708 --- [    batch-1-C-1] o.a.k.c.c.internals.AbstractCoordinator  : [Consumer clientId=batch-1, groupId=batch] Successfully joined group with generation 98
2018-09-11 12:47:49.631  INFO 4708 --- [    batch-1-C-1] o.a.k.c.c.internals.ConsumerCoordinator  : [Consumer clientId=batch-1, groupId=batch] Setting newly assigned partitions [topic.quick.batch-2, topic.quick.batch-3]
2018-09-11 12:47:49.633  INFO 4708 --- [    batch-3-C-1] o.s.k.l.KafkaMessageListenerContainer    : partitions assigned: [topic.quick.batch-6]
2018-09-11 12:47:49.633  INFO 4708 --- [    batch-0-C-1] o.s.k.l.KafkaMessageListenerContainer    : partitions assigned: [topic.quick.batch-0, topic.quick.batch-1]
2018-09-11 12:47:49.633  INFO 4708 --- [    batch-4-C-1] o.s.k.l.KafkaMessageListenerContainer    : partitions assigned: [topic.quick.batch-7]
2018-09-11 12:47:49.633  INFO 4708 --- [    batch-1-C-1] o.s.k.l.KafkaMessageListenerContainer    : partitions assigned: [topic.quick.batch-2, topic.quick.batch-3]
2018-09-11 12:47:49.634  INFO 4708 --- [    batch-2-C-1] o.s.k.l.KafkaMessageListenerContainer    : partitions assigned: [topic.quick.batch-4, topic.quick.batch-5]

The test method sends 12 messages to the topic in a short time. The output shows that the listener method pulled data three times in total: twice with 5 records and once with 2 records, adding up to the 12 messages sent. As follows:

2018-09-11 12:08:51.840  INFO 12416 --- [    batch-0-C-1] com.viu.kafka.listen.BatchListener       : topic.quick.batch  receive : 
2018-09-11 12:08:51.840  INFO 12416 --- [    batch-0-C-1] com.viu.kafka.listen.BatchListener       : test batch listener,dataNum-5
2018-09-11 12:08:51.840  INFO 12416 --- [    batch-0-C-1] com.viu.kafka.listen.BatchListener       : test batch listener,dataNum-2
2018-09-11 12:08:51.840  INFO 12416 --- [    batch-0-C-1] com.viu.kafka.listen.BatchListener       : test batch listener,dataNum-10
2018-09-11 12:08:51.840  INFO 12416 --- [    batch-0-C-1] com.viu.kafka.listen.BatchListener       : test batch listener,dataNum-6
2018-09-11 12:08:51.840  INFO 12416 --- [    batch-0-C-1] com.viu.kafka.listen.BatchListener       : test batch listener,dataNum-3
2018-09-11 12:08:51.841  INFO 12416 --- [    batch-0-C-1] com.viu.kafka.listen.BatchListener       : topic.quick.batch  receive : 
2018-09-11 12:08:51.841  INFO 12416 --- [    batch-0-C-1] com.viu.kafka.listen.BatchListener       : test batch listener,dataNum-11
2018-09-11 12:08:51.841  INFO 12416 --- [    batch-0-C-1] com.viu.kafka.listen.BatchListener       : test batch listener,dataNum-0
2018-09-11 12:08:51.841  INFO 12416 --- [    batch-0-C-1] com.viu.kafka.listen.BatchListener       : test batch listener,dataNum-8
2018-09-11 12:08:51.841  INFO 12416 --- [    batch-0-C-1] com.viu.kafka.listen.BatchListener       : test batch listener,dataNum-7
2018-09-11 12:08:51.841  INFO 12416 --- [    batch-0-C-1] com.viu.kafka.listen.BatchListener       : test batch listener,dataNum-4
2018-09-11 12:08:51.842  INFO 12416 --- [    batch-0-C-1] com.viu.kafka.listen.BatchListener       : topic.quick.batch  receive : 
2018-09-11 12:08:51.842  INFO 12416 --- [    batch-0-C-1] com.viu.kafka.listen.BatchListener       : test batch listener,dataNum-1
2018-09-11 12:08:51.842  INFO 12416 --- [    batch-0-C-1] com.viu.kafka.listen.BatchListener       : test batch listener,dataNum-9

Note: the configured concurrency must not exceed the partition count. If you need higher throughput, increasing the number of partitions is a quick way to achieve it.

 

Listening to specific partitions of a topic

The topicPartitions attribute of @KafkaListener lets a listener target individual partitions.
@TopicPartition: topic — the topic to listen to; partitions — the ids of the partitions to listen to; partitionOffsets — start listening from a given offset
@PartitionOffset: partition — a single partition id (not an array); initialOffset — the initial offset

 

  @KafkaListener(id = "batchWithPartition",clientIdPrefix = "bwp",containerFactory = "batchContainerFactory",
        topicPartitions = {
            @TopicPartition(topic = "topic.quick.batch.partition",partitions = {"1","3"}),
            @TopicPartition(topic = "topic.quick.batch.partition",partitions = {"0","4"},
                    partitionOffsets = @PartitionOffset(partition = "2",initialOffset = "100"))
        }
    )
    

When messages are sent at startup, only partitions 0, 1, 2, 3 and 4 are being listened to, so the messages in partitions 5, 6 and 7 are never read:

2018-09-11 14:51:09.063  INFO 1532 --- [Partition-2-C-1] com.viu.kafka.listen.BatchListener       : topic.quick.batch.partition  receive : 
2018-09-11 14:51:09.063  INFO 1532 --- [Partition-2-C-1] com.viu.kafka.listen.BatchListener       : test batch listener,dataNum-4
2018-09-11 14:51:09.064  INFO 1532 --- [Partition-1-C-1] com.viu.kafka.listen.BatchListener       : topic.quick.batch.partition  receive : 
2018-09-11 14:51:09.064  INFO 1532 --- [Partition-1-C-1] com.viu.kafka.listen.BatchListener       : test batch listener,dataNum-2
2018-09-11 14:51:09.075  INFO 1532 --- [Partition-0-C-1] com.viu.kafka.listen.BatchListener       : topic.quick.batch.partition  receive : 
2018-09-11 14:51:09.075  INFO 1532 --- [Partition-0-C-1] com.viu.kafka.listen.BatchListener       : test batch listener,dataNum-1
2018-09-11 14:51:09.078  INFO 1532 --- [Partition-1-C-1] com.viu.kafka.listen.BatchListener       : topic.quick.batch.partition  receive : 
2018-09-11 14:51:09.078  INFO 1532 --- [Partition-1-C-1] com.viu.kafka.listen.BatchListener       : test batch listener,dataNum-10
2018-09-11 14:51:09.091  INFO 1532 --- [Partition-4-C-1] com.viu.kafka.listen.BatchListener       : topic.quick.batch.partition  receive : 
2018-09-11 14:51:09.091  INFO 1532 --- [Partition-4-C-1] com.viu.kafka.listen.BatchListener       : test batch listener,dataNum-5
2018-09-11 14:51:09.095  INFO 1532 --- [Partition-0-C-1] com.viu.kafka.listen.BatchListener       : topic.quick.batch.partition  receive : 
2018-09-11 14:51:09.096  INFO 1532 --- [Partition-0-C-1] com.viu.kafka.listen.BatchListener       : test batch listener,dataNum-9
2018-09-11 14:51:09.097  INFO 1532 --- [Partition-3-C-1] com.viu.kafka.listen.BatchListener       : topic.quick.batch.partition  receive : 
2018-09-11 14:51:09.098  INFO 1532 --- [Partition-3-C-1] com.viu.kafka.listen.BatchListener       : test batch listener,dataNum-7

 

Getting the message headers and payload via annotations

When the received message carries headers, or the listener method needs many of the record's fields, this style is handy, since chaining getters adds a fair bit of code. The default listener container factory is used here; for batch consumption, simply change the parameter types to List, e.g. List<String> data, List<Integer> key.

  • @Payload: the message payload, i.e. the content that was sent
  • @Header(KafkaHeaders.RECEIVED_MESSAGE_KEY): the key the message was sent with
  • @Header(KafkaHeaders.RECEIVED_PARTITION_ID): the partition the message was received from
  • @Header(KafkaHeaders.RECEIVED_TOPIC): the name of the topic being listened to
  • @Header(KafkaHeaders.RECEIVED_TIMESTAMP): the timestamp

    @KafkaListener(id = "anno", topics = "topic.quick.anno")
    public void annoListener(@Payload String data,
                             @Header(KafkaHeaders.RECEIVED_MESSAGE_KEY) Integer key,
                             @Header(KafkaHeaders.RECEIVED_PARTITION_ID) int partition,
                             @Header(KafkaHeaders.RECEIVED_TOPIC) String topic,
                             @Header(KafkaHeaders.RECEIVED_TIMESTAMP) long ts) {
        log.info("topic.quick.anno receive : \n"+
            "data : "+data+"\n"+
            "key : "+key+"\n"+
            "partitionId : "+partition+"\n"+
            "topic : "+topic+"\n"+
            "timestamp : "+ts+"\n"
        );

    }

The test output:

2018-09-11 15:27:47.108  INFO 7592 --- [     anno-0-C-1] com.viu.kafka.listen.SingleListener      : topic.quick.anno receive : 
data : test anno listener
key : 0
partitionId : 0
topic : topic.quick.anno
timestamp : 1536650867015

 

Confirming consumption with the ack mechanism

Using Kafka's ack mechanism takes just three steps:

  1. Set ENABLE_AUTO_COMMIT_CONFIG=false to disable auto-commit
  2. Set AckMode=MANUAL_IMMEDIATE
  3. Add an Acknowledgment ack parameter to the listener method

To reject a message, simply do not call ack.acknowledge() in the listener method:

@Component
public class AckListener {

    private static final Logger log= LoggerFactory.getLogger(AckListener.class);

    private Map<String, Object> consumerProps() {
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "15000");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        return props;
    }

    @Bean("ackContainerFactory")
    public ConcurrentKafkaListenerContainerFactory ackContainerFactory() {
        ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory();
        factory.setConsumerFactory(new DefaultKafkaConsumerFactory(consumerProps()));
        factory.getContainerProperties().setAckMode(AbstractMessageListenerContainer.AckMode.MANUAL_IMMEDIATE);
        return factory;
    }


    @KafkaListener(id = "ack", topics = "topic.quick.ack",containerFactory = "ackContainerFactory")
    public void ackListener(ConsumerRecord record, Acknowledgment ack) {
        log.info("topic.quick.ack receive : " + record.value());
        ack.acknowledge();
    }
}

Kafka tracks consumption through the latest committed offset, and acknowledged messages are not deleted immediately, so unacknowledged data can be consumed again. However, if the first message is not acknowledged but the second one is, Kafka commits the second message's offset, and the first message will never be delivered to the listener again unless you fetch it manually by its offset. Consumer.seek lets you consume from a specified offset; be careful, as this can cause an infinite loop when the business logic can never process the record yet keeps seeking back to its offset.
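
A minimal sketch of seeking back to a failed record's offset with a consumer-aware listener (handle(...) is a hypothetical business method; note the infinite-loop risk just described):

    @KafkaListener(id = "seek", topics = "topic.quick.ack", containerFactory = "ackContainerFactory")
    public void seekListener(ConsumerRecord<String, String> record, Acknowledgment ack, Consumer<String, String> consumer) {
        try {
            handle(record.value());
            ack.acknowledge();
        } catch (Exception e) {
            // do not ack; seek back so this record is delivered again on the next poll
            consumer.seek(new TopicPartition(record.topic(), record.partition()), record.offset());
        }
    }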

 

Message forwarding

Kafka can forward messages: say system A reads a message from Topic-A, processes it, and forwards the result to Topic-B; system B listens on Topic-B, gets the new message, and processes it further.

Implementation options

Spring-Kafka integrates two ways of forwarding messages:

  1. Manual forwarding: use the @SendTo annotation to forward the listener method's return value to a topic
  2. Set a reply topic (KafkaHeaders.REPLY_TOPIC) in the headers, a request/response pattern based on the ReplyingKafkaTemplate class

The @SendTo approach

1. Configure the ReplyTemplate of the ConcurrentKafkaListenerContainerFactory. The ReplyTemplate is the template used to forward messages; the @SendTo annotation essentially uses it to forward the listener method's return value to the target topic, with the same effect as calling KafkaTemplate.send() for the new topic.

    @Bean
    public ConcurrentKafkaListenerContainerFactory<Integer, String> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<Integer, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        factory.setReplyTemplate(kafkaTemplate());
        return factory;
    }

2. Annotate the listener method with @SendTo to forward to the new topic:

@Component
public class ForwardListener {

    private static final Logger log= LoggerFactory.getLogger(ForwardListener.class);

    @KafkaListener(id = "forward", topics = "topic.quick.target")
    @SendTo("topic.quick.real")
    public String forward(String data) {
        log.info("topic.quick.target  forward "+data+" to  topic.quick.real");
        return "topic.quick.target send msg : " + data;
    }
}

When the listener on "topic.quick.target" receives a message, its processed return value is re-sent to the "topic.quick.real" topic.

 

The ReplyingKafkaTemplate approach

The main flow is:

First configure the ReplyTemplate of the ConcurrentKafkaListenerContainerFactory and register a listener container that listens on topic.quick.reply (the reply topic); this listener does no work itself and hands everything to the ReplyingKafkaTemplate. Then create a ReplyingKafkaTemplate bean from the ProducerFactory and that listener container, with the reply timeout set to 10 seconds:

    @Bean
    public ReplyingKafkaTemplate<String, String, String> replyingTemplate(
            ProducerFactory<String, String> producerFactory,
            ConcurrentMessageListenerContainer<String, String> repliesContainer) {

        ReplyingKafkaTemplate template = new ReplyingKafkaTemplate<>(producerFactory, repliesContainer);
        //timeout for the synchronous reply: 10s
        template.setReplyTimeout(10000);
        return template;
    }
  

    @Bean
    ConcurrentKafkaListenerContainerFactory<String, String> containerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        factory.setConcurrency(3);
        factory.getContainerProperties().setPollTimeout(3000);
        //set the reply template so that @SendTo is supported
        factory.setReplyTemplate(kafkaTemplate());
        return factory;
    }

    @Bean
    public ConcurrentMessageListenerContainer<String, String> repliesContainer(ConcurrentKafkaListenerContainerFactory<String, String> containerFactory) {
        ConcurrentMessageListenerContainer<String, String> repliesContainer =
           containerFactory.createContainer("topic.quick.reply");
        repliesContainer.getContainerProperties().setGroupId("replies_message_group");
        repliesContainer.setAutoStartup(false);
        return repliesContainer;
    }

Create a ProducerRecord, send it with the ReplyingKafkaTemplate, and add the KafkaHeaders.REPLY_TOPIC header to the record; this header says which topic the reply should be forwarded to.

    @Autowired
    private ReplyingKafkaTemplate replyingKafkaTemplate;

    /**
     * Send message data and return the result synchronously
     * @param paraMessageBO the message to send
     */
    public String sendMessage(MessageBO paraMessageBO){
        String returnValue = null;
        String message = null;
        try {
            message = new ObjectMapper().writeValueAsString(paraMessageBO);
            log.info("synchronous send start: " + message);
            // request topic
            ProducerRecord<String, String> record = new ProducerRecord<>("topic.quick.request", message);
            // reply topic
            record.headers().add(new RecordHeader(KafkaHeaders.REPLY_TOPIC, "topic.quick.reply".getBytes()));
            RequestReplyFuture<String, String, String> replyFuture = replyingKafkaTemplate.sendAndReceive(record);
            SendResult<String, String> sendResult = replyFuture.getSendFuture().get();
            log.info("Sent ok: " + sendResult.getRecordMetadata());
            ConsumerRecord<String, String> consumerRecord = replyFuture.get();
            returnValue = consumerRecord.value();
            log.info("Return value: " + returnValue);
            log.info("synchronous send end.");
        } catch (Exception e) {
            log.error("synchronous send failed, MESSAGE:" + message, e);
        }
        return returnValue;
    }

Listen on "topic.quick.request". The containerFactory must be specified, otherwise this message is not received, and @SendTo must be added, otherwise nothing is returned to the producer:

@Component
@Slf4j
public class MessageConsumer {
    
    @KafkaListener(topics = "topic.quick.request",containerFactory = "containerFactory")
    @SendTo
    public String consumerAsyn(String msgData){
        log.info("topic.quick.request receive : "+msgData);
        return "topic.quick.reply  reply : "+msgData;
    }
}

 

 

 
