近期,由于工作需要,在创建的一个springboot的项目中,同时需要连接两个kafka集群,从集群中获取数据存入数据库进行分析,之前一直使用的是一个集群,两个集群连接没有操作过,经过查阅,最终找到方法,以此来记录一下。
1、首先需要在项目中配置kafka配置类
主要思想是创建两个消费者工厂,并使用@Primary注解指定消费者在进行消费时默认选择的工厂,两个工厂的配置方式相同,只是连接参数不同。
package com.lantu.web.appconfig;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Primary;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
import java.util.HashMap;
import java.util.Map;
@Configuration
@EnableKafka
public class KafkaConfig {

    // ---- Cluster 1 ("sb") properties ----
    // Injected but currently unused: no producer beans are defined in this class.
    @Value("${spring.sskafka.producer.batch-size}")
    private String sbbatchSize;
    // Injected but currently unused: no producer beans are defined in this class.
    @Value("${spring.sskafka.producer.buffer-memory}")
    private String sbbufferMemory;
    /** Bootstrap servers for the first Kafka cluster. */
    @Value("${spring.sskafka.bootstrap-servers}")
    private String sbservers;
    /** Consumer group id for the first Kafka cluster. */
    @Value("${spring.sskafka.consumer.group-id}")
    private String sbgroupid;

    // ---- Cluster 2 ("sa") properties ----
    // NOTE(review): every "sa" field below reads the SAME `spring.sskafka.*`
    // properties as the "sb" fields above, so both consumer factories end up
    // pointing at the same cluster with the same group id. Presumably these
    // should use a second prefix (e.g. `spring.sakafka.*`) — confirm against
    // the application configuration file.
    // Injected but currently unused: no producer beans are defined in this class.
    @Value("${spring.sskafka.producer.batch-size}")
    private String sabatchSize;
    // Injected but currently unused: no producer beans are defined in this class.
    @Value("${spring.sskafka.producer.buffer-memory}")
    private String sabufferMemory;
    /** Bootstrap servers for the second Kafka cluster. */
    @Value("${spring.sskafka.bootstrap-servers}")
    private String saservers;
    /** Consumer group id for the second Kafka cluster. */
    @Value("${spring.sskafka.consumer.group-id}")
    private String sagroupid;

    /**
     * Default listener container factory (first cluster).
     *
     * <p>{@code @Primary} makes this the factory used by any
     * {@code @KafkaListener} that does not name a {@code containerFactory}
     * explicitly.
     */
    @Bean
    @Primary
    KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<Integer, String>> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<Integer, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        factory.setConcurrency(3);
        factory.getContainerProperties().setPollTimeout(3000);
        return factory;
    }

    /** Consumer factory for the first cluster. */
    @Bean
    public ConsumerFactory<Integer, String> consumerFactory() {
        return new DefaultKafkaConsumerFactory<>(consumerConfigs());
    }

    /** Consumer configuration for the first cluster. */
    @Bean
    public Map<String, Object> consumerConfigs() {
        Map<String, Object> props = new HashMap<>();
        // BUG FIX: was `sbbatchSize` (the producer batch size), which set the
        // broker address to a batch-size value; must be the server list.
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, sbservers);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, sbgroupid);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        return props;
    }

    /**
     * Listener container factory for the second Kafka cluster. Listeners must
     * opt in via
     * {@code @KafkaListener(containerFactory = "kafkaListenerContainerFactoryTwoSchedule")}.
     */
    @Bean
    KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<Integer, String>> kafkaListenerContainerFactoryTwoSchedule() {
        ConcurrentKafkaListenerContainerFactory<Integer, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactoryTwoSchedule());
        factory.setConcurrency(3);
        factory.getContainerProperties().setPollTimeout(3000);
        return factory;
    }

    /** Consumer factory for the second cluster. */
    @Bean
    public ConsumerFactory<Integer, String> consumerFactoryTwoSchedule() {
        return new DefaultKafkaConsumerFactory<>(consumerConfigsTwoSchedule());
    }

    /** Consumer configuration for the second cluster. */
    @Bean
    public Map<String, Object> consumerConfigsTwoSchedule() {
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, saservers);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, sagroupid);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        return props;
    }
}
2、消费集群消息的具体实现
因为同时连接两个集群,因此在消费时,一个消费默认的,一个消费指定的,因此必须要在第二个中指定连接的消费者工厂containerFactory = “kafkaListenerContainerFactoryTwoSchedule”
package com.lantu.web.application.kafka.manage.controller;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.web.bind.annotation.RestController;
import java.util.Optional;
/**
* kafka消费来自两个集群的消息
*/
@Slf4j
@RestController
public class TenantSyncInfoController {

    /**
     * Consumes messages from the first (default) Kafka cluster.
     *
     * <p>No {@code containerFactory} is given, so the {@code @Primary}
     * listener container factory is used.
     *
     * @param record the consumed record; its value may be null
     */
    @KafkaListener(topics = {"first-req"})
    public void tenantKafkaCustomer(ConsumerRecord<?, ?> record) {
        log.info("消费者成功订阅kafka集群1--------------------***********:first-req");
        // BUG FIX: the original printed the Optional itself via System.out,
        // producing "Optional[...]" on stdout; log the unwrapped value instead.
        Optional.ofNullable(record.value())
                .ifPresent(value -> log.info("--------------------***********1{}", value));
    }

    /**
     * Consumes messages from the second Kafka cluster by explicitly selecting
     * its listener container factory.
     *
     * @param record the consumed record; its value may be null
     */
    @KafkaListener(topics = {"second-req"}, containerFactory = "kafkaListenerContainerFactoryTwoSchedule")
    public void shandongKafkaCustomer(ConsumerRecord<?, ?> record) {
        log.info("消费者成功订阅kafka集群2--------------------***********:second-req");
        Optional.ofNullable(record.value())
                .ifPresent(value -> log.info("--------------------***********2{}", value));
    }
}
集群的配置都是写在springboot的配置文件中的,这里就不再展示了。
3、还有种方式,就是将两个集群的所有服务地址都写在bootstrap-servers属性值上,接收消息也是没有问题的,也能够正常消费来自两个集群的不同消息,但是不知道这种方式是否符合规矩,因此没有采用这种方式,在这里也就不再描述。