场景分析
有一个websocket服务,消费来自kafka的消息,并且推送给指定的ws客户端。原本kafka配置是从配置文件中直接读取,当需要多实例启动时,并且想通过消费分组去让不同实例都消费topic中的数据时,配置文件显然就捉襟见肘了。
解决方案
考虑多实例部署时候生成不同的消费分组,每个实例启动后被分配在不同消费分组中。利用不同消费分组均能消费topic中消息的原理,实现kafka的订阅发布。
Talk is cheap. Show me the code.
kafka配置
@Slf4j
@Component
public class KafkaConsumerConfig {

    @Value("${spring.kafka.bootstrap-servers}")
    private String brokers;
    @Value("${spring.kafka.consumer.enable-auto-commit}")
    private Boolean enableAutoCommit;
    @Value("${spring.kafka.consumer.auto-commit-interval-ms}")
    private String autoCommitIntervalMs;
    @Value("${spring.kafka.consumer.session-timeout-ms}")
    private Integer sessionTimeoutMs;
    @Value("${spring.kafka.consumer.auto-offset-reset}")
    private String autoOffsetReset;
    @Value("${spring.kafka.consumer.group-id}")
    private String groupId;
    @Value("${spring.kafka.consumer.max-poll-records}")
    private String maxPollRecords;

    /** Redis key prefix under which each instance claims its group slot. */
    private static final String CACHE_GROUP_NAME_PREFIX = "CONSOLE:WSMSGCONSUMER:";
    /** Highest group slot index; slots 0..6, so at most 7 concurrent instances. */
    private static final int WS_MAX_GROUP_VALUE = 6;
    /** TTL (seconds) of a claimed slot in Redis; must exceed the renewal period. */
    private static final long GROUP_KEY_TTL_SECONDS = 180;
    /** How often (seconds) the lease on the claimed slot is renewed. */
    private static final long GROUP_KEY_RENEW_PERIOD_SECONDS = 120;

    /** Consumer group id assigned to this instance (set while building the factory). */
    private String currentInstanceGroupId;

    @Autowired
    private RedisOperate redisOperate;

    /**
     * Single thread that keeps renewing the Redis lease on this instance's slot.
     * Daemon so a forgotten scheduler cannot block JVM shutdown.
     */
    private final ScheduledExecutorService scheduledExecutorService =
            Executors.newScheduledThreadPool(1, r -> {
                Thread t = new Thread(r, "ws-group-lease-renewer");
                t.setDaemon(true);
                return t;
            });

    /**
     * Builds the Kafka listener container factory backed by the
     * dynamically-grouped consumer factory below.
     */
    @Bean
    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        return factory;
    }

    /**
     * Claims the first free consumer-group slot via Redis SETNX and returns the
     * resulting group id ({@code groupId + slot}).
     *
     * @param currValue slot index to start probing from (normally 0)
     * @return a group id unique to this instance
     * @throws IllegalStateException when every slot up to {@link #WS_MAX_GROUP_VALUE}
     *         is already held by another instance
     */
    public String getSerializeGroupId(Integer currValue) {
        // Iterative probe instead of recursion: same slot order, no stack growth.
        for (int slot = currValue; slot <= WS_MAX_GROUP_VALUE; slot++) {
            String key = CACHE_GROUP_NAME_PREFIX.concat(Integer.toString(slot));
            // setIfAbsent == SETNX with TTL: only one instance can win a given slot.
            boolean claimed = redisOperate.setIfAbsent(key, slot, GROUP_KEY_TTL_SECONDS, TimeUnit.SECONDS);
            if (claimed) {
                startHoldingGroupName(key);
                return groupId.concat(Integer.toString(slot));
            }
        }
        // All slots taken: refuse to reuse a group name, since duplicated groups
        // would break the one-group-per-instance pub/sub model.
        throw new IllegalStateException("oversize WS_MAX_GROUP_VALUE");
    }

    /**
     * Builds the consumer factory configuration; the group id is assigned
     * dynamically so every instance lands in its own consumer group.
     */
    private ConsumerFactory<String, String> consumerFactory() {
        Map<String, Object> properties = new HashMap<>();
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokers);
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit);
        properties.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, autoCommitIntervalMs);
        // Fix: session-timeout-ms was read from config but never applied before.
        properties.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeoutMs);
        properties.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        // Each deployed instance gets its own group id, turning the shared topic
        // into a publish/subscribe fan-out across instances.
        currentInstanceGroupId = getSerializeGroupId(0);
        log.info("当前实例WsMsgConsumer group_id:{}", currentInstanceGroupId);
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, currentInstanceGroupId);
        properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
        return new DefaultKafkaConsumerFactory<>(properties);
    }

    /**
     * Periodically extends the TTL of the Redis key holding this instance's
     * group slot, so the slot is released automatically only if the instance
     * dies (renewal period 120s &lt; TTL 180s).
     */
    private void startHoldingGroupName(String groupKey) {
        scheduledExecutorService.scheduleAtFixedRate(() -> {
            log.info("startHoldingGroupName {}", groupKey);
            redisOperate.expire(groupKey, GROUP_KEY_TTL_SECONDS);
        }, 0, GROUP_KEY_RENEW_PERIOD_SECONDS, TimeUnit.SECONDS);
    }
}
kafka消费者
@Component
@Slf4j
public class WsMsgConsumer {
/**通过containerFactory指定到刚才初始化的kafkaListenerContainerFactory*/
@KafkaListener(topics = "${spring.kafka.consumer.ws.task.push.topic}", containerFactory="kafkaListenerContainerFactory")
public void WsMsgConsumer(ConsumerRecord<?, ?> record) {
log.info("WsMsgConsumer==>{},kafka_record==>{}", record.topic(), record.toString());
Optional<?> kafkaMessage = Optional.ofNullable(record.value());
if (kafkaMessage.isPresent()) {
try {
String message = (String) kafkaMessage.get();
JSONObject jsonObject = JSON.parseObject(message);
WxMsgDTO wxMsgDTO = JSON.toJavaObject(jsonObject, WxMsgDTO.class);
publishMessage(wxMsgDTO.getStatus().toString(),wxMsgDTO.getUserId(),wxMsgDTO.getStoreId());
}catch (Exception e){
log.error("WsMsgConsumer,消费失败----"+ CommonUtil.getTrace(e));
}
}
}
}