Integrating Kafka with Spring Boot (with Multiple Data Sources)

1. Add the Dependency

Add the Spring Kafka dependency to the project's pom.xml file.

<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
    <!-- Use a version that matches your Spring Boot version -->
</dependency>

2. Configure Kafka

Configure the Kafka connection settings in application.yml or application.properties. For multiple data sources, configure the connection details of each Kafka cluster as needed.
Single data source:

# ———————————————— Kafka ————————————————
spring.kafka.bootstrap-servers=localhost:9092
# An identifier string passed to the server with client requests; generated automatically if not set.
#spring.kafka.producer.client-id=common-parent
# Acknowledgement level for the producer (1: the producer receives an ack as soon as the partition leader has written the message).
spring.kafka.producer.acks=1
spring.kafka.producer.key-serializer=org.apache.kafka.common.serialization.StringSerializer
spring.kafka.producer.value-serializer=org.apache.kafka.common.serialization.StringSerializer
spring.kafka.producer.retries=10
# Maximum age of cached metadata before a refresh is forced
spring.kafka.producer.properties.metadata.max.age.ms=300000
# Maximum size in bytes of a single request
spring.kafka.producer.properties.max.request.size=200000000
# Identifies the consumer group this consumer belongs to
spring.kafka.consumer.group-id=common-parent
# Whether the consumer automatically commits offsets to Kafka. When true, the commit frequency is controlled by auto.commit.interval.ms (default 5000 ms, i.e. every 5 seconds).
spring.kafka.consumer.enable-auto-commit=true
# What the consumer does when no committed offset exists (earliest: start consuming from the earliest message in the topic)
spring.kafka.consumer.auto-offset-reset=earliest
spring.kafka.consumer.key-deserializer=org.apache.kafka.common.serialization.StringDeserializer
spring.kafka.consumer.value-deserializer=org.apache.kafka.common.serialization.StringDeserializer
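
For reference, a sketch of the same single-datasource settings in application.yml form:

spring:
  kafka:
    bootstrap-servers: localhost:9092
    producer:
      acks: "1"
      retries: 10
      key-serializer: org.apache.kafka.common.serialization.StringSerializer
      value-serializer: org.apache.kafka.common.serialization.StringSerializer
      properties:
        metadata.max.age.ms: 300000
        max.request.size: 200000000
    consumer:
      group-id: common-parent
      enable-auto-commit: true
      auto-offset-reset: earliest
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer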

Multiple data sources:
Note: the first data source points at localhost:9092 and the second at 127.0.0.1:9092. They are really the same broker; the point is just to verify that the per-datasource configuration takes effect.

# ———————————————— Kafka ————————————————
spring.kafka.multiple.primary=first
spring.kafka.multiple.datasource.first.bootstrap-servers=localhost:9092
# An identifier string passed to the server with client requests; generated automatically if not set.
#spring.kafka.multiple.datasource.first.producer.client-id=common-parent
# Acknowledgement level for the producer (1: the producer receives an ack as soon as the partition leader has written the message).
spring.kafka.multiple.datasource.first.producer.acks=1
spring.kafka.multiple.datasource.first.producer.key-serializer=org.apache.kafka.common.serialization.StringSerializer
spring.kafka.multiple.datasource.first.producer.value-serializer=org.apache.kafka.common.serialization.StringSerializer
spring.kafka.multiple.datasource.first.producer.retries=10
# Maximum age of cached metadata before a refresh is forced
spring.kafka.multiple.datasource.first.producer.properties.metadata.max.age.ms=300000
# Maximum size in bytes of a single request
spring.kafka.multiple.datasource.first.producer.properties.max.request.size=200000000
# Identifies the consumer group this consumer belongs to
spring.kafka.multiple.datasource.first.consumer.group-id=common-parent
# Whether the consumer automatically commits offsets to Kafka. When true, the commit frequency is controlled by auto.commit.interval.ms (default 5000 ms, i.e. every 5 seconds).
spring.kafka.multiple.datasource.first.consumer.enable-auto-commit=true
# What the consumer does when no committed offset exists (earliest: start consuming from the earliest message in the topic)
spring.kafka.multiple.datasource.first.consumer.auto-offset-reset=earliest
spring.kafka.multiple.datasource.first.consumer.key-deserializer=org.apache.kafka.common.serialization.StringDeserializer
spring.kafka.multiple.datasource.first.consumer.value-deserializer=org.apache.kafka.common.serialization.StringDeserializer
spring.kafka.multiple.datasource.second.bootstrap-servers=127.0.0.1:9092
#spring.kafka.multiple.datasource.second.producer.client-id=common-parent
spring.kafka.multiple.datasource.second.producer.acks=1
spring.kafka.multiple.datasource.second.producer.key-serializer=org.apache.kafka.common.serialization.StringSerializer
spring.kafka.multiple.datasource.second.producer.value-serializer=org.apache.kafka.common.serialization.StringSerializer
spring.kafka.multiple.datasource.second.producer.retries=10
spring.kafka.multiple.datasource.second.producer.properties.metadata.max.age.ms=300000
spring.kafka.multiple.datasource.second.producer.properties.max.request.size=200000000
spring.kafka.multiple.datasource.second.consumer.group-id=default
spring.kafka.multiple.datasource.second.consumer.enable-auto-commit=true
spring.kafka.multiple.datasource.second.consumer.auto-offset-reset=earliest
spring.kafka.multiple.datasource.second.consumer.key-deserializer=org.apache.kafka.common.serialization.StringDeserializer
spring.kafka.multiple.datasource.second.consumer.value-deserializer=org.apache.kafka.common.serialization.StringDeserializer
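
Note that Spring Boot's auto-configuration does not recognize the custom spring.kafka.multiple.* prefix; these keys only take effect because the configuration class in step 3 binds them explicitly with @ConfigurationProperties. The spring.kafka.multiple.primary key is not read by the code shown below either; the first datasource is simply hard-wired as @Primary.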

3. Create the Kafka Configuration Classes

KafkaConfig.java

package priv.fc.common_parent.configuration.config.mq;

import cn.hutool.core.util.ObjectUtil;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Primary;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;

import java.util.HashMap;
import java.util.Map;

/**
 * Kafka configuration class
 *
 * @author 付聪
 * @time 2024-08-11 14:54:13
 */
@Configuration
public class KafkaConfig {

    // ———————————————— Configuration properties beans (start) ————————————————
    @Primary
    @ConfigurationProperties(prefix = "spring.kafka.multiple.datasource.first")
    @Bean(name = "firstDataSourceKafkaProperties")
    public KafkaProperties firstDataSourceKafkaProperties() {
        return new KafkaProperties();
    }

    @ConfigurationProperties(prefix = "spring.kafka.multiple.datasource.second")
    @Bean(name = "secondDataSourceKafkaProperties")
    public KafkaProperties secondDataSourceKafkaProperties() {
        return new KafkaProperties();
    }
    // ———————————————— Configuration properties beans (end) ————————————————


    // ———————————————— Producer beans (start) ————————————————
    @Primary
    @Bean(name = "kafkaTemplate", destroyMethod = "destroy")
    public KafkaTemplate<String, String> kafkaTemplate(@Autowired @Qualifier("firstDataSourceKafkaProperties") KafkaProperties kafkaProperties) {
        return new KafkaTemplate<>(this.getProducerFactory(kafkaProperties));
    }

    @Bean(name = "firstKafkaTemplate", destroyMethod = "destroy")
    public KafkaTemplate<String, String> firstKafkaTemplate(@Autowired @Qualifier("firstDataSourceKafkaProperties") KafkaProperties kafkaProperties) {
        return new KafkaTemplate<>(this.getProducerFactory(kafkaProperties));
    }

    @Bean(name = "secondKafkaTemplate", destroyMethod = "destroy")
    public KafkaTemplate<String, String> secondKafkaTemplate(@Autowired @Qualifier("secondDataSourceKafkaProperties") KafkaProperties kafkaProperties) {
        return new KafkaTemplate<>(this.getProducerFactory(kafkaProperties));
    }

    private ProducerFactory<String, String> getProducerFactory(KafkaProperties kafkaProperties) {
        return new DefaultKafkaProducerFactory<>(this.getProducerConfigs(kafkaProperties));
    }
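
    // NOTE: getProducerConfigs below only propagates the bootstrap servers, retries and the
    // String serializers from the datasource prefix; other producer settings (e.g. acks, the
    // metadata.* properties) are not applied. kafkaProperties.buildProducerProperties() would
    // carry them all, mirroring buildConsumerProperties() on the consumer side.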

    private Map<String, Object> getProducerConfigs(KafkaProperties kafkaProperties) {
        Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaProperties.getBootstrapServers());
        props.put(ProducerConfig.RETRIES_CONFIG, kafkaProperties.getProducer().getRetries());
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        return props;
    }

    // ———————————————— Producer beans (end) ————————————————

    // ———————————————— Consumer beans (start) ————————————————
    @Primary
    @Bean(name = "firstDataSourceConsumerFactory")
    public ConsumerFactory<Object, Object> firstDataSourceConsumerFactory(@Autowired @Qualifier("firstDataSourceKafkaProperties") KafkaProperties kafkaProperties) {
        return new DefaultKafkaConsumerFactory<>(kafkaProperties.buildConsumerProperties());
    }

    @Bean(name = "firstDataSourceKafkaListenerContainerFactory")
    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<Object, Object>> firstDataSourceKafkaListenerContainerFactory(
        @Autowired @Qualifier("firstDataSourceKafkaProperties") KafkaProperties kafkaProperties,
        @Autowired @Qualifier("firstDataSourceConsumerFactory") ConsumerFactory<Object, Object> consumerFactory
    ) {
        return getKafkaListenerContainerFactory(kafkaProperties, consumerFactory);
    }

    @Bean(name = "secondDataSourceConsumerFactory")
    public ConsumerFactory<Object, Object> secondDataSourceConsumerFactory(@Autowired @Qualifier("secondDataSourceKafkaProperties") KafkaProperties kafkaProperties) {
        return new DefaultKafkaConsumerFactory<>(kafkaProperties.buildConsumerProperties());
    }

    @Bean(name = "secondDataSourceKafkaListenerContainerFactory")
    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<Object, Object>> secondDataSourceKafkaListenerContainerFactory(
        @Autowired @Qualifier("secondDataSourceKafkaProperties") KafkaProperties kafkaProperties,
        @Autowired @Qualifier("secondDataSourceConsumerFactory") ConsumerFactory<Object, Object> consumerFactory
    ) {
        return getKafkaListenerContainerFactory(kafkaProperties, consumerFactory);
    }

    private KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<Object, Object>> getKafkaListenerContainerFactory(KafkaProperties kafkaProperties, ConsumerFactory<Object, Object> consumerFactory) {
        // Create a listener container factory that supports concurrent consumption
        ConcurrentKafkaListenerContainerFactory<Object, Object> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory);
        if (ObjectUtil.isNotEmpty(kafkaProperties.getListener().getConcurrency())) {
            // Set the container's concurrency level
            factory.setConcurrency(kafkaProperties.getListener().getConcurrency());
        }
        if (ObjectUtil.isNotEmpty(kafkaProperties.getListener().getAckMode())) {
            // Set the acknowledgement mode of the listener container
            factory.getContainerProperties().setAckMode(kafkaProperties.getListener().getAckMode());
        }
        return factory;
    }
    // ———————————————— Consumer beans (end) ————————————————

}

KafkaServiceConfig.java

package priv.fc.common_parent.common.config;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Primary;
import org.springframework.kafka.core.KafkaTemplate;
import priv.fc.common_parent.common.service.KafkaService;
import priv.fc.common_parent.common.service.impl.KafkaServiceImpl;

/**
 * KafkaService configuration class
 *
 * @author 付聪
 * @time 2024-08-22 21:55:54
 */
@Configuration
public class KafkaServiceConfig {

    @Primary
    @Bean(name = "kafkaService")
    public KafkaService kafkaService(@Autowired @Qualifier("kafkaTemplate") KafkaTemplate<String, String> kafkaTemplate) {
        return new KafkaServiceImpl(kafkaTemplate);
    }

    @Bean(name = "firstKafkaService")
    public KafkaService firstKafkaService(@Autowired @Qualifier("firstKafkaTemplate") KafkaTemplate<String, String> firstKafkaTemplate) {
        return new KafkaServiceImpl(firstKafkaTemplate);
    }

    @Bean(name = "secondKafkaService")
    public KafkaService secondKafkaService(@Autowired @Qualifier("secondKafkaTemplate") KafkaTemplate<String, String> secondKafkaTemplate) {
        return new KafkaServiceImpl(secondKafkaTemplate);
    }

}
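
With these named beans in place, a caller can select a datasource simply by matching the field name with @Resource (as the test service in step 6 does), or by using @Qualifier explicitly.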

4. Create the KafkaService Interface

Note: only one example method is wrapped here; wrap whatever methods you need yourself (see the sketch after the interface).

package priv.fc.common_parent.common.service;

import org.springframework.kafka.support.SendResult;
import org.springframework.util.concurrent.ListenableFuture;

/**
 * KafkaService interface
 *
 * @author 付聪
 * @time 2024/4/12 11:37
 */
public interface KafkaService {

    /**
     * Sends data to the given topic, without a key or partition.
     *
     * @param topic - the topic
     * @param value - the data
     * @return ListenableFuture<SendResult<String, String>> - the future result of the send (supports completion callbacks)
     * @author 付聪
     * @time 2024/8/11 14:59:00
     */
    ListenableFuture<SendResult<String, String>> send(String topic, String value);

}
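
For example, a key-aware overload could sit next to it. A sketch only (the method below is hypothetical; its implementation would delegate to KafkaTemplate's send(topic, key, value)):

    /**
     * Sends data to the given topic with the provided key (hypothetical example overload).
     *
     * @param topic - the topic
     * @param key   - the message key (used to choose the partition)
     * @param value - the data
     * @return ListenableFuture<SendResult<String, String>> - the future result of the send
     */
    ListenableFuture<SendResult<String, String>> send(String topic, String key, String value);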

5. Create the KafkaService Implementation Class

package priv.fc.common_parent.common.service.impl;

import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.SendResult;
import org.springframework.util.concurrent.ListenableFuture;
import priv.fc.common_parent.common.service.KafkaService;

/**
 * KafkaService implementation class
 *
 * @author 付聪
 * @time 2024-08-11 15:03:51
 */
// Note: no @Service here; the instances are registered as named beans in KafkaServiceConfig.
public class KafkaServiceImpl implements KafkaService {

    private final KafkaTemplate<String, String> kafkaTemplate;

    public KafkaServiceImpl(KafkaTemplate<String, String> template) {
        this.kafkaTemplate = template;
    }

    @Override
    public ListenableFuture<SendResult<String, String>> send(String topic, String value) {
        return kafkaTemplate.send(topic, value);
    }

}
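
Since send(...) returns a ListenableFuture, callers can react to the outcome asynchronously. A minimal usage sketch (assumes a KafkaService bean is at hand and the "test" topic exists):

ListenableFuture<SendResult<String, String>> future = kafkaService.send("test", "Hello Kafka!");
future.addCallback(
        // invoked once the broker has acknowledged the record
        result -> System.out.println("Sent to partition " + result.getRecordMetadata().partition()
                + " at offset " + result.getRecordMetadata().offset()),
        // invoked if the send ultimately fails (after the configured retries)
        ex -> System.err.println("Send failed: " + ex.getMessage())
);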

6. Test Sending Messages

KafkaLearnController.java

package priv.fc.common_parent.learn.kafka.controller;

import io.swagger.annotations.Api;
import io.swagger.annotations.ApiOperation;
import io.swagger.annotations.ApiResponse;
import io.swagger.annotations.ApiResponses;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.bind.annotation.RestController;
import priv.fc.common_parent.configuration.response.Result;
import priv.fc.common_parent.learn.kafka.service.KafkaLearnService;

import javax.annotation.Resource;

/**
 * Kafka learning management
 *
 * @author 付聪
 * @time 2024-08-10 23:30:03
 */
@Api(tags = {"Kafka Learning Management"}, hidden = false)
@RestController
@RequestMapping("/kafka_learn")
public class KafkaLearnController {

    @Resource
    private KafkaLearnService kafkaLearnService;

    @ApiOperation(value = "Test that messages can be sent and consumed normally", notes = "Nothing special to note!", hidden = false)
    @ApiResponses({@ApiResponse(code = 200, message = "OK", response = Void.class)})
    @RequestMapping(value = "/test", method = RequestMethod.POST)
    Result<Void> test() {
        kafkaLearnService.send();
        return new Result<>();
    }

}
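
With the application running, a POST request to /kafka_learn/test (Tomcat listens on port 8888 in the startup log of step 8) triggers the three sends implemented below.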

KafkaLearnService.java

package priv.fc.common_parent.learn.kafka.service;

/**
 * Kafka learning Service interface
 *
 * @author 付聪
 * @time 2024-08-10 23:30:03
 */
public interface KafkaLearnService {

    /**
     * Sends test data to the given topic, without a key or partition.
     *
     * @return void
     * @author 付聪
     * @time 2024-08-10 23:31:08
     */
    void send();

}

KafkaLearnValueReqDto.java

package priv.fc.common_parent.learn.kafka.req_dto;

import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;
import lombok.EqualsAndHashCode;
import lombok.ToString;
import org.springframework.format.annotation.DateTimeFormat;
import priv.fc.common_parent.common.req_dto.CommonReqDto;

import java.io.Serializable;
import java.util.Date;

/**
 * Kafka learning data request object
 *
 * @author 付聪
 * @time 2024-08-11 09:37:25
 */
@ApiModel(value = "KafkaLearnValueReqDto", description = "Kafka learning data request object")
@Data
@EqualsAndHashCode(callSuper = true)
@ToString(callSuper = true)
// Note: extending CommonReqDto (just a home-grown common request DTO) is optional.
public class KafkaLearnValueReqDto extends CommonReqDto implements Serializable {

    private static final long serialVersionUID = 7674576755150005686L;

    @ApiModelProperty(value = "主键ID", example = "", hidden = false)
    private Long id;

    @ApiModelProperty(value = "备注", example = "", hidden = false)
    private String remark;

    @ApiModelProperty(value = "创建人ID", example = "", hidden = false)
    private Long createPersonId;

    @ApiModelProperty(value = "创建时间(默认以【yyyy-MM-dd HH:mm:ss】格式的字符串接收前端传参)", example = "2024-04-09 16:04:03", hidden = false)
    @DateTimeFormat(pattern = "yyyy-MM-dd HH:mm:ss")
    private Date createTime;

    @ApiModelProperty(value = "更新人ID", example = "", hidden = false)
    private Long updatePersonId;

    @ApiModelProperty(value = "更新时间(默认以【yyyy-MM-dd HH:mm:ss】格式的字符串接收前端传参)", example = "2024-04-09 16:04:03", hidden = false)
    @DateTimeFormat(pattern = "yyyy-MM-dd HH:mm:ss")
    private Date updateTime;

    @ApiModelProperty(value = "是否删除", example = "", hidden = false)
    private Integer delFlag;

}

KafkaLearnServiceImpl.java

package priv.fc.common_parent.learn.kafka.service.impl;

import cn.hutool.json.JSONUtil;
import org.apache.commons.lang.math.NumberUtils;
import org.springframework.stereotype.Service;
import priv.fc.common_parent.common.service.KafkaService;
import priv.fc.common_parent.learn.kafka.req_dto.KafkaLearnValueReqDto;
import priv.fc.common_parent.learn.kafka.service.KafkaLearnService;

import javax.annotation.Resource;

/**
 * Kafka learning Service implementation class
 *
 * @author 付聪
 * @time 2024-08-10 23:30:03
 */
@Service
public class KafkaLearnServiceImpl implements KafkaLearnService {

    @Resource
    private KafkaService kafkaService;

    @Resource
    private KafkaService firstKafkaService;

    @Resource
    private KafkaService secondKafkaService;

    @Override
    public void send() {

        KafkaLearnValueReqDto kafkaLearnValueReqDto = new KafkaLearnValueReqDto();
        kafkaLearnValueReqDto.setRemark("Message from the [default datasource]: Hello Kafka!");
        kafkaLearnValueReqDto.setDelFlag(NumberUtils.INTEGER_ZERO);

        // Note: the topic must exist before sending; create it with CLI tools, a GUI, or in code (see the sketch after this class).
        kafkaService.send("test", JSONUtil.toJsonStr(kafkaLearnValueReqDto));

        KafkaLearnValueReqDto kafkaLearnValueReqDto1 = new KafkaLearnValueReqDto();
        kafkaLearnValueReqDto1.setRemark("Message from the [first datasource]: Hello Kafka!");
        kafkaLearnValueReqDto1.setDelFlag(NumberUtils.INTEGER_ZERO);

        firstKafkaService.send("test", JSONUtil.toJsonStr(kafkaLearnValueReqDto1));

        KafkaLearnValueReqDto kafkaLearnValueReqDto2 = new KafkaLearnValueReqDto();
        kafkaLearnValueReqDto2.setRemark("Message from the [second datasource]: Hello Kafka!");
        kafkaLearnValueReqDto2.setDelFlag(NumberUtils.INTEGER_ZERO);

        secondKafkaService.send("test", JSONUtil.toJsonStr(kafkaLearnValueReqDto2));

    }

}
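
As noted in the comment above, the "test" topic must exist before sending. Besides the CLI or a GUI tool, it can be declared in code; a sketch (the class below is hypothetical, and assumes Spring Boot's auto-configured KafkaAdmin is active for the default datasource):

package priv.fc.common_parent.learn.kafka.config;

import org.apache.kafka.clients.admin.NewTopic;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

/**
 * Declares the "test" topic so that KafkaAdmin can create it on startup
 * (5 partitions, replication factor 1, matching the test-0..test-4
 * partitions visible in the startup log below).
 */
@Configuration
public class KafkaTopicConfig {

    @Bean
    public NewTopic testTopic() {
        return new NewTopic("test", 5, (short) 1);
    }

}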

7. Test Consuming Messages

KafkaLearnConsumer.java

package priv.fc.common_parent.learn.kafka.consumer;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.stereotype.Component;

/**
 * Kafka learning consumer
 *
 * @author 付聪
 * @time 2024-08-11 09:37:25
 */
@Component
public class KafkaLearnConsumer {

    private static final Logger logger = LoggerFactory.getLogger(KafkaLearnConsumer.class);

    @KafkaListener(topics = "test")
    public void consume(ConsumerRecord<String, String> record) {
        logger.info("record是:{}", record);
        logger.info("value是:{}", record.value());
    }

    @KafkaListener(topics = "test", containerFactory = "firstDataSourceKafkaListenerContainerFactory")
    public void consume1(ConsumerRecord<String, String> record) {
        logger.info("record是:{}", record);
        logger.info("value是:{}", record.value());
    }

    @KafkaListener(topics = "test", containerFactory = "secondDataSourceKafkaListenerContainerFactory")
    public void consume2(ConsumerRecord<String, String> record) {
        logger.info("record是:{}", record);
        logger.info("value是:{}", record.value());
    }

}
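
The container factory from step 3 also honors listener.concurrency and listener.ack-mode when they are set for a datasource. A sketch of a listener method that could be added to the consumer above (assumes the first datasource were reconfigured with consumer.enable-auto-commit=false and listener.ack-mode=manual_immediate, which is not the configuration used above):

    @KafkaListener(topics = "test", containerFactory = "firstDataSourceKafkaListenerContainerFactory")
    public void consumeWithManualAck(ConsumerRecord<String, String> record, org.springframework.kafka.support.Acknowledgment ack) {
        logger.info("value: {}", record.value());
        // Commit the offset explicitly once the record has been processed
        ack.acknowledge();
    }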

8. Start the Project

2024-08-11 15:10:06,157 INFO  [main] o.a.k.c.u.AppInfoParser.<init>(117): Kafka version: 2.5.1
2024-08-11 15:10:06,158 INFO  [main] o.a.k.c.u.AppInfoParser.<init>(118): Kafka commitId: 0efa8fb0f4c73d92
2024-08-11 15:10:06,159 INFO  [main] o.a.k.c.u.AppInfoParser.<init>(119): Kafka startTimeMs: 1723360206157
2024-08-11 15:10:06,159 INFO  [main] o.a.k.c.c.KafkaConsumer.subscribe(974): [Consumer clientId=consumer-common-parent-3, groupId=common-parent] Subscribed to topic(s): test
2024-08-11 15:10:06,160 INFO  [main] o.s.s.c.ThreadPoolTaskScheduler.initialize(181): Initializing ExecutorService
2024-08-11 15:10:06,162 INFO  [main] o.a.c.h.Http11NioProtocol.log(173): Starting ProtocolHandler ["http-nio-8888"]
2024-08-11 15:10:06,227 INFO  [main] o.s.b.w.e.t.TomcatWebServer.start(220): Tomcat started on port(s): 8888 (http) with context path ''
2024-08-11 15:10:07,380 INFO  [org.springframework.kafka.KafkaListenerEndpointContainer#2-0-C-1] o.a.k.c.Metadata.update(277): [Consumer clientId=consumer-common-parent-1, groupId=common-parent] Cluster ID: 5WIA7foUSN-3iLQFV8vA7Q
2024-08-11 15:10:07,380 INFO  [org.springframework.kafka.KafkaListenerEndpointContainer#1-0-C-1] o.a.k.c.Metadata.update(277): [Consumer clientId=consumer-common-parent-3, groupId=common-parent] Cluster ID: 5WIA7foUSN-3iLQFV8vA7Q
2024-08-11 15:10:07,380 INFO  [org.springframework.kafka.KafkaListenerEndpointContainer#0-0-C-1] o.a.k.c.Metadata.update(277): [Consumer clientId=consumer-common-parent-2, groupId=common-parent] Cluster ID: 5WIA7foUSN-3iLQFV8vA7Q
2024-08-11 15:10:07,386 INFO  [org.springframework.kafka.KafkaListenerEndpointContainer#1-0-C-1] o.a.k.c.c.i.AbstractCoordinator.onSuccess(797): [Consumer clientId=consumer-common-parent-3, groupId=common-parent] Discovered group coordinator localhost:9092 (id: 2147483647 rack: null)
2024-08-11 15:10:07,386 INFO  [org.springframework.kafka.KafkaListenerEndpointContainer#0-0-C-1] o.a.k.c.c.i.AbstractCoordinator.onSuccess(797): [Consumer clientId=consumer-common-parent-2, groupId=common-parent] Discovered group coordinator localhost:9092 (id: 2147483647 rack: null)
2024-08-11 15:10:07,386 INFO  [org.springframework.kafka.KafkaListenerEndpointContainer#2-0-C-1] o.a.k.c.c.i.AbstractCoordinator.onSuccess(797): [Consumer clientId=consumer-common-parent-1, groupId=common-parent] Discovered group coordinator localhost:9092 (id: 2147483647 rack: null)
2024-08-11 15:10:07,398 INFO  [org.springframework.kafka.KafkaListenerEndpointContainer#0-0-C-1] o.a.k.c.c.i.AbstractCoordinator.sendJoinGroupRequest(552): [Consumer clientId=consumer-common-parent-2, groupId=common-parent] (Re-)joining group
2024-08-11 15:10:07,398 INFO  [org.springframework.kafka.KafkaListenerEndpointContainer#1-0-C-1] o.a.k.c.c.i.AbstractCoordinator.sendJoinGroupRequest(552): [Consumer clientId=consumer-common-parent-3, groupId=common-parent] (Re-)joining group
2024-08-11 15:10:07,399 INFO  [org.springframework.kafka.KafkaListenerEndpointContainer#2-0-C-1] o.a.k.c.c.i.AbstractCoordinator.sendJoinGroupRequest(552): [Consumer clientId=consumer-common-parent-1, groupId=common-parent] (Re-)joining group
2024-08-11 15:10:07,440 INFO  [org.springframework.kafka.KafkaListenerEndpointContainer#0-0-C-1] o.a.k.c.c.i.AbstractCoordinator.joinGroupIfNeeded(455): [Consumer clientId=consumer-common-parent-2, groupId=common-parent] Join group failed with org.apache.kafka.common.errors.MemberIdRequiredException: The group member needs to have a valid member id before actually entering a consumer group
2024-08-11 15:10:07,440 INFO  [org.springframework.kafka.KafkaListenerEndpointContainer#2-0-C-1] o.a.k.c.c.i.AbstractCoordinator.joinGroupIfNeeded(455): [Consumer clientId=consumer-common-parent-1, groupId=common-parent] Join group failed with org.apache.kafka.common.errors.MemberIdRequiredException: The group member needs to have a valid member id before actually entering a consumer group
2024-08-11 15:10:07,440 INFO  [org.springframework.kafka.KafkaListenerEndpointContainer#1-0-C-1] o.a.k.c.c.i.AbstractCoordinator.joinGroupIfNeeded(455): [Consumer clientId=consumer-common-parent-3, groupId=common-parent] Join group failed with org.apache.kafka.common.errors.MemberIdRequiredException: The group member needs to have a valid member id before actually entering a consumer group
2024-08-11 15:10:07,441 INFO  [org.springframework.kafka.KafkaListenerEndpointContainer#0-0-C-1] o.a.k.c.c.i.AbstractCoordinator.sendJoinGroupRequest(552): [Consumer clientId=consumer-common-parent-2, groupId=common-parent] (Re-)joining group
2024-08-11 15:10:07,441 INFO  [org.springframework.kafka.KafkaListenerEndpointContainer#1-0-C-1] o.a.k.c.c.i.AbstractCoordinator.sendJoinGroupRequest(552): [Consumer clientId=consumer-common-parent-3, groupId=common-parent] (Re-)joining group
2024-08-11 15:10:07,442 INFO  [org.springframework.kafka.KafkaListenerEndpointContainer#2-0-C-1] o.a.k.c.c.i.AbstractCoordinator.sendJoinGroupRequest(552): [Consumer clientId=consumer-common-parent-1, groupId=common-parent] (Re-)joining group
2024-08-11 15:10:07,458 INFO  [org.springframework.kafka.KafkaListenerEndpointContainer#1-0-C-1] o.a.k.c.c.i.ConsumerCoordinator.performAssignment(611): [Consumer clientId=consumer-common-parent-3, groupId=common-parent] Finished assignment for group at generation 64: {consumer-common-parent-2-4da3eb1e-e025-4ce1-8b0b-1efab52b6e68=Assignment(partitions=[test-2, test-3]), consumer-common-parent-1-65b1a27e-0288-4cc4-b633-ea5e6e7496d4=Assignment(partitions=[test-0, test-1]), consumer-common-parent-3-deffe8ba-dac0-4eda-97bc-bc0bf81c789d=Assignment(partitions=[test-4])}
2024-08-11 15:10:07,467 INFO  [org.springframework.kafka.KafkaListenerEndpointContainer#1-0-C-1] o.a.k.c.c.i.AbstractCoordinator.onSuccess(503): [Consumer clientId=consumer-common-parent-3, groupId=common-parent] Successfully joined group with generation 64
2024-08-11 15:10:07,467 INFO  [org.springframework.kafka.KafkaListenerEndpointContainer#2-0-C-1] o.a.k.c.c.i.AbstractCoordinator.onSuccess(503): [Consumer clientId=consumer-common-parent-1, groupId=common-parent] Successfully joined group with generation 64
2024-08-11 15:10:07,467 INFO  [org.springframework.kafka.KafkaListenerEndpointContainer#0-0-C-1] o.a.k.c.c.i.AbstractCoordinator.onSuccess(503): [Consumer clientId=consumer-common-parent-2, groupId=common-parent] Successfully joined group with generation 64
2024-08-11 15:10:07,484 INFO  [org.springframework.kafka.KafkaListenerEndpointContainer#0-0-C-1] o.a.k.c.c.i.ConsumerCoordinator.invokePartitionsAssigned(273): [Consumer clientId=consumer-common-parent-2, groupId=common-parent] Adding newly assigned partitions: test-3, test-2
2024-08-11 15:10:07,484 INFO  [org.springframework.kafka.KafkaListenerEndpointContainer#1-0-C-1] o.a.k.c.c.i.ConsumerCoordinator.invokePartitionsAssigned(273): [Consumer clientId=consumer-common-parent-3, groupId=common-parent] Adding newly assigned partitions: test-4
2024-08-11 15:10:07,484 INFO  [org.springframework.kafka.KafkaListenerEndpointContainer#2-0-C-1] o.a.k.c.c.i.ConsumerCoordinator.invokePartitionsAssigned(273): [Consumer clientId=consumer-common-parent-1, groupId=common-parent] Adding newly assigned partitions: test-1, test-0
2024-08-11 15:10:07,518 INFO  [org.springframework.kafka.KafkaListenerEndpointContainer#2-0-C-1] o.a.k.c.c.i.ConsumerCoordinator.refreshCommittedOffsetsIfNeeded(799): [Consumer clientId=consumer-common-parent-1, groupId=common-parent] Setting offset for partition test-1 to the committed offset FetchPosition{offset=12, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[localhost:9092 (id: 0 rack: null)], epoch=0}}
2024-08-11 15:10:07,518 INFO  [org.springframework.kafka.KafkaListenerEndpointContainer#1-0-C-1] o.a.k.c.c.i.ConsumerCoordinator.refreshCommittedOffsetsIfNeeded(799): [Consumer clientId=consumer-common-parent-3, groupId=common-parent] Setting offset for partition test-4 to the committed offset FetchPosition{offset=11, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[localhost:9092 (id: 0 rack: null)], epoch=0}}
2024-08-11 15:10:07,518 INFO  [org.springframework.kafka.KafkaListenerEndpointContainer#0-0-C-1] o.a.k.c.c.i.ConsumerCoordinator.refreshCommittedOffsetsIfNeeded(799): [Consumer clientId=consumer-common-parent-2, groupId=common-parent] Setting offset for partition test-3 to the committed offset FetchPosition{offset=9, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[localhost:9092 (id: 0 rack: null)], epoch=0}}
2024-08-11 15:10:07,518 INFO  [org.springframework.kafka.KafkaListenerEndpointContainer#2-0-C-1] o.a.k.c.c.i.ConsumerCoordinator.refreshCommittedOffsetsIfNeeded(799): [Consumer clientId=consumer-common-parent-1, groupId=common-parent] Setting offset for partition test-0 to the committed offset FetchPosition{offset=10, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[localhost:9092 (id: 0 rack: null)], epoch=0}}
2024-08-11 15:10:07,519 INFO  [org.springframework.kafka.KafkaListenerEndpointContainer#0-0-C-1] o.a.k.c.c.i.ConsumerCoordinator.refreshCommittedOffsetsIfNeeded(799): [Consumer clientId=consumer-common-parent-2, groupId=common-parent] Setting offset for partition test-2 to the committed offset FetchPosition{offset=6, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=Optional[localhost:9092 (id: 0 rack: null)], epoch=0}}
2024-08-11 15:10:07,543 INFO  [org.springframework.kafka.KafkaListenerEndpointContainer#2-0-C-1] o.s.k.l.KafkaMessageListenerContainer.info(292): common-parent: partitions assigned: [test-1, test-0]
2024-08-11 15:10:07,543 INFO  [org.springframework.kafka.KafkaListenerEndpointContainer#1-0-C-1] o.s.k.l.KafkaMessageListenerContainer.info(292): common-parent: partitions assigned: [test-4]
2024-08-11 15:10:07,543 INFO  [org.springframework.kafka.KafkaListenerEndpointContainer#0-0-C-1] o.s.k.l.KafkaMessageListenerContainer.info(292): common-parent: partitions assigned: [test-3, test-2]
2024-08-11 15:10:07,544 INFO  [main] p.f.c.m.MainApplication.logStarted(61): Started MainApplication in 36.901 seconds (JVM running for 42.065)

9. Verify Success

2024-08-11 15:14:21,758 INFO  [org.springframework.kafka.KafkaListenerEndpointContainer#0-0-C-1] p.f.c.l.k.c.Consumer.consume(16): record: ConsumerRecord(topic = test, partition = 3, leaderEpoch = 0, offset = 9, CreateTime = 1723360461727, serialized key size = -1, serialized value size = 73, headers = RecordHeaders(headers = [], isReadOnly = false), key = null, value = {"remark":"Message from the [second datasource]: Hello Kafka!","delFlag":0})
2024-08-11 15:14:21,758 INFO  [org.springframework.kafka.KafkaListenerEndpointContainer#2-0-C-1] p.f.c.l.k.c.Consumer.consume2(28): record: ConsumerRecord(topic = test, partition = 1, leaderEpoch = 0, offset = 12, CreateTime = 1723360461719, serialized key size = -1, serialized value size = 73, headers = RecordHeaders(headers = [], isReadOnly = false), key = null, value = {"remark":"Message from the [first datasource]: Hello Kafka!","delFlag":0})
2024-08-11 15:14:21,758 INFO  [org.springframework.kafka.KafkaListenerEndpointContainer#0-0-C-1] p.f.c.l.k.c.Consumer.consume(17): value: {"remark":"Message from the [second datasource]: Hello Kafka!","delFlag":0}
2024-08-11 15:14:21,758 INFO  [org.springframework.kafka.KafkaListenerEndpointContainer#2-0-C-1] p.f.c.l.k.c.Consumer.consume2(29): value: {"remark":"Message from the [first datasource]: Hello Kafka!","delFlag":0}
2024-08-11 15:14:21,760 INFO  [org.springframework.kafka.KafkaListenerEndpointContainer#0-0-C-1] p.f.c.l.k.c.Consumer.consume(16): record: ConsumerRecord(topic = test, partition = 3, leaderEpoch = 0, offset = 10, CreateTime = 1723360461663, serialized key size = -1, serialized value size = 70, headers = RecordHeaders(headers = [], isReadOnly = false), key = null, value = {"remark":"Message from the [default datasource]: Hello Kafka!","delFlag":0})
2024-08-11 15:14:21,760 INFO  [org.springframework.kafka.KafkaListenerEndpointContainer#0-0-C-1] p.f.c.l.k.c.Consumer.consume(17): value: {"remark":"Message from the [default datasource]: Hello Kafka!","delFlag":0}