【Kafka】Configuring Multiple Kafka Clusters in a Spring Boot Project

A Spring Boot application can consume from more than one Kafka cluster by letting the spring.kafka.* auto-configuration drive the default cluster, while a separate @Configuration class builds the consumer and producer factories for the second one. The listener, configuration class, and properties file below show a working setup.

1. Configuration Code

package com.chehejia.osd.server.controller.listener.kafka;

import com.alibaba.fastjson.JSON;
import com.chehejia.framework.util.spring.SpringBeanUtil;
import com.chehejia.osd.server.common.canal.event.CanalEvent;
import com.chehejia.osd.server.common.canal.event.CanalEventProcessorHub;
import com.chehejia.osd.server.common.canal.processor.CanalEventProcessor;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.stereotype.Component;

import java.util.Optional;

@Slf4j
@Component
public class CloudKafkaConsumerListener {

    /**
     * DTS listener for data changes on the attendance_applies and hr_outposting_staffs tables.
     *
     * @param record the Kafka record carrying the Canal change event
     */
    @KafkaListener(topics = "#{'${spring.kafka.event.monitor-out-and-evection-staffs-info}'}", groupId = "a12n-cotransp_dev_consumer_for_dts", containerFactory = "kafkaCloudContainerFactory")
    public void listenDtsEvent(ConsumerRecord<?, ?> record) {
        try {
            Optional.ofNullable(record.value())
                .ifPresent(message -> {
                    log.info("======>[CloudKafkaConsumerListener::listenDtsEvent] 收到并开始消费消息,Topic: {},Partition: {}, keys: {}", record.topic(), record.partition(), record.key());
                    log.info("======>[CloudKafkaConsumerListener::listenDtsEvent] Body:{}", message);
                    CanalEvent canalEvent = JSON.parseObject(message.toString(), CanalEvent.class);
                    if (null == canalEvent) {
                        log.error("======>[CloudKafkaConsumerListener::listenDtsEvent] 消息格式错误 record:{}", record);
                        return;
                    }

                    Class<? extends CanalEventProcessor> serviceClass = CanalEventProcessorHub.CANAL_EVENT_PROCESSOR.get(canalEvent.getTable());
                    CanalEventProcessor eventProcessor = serviceClass != null ? SpringBeanUtil.getBean(serviceClass) : null;
                    if (eventProcessor != null) {
                        log.info("======>[CloudKafkaConsumerListener::listenDtsEvent]【MySQL_Canal_Event_Kafka】解析消息,database:{},table:{},type:{}", canalEvent.getDatabase(), canalEvent.getTable(), canalEvent.getType());
                        eventProcessor.process(record, canalEvent);
                    } else {
                        log.info("======>[CloudKafkaConsumerListener::listenDtsEvent]【MySQL_Canal_Event_Kafka】未注册该消息类型{}的监听器", canalEvent.getTable());
                    }

                });
        } catch (RuntimeException e) {
            log.error("======>[CloudKafkaConsumerListener::listenDtsEvent]【MySQL_Canal_Event_Kafka】Business error while consuming, original record: " + record.toString() + ", cause: " + e.getMessage(), e);
        } catch (Exception e) {
            log.error("======>[CloudKafkaConsumerListener::listenDtsEvent]【MySQL_Canal_Event_Kafka】Unexpected error while consuming, original record: " + record.toString() + ", cause: " + e.getMessage(), e);
        }
    }
}
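
The listener above binds to the second cluster by naming kafkaCloudContainerFactory explicitly in @KafkaListener. Listeners on the default cluster simply omit the containerFactory attribute and fall back to Spring Boot's auto-configured factory, which is built from the spring.kafka.* properties shown below. A minimal hypothetical counterpart for one of the HR topics (the class and method names are illustrative, not from the original post):

package com.chehejia.osd.server.controller.listener.kafka;

import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.stereotype.Component;

@Slf4j
@Component
public class HrKafkaConsumerListener {

    // No containerFactory attribute: this listener uses the auto-configured
    // kafkaListenerContainerFactory driven by the spring.kafka.* properties.
    @KafkaListener(topics = "#{'${spring.kafka.event.staff-entry}'}")
    public void listenStaffEntry(ConsumerRecord<?, ?> record) {
        log.info("Received staff-entry event, topic: {}, key: {}", record.topic(), record.key());
        // Business handling of the onboarding message goes here.
    }
}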

package com.chehejia.osd.server.config;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.*;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;

import javax.annotation.Resource;
import java.util.HashMap;
import java.util.Map;

/**
 * Configuration for the second (converged-cloud) Kafka cluster.
 *
 * @author fanzhen1@lixiang.com
 * @version 1.0
 * @date 2024/05/11
 */
@Configuration
public class CloudKafkaConfig {
    @Resource
    private AppConfig appConfig;
    
    @Bean
    public KafkaTemplate<String, String> kafkaCloudTemplate() {
        return new KafkaTemplate<>(producerFactory());
    }

    @Bean(name = "kafkaCloudContainerFactory")
    KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<Integer, String>> kafkaCloudContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<Integer, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        factory.getContainerProperties().setPollTimeout(3000);
        return factory;
    }

    private ProducerFactory<String, String> producerFactory() {
        return new DefaultKafkaProducerFactory<>(producerConfigs());
    }

    private ConsumerFactory<Integer, String> consumerFactory() {
        return new DefaultKafkaConsumerFactory<>(consumerConfigs());
    }

    private Map<String, Object> producerConfigs() {
        Map<String, Object> props = new HashMap<>(3);
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, appConfig.getKafkaServers());
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        return props;
    }

    private Map<String, Object> consumerConfigs() {
        Map<String, Object> props = new HashMap<>(4);
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, appConfig.getKafkaServers());
        props.put(ConsumerConfig.GROUP_ID_CONFIG, appConfig.getKafkaGroupId());
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "100");
        props.put(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, "30000");
        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "15000");
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG,true);
        return props;
    }
}
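
CloudKafkaConfig reads the second cluster's broker addresses and group id from an AppConfig bean that the original post does not show. A minimal sketch of what it could look like, assuming the values are bound from two hypothetical properties, app.kafka.servers and app.kafka.group-id:

package com.chehejia.osd.server.config;

import lombok.Data;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;

/**
 * Hypothetical sketch: exposes the bootstrap servers and consumer group id of
 * the second Kafka cluster. The property keys are assumptions, not from the post.
 */
@Data
@Component
public class AppConfig {

    @Value("${app.kafka.servers}")
    private String kafkaServers;

    @Value("${app.kafka.group-id}")
    private String kafkaGroupId;
}

Keeping the second cluster's settings out of the spring.kafka.* namespace is what lets both clusters coexist: Spring Boot's auto-configuration only ever sees the default cluster.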

# --------------------
# Service configuration
# --------------------
osd.server.port = 12345
# CORS configuration
osd.mvc.cors.enable = true
# Log levels
logging.level.root = info
logging.level.tracer = info
logging.level.com.chehejia = info

# --------------------
# Database configuration
# --------------------
spring.datasource.dynamic.druid.testOnBorrow = true
mybatis-plus.configuration.log-impl = org.apache.ibatis.logging.stdout.StdOutImpl
# Default database
osd.db.master.address = testdb57-02-mysql-test.chj.cloud:3306/osd_a12n_cotransp
osd.db.master.username = osd_a12n_cotransp_rw
osd.db.master.password = Xo34taA+tJXlchVN9*i
osd.db.slave.address = testdb57-02-mysql-test.chj.cloud:3306/osd_a12n_cotransp
osd.db.slave.username = osd_a12n_cotransp_rw
osd.db.slave.password = Xo34taA+tJXlchVN9*i
# coa (read-only)
osd.db.coa.address = osd.test.it.lixiangoa.com:3306/coa
osd.db.coa.username = osd_a12n_rw
osd.db.coa.password = wwDDtKrAidxhKqye
# Shuttle bus
osd.db.bus.address = tidb.test.lixiangoa.com:4000/a12n_bus_api
osd.db.bus.username = fanzhen1
osd.db.bus.password = 1LgTWPpxmRyPogQl

# --------------------
# Redis configuration
# --------------------
osd.redis.host = 172.24.192.38
osd.redis.port = 6436
osd.redis.password = osd-a12n-api-test-91778
osd.redis.database = 6

# --------------------
# dm configuration
# --------------------
dm.aeskey.endpoint = http://chehejia-service-bks-app.dev.k8s.chj.cloud/bks/v1-0/bdk/arc-db-key/latest

# --------------------
# Kafka configuration
# --------------------
# Kafka broker addresses; for a cluster, list several separated by commas
spring.kafka.bootstrap-servers = 10.134.80.233:9092,10.134.80.232:9092,10.134.80.231:9092
# Consumer configuration
## Credentials (the JAAS login module must match the SASL mechanism)
spring.kafka.consumer.properties.security.protocol = SASL_PLAINTEXT
spring.kafka.consumer.properties.sasl.mechanism = PLAIN
spring.kafka.consumer.properties.sasl.jaas.config = org.apache.kafka.common.security.plain.PlainLoginModule required username="consumer" password="consumer1-1secret";
## Consumer group ID
spring.kafka.consumer.group-id = a12n-cotransp_dev_consumer_for_hr
## Whether to auto-commit offsets
spring.kafka.consumer.enable-auto-commit = true
## Offset auto-commit interval (how long after receiving a message the offset is committed)
spring.kafka.consumer.auto-commit-interval = 1000
## Key/value deserializers
spring.kafka.consumer.key-deserializer = org.apache.kafka.common.serialization.StringDeserializer
spring.kafka.consumer.value-deserializer = org.apache.kafka.common.serialization.StringDeserializer
# Listener configuration
## Number of threads running in the listener container
spring.kafka.listener.concurrency = 3
## Don't fail application startup when a listened-to topic does not exist
spring.kafka.listener.missing-topics-fatal = false

# Kafka event: suspend/restore permissions
spring.kafka.event.staff-account-update = hr-access-control-topic
# Kafka event: employee onboarding
spring.kafka.event.staff-entry = hr-entry-staffs-info-topic-test
# Kafka event: employee information change
spring.kafka.event.staff-update = updated-staff-topic-test
# Kafka event: employee departure
spring.kafka.event.staff-leave = hr-all-staff-leave-test-topic
# Kafka event: onboarding restoration
spring.kafka.event.staff-leave-recovery = hr-all-recovery-leave-topic-test
# Kafka event: vendor onboarding and information changes
spring.kafka.event.vendor-staff = hr-vendor-info-topic
# Kafka event: v-vendor converted to ordinary vendor
spring.kafka.event.v-vendor-to-vendor = hr-vendor-to-helper-topic
# Kafka event: vendor departure
spring.kafka.event.vendor-staff-leave = hr-delete-vendor-info-topic
# Kafka event: employee local outing, secondment, and off-site business trip events
spring.kafka.event.out-and-evection-staffs-info = out_and_evection-staffs-info-topic-test
# DTS Kafka event: incremental data from the hr_outposting_staffs_test and attendance_applies_test tables
spring.kafka.event.monitor-out-and-evection-staffs-info = monitor-out-and-evection-staffs-info-topic-test

# --------------------
# esa service configuration
# --------------------
esa.zk.server = zk01-dev.chj.cloud:10311,zk02-dev.chj.cloud:10311,zk03-dev.chj.cloud:10311
esa.zk.connection.timeout = 60000
esa.zk.session.timeout = 7000
# Enable listening; if absent or false, the ZooKeeper connection is not initialized at startup and no events are listened to.
esa.listener.enable = true
# The recommended event delivery protocol is Netty-based; if this parameter is absent or set to "rest", the legacy REST delivery is used.
esa.listener.protocol = event
# Used together with the protocol setting; ignored in legacy REST mode. With the new "event" protocol, this is the port on which events are received.
esa.listener.port = 23939
esa.listener.events = 0xE14b3ff
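
For completeness, producing to the second cluster goes through the kafkaCloudTemplate bean defined above, while an unqualified KafkaTemplate injection still targets the auto-configured default cluster. A minimal usage sketch (the service class and method are illustrative, not from the original post):

package com.chehejia.osd.server.service;

import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.stereotype.Service;

@Service
public class CloudEventPublisher {

    private final KafkaTemplate<String, String> kafkaCloudTemplate;

    // Inject the template built by CloudKafkaConfig by bean name so that
    // messages go to the cloud cluster, not the default one.
    public CloudEventPublisher(@Qualifier("kafkaCloudTemplate") KafkaTemplate<String, String> kafkaCloudTemplate) {
        this.kafkaCloudTemplate = kafkaCloudTemplate;
    }

    public void publish(String topic, String key, String payload) {
        kafkaCloudTemplate.send(topic, key, payload);
    }
}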
