Kafka setup and Spring Boot integration

1. Configuring Kafka in a non-Spring project

In many cases a Kafka service is nothing more than a plain data-cleansing service (purely asynchronous message processing, non-critical business logic split out of the main flow, big-data stream processing, etc.) or a monitoring/alerting service (rate limiting, peak shaving, alerting integrated with Flink, and so on). Such services do not need to depend on Spring at all, which keeps the service configuration simple.

1. Create a Kafka configuration file under the resources directory (one file per environment, e.g. kafka-test.properties):

#Topic to consume
topic=bi-data-sam
#Consumer group id used when consuming the topic
group.id=external-action-forwarder-sam
#Kafka broker address
bootstrap.servers=ckafka-floxf08f.ap-guangzhou.ckafka.tencentcloudmq.com:6018
#Kafka account username and password (SASL/PLAIN JAAS config)
sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username="ckafka-floxf08f#k2t-qshare" password="Z5uH4prcr7KGqTEg6PjKBqqCqLcXpZyD";
#Security protocol; for an overview of Kafka authentication options see https://blog.csdn.net/u012036736/article/details/84747923
security.protocol=SASL_PLAINTEXT
sasl.mechanism=PLAIN
#Defaults to true (auto-commit); recommended to disable it and commit offsets manually
enable.auto.commit=false
#Where the consumer starts reading when it registers:
# earliest: start from the committed offset if one exists, otherwise from the beginning of the partition;
# latest: start from the committed offset if one exists, otherwise consume only messages produced after the consumer starts;
# none: start from the committed offset when every partition has one; throw an exception if any partition has no committed offset
auto.offset.reset=earliest
#Session timeout
session.timeout.ms=20000
#Maximum interval allowed between two poll() calls (default 5 minutes, i.e. 300000 ms)
max.poll.interval.ms=500000
#Maximum number of records returned by a single poll (default 500)
max.poll.records=2000

2. Read the configuration file and load it into a Properties object:

private final Properties config = new Properties();

protected void configure() {
    // Load kafka-<env>.properties from the classpath
    try (InputStream inputStream =
        Thread.currentThread()
            .getContextClassLoader()
            .getResourceAsStream(getKafkaPropertiesName(name))) {
      assert inputStream != null;
      config.load(inputStream);
    } catch (IOException e) {
      throw new RuntimeException(e);
    }

    // Pull the non-client key "topic" out of the Properties so that only real Kafka
    // client configs remain; the resulting map is presumably bound/exposed elsewhere
    // for the subscribe() call later on.
    Map<String, String> topicAndGroup =
        ImmutableMap.of("kafka.topic.list", (String) config.remove("topic"));
  }

  /**
   * Resolve which configuration file to read based on the environment.
   *
   * @param name environment name, e.g. "test" or "prod"
   * @return the properties file name, e.g. kafka-test.properties
   */
  String getKafkaPropertiesName(String name) {
    return String.format("kafka-%s.properties", name);
  }
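
The environment name has to come from somewhere; main() later calls EnvType.fromSystem(), whose implementation is not shown here, so the following is only a hedged sketch of how it might look (read a -Denv system property or an ENV variable and default to test):

// Hedged sketch: EnvType is an assumed helper, not part of the Kafka client API.
public enum EnvType {
  TEST, PROD;

  public static EnvType fromSystem() {
    // -Denv takes precedence, then the ENV variable, then the "test" default
    String env = System.getProperty("env", System.getenv().getOrDefault("ENV", "test"));
    return "prod".equalsIgnoreCase(env) ? PROD : TEST;
  }

  /** Lower-case name, suitable for getKafkaPropertiesName(name). */
  public String lowerName() {
    return name().toLowerCase();
  }
}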

3. Create the Kafka consumer; it must be a singleton (KafkaConsumer is not thread-safe, so create it once and use it from a single thread):

  @Singleton
  KafkaConsumer<byte[], byte[]> provideKafkaConsumer() {
    // Keys and values are consumed as raw byte arrays; deserialization is left
    // to the business code that processes the records.
    return new KafkaConsumer<>(
        config, Serdes.ByteArray().deserializer(), Serdes.ByteArray().deserializer());
  }
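
The @Singleton annotation and ImmutableMap usage above suggest the project is wired with Guice; below is a minimal sketch of such a module (the module name and constructor are assumptions, not from the original code). The point is that the consumer is provided in singleton scope:

// Hedged sketch of a Guice module (assumption: Guice is the DI framework in use).
import com.google.inject.AbstractModule;
import com.google.inject.Provides;
import com.google.inject.Singleton;
import java.util.Properties;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.Serdes;

public class KafkaConsumerModule extends AbstractModule {

  private final Properties config;

  public KafkaConsumerModule(Properties config) {
    this.config = config;
  }

  // Singleton scope: one KafkaConsumer per process, used from a single thread.
  @Provides
  @Singleton
  KafkaConsumer<byte[], byte[]> provideKafkaConsumer() {
    return new KafkaConsumer<>(
        config, Serdes.ByteArray().deserializer(), Serdes.ByteArray().deserializer());
  }
}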

4. Special handling when the consumer group rebalances (ConsumerRebalanceListener):

  @RequiredArgsConstructor
  private static final class HandleRebalance implements ConsumerRebalanceListener {

    private final KafkaConsumer<?, ?> consumer;

    @Override
    public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
      log.warn("Lost partitions in rebalance, committing current offsets.");
      try {
        consumer.commitSync();
      } catch (Exception e) {
        log.error("Committing offsets failed: ", e);
      }
    }

    @Override
    public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
      log.info("Partitions assigned: {}", partitions);
    }
  }
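
commitSync() in onPartitionsRevoked commits whatever position the consumer has reached in the current poll. A common refinement, shown as a hedged sketch below (currentOffsets is an assumed field, not part of the original code), is to record the offset of each record after it has been processed and commit exactly those offsets when partitions are revoked:

// Hedged sketch: track processed offsets per partition
// (org.apache.kafka.common.TopicPartition, org.apache.kafka.clients.consumer.OffsetAndMetadata).
private final Map<TopicPartition, OffsetAndMetadata> currentOffsets = new HashMap<>();

// After each record has been processed successfully; the committed offset is the
// NEXT offset to consume, hence record.offset() + 1:
currentOffsets.put(
    new TopicPartition(record.topic(), record.partition()),
    new OffsetAndMetadata(record.offset() + 1));

// And in onPartitionsRevoked():
consumer.commitSync(currentOffsets);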

5. Subscribe to the Kafka topic, poll messages and parse them:

  // The Kafka consumer singleton provided above
  private final KafkaConsumer<byte[], byte[]> kafkaConsumer;

public static void main(String[] args) {
    log.info("Starting consumer application...");
    // The current environment is passed in via a system/environment variable
    EnvType envType = EnvType.fromSystem();

    // Flag that controls the poll loop
    AtomicBoolean stopped = new AtomicBoolean(false);
    // Flip the flag on shutdown so the main loop exits promptly instead of the
    // process lingering after the JVM receives its termination signal
    Runtime.getRuntime()
        .addShutdownHook(Executors.defaultThreadFactory().newThread(() -> stopped.set(true)));
    try {
      // app is the application instance built for envType (construction omitted in this snippet)
      app.start(stopped);
    } catch (Exception e) {
      log.error("App raise exception: ", e);
    } finally {
      // Stop the consumer after an error; adjust to your own business logic
      app.close();
    }
  }

private void start(AtomicBoolean stopped) {
    log.info("Starting kafka consumer app...");
    // Subscribe to the configured topics with the rebalance listener from step 4
    kafkaConsumer.subscribe(
        Arrays.asList(StringUtils.split(kafkaTopicList, ',')), new HandleRebalance(kafkaConsumer));
    while (!stopped.get()) {
      // Pull the next batch of records from the broker
      ConsumerRecords<byte[], byte[]> records = kafkaConsumer.poll(TIMEOUT);
      if (records.isEmpty()) continue;
      monitor.reportAsync(MonitorType.MSG_RECEIVE, records.count());
      List<byte[]> bytes = new ArrayList<>(records.count());
      for (ConsumerRecord<byte[], byte[]> record : records) {
        bytes.add(record.value());
      }
      List<byte[]> bytesAfterMapping = mappingFunction.apply(bytes, observer);
      if (bytesAfterMapping.isEmpty()) continue;
      // TODO business logic
      // Commit offsets asynchronously
      kafkaConsumer.commitAsync(
          (offsets, exception) -> {
            if (exception != null) {
              // Commit failed: report it for monitoring/alerting
              monitor.reportAsync(MonitorType.COMMIT_OFFSET_FAILED, 1);
            }
          });
    }
  }
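
Note that after stopped is set, poll() can still block for up to TIMEOUT before the loop notices the flag. A common refinement, shown as a hedged sketch below (not part of the original code), is to also call kafkaConsumer.wakeup() from the shutdown hook and catch WakeupException around the loop:

// Hedged sketch: wake a blocked poll() up during shutdown so the loop exits immediately.
// In the shutdown hook (in addition to flipping the flag):
Runtime.getRuntime()
    .addShutdownHook(Executors.defaultThreadFactory().newThread(() -> {
      stopped.set(true);
      kafkaConsumer.wakeup();   // a pending poll() will throw WakeupException
    }));

// And around the poll loop in start():
try {
  while (!stopped.get()) {
    ConsumerRecords<byte[], byte[]> records = kafkaConsumer.poll(TIMEOUT);
    // ... same processing and commit logic as above ...
  }
} catch (org.apache.kafka.common.errors.WakeupException e) {
  // Expected during shutdown; nothing to handle.
} finally {
  kafkaConsumer.close();
}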

2. Configuring Kafka in a Spring project

1. application.yml configuration:

kafka:
  topic: bi-sandbox
  group-id: metadata-management-test
  bootstrap-servers: 100.119.167.50:6189
  enable-auto-commit: false
  auto-offset-reset: earliest
  session-timeout-ms: 20000
  max-poll-interval-ms: 500000
  max-poll-records: 2000
  producer-topic: metadata-management-test
  producer-bootstrap-servers: ckafka-3y356keb.ap-guangzhou.ckafka.tencentcloudmq.com:6001

2. Bind the kafka configuration with @ConfigurationProperties:

import lombok.Data;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.stereotype.Component;

@Component
@ConfigurationProperties(prefix = "kafka")
@Data
public class KafkaConfig {

    private String topic;

    private String groupId;

    private String bootstrapServers;

    private boolean enableAutoCommit;

    private String autoOffsetReset;

    private Integer sessionTimeoutMs;

    private Integer maxPollIntervalMs;

    private Integer maxPollRecords;

    private String producerTopic;

    private String producerBootstrapServers;
}

3. Create the consumer Properties (the method below reads the bound fields directly, so it presumably lives in KafkaConfig):

 /**
     * Build the consumer configuration.
     *
     * @return consumer Properties
     */
    public Properties consumerConfigs() {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit);
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeoutMs);
        props.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollIntervalMs);
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
        // SASL authentication configuration
        props.put("sasl.jaas.config",
                "org.apache.kafka.common.security.plain.PlainLoginModule required "
                        + "username=\"ckafka-kekvburn#action-data-rw\" password=\"XhzfvbErGLMus6TkHSBbN6Yt\";");
        props.put("security.protocol", "SASL_PLAINTEXT");
        props.put(SaslConfigs.SASL_MECHANISM, "PLAIN");
        return props;
    }

4. Create the actual consumer:

package com.tencent.metadata.api.kafka;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.Serdes;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.stereotype.Component;

import javax.annotation.Resource;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

/**
 * Kafka consumer: pulls messages from the topic and parses them.
 *
 * @author bob
 * @since 2020-10-26
 */
@Component
public class Consumer implements InitializingBean {

    private static final Logger LOGGER = LoggerFactory.getLogger(Consumer.class);


    @Resource
    private com.tencent.metadata.api.config.KafkaConfig myConfig;

    private static final Duration TIMEOUT = Duration.ofSeconds(10);

    @Override
    public void afterPropertiesSet() throws Exception {
        new KafkaConsumerThread(myConfig).start();
    }


    public class KafkaConsumerThread extends Thread {

        private final List<ActionMessage> buffer = new ArrayList<>(500);


        private KafkaConsumer<byte[], byte[]> consumer;

        public KafkaConsumerThread(com.tencent.metadata.api.config.KafkaConfig myConfig) {
            this.consumer = provideKafkaClient(myConfig);
            this.consumer.subscribe(Arrays.asList(myConfig.getTopic()));
        }

        private KafkaConsumer<byte[], byte[]> provideKafkaClient(
                com.tencent.metadata.api.config.KafkaConfig myConfig) {


            return new KafkaConsumer<>(
                    myConfig.consumerConfigs(), Serdes.ByteArray().deserializer(), Serdes.ByteArray().deserializer());
        }

        @Override
        public void run() {
            try {
                while (!Thread.currentThread().isInterrupted()) {
                    ConsumerRecords<byte[], byte[]> records =
                            consumer.poll(TIMEOUT);
                    // Deserialize this batch of records into the buffer
                    deserializeMessage(records);
                    if (buffer.isEmpty()) {
                        continue;
                    }
                    // Business logic for the parsed records goes here

                    // Commit offsets asynchronously
                    consumer.commitAsync(
                            (offsets, exception) -> {
                                if (exception != null) {
                                    LOGGER.warn("Offset commit failed: ", exception);

                                }
                            });

                }
            } catch (Exception e) {
                LOGGER.error("consumer message error:", e);
            } finally {
                consumer.close();
            }
        }


        /**
         * Deserialize the Kafka records into ActionMessage objects.
         *
         * @param records records returned by poll()
         */
        private void deserializeMessage(ConsumerRecords<byte[], byte[]> records) {
            buffer.clear();
            long serverReceiveTime = System.currentTimeMillis();
            for (ConsumerRecord<byte[], byte[]> record : records) {
                // Parse each record and add the result to the buffer
            }
        }
    }


}
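
The thread started in afterPropertiesSet has no orderly stop: it only ends when the JVM exits. One possible refinement, sketched below under assumptions (the shutdown() method and the DisposableBean implementation are additions, not part of the original class), is to keep a reference to the thread and let Spring stop it when the context closes:

// Hedged sketch: give the consumer thread an orderly shutdown path.
// The Consumer bean would additionally implement
// org.springframework.beans.factory.DisposableBean and keep the thread reference:
private KafkaConsumerThread consumerThread;

@Override
public void afterPropertiesSet() {
    consumerThread = new KafkaConsumerThread(myConfig);
    consumerThread.start();
}

@Override
public void destroy() {
    consumerThread.shutdown();
}

// Assumed addition inside KafkaConsumerThread: wakeup() makes a blocked poll()
// throw WakeupException, which the existing catch block logs before the finally
// block closes the consumer; interrupt() ends the while (!isInterrupted()) loop.
public void shutdown() {
    consumer.wakeup();
    this.interrupt();
}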

5. Create the producer Properties (again as a method of KafkaConfig):

 /**
     * Build the producer configuration.
     *
     * @return producer Properties
     */
    public Properties producerConfigs() {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, producerBootstrapServers);
        props.put(ProducerConfig.ACKS_CONFIG, "all");
        props.put(ProducerConfig.RETRIES_CONFIG, 0);
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringSerializer");
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringSerializer");
        props.put("sasl.jaas.config",
                "org.apache.kafka.common.security.plain.PlainLoginModule required "
                        + "username=\"ckafka-3y356keb#metadata-test\" password=\"XhzfvbErGLMus6TkHSBbN6Yt\";");
        props.put("security.protocol", "SASL_PLAINTEXT");
        props.put(SaslConfigs.SASL_MECHANISM, "PLAIN");
        return props;
    }

6. Create the actual producer:

package com.tencent.metadata.api.kafka;

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.stereotype.Component;

import javax.annotation.Resource;
import java.util.Random;

/**
 * Kafka producer: publishes messages to the configured topic.
 *
 * @author bob
 * @since 2020-11-16
 */
@Component
public class Producer implements InitializingBean {

    private static final Logger LOGGER = LoggerFactory.getLogger(Producer.class);


    @Resource
    private com.tencent.metadata.api.config.KafkaConfig myConfig;

    private static KafkaProducer<String, String> producer;


    /**
     * Initialize the producer.
     * @throws Exception if construction fails
     */
    @Override
    public void afterPropertiesSet() throws Exception {

        producer = new KafkaProducer<String, String>(
                myConfig.producerConfigs());
    }


    /**
     * Send a message.
     * @param data message body, as JSON
     */
    public void sendMessage(String data) {
        // A random key in [0, 10) spreads messages across partitions
        ProducerRecord<String, String> message = new ProducerRecord<>(myConfig.getProducerTopic(),
                new Random().nextInt(10) + "", data);
        // Send asynchronously with a callback
        producer.send(message, new DemoProducerCallback());
    }

    /**
     * Callback for asynchronous sends.
     */
    class DemoProducerCallback implements Callback {
        @Override
        public void onCompletion(RecordMetadata recordMetadata, Exception e) {
            if (e != null) {
                // If Kafka returned an error, onCompletion is invoked with a non-null exception
                LOGGER.error("Asynchronous send failed", e);
            }
        }
    }
}
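
Usage is then just a matter of injecting the Producer bean. A hedged sketch (the service class and its name are illustrative, not from the original project):

import javax.annotation.Resource;
import org.springframework.stereotype.Service;

// Illustrative caller: any Spring bean can inject Producer and publish JSON payloads.
@Service
public class MetadataEventPublisher {

    @Resource
    private Producer producer;

    public void publish(String eventJson) {
        // sendMessage picks a random key internally to spread records across partitions
        producer.sendMessage(eventJson);
    }
}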


