Kafka Flow Control (Consumer Online/Offline && ConsumerRebalanceListener)

"本文介绍了一种在Kafka消费者中实现消息消费的并发控制策略,以应对第三方工具并发限制。通过使用线程安全的计数器控制并发数量,当达到阈值时,消费者会"下线",释放其他消费者消费同一分区的消息,防止消息积压。此外,还展示了如何在重平衡时修正offset,以及如何手动提交和获取offset。"

1. Requirement

After each consumer processes a message it dispatches an asynchronous task, and the number of concurrently running tasks must be limited.

2. Approach

1. The consumer consumes a message and dispatches an asynchronous task.

2. A thread-safe counter limits the number of concurrent tasks (a sketch of such a counter follows this list).

3. When consumer A is blocked on the concurrency limit, it "goes offline" so that an idle consumer can consume the messages of A's partition. (Each partition is assigned to exactly one consumer; if A stays online while blocked, no other consumer can read that partition and messages pile up.)
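
The TaskCount counter referenced in step 2 is never shown in this post (the full class may be in the linked repository). Judging only from how the consumer below calls it (a constructor taking the limit, plus hasAuth(), acquire(), and release()), a minimal sketch could look like this; everything beyond those four signatures is an assumption, and the package is inferred from the fact that ConcurrentConsumer imports nothing extra for it.

package com.fh.kafka.kafkahelper.consumer;

import java.util.concurrent.atomic.AtomicInteger;

/**
 * Thread-safe task counter. Sketch only: the method names mirror the calls
 * in ConcurrentConsumer; the implementation itself is assumed.
 */
public class TaskCount {

    /** maximum number of tasks allowed to run at once */
    private final int limit;

    /** number of tasks currently running */
    private final AtomicInteger running = new AtomicInteger(0);

    public TaskCount(int limit) {
        this.limit = limit;
    }

    /** true while another task may still be started */
    public boolean hasAuth() {
        return running.get() < limit;
    }

    /** claim a slot before starting a task */
    public void acquire() {
        running.incrementAndGet();
    }

    /** free the slot when the task finishes */
    public void release() {
        running.decrementAndGet();
    }
}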

3. Implementation

Latest code: git@github.com:hzhulan/kafkahelper.git

3.1 Flow-controlled consumer

Offsets must be committed manually, and during a rebalance the two callbacks of the rebalance listener are used to correct them.
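
The KafkaConfig bean is also not shown in the post. Since offsets are committed manually, enable.auto.commit has to be off; a plausible sketch of it follows, where every concrete value is an assumption (the topic msg and group jsGroup are taken from section 4).

package com.fh.kafka.kafkahelper.common.bean;

import org.apache.kafka.clients.consumer.ConsumerConfig;

import java.util.Properties;

/**
 * Sketch of the KafkaConfig bean used throughout this article; the original
 * class is not shown, so every value below is an assumption.
 */
public class KafkaConfig {

    public String getTopic() {
        return "msg"; // topic created in section 4.1
    }

    public Properties buildConsumerProps() {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "jsGroup"); // group queried in section 4.3
        // Offsets are committed manually, so auto-commit must be off.
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringDeserializer");
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringDeserializer");
        return props;
    }
}

The flow-controlled consumer itself: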

package com.fh.kafka.kafkahelper.consumer;

import com.fh.kafka.kafkahelper.common.bean.KafkaConfig;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.TopicPartition;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;

/**
 * Requirement: after consuming a message, the consumer calls a third-party tool to dispatch an
 * asynchronous task, but the tool has a concurrency limit that we must enforce.
 * Implementation: a thread-safe counter, TaskCount, takes the consumer online/offline according to the remaining concurrency.
 * Slots available: the consumer keeps consuming; no slots left: the consumer goes offline so its partition is not blocked.
 * Example: 3 partitions are consumed by at most 3 consumers, one consumer per partition. If consumer A has used up its
 * concurrency and is stuck, the messages behind it in its partition cannot be consumed by anyone else and a backlog forms.
 * At that point A is taken "offline" so that consumer B or C can consume that partition as a side job.
 */
public class ConcurrentConsumer {

    private static final Logger LOGGER = LoggerFactory.getLogger(ConcurrentConsumer.class);

    private String name;

    private TaskCount count = new TaskCount(3);


    private List<String> data = new ArrayList<>();

    private KafkaConfig kafkaConfig;

    private KafkaConsumer<String, String> consumer;

    /**
     * Rescue wait: one last chance to regain a slot before going offline, in seconds
     */
    private static final int RESCUE_SECOND = 3;

    /**
     * Simulated task execution time, in seconds
     */
    private static final int TASK_EXECUTE_TIME = 5;

    /**
     * Re-subscribe polling interval, in seconds
     */
    private static final int RESUBSCRIBE_PERIOD = 60;

    /**
     * For testing only; in production this could be persisted to a database
     */
    private Map<TopicPartition, OffsetAndMetadata> currentOffset = new ConcurrentHashMap<>();

    public ConcurrentConsumer(String name) {
        this.name = name;
        this.kafkaConfig = new KafkaConfig();
        buildConsumer();
    }

    public void buildConsumer() {

        // 1. Create the consumer
        this.consumer = new KafkaConsumer<>(this.kafkaConfig.buildConsumerProps());

        // 2. Subscribe to the topic
        this.consumer.subscribe(Collections.singletonList(this.kafkaConfig.getTopic()));

    }

    public void consume() {
        this.consume(true);
    }

    /**
     * Main consume loop.
     * @param isFirst whether this is the consumer's first time online
     */
    private void consume(boolean isFirst) {

        // Block until an execution slot is available
        waitForExecute();

        if (!isFirst) {
            reSubscribe();
        }

        LOGGER.info("【{}】上线", this.name);


        try {
            outWhile:
            while (count.hasAuth()) {

                ConsumerRecords<String, String> records = consumer.poll(100);
                for (ConsumerRecord<String, String> record : records) {

                    // Last-chance rescue: wait briefly; if there is still no slot, leave the loop and go offline
                    if (!count.hasAuth()) {
                        TimeUnit.SECONDS.sleep(RESCUE_SECOND);
                        if (!count.hasAuth()) {
                            break outWhile;
                        }
                    }

                    // Logging only; can be ignored
                    data.add(record.value());
                    LOGGER.info("[{} consumed] size: {}, partition: {}, offset: {}, value: {}\ndata: {}.", name,
                            data.size(), record.partition(), record.offset(), record.value(), data);

                    // Claim an execution slot
                    count.acquire();

                    // Record the offset to commit (committed offset = consumed offset + 1)
                    currentOffset.put(new TopicPartition(record.topic(), record.partition()), new OffsetAndMetadata(record.offset() + 1));

                    // Commit the offset
                    commitOffset();

                    // Simulate the asynchronous task
                    new Thread(() -> {
                        try {
                            TimeUnit.SECONDS.sleep(TASK_EXECUTE_TIME);
                        } catch (InterruptedException e) {
                            LOGGER.error("等待中断", e);
                        } finally {
                            count.release();
                        }
                    }).start();

                }
            }

        } catch (Exception e) {
            LOGGER.error("执行异常", e);
        }

        // Go offline and retry (note: this recursion deepens the stack on every offline/online cycle)
        logout();
        this.consume(false);
    }

    /**
     * Block until an execution slot is available.
     */
    private void waitForExecute() {
        while (!count.hasAuth()) {
            try {
                TimeUnit.SECONDS.sleep(RESUBSCRIBE_PERIOD);
            } catch (InterruptedException e) {
                LOGGER.error("【等待中断】", e);
            }

            LOGGER.info("【{}排队等待获取执行权限】", name);
        }
    }

    /**
     * Come back "online": rebuild the consumer and re-subscribe.
     */
    private void reSubscribe() {
        this.consumer = new KafkaConsumer<>(this.kafkaConfig.buildConsumerProps());
        this.consumer.subscribe(Collections.singletonList(this.kafkaConfig.getTopic()), new ConsumerRebalanceListener() {

            /**
             * Called before the rebalance starts; commit the current offsets.
             * @param partitions
             */
            @Override
            public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
                LOGGER.info("=============== 重平衡开始 ==============");
                commitOffset();
            }

            /**
             * Called after the rebalance: seek each reassigned partition to its saved offset.
             * @param partitions
             */
            @Override
            public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
                try {
                    for (TopicPartition partition : partitions) {
                        if (getOffset(partition) != null) {
                            consumer.seek(partition, getOffset(partition));
                            LOGGER.info("【重平衡seek】partition: {}, offset: {}.", partition.partition(), getOffset(partition));
                        } else {
                            LOGGER.info("【offset为空】partition: {}", partition.partition());
                        }
                    }
                } catch (Exception e) {
                    LOGGER.error("重平衡异常", e);
                }
                LOGGER.info("=============== 重平衡结束 ==============");
            }
        });
        this.consumer.resume(this.consumer.assignment());
    }

    /**
     * Take the consumer offline.
     */
    private void logout() {
        // Unsubscribe to release the partitions; staying subscribed while paused for too long
        // triggers a poll-timeout error. Note that after unsubscribe() the assignment is
        // empty, so the pause() below is effectively a no-op and kept only for safety.
        this.consumer.unsubscribe();
        this.consumer.pause(consumer.assignment());
    }

    /**
     * ============================ Custom offset storage; could be maintained in a database =================================
     */
    private Long getOffset(TopicPartition partition) {
        if (currentOffset.get(partition) == null) {
            return null;
        }
        return currentOffset.get(partition).offset();
    }

    private void commitOffset() {

        // Commit a snapshot so that clearing the map below cannot race with the
        // in-flight async commit that may still reference it.
        Map<TopicPartition, OffsetAndMetadata> snapshot = new HashMap<>(currentOffset);
        this.consumer.commitAsync(snapshot, (offsets, exception) -> {
            if (exception != null) {
                LOGGER.error("Offset commit failed", exception);
            }
        });

        currentOffset.clear();
    }


    public static void main(String[] args) {
        ConcurrentConsumer factory = new ConcurrentConsumer("Consumer");
        factory.consume();
    }
}

3.2 Plain consumer

package com.fh.kafka.kafkahelper.consumer;

import com.fh.kafka.kafkahelper.common.bean.KafkaConfig;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.TopicPartition;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;

/**
 * A simple consumer with no concurrency limit; it consumes alongside the ConcurrentConsumer above
 */
public class SimpleConsumer {

    private static final Logger LOGGER = LoggerFactory.getLogger(SimpleConsumer.class);

    private KafkaConfig kafkaConfig;

    private KafkaConsumer<String, String> consumer;

    /**
     * For testing only; could be persisted to a database. Note: this simple consumer
     * never populates the map, so the seek branch in the rebalance listener never fires.
     */
    private Map<TopicPartition, OffsetAndMetadata> currentOffset = new ConcurrentHashMap<>();

    public SimpleConsumer() {
        this.kafkaConfig = new KafkaConfig();
        buildConsumer();
    }

    private void buildConsumer() {
        // 1. Create the consumer
        this.consumer = new KafkaConsumer<>(this.kafkaConfig.buildConsumerProps());

        // 2. Subscribe to the topic (a single-element list)
        this.consumer.subscribe(Collections.singletonList(kafkaConfig.getTopic()), new ConsumerRebalanceListener() {
            /**
             * Called before the rebalance starts; commit the current offsets.
             * @param partitions
             */
            @Override
            public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
                LOGGER.info("=============== 重平衡开始 ==============");
                commitOffset();
            }

            /**
             * Called after the rebalance: seek each reassigned partition to its saved offset.
             * @param partitions
             */
            @Override
            public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
                try {
                    for (TopicPartition partition : partitions) {
                        if (getOffset(partition) != null) {
                            consumer.seek(partition, getOffset(partition));
                            LOGGER.info("【重平衡seek】partition: {}, offset: {}.", partition.partition(), getOffset(partition));
                        } else {
                            LOGGER.info("【offset为空】partition: {}", partition.partition());
                        }
                    }
                } catch (Exception e) {
                    LOGGER.error("重平衡异常", e);
                }
                LOGGER.info("=============== 重平衡结束 ==============");
            }
        });

    }

    public void consume() {
        LOGGER.info("【普通消费者】上线");
        AtomicInteger count = new AtomicInteger(0);
        try {
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(100);

                for (ConsumerRecord<String, String> record : records) {
                    int number = count.incrementAndGet();
                    LOGGER.info("【普通消费者】No.{} parition: {}, offset: {}, value: {}", number, record.partition(), record.offset(),record.value());
                }
                commitOffset();
            }
        } catch (Exception e) {
            LOGGER.error("消费异常", e);
        } finally {
            LOGGER.info("【普通消费者】下线");
        }
    }

    /**
     * ============================ Custom offset storage; could be maintained in a database =================================
     */
    private Long getOffset(TopicPartition partition) {
        if (currentOffset.get(partition) == null) {
            return null;
        }
        return currentOffset.get(partition).offset();
    }

    private void commitOffset() {

        // Asynchronously commit the offsets returned by the last poll().
        this.consumer.commitAsync((offsets, exception) -> {
            if (exception != null) {
                LOGGER.error("Offset commit failed", exception);
            }
        });

        currentOffset.clear();
    }

    public static void main(String[] args) {
        new SimpleConsumer().consume();
    }
}

3.3 Producer

package com.fh.kafka.kafkahelper.producer;

import com.fh.kafka.kafkahelper.common.bean.KafkaConfig;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.ArrayList;
import java.util.List;

/**
 * Producer
 */
public class GbdProducer {

    private static final Logger LOGGER = LoggerFactory.getLogger(GbdProducer.class);

    private static KafkaProducer producer = null;

    private static KafkaConfig kafkaConfig;

    static {
        kafkaConfig = new KafkaConfig();
    }

    private static void init() {
        if (producer == null) {
            producer = new KafkaProducer<String, String>(kafkaConfig.buildProducerProps());
        }
    }


    public static void send(String topic, String value) {

        init();

        try {
//            ProducerRecord<String, String> record = new ProducerRecord<>(topic, value);
//            producer.send(record);
            int v = Integer.parseInt(value);
            ProducerRecord<String, String> record = new ProducerRecord<>(topic, v % 3, "test", value);
            producer.send(record);
            LOGGER.info("发送消息: {}, partition: {}.", v, v % 3);

        } catch (Exception e) {
            LOGGER.error("发送消息异常", e);
        } finally {
            producer.close();
            producer = null;
        }

    }

    public static void send(String topic, List<String> values) {

        init();

        try {
            // Round-robin the messages across the 3 partitions

            for (int i = 0; i < values.size(); i++) {
                ProducerRecord<String, String> record = new ProducerRecord<>(topic, i % 3, "test", values.get(i));
                producer.send(record);
                LOGGER.info("发送消息: {}, partition: {}.", values.get(i), i % 3);
            }
        } catch (Exception e) {
            LOGGER.error("发送消息异常", e);
        } finally {
            producer.close();
            producer = null;
        }

    }

    public static void main(String[] args) {
        List<String> msgList = new ArrayList<>();
        for (int i = 0; i < 15; i++) {
            msgList.add(String.format("%d", i));
        }
        GbdProducer.send(kafkaConfig.getTopic(), msgList);
    }
}
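
buildProducerProps() is likewise not shown in the post; continuing the assumed KafkaConfig sketch from section 3.1, it might look like this (all values are guesses):

import org.apache.kafka.clients.producer.ProducerConfig;

import java.util.Properties;

// Assumed addition to the KafkaConfig sketch above.
public Properties buildProducerProps() {
    Properties props = new Properties();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
            "org.apache.kafka.common.serialization.StringSerializer");
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
            "org.apache.kafka.common.serialization.StringSerializer");
    return props;
}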

4. Verification

4.1 Create the topic

Set the partition count to 3 with --partitions 3:

.\bin\windows\kafka-topics.bat --zookeeper localhost:2181 --create --replication-factor 1 --partitions 3 --topic msg
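
On newer Kafka versions (2.2+; the --zookeeper flag was removed entirely in 3.0), the equivalent command talks to the broker directly:

.\bin\windows\kafka-topics.bat --bootstrap-server localhost:9092 --create --replication-factor 1 --partitions 3 --topic msg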

4.2 Describe the topic

.\bin\windows\kafka-topics.bat --zookeeper localhost:2181 --describe --topic msg

4.3 Check which partitions each consumer owns

The Windows command is shown below (on Linux, replace .bat with .sh):

kafka-consumer-groups.bat --bootstrap-server localhost:9092 --group jsGroup --describe

You can see that the 3 consumers are each assigned a different partition.

4.4 Run results

4.4.1 A single consumer (concurrency limit 3) processing in sequence

When concurrency reaches the threshold the consumer goes offline; once it drops again, the consumer comes back online.

4.4.2 ConcurrentConsumer and SimpleConsumer consuming together

Content still to be organized:

Reference: https://www.cnblogs.com/yaohaitao/p/12172867.html
