Implementing Data Consumption with Spring Kafka (Part 3)


In the previous chapter's implementation we optimized data handling by processing messages in batches. One shortcoming remains: the number of handler threads per group is fixed. With several groups, group1 might keep up with only 2 handler threads while group2 needs 10. Because every group gets the same fixed number of threads, some groups keep up with their data while others fall behind.

So the optimization in this chapter is to have all consumers share a single pool of handler threads. The consumers no longer interpret the data; the handler threads decide what to do based on each message's type.

First, the consumer manager. When constructing it we pass in the Kafka-related configuration and the thread pool used for message handling.

import java.util.ArrayList;
import java.util.List;

import org.apache.commons.collections4.CollectionUtils;
import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor;

public class Optimize2ConsumerManager {

    // thread pool that processes consumed messages (shared across groups)
    private ThreadPoolTaskExecutor messageHandleTaskExecutor;
    private List<MyOptimizeSpringKafkaContainer> containerList = new ArrayList<>();
    private List<MyOptimize2MessageListener> optimizeMessageListeners = new ArrayList<>();
    // number of consumers to start
    private int consumerSize = 2;

    private String topic;
    private String groupId;
    private String kafkaAddress;

    public Optimize2ConsumerManager(int consumerSize, String topic, String kafkaAddress,
                                    String groupId, ThreadPoolTaskExecutor messageHandleTaskExecutor) {
        this.consumerSize = consumerSize > 0 ? consumerSize : 2;
        this.topic = topic;
        this.kafkaAddress = kafkaAddress;
        this.groupId = groupId;
        this.messageHandleTaskExecutor = messageHandleTaskExecutor;
    }

    /**
     * Start the configured number of consumers for this group.
     */
    public void startConsumeAndHandle() {
        // start consumerSize consumers
        for (int i = 0; i < consumerSize; i++) {
            // the listener must be an AcknowledgingMessageListener when AckMode is MANUAL_IMMEDIATE or MANUAL
            MyOptimize2MessageListener myMessageListener = new MyOptimize2MessageListener(groupId + "_" + i, messageHandleTaskExecutor);
            optimizeMessageListeners.add(myMessageListener);

            MyOptimizeSpringKafkaContainer mySpringKafkaContainer = new MyOptimizeSpringKafkaContainer(myMessageListener);
            mySpringKafkaContainer.initContainer(kafkaAddress, topic, groupId);
            // start consuming
            mySpringKafkaContainer.startKafkaListen();
            containerList.add(mySpringKafkaContainer);
        }
    }

    public void stopConsumer(String groupId) {

        if (CollectionUtils.isNotEmpty(optimizeMessageListeners)) {
            System.out.println("Message listeners: " + optimizeMessageListeners.size());
            optimizeMessageListeners.forEach(item -> {
                System.out.println("Stopping message listener: " + item.getListenerId());
                item.stopListen();
            });
            optimizeMessageListeners.clear();
        }

        if (CollectionUtils.isNotEmpty(containerList)) {
            System.out.println("Stopping consumers of group: " + groupId);
            try {
                containerList.forEach(MyOptimizeSpringKafkaContainer::stopKafkaListen);
            } catch (Exception e) {
                e.printStackTrace();
            }
            containerList.clear();
        }
    }
}
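With the manager in place, the point of this chapter is easy to show: several groups sharing one handler pool. A minimal sketch of that wiring, assuming two groups; the topic names, group ids, and pool sizes below are illustrative placeholders, not from the original:

// Both managers draw handler threads from the same shared pool,
// so a busy group can use capacity an idle group does not need.
ThreadPoolTaskExecutor sharedExecutor = new ThreadPoolTaskExecutor();
sharedExecutor.setCorePoolSize(10);
sharedExecutor.setMaxPoolSize(10);
sharedExecutor.setQueueCapacity(1000);
sharedExecutor.setThreadNamePrefix("msg-handle-");
sharedExecutor.initialize();

// group1 keeps up with 2 consumers, group2 needs 10
Optimize2ConsumerManager group1 = new Optimize2ConsumerManager(2, "topicA", "ip:9092", "group1", sharedExecutor);
Optimize2ConsumerManager group2 = new Optimize2ConsumerManager(10, "topicB", "ip:9092", "group2", sharedExecutor);
group1.startConsumeAndHandle();
group2.startConsumeAndHandle();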

The consumer container, identical to the implementation in the previous chapter:

import java.util.HashMap;
import java.util.Map;

import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.listener.AbstractMessageListenerContainer;
import org.springframework.kafka.listener.AcknowledgingMessageListener;
import org.springframework.kafka.listener.ContainerProperties;
import org.springframework.kafka.listener.KafkaMessageListenerContainer;

public class MyOptimizeSpringKafkaContainer {
    /**
     * The Kafka message listener container.
     */
    // <String, String> to match the String key/value deserializers configured below
    private KafkaMessageListenerContainer<String, String> container;

    private AcknowledgingMessageListener<String, String> messageListener;

    public MyOptimizeSpringKafkaContainer(AcknowledgingMessageListener<String, String> messageListener) {
        this.messageListener = messageListener;
    }

    /**
     * Initialize the container with the Kafka parameters.
     */
    public void initContainer(String kafkaAddress, String topic, String groupId) {
        // Kafka consumer properties
        Map<String, Object> properties = new HashMap<>(10);
        // broker address list, e.g. ip:9092,ip:9092
        properties.put("bootstrap.servers", kafkaAddress);
        // the consumer group this consumer belongs to
        properties.put("group.id", groupId);
        // disable auto-commit; offsets are committed manually
        properties.put("enable.auto.commit", "false");
        // consume from the latest offset when no committed offset exists
        properties.put("auto.offset.reset", "latest");
        // 10 * 1024 * 1024: raise the fetch limit from the 1M per-partition default to 10M,
        // so a single oversized message cannot stall consumption
        // (the legacy fetch.message.max.bytes name is ignored by the modern Java client)
        properties.put("max.partition.fetch.bytes", "10485760");
        properties.put("fetch.max.bytes", "10485760");
        // key deserializer
        properties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        // value deserializer
        properties.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        properties.put("max.poll.records", "10");
        properties.put("session.timeout.ms", "30000");
        properties.put("request.timeout.ms", "31000");
        properties.put("fetch.max.wait.ms", "1000");

        // create the container properties
        ContainerProperties containerProperties = new ContainerProperties(topic);
        // commit manually and immediately; each AckMode requires a matching GenericMessageListener type
        containerProperties.setAckMode(AbstractMessageListenerContainer.AckMode.MANUAL_IMMEDIATE);
        // use an AcknowledgingMessageListener (required when AckMode is MANUAL_IMMEDIATE or MANUAL)
        containerProperties.setMessageListener(messageListener);

        // create the consumer factory
        ConsumerFactory<String, String> consumerFactory = new DefaultKafkaConsumerFactory<>(properties);
        // create the container
        container = new KafkaMessageListenerContainer<>(consumerFactory, containerProperties);
        container.setAutoStartup(false);
    }

    /**
     * Start listening to Kafka.
     */
    public void startKafkaListen() {
        if (container != null) {
            container.start();
        }
    }

    /**
     * Stop listening to Kafka.
     */
    public void stopKafkaListen() {
        if (container != null) {
            container.stop();
        }
    }
}
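A version note, not from the original: on spring-kafka 2.3 and later, the AckMode enum lives on ContainerProperties rather than AbstractMessageListenerContainer, so with a newer dependency the equivalent line is:

containerProperties.setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);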

The message listener: it consumes the records and hands them to the thread pool, where the handler threads process them.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import lombok.AllArgsConstructor;
import lombok.Data;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.listener.AcknowledgingMessageListener;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor;

@Data
@AllArgsConstructor
public class MyKafkaRecord {
    private int partition;                  // partition
    private long offset;                    // offset within the partition
    private Acknowledgment acknowledgment;  // handle for manually committing this record's offset
}
public class MyOptimize2MessageListener implements AcknowledgingMessageListener<String, String> {

    private String listenerId;

    // written by the stopping thread, read by the consumer thread
    private volatile boolean isRunning = true;

    private ThreadPoolTaskExecutor messageHandleTaskExecutor;

    private List<String> messageList = new ArrayList<>();

    // latest unacknowledged record per partition
    private Map<Integer, MyKafkaRecord> acknowledgmentMap = new HashMap<>();

    // flush to the thread pool once the batch grows past this size
    private int upSize = 5;

    public MyOptimize2MessageListener(String listenerId, ThreadPoolTaskExecutor messageHandleTaskExecutor) {
        this.listenerId = listenerId;
        this.messageHandleTaskExecutor = messageHandleTaskExecutor;
    }

    @Override
    public void onMessage(ConsumerRecord<String, String> data, Acknowledgment acknowledgment) {
        if (!isRunning) {
            return;   // ignore records delivered after the listener was stopped
        }
        addMessage(data, acknowledgment);
    }

    private void addMessage(ConsumerRecord<String, String> data, Acknowledgment acknowledgment) {
        System.out.println(String.format("%s consumed: topic: %s, partition: %s, offset: %s, key: %s, value: %s",
                listenerId, data.topic(), data.partition(), data.offset(), data.key(), data.value()));
        try {
            // buffer the record first
            synchronized (messageList) {   // must synchronize with addMessageToPool on the same lock
                messageList.add(data.offset() + "_" + data.value());
                MyKafkaRecord record = new MyKafkaRecord(data.partition(), data.offset(), acknowledgment);
                acknowledgmentMap.put(data.partition(), record);
            }
            // once the batch is large enough, hand it to the thread pool
            if (messageList.size() > upSize) {
                addMessageToPool();
            }
        } catch (Exception e) {
            System.out.println(String.format("%s failed to buffer record: topic: %s, partition: %s, offset: %s, value: %s",
                    listenerId, data.topic(), data.partition(), data.offset(), data.value()));
        }
    }
    /**
     * Flush the current batch to the thread pool. Can also be called externally
     * to process buffered records before messageList reaches upSize.
     */
    public void addMessageToPool() {
        synchronized (messageList) {
            if (messageList.size() > 0) {
                OptimizeMessageHandleRunnable optimizeMessageHandleRunnable = new OptimizeMessageHandleRunnable(new ArrayList<>(messageList));
                messageHandleTaskExecutor.execute(optimizeMessageHandleRunnable);
                // commit the latest offset of each partition this listener consumed
                acknowledgmentMap.entrySet().forEach(item -> {
                    item.getValue().getAcknowledgment().acknowledge();
                    System.out.println(String.format("%s committed offset, partition: %s, offset: %s", listenerId, item.getKey(), item.getValue().getOffset()));
                });
                acknowledgmentMap.clear();   // reset
                // clear() instead of reassigning, so every thread keeps locking the same list object
                messageList.clear();
            }
        }
    }

    public void stopListen() {
        isRunning = false;
        // flush anything still buffered so its offsets are committed before shutdown
        addMessageToPool();
    }

    public String getListenerId() {
        return listenerId;
    }
}
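One caveat: the batch only flushes once it grows past upSize, so on a quiet topic the last few records can sit in messageList indefinitely. The javadoc above already suggests calling addMessageToPool externally; a minimal sketch of a periodic flusher, assuming a plain JDK scheduler (this class is not part of the original code):

import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ListenerFlushScheduler {

    private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();

    // flush every listener's buffered batch once a second
    public void start(List<MyOptimize2MessageListener> listeners) {
        scheduler.scheduleAtFixedRate(
                () -> listeners.forEach(MyOptimize2MessageListener::addMessageToPool),
                1, 1, TimeUnit.SECONDS);
    }

    public void stop() {
        scheduler.shutdown();
    }
}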

The handler task. The handler threads are responsible for applying different processing to different kinds of data:

import java.util.List;

import org.apache.commons.collections4.CollectionUtils;

public class OptimizeMessageHandleRunnable implements Runnable {

    private List<String> messages;

    public OptimizeMessageHandleRunnable(List<String> messages) {
        this.messages = messages;
    }

    @Override
    public void run() {
        handle();
    }

    /**
     * Handle the data. This is where you distinguish what the data is and which
     * group consumed it; one way is to pass an object in at construction time
     * (see the sketch after this class).
     */
    private void handle() {
        if (CollectionUtils.isNotEmpty(messages)) {
            System.out.println("Handling messages: " + messages);
        }
    }
}
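A minimal sketch of that idea, assuming a hypothetical MessageHandler callback that each group supplies at construction time (none of these names come from the original):

import java.util.List;

// hypothetical per-group callback so the shared pool knows how to process each group's data
interface MessageHandler {
    void handle(List<String> messages);
}

public class DispatchingMessageHandleRunnable implements Runnable {

    private final List<String> messages;
    private final MessageHandler handler;  // injected per group at construction time

    public DispatchingMessageHandleRunnable(List<String> messages, MessageHandler handler) {
        this.messages = messages;
        this.handler = handler;
    }

    @Override
    public void run() {
        if (messages != null && !messages.isEmpty()) {
            handler.handle(messages);  // group-specific processing
        }
    }
}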

Test class:

import java.util.HashMap;
import java.util.Map;

import org.springframework.beans.factory.DisposableBean;
import org.springframework.boot.CommandLineRunner;
import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor;
import org.springframework.stereotype.Component;

@Component
public class KafkaMessageHandleTest implements CommandLineRunner, DisposableBean {

    private Map<String, Optimize2ConsumerManager> optimize2ConsumerManagerMap = new HashMap<>();
    public static ThreadPoolTaskExecutor threadPoolTaskExecutor;

    String topic = "KAFKA_CONSUME_TEST_MESSAGE_TOPIC";
    String groupId = "KAFKA_CONSUME_TEST_GROUP";
    String kafkaAddress = "ip:9092";

    @Override
    public void run(String... args) throws Exception {
        startConsumer();
    }

    public void startConsumer(){
        int size = 3;

        if(threadPoolTaskExecutor == null){
            threadPoolTaskExecutor = new MessageHandleExecutors().allMessageHandleTaskExecutor();
        }

        Optimize2ConsumerManager consumerManager = new Optimize2ConsumerManager(size, topic, kafkaAddress, groupId, threadPoolTaskExecutor);
        consumerManager.startConsumeAndHandle();
        optimize2ConsumerManagerMap.put(groupId, consumerManager);

    }

    public void stopConsumer(){
        optimize2ConsumerManagerMap.get(groupId).stopConsumer(groupId);
        optimize2ConsumerManagerMap.remove(groupId);
    }

    @Override
    public void destroy() throws Exception {
        stopConsumer();
    }
}
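The test relies on a MessageHandleExecutors factory from the earlier chapters that is not shown here. A minimal sketch of what it might look like, assuming a plain ThreadPoolTaskExecutor; the pool sizes are illustrative, not from the original:

import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor;

public class MessageHandleExecutors {

    // one shared pool that every group's listeners submit batches to
    public ThreadPoolTaskExecutor allMessageHandleTaskExecutor() {
        ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor();
        executor.setCorePoolSize(10);       // illustrative sizing
        executor.setMaxPoolSize(20);
        executor.setQueueCapacity(1000);
        executor.setThreadNamePrefix("all-message-handle-");
        executor.initialize();
        return executor;
    }
}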

 
