本文使用线程池来处理消费者任务,并使用 ConsumerRebalanceListener 在分区再均衡时保存和恢复分区偏移量。

import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.WakeupException;

import java.time.Duration;
import java.util.*;
import java.util.concurrent.*;
  
/**
 * Runs a pool of Kafka consumer worker threads and periodically reports how many
 * records each topic consumed in the last five-minute window.
 *
 * <p>Each worker owns its OWN {@link KafkaConsumer} instance: {@code KafkaConsumer}
 * is not thread-safe, so a single instance must never be shared across threads.
 * Offsets are committed manually ({@code enable.auto.commit=false}) using the real
 * record offsets, and are re-committed on partition revocation so a rebalance does
 * not lose progress.
 */
public class KafkaConsumerMetrics {
    /** Length of one reporting window, in milliseconds. */
    private static final long FIVE_MINUTES = 5 * 60 * 1000L;
    /** Number of worker threads; each gets its own consumer in the same group. */
    private static final int NUM_CONSUMERS = 3;

    private final Properties consumerProps;
    private final ExecutorService executor;
    // Live consumers, tracked only so the shutdown hook can wake them up.
    private final List<KafkaConsumer<String, String>> consumers;

    public KafkaConsumerMetrics() {
        consumerProps = new Properties();
        consumerProps.put("bootstrap.servers", "localhost:9092");
        consumerProps.put("group.id", "test-group");
        // Offsets are committed manually after processing, never automatically.
        consumerProps.put("enable.auto.commit", "false");
        consumerProps.put("auto.offset.reset", "earliest");
        consumerProps.put("session.timeout.ms", "30000");
        consumerProps.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        consumerProps.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        executor = Executors.newFixedThreadPool(NUM_CONSUMERS);
        consumers = new CopyOnWriteArrayList<>();
    }

    /**
     * Starts the worker threads and blocks until they terminate.
     * A JVM shutdown hook wakes each consumer so workers commit and close cleanly.
     */
    public void run() {
        List<String> topics = Arrays.asList("topic1", "topic2", "topic3", "topic4", "topic5",
                "topic6", "topic7", "topic8", "topic9", "topic10");

        for (int i = 0; i < NUM_CONSUMERS; i++) {
            executor.execute(new ConsumeRecords(topics));
        }

        // On JVM shutdown, interrupt every blocked poll() via wakeup() so each
        // worker's finally block can commit pending offsets and close its consumer.
        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
            consumers.forEach(KafkaConsumer::wakeup);
            executor.shutdown();
        }));

        try {
            // Block forever (until shutdown) instead of sleeping with a swallowed interrupt.
            executor.awaitTermination(Long.MAX_VALUE, TimeUnit.MILLISECONDS);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // preserve the interrupt status
        }
    }

    /**
     * One worker: owns a private consumer, tracks the next offset to commit per
     * partition, and prints per-topic record counts once per five-minute window.
     */
    private class ConsumeRecords implements Runnable {
        private final List<String> topics;
        // Next offset to consume per partition; committed manually. ConcurrentHashMap
        // because the rebalance listener callbacks may touch it on the same thread
        // mid-poll while the main loop also updates it.
        private final Map<TopicPartition, OffsetAndMetadata> pendingOffsets = new ConcurrentHashMap<>();
        private KafkaConsumer<String, String> consumer;

        ConsumeRecords(List<String> topics) {
            this.topics = topics;
        }

        @Override
        public void run() {
            consumer = new KafkaConsumer<>(consumerProps);
            consumers.add(consumer);
            consumer.subscribe(topics, new ConsumerRebalanceListener() {
                @Override
                public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
                    // Commit processed offsets synchronously before losing ownership,
                    // so the next owner resumes without duplicates.
                    if (!pendingOffsets.isEmpty()) {
                        consumer.commitSync(pendingOffsets);
                    }
                }

                @Override
                public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
                    for (TopicPartition partition : partitions) {
                        OffsetAndMetadata saved = pendingOffsets.get(partition);
                        // Seek only when we actually have a saved position; otherwise
                        // let auto.offset.reset / the committed group offset apply.
                        if (saved != null) {
                            consumer.seek(partition, saved.offset());
                        }
                    }
                }
            });

            // Records counted per topic within the current reporting window.
            Map<String, Long> windowCounts = new HashMap<>();
            long windowStart = System.currentTimeMillis();
            long lastRecordTime = windowStart;

            try {
                while (true) {
                    // Short poll so the window check below runs regularly; the old
                    // 5-minute poll timeout starved the reporting logic.
                    ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));

                    for (ConsumerRecord<String, String> record : records) {
                        TopicPartition tp = new TopicPartition(record.topic(), record.partition());
                        // Commit convention: the NEXT offset to read, hence offset + 1.
                        pendingOffsets.put(tp, new OffsetAndMetadata(record.offset() + 1));
                        windowCounts.merge(record.topic(), 1L, Long::sum);
                        lastRecordTime = System.currentTimeMillis();
                    }

                    long now = System.currentTimeMillis();
                    if (now - windowStart >= FIVE_MINUTES) {
                        if (windowCounts.isEmpty()) {
                            System.out.println("No new records received in the last "
                                    + (now - lastRecordTime) / 1000 + " seconds.");
                        } else {
                            windowCounts.forEach((topic, count) -> System.out.println(
                                    "Total data consumed for " + topic + " in the last 5 minutes: " + count));
                        }
                        // Async commit in the steady state to avoid blocking the poll loop.
                        if (!pendingOffsets.isEmpty()) {
                            consumer.commitAsync(pendingOffsets, null);
                        }
                        windowCounts.clear();
                        windowStart = now;
                    }
                }
            } catch (WakeupException e) {
                // Expected: thrown by poll() after shutdown hook calls wakeup(). Fall through.
            } finally {
                try {
                    // Final synchronous commit so no processed record is re-delivered.
                    if (!pendingOffsets.isEmpty()) {
                        consumer.commitSync(pendingOffsets);
                    }
                } finally {
                    consumer.close();
                }
            }
        }
    }

    public static void main(String[] args) {
        new KafkaConsumerMetrics().run();
    }
}

我们使用线程池来处理消费者任务,并通过 ConsumerRebalanceListener 在再均衡时保存和恢复分区偏移量。需要注意两点:第一,KafkaConsumer 本身不是线程安全的,多个线程不能共享同一个消费者实例,正确做法是每个线程持有自己的消费者;第二,commitSync/commitAsync 要求 Map&lt;TopicPartition, OffsetAndMetadata&gt; 类型的偏移量,且提交的应是"下一条待读取记录"的真实偏移量(record.offset() + 1),而不是简单的计数。另外,我们将 enable.auto.commit 设为 false 以手动提交偏移量;在正常循环中使用 commitAsync 避免阻塞,在分区被回收和关闭消费者时改用 commitSync 保证提交完成。在 seek 恢复位置前应先检查该分区是否存在已保存的偏移量,以避免自动拆箱导致的 NullPointerException。

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值