import java.time.Duration;
import java.util.*;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
/**
 * Consumes a fixed set of Kafka topics and, every five minutes, prints the
 * number of records received per topic during that window.
 *
 * <p>Fixes over the previous revision:
 * <ul>
 *   <li>{@code position()} was called on partitions <em>before</em>
 *       {@code assign()}, which throws {@code IllegalStateException} at
 *       startup. We now use group-managed {@code subscribe()} and count the
 *       records actually delivered, so no offset bookkeeping is needed.</li>
 *   <li>Per-topic counts are reset at each window rollover, so the report is
 *       genuinely "consumed in the last 5 minutes" rather than a
 *       never-resetting sum of absolute partition offsets.</li>
 *   <li>The deprecated {@code poll(long)} with a 5-minute blocking timeout is
 *       replaced by {@code poll(Duration)} with a short timeout, so window
 *       rollover is checked promptly and a single poll cannot approach
 *       {@code max.poll.interval.ms}.</li>
 *   <li>The consumer is closed via try-with-resources.</li>
 * </ul>
 */
public class KafkaConsumerMetrics {

    /** Length of one reporting window, in milliseconds. */
    private static final long FIVE_MINUTES = 5 * 60 * 1000L;

    /** Short poll timeout so the window-rollover check runs frequently. */
    private static final Duration POLL_TIMEOUT = Duration.ofSeconds(1);

    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");
        props.put("group.id", "test-group");
        props.put("enable.auto.commit", "true");
        props.put("auto.commit.interval.ms", "1000");
        props.put("session.timeout.ms", "30000");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        List<String> topics = Arrays.asList("topic1", "topic2", "topic3", "topic4", "topic5",
                "topic6", "topic7", "topic8", "topic9", "topic10");

        // try-with-resources guarantees the consumer's network resources are
        // released even if poll() throws.
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            // Group-managed subscription: the broker assigns partitions and
            // rebalances automatically. This avoids the original bug of calling
            // position() on partitions that were not yet assigned.
            consumer.subscribe(topics);

            // Records received per topic within the current 5-minute window.
            Map<String, Long> countsThisWindow = new HashMap<>();
            long windowStart = System.currentTimeMillis();

            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(POLL_TIMEOUT);
                for (ConsumerRecord<String, String> record : records) {
                    countsThisWindow.merge(record.topic(), 1L, Long::sum);
                }

                long now = System.currentTimeMillis();
                if (now - windowStart >= FIVE_MINUTES) {
                    long windowTotal = 0;
                    for (String topic : topics) {
                        long count = countsThisWindow.getOrDefault(topic, 0L);
                        windowTotal += count;
                        System.out.println("Total data consumed for " + topic
                                + " in the last 5 minutes: " + count);
                    }
                    if (windowTotal == 0) {
                        // Replaces the old per-empty-poll message, which with a
                        // short poll timeout would otherwise print once per second.
                        System.out.println("No new records received in the last "
                                + (now - windowStart) / 1000 + " seconds.");
                    }
                    // Reset so the next report covers only the next window.
                    countsThisWindow.clear();
                    windowStart = now;
                }
            }
        }
    }
}
该代码使用 KafkaConsumer 订阅一组主题,统计每个主题在每个 5 分钟窗口内实际收到的记录条数,并在窗口结束时打印各主题的消费量(每个窗口结束后计数清零,因此报告的确是"最近 5 分钟"的消费量,而非自启动以来的累计偏移量)。