Original article: https://xuemengran.blog.csdn.net/article/details/103875884
KafkaOffsetMonitor assembly jars:
KafkaOffsetMonitor-assembly-0.4.1-SNAPSHOT.jar
KafkaOffsetMonitor-assembly-0.2.1.jar
package com.test;
import java.util.*;
import java.util.Map.Entry;
import kafka.api.PartitionOffsetRequestInfo;
import kafka.common.TopicAndPartition;
import kafka.javaapi.*;
import kafka.javaapi.consumer.SimpleConsumer;
import kafka.network.BlockingChannel;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
//import pers.xmr.bigdata.basic.EmailSendInfo;
//import pers.xmr.bigdata.basic.EmailSender;
//import pers.xmr.bigdata.basic.Property;
/**
 * Monitors the committed offsets and lag of Kafka consumer groups.
 *
 * @date 2020/1/6 10:04
 */
public class KafkaOffsetTools {
    private final static Logger logger = LoggerFactory.getLogger(KafkaOffsetTools.class);
    /** Comma-separated list of topics to monitor. */
    private static String topics = "policy-topic";
    /** Broker host (placeholder). */
    private static String broker = "IP";
    private static int port = 9092;
    private static String clientId = "clientId";
    /** Comma-separated list of consumer groups to monitor. */
    private static String groups = "topic-group";
    /** bootstrap.servers list (placeholder). */
    private static String servers = "IP:9092";
    public static void main(String[] args) throws InterruptedException {
        // String topics = "policy";
        // String broker = Property.getProperty("broker");
        // String servers = Property.getProperty("servers");
        // String clientId = Property.getProperty("clientId");
        int correlationId = 0;
        while (true) {
            List<String> brokerlist = new ArrayList<>();
            brokerlist.add(broker);
            KafkaOffsetTools kafkaOffsetTools = new KafkaOffsetTools();
            String[] topicArgs = topics.split(",");
            StringBuilder sb = new StringBuilder();
            for (String topic : topicArgs) {
                TreeMap<Integer, PartitionMetadata> metadatas = kafkaOffsetTools.findLeader(brokerlist, port, topic);
                List<TopicAndPartition> partitions = new ArrayList<>();
                for (Entry<Integer, PartitionMetadata> entry : metadatas.entrySet()) {
                    int partition = entry.getKey();
                    TopicAndPartition testPartition = new TopicAndPartition(topic, partition);
                    partitions.add(testPartition);
                }
                // String groups = Property.getProperty(topic);
                String[] groupArgs = groups.split(",");
                sb.setLength(0);
                BlockingChannel channel = new BlockingChannel(broker, port,
                        BlockingChannel.UseDefaultBufferSize(), BlockingChannel.UseDefaultBufferSize(), 5000);
                for (String group : groupArgs) {
                    long sumLogSize = 0L;
                    long sumOffset = 0L;
                    long lag = 0L;
                    KafkaConsumer<String, String> kafkaConsumer = kafkaOffsetTools.getKafkaConsumer(group, topic, servers);
                    for (Entry<Integer, PartitionMetadata> entry : metadatas.entrySet()) {
                        int partition = entry.getKey();
                        try {
                            channel.connect();
                            // NOTE: the response to this request is never read; the committed
                            // offset is actually obtained via kafkaConsumer.committed() below.
                            OffsetFetchRequest fetchRequest = new OffsetFetchRequest(group, partitions, (short) 1, correlationId, clientId);
                            channel.send(fetchRequest.underlying());
                            /*
                             * Committed consumer offset: from Kafka 0.9 on, committed offsets can be
                             * stored in the internal __consumer_offsets topic on the broker (Burrow,
                             * for example, reads them by consuming __consumer_offsets via sarama).
                             */
                            OffsetAndMetadata committed = kafkaConsumer.committed(new TopicPartition(topic, partition));
                            if (committed != null) {
                                sumOffset += committed.offset(); // consumed (committed) offset
                            }
                            /*
                             * The broker-side log-end offset of each partition, i.e. the number of
                             * messages actually produced, also has to be sampled periodically.
                             */
                            String leadBroker = entry.getValue().leader().host();
                            String clientName = "Client_" + topic + "_" + partition;
                            SimpleConsumer consumer = new SimpleConsumer(leadBroker, port, 100000, 64 * 1024, clientName);
                            long readOffset = getLastOffset(consumer, topic, partition, kafka.api.OffsetRequest.LatestTime(), consumer.clientId());
                            sumLogSize += readOffset;
                            logger.info("group: " + group + " " + partition + ":" + readOffset);
                            consumer.close();
                        } catch (Exception e) {
                            logger.error("Failed to fetch offsets for partition " + partition, e);
                            channel.disconnect();
                        }
                    }
                    kafkaConsumer.close();
                    logger.info("logSize:" + sumLogSize);
                    logger.info("offset:" + sumOffset);
                    lag = sumLogSize - sumOffset;
                    logger.info("lag:" + lag);
                    sb.append("Consumer group ").append(group).append(" lag: ").append(lag).append("\n");
                }
                channel.disconnect();
                // String title = topic + " consumer status";
                // EmailSender emailSender = new EmailSender();
                // emailSender.sendMail(title, sb.toString());
            }
            Thread.sleep(60_000); // sample once per minute
        }
    }
    /**
     * Builds a KafkaConsumer for the given group.
     *
     * @param group   consumer group id
     * @param topic   topic name
     * @param servers bootstrap server list
     * @return KafkaConsumer<String, String>
     */
    private KafkaConsumer<String, String> getKafkaConsumer(String group, String topic, String servers) {
        Properties props = new Properties();
        props.put("bootstrap.servers", servers);
        props.put("group.id", group);
        props.put("enable.auto.commit", "true");
        props.put("auto.commit.interval.ms", "1000");
        props.put("max.poll.records", 100);
        props.put("session.timeout.ms", "30000");
        props.put("auto.offset.reset", "earliest");
        props.put("key.deserializer", StringDeserializer.class.getName());
        props.put("value.deserializer", StringDeserializer.class.getName());
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        // subscribe() is not strictly required for committed(); since this consumer
        // never polls, it never actually joins the group.
        consumer.subscribe(Arrays.asList(topic));
        return consumer;
    }

    private KafkaOffsetTools() {
    }
    /**
     * Fetches the latest (log-end) offset of a partition from its leader.
     *
     * @param consumer   SimpleConsumer connected to the partition leader
     * @param topic      topic name
     * @param partition  partition id
     * @param whichTime  timestamp to look up (LatestTime() for the log-end offset)
     * @param clientName client id
     * @return the offset, or 0 on error
     */
    private static long getLastOffset(SimpleConsumer consumer, String topic, int partition, long whichTime, String clientName) {
        TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
        Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<>();
        requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
        OffsetRequest request = new OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName);
        OffsetResponse response = consumer.getOffsetsBefore(request);
        if (response.hasError()) {
            logger.error("Error fetching offset data from the broker. Reason: " + response.errorCode(topic, partition));
            return 0;
        }
        long[] offsets = response.offsets(topic, partition);
        return offsets[0];
    }
    /**
     * Fetches the partition metadata (including the current leader) for every
     * partition of a topic.
     *
     * @param seedBrokers broker list
     * @param port        broker port
     * @param topic       topic name
     * @return TreeMap<Integer, PartitionMetadata> keyed by partition id
     */
    private TreeMap<Integer, PartitionMetadata> findLeader(List<String> seedBrokers, int port, String topic) {
        TreeMap<Integer, PartitionMetadata> map = new TreeMap<>();
        for (String broker : seedBrokers) {
            SimpleConsumer consumer = null;
            try {
                consumer = new SimpleConsumer(broker, port, 100000, 64 * 1024, "leaderLookup" + new Date().getTime());
                List<String> topics = Collections.singletonList(topic);
                TopicMetadataRequest req = new TopicMetadataRequest(topics);
                TopicMetadataResponse resp = consumer.send(req);
                List<TopicMetadata> metaData = resp.topicsMetadata();
                for (TopicMetadata item : metaData) {
                    for (PartitionMetadata part : item.partitionsMetadata()) {
                        map.put(part.partitionId(), part);
                    }
                }
            } catch (Exception e) {
                logger.error("Error communicating with broker [" + broker + "] to find leader for [" + topic + "]. Reason: " + e);
            } finally {
                if (consumer != null) {
                    consumer.close();
                }
            }
        }
        return map;
    }
}
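
For reference, SimpleConsumer and BlockingChannel belong to the old Scala client and were removed from later Kafka releases. Below is a minimal sketch of the same per-group lag calculation using only the modern Java client; it assumes kafka-clients 0.10.1+ (where KafkaConsumer#endOffsets exists), and the broker address, topic, and group names are the same placeholders as above, not real values:

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

public class KafkaLagSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "IP:9092");   // placeholder, same as above
        props.put("group.id", "topic-group");        // the group whose lag we inspect
        props.put("enable.auto.commit", "false");    // monitoring only: never commit
        props.put("key.deserializer", StringDeserializer.class.getName());
        props.put("value.deserializer", StringDeserializer.class.getName());
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            String topic = "policy-topic";           // placeholder topic
            List<TopicPartition> tps = new ArrayList<>();
            for (PartitionInfo p : consumer.partitionsFor(topic)) {
                tps.add(new TopicPartition(topic, p.partition()));
            }
            // One call replaces the whole SimpleConsumer/OffsetRequest round trip.
            Map<TopicPartition, Long> logEnd = consumer.endOffsets(tps);
            long lag = 0L;
            for (TopicPartition tp : tps) {
                OffsetAndMetadata committed = consumer.committed(tp); // null if never committed
                long consumed = (committed == null) ? 0L : committed.offset();
                lag += logEnd.get(tp) - consumed;
            }
            System.out.println("group lag: " + lag);
        }
    }
}

Because this consumer never subscribes or polls, it does not join the group and cannot trigger a rebalance; endOffsets() and committed() are plain metadata reads, so the sketch has no side effects on the group being monitored.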