/**
 * Sums the end offsets of every partition of the given topic.
 *
 * Background: producers and consumers talk only to the partition leader; the other
 * replicas act as followers that copy data from the leader. Each partition has one
 * server acting as leader handling all reads/writes, while followers only replicate.
 * A single server may host several partitions of one topic.
 *
 * @param topic the topic whose total end offset is requested
 * @return the sum of end offsets over all partitions, or 0 if the topic is unknown,
 *         the cluster reports no topics, or an error occurs while talking to the broker
 */
public long getTopicOffset(String topic) {
    long sum = 0;
    List<TopicPartition> topicPartitionList = new ArrayList<>();
    // props configures Integer/String deserializers (see initSubscribeInfo), so the
    // consumer can be typed; try-with-resources closes it even on failure.
    try (KafkaConsumer<Integer, String> consumer = new KafkaConsumer<>(props)) {
        Map<String, List<PartitionInfo>> topicPartition = consumer.listTopics();
        if (null == topicPartition || topicPartition.isEmpty()) {
            return 0L;
        }
        List<PartitionInfo> partitionInfos = topicPartition.get(topic);
        // Guard against an unknown topic: get() returns null and the size() call
        // below would otherwise throw a NullPointerException.
        if (null == partitionInfos) {
            logger.info("No partitions found for topic:" + topic);
            return 0L;
        }
        logger.info("partitionInfo Size:" + partitionInfos.size());
        for (PartitionInfo item : partitionInfos) {
            logger.info("partitionInfos:" + item);
            topicPartitionList.add(new TopicPartition(topic, item.partition()));
        }
        Map<TopicPartition, Long> endOffsets = consumer.endOffsets(topicPartitionList);
        for (Long endOffset : endOffsets.values()) {
            sum += endOffset;
        }
        logger.info("Offset sum:" + sum);
    } catch (Exception e) {
        // Pass the exception as the throwable argument so the stack trace is logged,
        // instead of concatenating it into the message (which loses the trace).
        logger.error("Error communicating with Broker to find Leader for [" + topic + ", ] Reason: ", e);
    }
    return sum;
}
/**
 * Builds the consumer {@code Properties} used to connect to the Kafka cluster,
 * storing them in the {@code props} field. Values are read from the project's
 * {@code KafkaProperties} singleton with sensible fall-back defaults.
 */
public void initSubscribeInfo() {
    props = new Properties();
    KafkaProperties kafkaProc = KafkaProperties.getInstance();
    // Broker connection address
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,
        kafkaProc.getValues(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"));
    // Timestamp suffix keeps concurrent lookup clients distinguishable on the broker.
    props.put(ConsumerConfig.CLIENT_ID_CONFIG, "leaderLookup" + System.currentTimeMillis());
    // Session timeout
    props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "100000");
    // Deserializer class for message keys (*)
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
        "org.apache.kafka.common.serialization.IntegerDeserializer");
    // Deserializer class for message values (*)
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
        "org.apache.kafka.common.serialization.StringDeserializer");
    // Security protocol type
    props.put(KafkaConsts.securityProtocol, kafkaProc.getValues(KafkaConsts.securityProtocol, "PLAINTEXT"));
    // Kerberos service name
    props.put(KafkaConsts.saslKerberosServiceName, kafkaProc.getValues(KafkaConsts.saslKerberosServiceName, "kafka"));
    // Kerberos domain name
    props.put(KafkaConsts.kerberosDomainName, kafkaProc.getValues(KafkaConsts.kerberosDomainName, "hadoop.hadoop.com"));
}