Kafka: getting the latest partition offsets

To read the latest offset of every partition of a Kafka topic, use the low-level SimpleConsumer API: first look up each partition's leader broker with a TopicMetadataRequest, then send that leader an OffsetRequest for LatestTime(). The full tool:

import java.util.ArrayList;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.TreeMap;

import kafka.api.PartitionOffsetRequestInfo;
import kafka.common.TopicAndPartition;
import kafka.javaapi.OffsetResponse;
import kafka.javaapi.PartitionMetadata;
import kafka.javaapi.TopicMetadata;
import kafka.javaapi.TopicMetadataRequest;
import kafka.javaapi.TopicMetadataResponse;
import kafka.javaapi.consumer.SimpleConsumer;

public class KafkaOffsetTools {

    public static void main(String[] args) {
        // Defaults; override with: java KafkaOffsetTools <topic> <broker-host> <port>
        String topic = "dirkz";
        String seed = "118.26.148.18";
        int port = 9092;
        if (args.length >= 3) {
            topic = args[0];
            seed = args[1];
            port = Integer.valueOf(args[2]);
        }

        List<String> seeds = new ArrayList<String>();
        seeds.add(seed);

        KafkaOffsetTools kot = new KafkaOffsetTools();
        TreeMap<Integer, PartitionMetadata> metadatas = kot.findLeader(seeds, port, topic);

        // Ask each partition's leader for its latest (log-end) offset.
        long sum = 0;
        for (Entry<Integer, PartitionMetadata> entry : metadatas.entrySet()) {
            int partition = entry.getKey();
            String leadBroker = entry.getValue().leader().host();
            String clientName = "Client_" + topic + "_" + partition;
            SimpleConsumer consumer = new SimpleConsumer(leadBroker, port, 100000,
                    64 * 1024, clientName);
            long readOffset = getLastOffset(consumer, topic, partition,
                    kafka.api.OffsetRequest.LatestTime(), clientName);
            sum += readOffset;
            System.out.println(partition + ":" + readOffset);
            consumer.close();
        }
        System.out.println("Total: " + sum);
    }

    public static long getLastOffset(SimpleConsumer consumer, String topic,
            int partition, long whichTime, String clientName) {
        TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
        Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
                new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
        // Request at most one offset at or before whichTime; with LatestTime()
        // that is the partition's log-end offset.
        requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
        kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(
                requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName);
        OffsetResponse response = consumer.getOffsetsBefore(request);
        if (response.hasError()) {
            System.out.println("Error fetching offset data from the broker. Reason: "
                    + response.errorCode(topic, partition));
            return 0;
        }
        long[] offsets = response.offsets(topic, partition);
        return offsets[0];
    }

    private TreeMap<Integer, PartitionMetadata> findLeader(List<String> a_seedBrokers,
            int a_port, String a_topic) {
        TreeMap<Integer, PartitionMetadata> map = new TreeMap<Integer, PartitionMetadata>();
        for (String seed : a_seedBrokers) {
            SimpleConsumer consumer = null;
            try {
                consumer = new SimpleConsumer(seed, a_port, 100000, 64 * 1024,
                        "leaderLookup" + new Date().getTime());
                List<String> topics = Collections.singletonList(a_topic);
                TopicMetadataRequest req = new TopicMetadataRequest(topics);
                TopicMetadataResponse resp = consumer.send(req);
                // Record the leader metadata for every partition of the topic.
                for (TopicMetadata item : resp.topicsMetadata()) {
                    for (PartitionMetadata part : item.partitionsMetadata()) {
                        map.put(part.partitionId(), part);
                    }
                }
            } catch (Exception e) {
                System.out.println("Error communicating with broker [" + seed
                        + "] to find leader for [" + a_topic + "]. Reason: " + e);
            } finally {
                if (consumer != null)
                    consumer.close();
            }
        }
        return map;
    }
}
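The SimpleConsumer and kafka.javaapi classes above belong to the legacy Scala client (Kafka 0.8.x), which has been removed from recent Kafka releases. For comparison, here is a minimal sketch of the same lookup with the modern Java client's endOffsets() (available since 0.10.1); the broker address and topic name below are placeholders, not values from the original setup:

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;

public class KafkaEndOffsets {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Placeholder broker address; replace with your cluster.
        props.put("bootstrap.servers", "localhost:9092");
        props.put("key.deserializer",
                "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer",
                "org.apache.kafka.common.serialization.StringDeserializer");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            // Discover all partitions of the topic, then fetch their log-end offsets.
            List<TopicPartition> partitions = new ArrayList<>();
            for (PartitionInfo p : consumer.partitionsFor("dirkz")) {
                partitions.add(new TopicPartition(p.topic(), p.partition()));
            }
            long sum = 0;
            for (Map.Entry<TopicPartition, Long> e : consumer.endOffsets(partitions).entrySet()) {
                System.out.println(e.getKey().partition() + ":" + e.getValue());
                sum += e.getValue();
            }
            System.out.println("Total: " + sum);
        }
    }
}

With the modern client, leader discovery and retries happen inside endOffsets(), so the manual findLeader step disappears entirely.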
