package com.cuicui.kafkademon;

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import kafka.api.FetchRequest;
import kafka.api.FetchRequestBuilder;
import kafka.api.PartitionOffsetRequestInfo;
import kafka.cluster.Broker;
import kafka.common.TopicAndPartition;
import kafka.javaapi.FetchResponse;
import kafka.javaapi.OffsetRequest;
import kafka.javaapi.OffsetResponse;
import kafka.javaapi.PartitionMetadata;
import kafka.javaapi.TopicMetadata;
import kafka.javaapi.TopicMetadataRequest;
import kafka.javaapi.TopicMetadataResponse;
import kafka.javaapi.consumer.SimpleConsumer;
import kafka.javaapi.message.ByteBufferMessageSet;
import kafka.message.Message;
import kafka.message.MessageAndOffset;

/**
 * offset自己维护 目标topic、partition均由自己分配
 *
 * @author 崔磊
 * @date 2015-11-04 11:44:15
 */
public classMySimpleConsumer {public static voidmain(String[] args) {newMySimpleConsumer().consume();
}/*** 消费消息*/
public voidconsume() {int partition = 0;//找到leader
Broker leaderBroker =findLeader(KafkaProperties.BROKER_CONNECT, KafkaProperties.TOPIC, partition);//从leader消费
SimpleConsumer simpleConsumer =
new SimpleConsumer(leaderBroker.host(), leaderBroker.port(), 20000, 10000, "mySimpleConsumer");long startOffet = 1;int fetchSize = 1000;while (true) {long offset =startOffet;//添加fetch指定目标tipic,分区,起始offset及fetchSize(字节),可以添加多个fetch
FetchRequest req =
new FetchRequestBuilder().addFetch(KafkaProperties.TOPIC, 0, startOffet, fetchSize).build();//拉取消息
FetchResponse fetchResponse =simpleConsumer.fetch(req);
ByteBufferMessageSet messageSet=fetchResponse.messageSet(KafkaProperties.TOPIC, partition);for(MessageAndOffset messageAndOffset : messageSet) {
Message mess=messageAndOffset.message();
ByteBuffer payload=mess.payload();byte[] bytes = new byte[payload.limit()];
payload.get(bytes);
String msg= newString(bytes);
offset=messageAndOffset.offset();
System.out.println("partition : " + 3 + ", offset : " + offset + " mess : " +msg);
}//继续消费下一批
startOffet = offset + 1;
}
}/*** 找到制定分区的leader broker
*
*@parambrokerHosts broker地址,格式为:“host1:port1,host2:port2,host3:port3”
*@paramtopic topic
*@parampartition 分区
*@return
*/
public Broker findLeader(String brokerHosts, String topic, intpartition) {
Broker leader=findPartitionMetadata(brokerHosts, topic, partition).leader();
System.out.println(String.format("Leader tor topic %s, partition %d is %s:%d", topic, partition, leader.host(),
leader.port()));returnleader;
}/*** 找到指定分区的元数据
*
*@parambrokerHosts broker地址,格式为:“host1:port1,host2:port2,host3:port3”
*@paramtopic topic
*@parampartition 分区
*@return元数据*/
private PartitionMetadata findPartitionMetadata(String brokerHosts, String topic, intpartition) {
PartitionMetadata returnMetaData= null;for (String brokerHost : brokerHosts.split(",")) {
SimpleConsumer consumer= null;
String[] splits= brokerHost.split(":");
consumer= new SimpleConsumer(splits[0], Integer.valueOf(splits[1]), 100000, 64 * 1024, "leaderLookup");
List topics =Collections.singletonList(topic);
TopicMetadataRequest request= newTopicMetadataRequest(topics);
TopicMetadataResponse response=consumer.send(request);
List topicMetadatas =response.topicsMetadata();for(TopicMetadata topicMetadata : topicMetadatas) {for(PartitionMetadata PartitionMetadata : topicMetadata.partitionsMetadata()) {if (PartitionMetadata.partitionId() ==partition) {
returnMetaData=PartitionMetadata;
}
}
}if (consumer != null)
consumer.close();
}returnreturnMetaData;
}/*** 根据时间戳找到某个客户端消费的offset
*
*@paramconsumer SimpleConsumer
*@paramtopic topic
*@parampartition 分区
*@paramclientID 客户端的ID
*@paramwhichTime 时间戳
*@returnoffset*/
public long getLastOffset(SimpleConsumer consumer, String topic, int partition, String clientID, longwhichTime) {
TopicAndPartition topicAndPartition= newTopicAndPartition(topic, partition);
Map requestInfo =
new HashMap();
requestInfo.put(topicAndPartition,new PartitionOffsetRequestInfo(whichTime, 1));
OffsetRequest request= newOffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientID);
OffsetResponse response=consumer.getOffsetsBefore(request);long[] offsets =response.offsets(topic, partition);return offsets[0];
}
}