package com.hashleaf.kafka;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.message.MessageAndMetadata;
import kafka.serializer.Decoder;
import kafka.utils.VerifiableProperties;

/**
 * Custom Kafka message consumer (uses the 0.8.x high-level consumer API).
 * @author xiaojf 294825811@qq.com
 * @since 2015-7-15 11:10:28 PM
 */
public classMyConsumer {private finalConsumerConnector consumer;publicMyConsumer(){
Properties originalProps= newProperties();//zookeeper 配置,通过zk 可以负载均衡的获取broker
originalProps.put("zookeeper.connect", "192.168.66.2:2181,192.168.66.3:2181,192.168.66.4:2181");//group 代表一个消费组
originalProps.put("group.id", "hashleaf-group");//zk连接超时时间
originalProps.put("zookeeper.session.timeout.ms", "10000");//zk同步时间
originalProps.put("zookeeper.sync.time.ms", "200");//自动提交间隔时间
originalProps.put("auto.commit.interval.ms", "1000");//消息日志自动偏移量,防止宕机后数据无法读取
originalProps.put("auto.offset.reset", "smallest");//序列化类
originalProps.put("serializer.class", "kafka.serializer.StringEncoder");//构建consumer connection 对象
consumer = Consumer.createJavaConsumerConnector(newConsumerConfig(originalProps));
}public voidconsume(){//指定需要订阅的topic
Map topicCountMap = new HashMap();
topicCountMap.put(MyProducer.HASHLEAF_KAFKA_TOPIC,new Integer(5));//指定key的编码格式
Decoder keyDecoder = new kafka.serializer.StringDecoder(newVerifiableProperties());//指定value的编码格式
Decoder valueDecoder = new kafka.serializer.StringDecoder(newVerifiableProperties());//获取topic 和 接受到的stream 集合
Map>> map =consumer.createMessageStreams(topicCountMap, keyDecoder, valueDecoder);//根据指定的topic 获取 stream 集合
List> kafkaStreams =map.get(MyProducer.HASHLEAF_KAFKA_TOPIC);
ExecutorService executor= Executors.newFixedThreadPool(4);//因为是多个 message组成 message set , 所以要对stream 进行拆解遍历
for(final KafkaStreamkafkaStream : kafkaStreams){
executor.submit(newRunnable() {
@Overridepublic voidrun() {//拆解每个的 stream
ConsumerIterator iterator =kafkaStream.iterator();while(iterator.hasNext()) {//messageAndMetadata 包括了 message , topic , partition等metadata信息
MessageAndMetadata messageAndMetadata =iterator.next();
System.out.println("message : " + messageAndMetadata.message() + " partition : " +messageAndMetadata.partition());
}
}
});
}
}public static voidmain(String[] args) {newMyConsumer().consume();
}
}