Kafka Java API (legacy 0.8.x javaapi)
Producer example
package com.wxa.storm.kafka;
import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;
import java.util.Properties;
import java.util.UUID;
public class simpleProducer {
    /**
     * Demo producer for the legacy Kafka (0.8.x) javaapi producer.
     * Sends 99,999 keyed String messages to the "ordermsg" topic.
     */
    public static void main(String[] args) {
        Properties props = new Properties();
        // The old producer API connects to the brokers directly (the consumer,
        // by contrast, bootstraps via ZooKeeper).
        props.put("metadata.broker.list", "hadoop01:9092,hadoop02:9092");
        // Required when sending String payloads: the default encoder only
        // accepts byte[], so String messages would fail at send time.
        props.put("serializer.class", "kafka.serializer.StringEncoder");
        Producer<String, String> producer = new Producer<String, String>(new ProducerConfig(props));
        try {
            for (int messageNo = 1; messageNo < 100000; messageNo++) {
                // The partition key (second argument) is what a custom partitioner
                // (e.g. MyLogPartitioner) uses to route the message to a partition.
                producer.send(new KeyedMessage<String, String>(
                        "ordermsg", messageNo + "", "appid" + UUID.randomUUID() + "itcast"));
            }
        } finally {
            // Flush buffers and release broker connections; the original leaked them.
            producer.close();
        }
    }
}
Consumer example
package com.wxa.storm.kafka;
import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;

import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
public class simpleConsumer {
    /**
     * Builds a high-level consumer connector (legacy Kafka 0.8.x API).
     * Note the consumer bootstraps via ZooKeeper, whereas the producer
     * needs the broker list directly.
     */
    private static ConsumerConnector createconn() {
        Properties props = new Properties();
        props.put("group.id", "dashujujiagoushi");
        props.put("zookeeper.connect", "hadoop01:2181,hadoop02:2181");
        return Consumer.createJavaConsumerConnector(new ConsumerConfig(props));
    }

    /**
     * Demo consumer: prints every message on the "ordermsg" topic.
     * Requests 4 streams (matching the topic's 4 partitions) and drains
     * each stream on its own thread.
     */
    public static void main(String[] args) {
        ConsumerConnector consumer = createconn();
        Map<String, Integer> topicCountmap = new HashMap<>();
        String topic = "ordermsg";
        // 4 streams correspond to the topic's 4 partitions.
        topicCountmap.put(topic, 4);
        Map<String, List<KafkaStream<byte[], byte[]>>> messageStreams =
                consumer.createMessageStreams(topicCountmap);
        // Consume EVERY stream. The original read only stream 0, so messages
        // routed to the other three streams were never drained and their fetch
        // queues would eventually fill up and stall the consumer.
        for (final KafkaStream<byte[], byte[]> stream : messageStreams.get(topic)) {
            new Thread(new Runnable() {
                @Override
                public void run() {
                    ConsumerIterator<byte[], byte[]> iterator = stream.iterator();
                    while (iterator.hasNext()) {
                        // Decode explicitly as UTF-8; new String(bytes) would
                        // fall back to the platform default charset.
                        System.out.println(
                                new String(iterator.next().message(), StandardCharsets.UTF_8));
                    }
                }
            }).start();
        }
    }
}