项目至少包含kafka-clients-0.10.1.1.jar(主要是给在windows下运行kafka用,相当于windows是客户端) 和kafka_2.10-0.10.1.1.jar及scala-library-2.10.6.jar,metrics-core-2.2.0.jar
如果要结合storm,则还要包含storm-core-0.10.2.jar
项目包含文件
1)KafkaProperties属性文件
package com.cloudy.kafka;
/**
 * Central place for the Kafka / ZooKeeper connection settings and topic
 * names shared by the producer and consumer examples.
 *
 * Note: fields declared in an interface are implicitly public static final,
 * so the modifiers are omitted here.
 */
public interface KafkaProperties
{
    // --- Single-node development endpoints ---
    String zkConnect = "192.168.1.116:2181";
    String broker_list = "192.168.1.116:9092" ;
    String hbase_zkList = "192.168.1.116:2181";

    // --- Alternative multi-node cluster endpoints, kept for reference ---
    // String zkConnect = "192.168.113.80:2181,192.168.113.81:2181,192.168.113.82:2181";
    // String broker_list = "192.168.113.80:9092,192.168.113.81:9092,192.168.113.82:9092" ;
    // String hbase_zkList = "10.161.164.202,10.161.164.203" ;

    // --- Consumer group and topic names ---
    String groupId = "group1";
    String topic = "log";
    String Order_topic = "track_log";
    String Log_topic = "log3";
    String Order = "order";

    // --- Legacy settings, currently unused ---
    // String kafkaServerURL = "localhost";
    // int kafkaServerPort = 9092;
    // int kafkaProducerBufferSize = 64*1024;
    // int connectionTimeOut = 100000;
    // int reconnectInterval = 10000;
    // String topic2 = "topic2";
    // String topic3 = "topic3";
    // String clientId = "SimpleConsumerDemoClient";
}
2)Producer 文件:生产者,发送消息到broker也就是kafka服务器,当然要先启动
libexec/bin> ./kafka-server-start.sh ../config/server.properties
以及创建 ./kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic log
package com.cloudy.kafka;
import java.util.Properties;
import java.util.Random;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;
/**
 * Example producer: publishes synthetic click-log records to a Kafka topic.
 * Each record is a tab-separated line: {@code url \t session_id \t time \t province_id}.
 */
public class Producer extends Thread {
    private final kafka.javaapi.producer.Producer<Integer, String> producer;
    private final String topic;
    private final Properties props = new Properties();

    /**
     * Creates a producer connected to the broker list from {@link KafkaProperties}.
     *
     * @param topic the Kafka topic the messages are published to
     */
    public Producer(String topic) {
        props.put("serializer.class", "kafka.serializer.StringEncoder"); // messages are plain strings
        // Reuse the shared constant instead of duplicating the broker address here.
        props.put("metadata.broker.list", KafkaProperties.broker_list);
        producer = new kafka.javaapi.producer.Producer<Integer, String>(
                new ProducerConfig(props));
        this.topic = topic;
    }

    /** Sends 500 random records, then closes the producer to release its resources. */
    public void run() {
        Random random = new Random();
        String[] hosts = { "www.taobao.com" };
        String[] session_id = { "ABYH6Y4V4SCVXTG6DPB4VH9U123", "XXYH6YCGFJYERTT834R52FDXV9U34", "BBYH61456FGHHJ7JL89RG5VV9UYU7",
                "CYYH6Y2345GHI899OFG4V9U567", "VVVYH6Y4V4SFXZ56JIPDPB4V678" };
        String[] time = { "2014-01-07 08:40:50", "2014-01-07 08:40:51", "2014-01-07 08:40:52", "2014-01-07 08:40:53",
                "2014-01-07 09:40:49", "2014-01-07 10:40:49", "2014-01-07 11:40:49", "2014-01-07 12:40:49" };
        String[] province_id = { "1","2","3","4","5","6" };
        try {
            for (int i = 0; i < 500; i++) {
                // Bound each index by the array's length so every element can be
                // chosen. The original code used nextInt(5) for province_id even
                // though the array has 6 entries, so "6" was never produced.
                String message = hosts[0]
                        + "\t" + session_id[random.nextInt(session_id.length)]
                        + "\t" + time[random.nextInt(time.length)]
                        + "\t" + province_id[random.nextInt(province_id.length)];
                producer.send(new KeyedMessage<Integer, String>(topic, message));
            }
        } finally {
            producer.close(); // was never closed before: connection/resource leak
        }
    }

    public static void main(String[] args) {
        Producer producerThread = new Producer("log");
        producerThread.start();
    }
}
3)OrderConsumer 消费者,从kafka的broker里取消息,然后放到queue里(可供后面的storm的spout使用)
package com.cloudy.kafka;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
/**
 * Example high-level consumer: pulls messages from a Kafka topic and appends
 * them to a shared in-memory queue so a downstream component (e.g. a Storm
 * spout) can drain them.
 */
public class OrderConsumer extends Thread {
    private final ConsumerConnector consumer;
    private final String topic;
    // Shared hand-off buffer between this consumer thread and its readers.
    private static Queue<String> queue = new ConcurrentLinkedQueue<String>() ;

    // Most recently seen message; exposed via getString(). Currently never
    // written by run() (the assignment is disabled), so it stays null.
    String aaString = null;

    public OrderConsumer(String topic) {
        consumer = kafka.consumer.Consumer
                .createJavaConsumerConnector(createConsumerConfig());
        this.topic = topic;
    }

    /**
     * Builds the high-level consumer configuration from the shared constants.
     *
     * @return the consumer configuration used by the connector
     */
    private static ConsumerConfig createConsumerConfig() {
        Properties props = new Properties();
        props.put("zookeeper.connect", KafkaProperties.zkConnect);
        props.put("group.id", KafkaProperties.groupId + "1234"); // each topology must use a unique group id
        props.put("zookeeper.session.timeout.ms", "4000");
        props.put("zookeeper.sync.time.ms", "2000");
        props.put("auto.commit.interval.ms", "10000"); // commit offsets every 10 seconds
        props.put("auto.offset.reset", "smallest"); // start from the earliest available offset
        return new ConsumerConfig(props);
    }

    /**
     * Blocks on the Kafka stream and pushes each received message onto the
     * shared queue (push-style delivery from the broker; pull is the
     * alternative model).
     */
    public void run() {
        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
        // Integer.valueOf avoids the deprecated new Integer(int) constructor.
        topicCountMap.put(topic, Integer.valueOf(1)); // one stream for this topic
        Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer
                .createMessageStreams(topicCountMap);
        KafkaStream<byte[], byte[]> stream = consumerMap.get(topic).get(0);
        ConsumerIterator<byte[], byte[]> it = stream.iterator();
        int n = 0;
        while (it.hasNext()) {
            n++;
            // NOTE(review): decodes with the platform default charset — confirm
            // the producer side encodes with the same charset.
            String msg = new String(it.next().message());
            System.out.println("consumer:" + msg + " n:" + n);
            queue.add(msg);
        }
    }

    /** @return the shared message queue (static; shared by all instances) */
    public Queue<String> getQueue()
    {
        return queue ;
    }

    /** @return the last message recorded in {@link #aaString} (currently always null) */
    public String getString()
    {
        return aaString ;
    }

    public static void main(String[] args) {
        OrderConsumer consumerThread = new OrderConsumer(KafkaProperties.topic);
        consumerThread.start();
    }
}