1, 先写好生产者Java代码并运行,然后在虚拟机上创建一个topic,紧接着在虚拟机上写一个消费者去拿数据。
生产者java代码 :
package cn.itcast.kafka;
import java.util.Properties;
import kafka.producer.KeyedMessage;
import kafka.producer.Producer;
import kafka.producer.ProducerConfig;
import scala.collection.Seq;
//生产者
public class ProducerDemo {
public static void main(String[] args) throws Exception {
Properties props = new Properties();
props.put("zk.connect","weekend01:2181,weekend02:2181,weekend03:2181");
props.put("metadata.borker.list", "weekend01:9092,weekend02:9092,weekend03:9092");
props.put("serializer.class", "kafka.serializer.StringEncoder");
ProducerConfig config = new ProducerConfig(props);
Producer<String,String> producer = new Producer<String,String>(config);
//发送数据 读取文件 内存数据库 读socket 端口
for(int i = 1 ; i <= 1000 ; i ++ ) {
Thread.sleep(500);
producer.send((Seq<KeyedMessage<String, String>>) new KeyedMessage<String, String>("mytest", "send " + i + " time "));
}
}
}
在kafka集群中创建一个topic
bin/kafka-topics.sh --create --zookeeper weekend01:2181 --replication-factor 3 --partitions 1 --topic mytest
用一个consumer从某一个topic中读取信息
bin/kafka-console-consumer.sh --zookeeper weekend01:2181 --from-beginning --topic mytest
从虚拟机consumer控制台可以看到输出 :
send 1 time
send 2 time
send 3 time
send 4 time
send 5 time
send 6 time
send 7 time
send 8 time
..........
2 , 消费者Java代码,写完后先在虚拟机上创建一个topic,然后再启动消费者和生产者,最后在Java控制台查看消费者输出信息
package cn.itcast.kafka;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerConnector;
import kafka.consumer.KafkaStream;
import kafka.message.MessageAndMetadata;
//消费者
public class ConsumerDemo {
private static final String topic = "mytest" ;
private static final Integer threads = 1 ;
public static void main(String[] args) {
Properties props = new Properties();
props.put("zk.connect","weekend01:2181,weekend02:2181,weekend03:2181");
props.put("group.id","1111"); //分组 组id
props.put("auto.offset.reset","smallest");
ConsumerConfig config = new ConsumerConfig(props);
ConsumerConnector consumer = (ConsumerConnector) Consumer.createJavaConsumerConnector(config);
Map<String,Integer> topicCountMap = new HashMap<String,Integer>();
topicCountMap.put(topic, threads);
Map<String,List<KafkaStream<byte[],byte[]>>> consumerMap = (Map<String, List<KafkaStream<byte[], byte[]>>>) consumer.createMessageStreams((scala.collection.Map<String, Object>) topicCountMap);
List<KafkaStream<byte[],byte[]>> streams = consumerMap.get(topic);
for(final KafkaStream<byte[],byte[]> KafkaStream : streams) {
new Thread(new Runnable() {
@Override
public void run() {
for(MessageAndMetadata<byte[], byte[]> mm : KafkaStream) {
String msg = new String(mm.message());
System.out.println(msg);
}
}
}).start();
}
}
}
运行消费者代码从java命令行可以看到输出 :
send 1 time
send 2 time
send 3 time
send 4 time
send 5 time
send 6 time
send 7 time
send 8 time
..........
没毛病 over