Contents:
1. Code
2. POM
3. Demonstration
1. Code
package com.donews.data.kafkatest;
import java.io.BufferedReader;
import java.io.FileReader;
import java.util.Properties;
import java.util.concurrent.TimeUnit;
import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;
import kafka.serializer.StringEncoder;
/**
 * @author yuhui
 * @date 2016-12-23 10:51:05
 */
public class kafkaProducer extends Thread {
    private String topic;

    public kafkaProducer(String topic) {
        super();
        this.topic = topic;
    }

    @Override
    public void run() {
        Producer<Integer, String> producer = createProducer();
        while (true) {
            try {
                String path = "E:\\2017010105.log";
                // Read the log file line by line and produce each line into Kafka
                try (BufferedReader br = new BufferedReader(new FileReader(path))) {
                    String line;
                    while ((line = br.readLine()) != null) {
                        producer.send(new KeyedMessage<Integer, String>(topic, line));
                    }
                }
                System.out.println("Finished reading the file");
                Thread.sleep(1000L * 60); // wait one minute, then send the file again
            } catch (Exception e1) {
                e1.printStackTrace();
            }
            try {
                TimeUnit.SECONDS.sleep(1); // avoid a hot loop if reading fails immediately
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }
    }

    private Producer<Integer, String> createProducer() {
        Properties properties = new Properties();
        // ZooKeeper quorum (kept from the original config; the 0.8 producer itself only needs the broker list)
        properties.put("zookeeper.connect", "tagtic-slave01:2181,tagtic-slave02:2181,tagtic-slave03:2181");
        properties.put("serializer.class", StringEncoder.class.getName());
        // Kafka broker list
        properties.put("metadata.broker.list", "tagtic-slave01:9092,tagtic-slave02:9093,tagtic-slave03:9094");
        return new Producer<Integer, String>(new ProducerConfig(properties));
    }

    public static void main(String[] args) {
        new kafkaProducer("jdktest").start(); // produce to the topic "jdktest" already created on the Kafka cluster
    }
}
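The class above uses the old Scala producer API (kafka.javaapi.producer.Producer) that ships with Kafka 0.8. For comparison, here is a minimal sketch of the same send using the newer org.apache.kafka.clients.producer API available from Kafka 0.8.2 onward; the broker list is copied from createProducer() above, and the class name NewApiProducerSketch is purely illustrative:

package com.donews.data.kafkatest;
import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

public class NewApiProducerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // The new producer talks to the brokers directly; no ZooKeeper address is needed
        props.put("bootstrap.servers", "tagtic-slave01:9092,tagtic-slave02:9093,tagtic-slave03:9094");
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            producer.send(new ProducerRecord<>("jdktest", "hello kafka")); // one message, no key
        }
    }
}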
package com.donews.data.kafkatest;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
/**
 * @author yuhui
 * @date 2016-12-23 10:50:55
 */
public class kafkaConsumer extends Thread {
    private String topic;

    public kafkaConsumer(String topic) {
        super();
        this.topic = topic;
    }

    @Override
    public void run() {
        ConsumerConnector consumer = createConsumer();
        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
        topicCountMap.put(topic, 1); // one consumer stream (thread) for this topic
        Map<String, List<KafkaStream<byte[], byte[]>>> messageStreams = consumer.createMessageStreams(topicCountMap);
        KafkaStream<byte[], byte[]> stream = messageStreams.get(topic).get(0); // the single stream requested above
        ConsumerIterator<byte[], byte[]> iterator = stream.iterator();
        int i = 0;
        while (iterator.hasNext()) { // blocks until the next message arrives
            i++;
            String message = new String(iterator.next().message());
            System.out.println("Consumed message " + i + ": " + message);
        }
    }

    private ConsumerConnector createConsumer() {
        Properties properties = new Properties();
        properties.put("zookeeper.connect", "tagtic-slave01:2181,tagtic-slave02:2181,tagtic-slave03:2181"); // ZooKeeper quorum
        properties.put("group.id", "group1"); // consumer group; consumers sharing a group.id split the topic's partitions between them
        return Consumer.createJavaConsumerConnector(new ConsumerConfig(properties));
    }

    public static void main(String[] args) {
        new kafkaConsumer("jdktest").start(); // consume the topic "jdktest" already created on the Kafka cluster
    }
}
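Usage note: with a brand-new group.id, the old high-level consumer starts from the latest offset by default (auto.offset.reset defaults to "largest"), so start kafkaConsumer before kafkaProducer, or set auto.offset.reset to "smallest" in createConsumer() to replay data already in the topic. Because iterator.hasNext() blocks until a message arrives, the consumer keeps running after the file's contents have been fully consumed.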
2. POM
<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-streaming-kafka_2.11</artifactId>
    <version>1.6.1</version>
</dependency>
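This single dependency transitively pulls in the Kafka 0.8.x client (kafka_2.11) that the two classes above compile against, and it also provides the Spark Streaming receiver integration. As a minimal sketch of what that integration looks like for the same topic (the ZooKeeper quorum and group come from the consumer config above; the app name and 10-second batch interval are arbitrary choices):

package com.donews.data.kafkatest;
import java.util.HashMap;
import java.util.Map;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaPairReceiverInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka.KafkaUtils;
import scala.Tuple2;

public class SparkKafkaSketch {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setAppName("SparkKafkaSketch").setMaster("local[2]");
        // micro-batches every 10 seconds
        JavaStreamingContext jssc = new JavaStreamingContext(conf, Durations.seconds(10));
        Map<String, Integer> topics = new HashMap<String, Integer>();
        topics.put("jdktest", 1); // one receiver thread for the topic
        JavaPairReceiverInputDStream<String, String> stream = KafkaUtils.createStream(
                jssc,
                "tagtic-slave01:2181,tagtic-slave02:2181,tagtic-slave03:2181", // ZooKeeper quorum
                "group1",                                                      // consumer group
                topics);
        // Drop the (null) key and print the message lines of each batch
        stream.map(new Function<Tuple2<String, String>, String>() {
            @Override
            public String call(Tuple2<String, String> t) {
                return t._2();
            }
        }).print();
        jssc.start();
        jssc.awaitTermination();
    }
}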
3. Demonstration
1. Use the Kafka producer to push the text file's lines into Kafka.
2. Check the data volume of the text file: 2474 lines.
3. Check in Kafka Manager that the topic now holds 2474 messages (a programmatic cross-check is sketched after this list).
4. Use the Kafka consumer to consume the data from the topic.
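The count in step 3 can also be cross-checked in code: for a topic partition, the message count is the latest offset minus the earliest offset, and both can be fetched with the low-level SimpleConsumer API in the same Kafka 0.8 client. A minimal sketch, assuming the topic has a single partition 0 whose leader is tagtic-slave01:9092 (adjust host and partition to the actual leader):

package com.donews.data.kafkatest;
import java.util.HashMap;
import java.util.Map;
import kafka.api.PartitionOffsetRequestInfo;
import kafka.common.TopicAndPartition;
import kafka.javaapi.OffsetResponse;
import kafka.javaapi.consumer.SimpleConsumer;

public class TopicOffsetCheck {
    // Fetch one offset (earliest or latest) for the given topic partition
    private static long fetchOffset(SimpleConsumer consumer, String topic, int partition, long whichTime) {
        TopicAndPartition tp = new TopicAndPartition(topic, partition);
        Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
                new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
        requestInfo.put(tp, new PartitionOffsetRequestInfo(whichTime, 1));
        kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(
                requestInfo, kafka.api.OffsetRequest.CurrentVersion(), "offset-check");
        OffsetResponse response = consumer.getOffsetsBefore(request);
        return response.offsets(topic, partition)[0];
    }

    public static void main(String[] args) {
        SimpleConsumer consumer = new SimpleConsumer("tagtic-slave01", 9092, 100000, 64 * 1024, "offset-check");
        long earliest = fetchOffset(consumer, "jdktest", 0, kafka.api.OffsetRequest.EarliestTime());
        long latest = fetchOffset(consumer, "jdktest", 0, kafka.api.OffsetRequest.LatestTime());
        // Should print 2474 after step 1, if nothing has been deleted from the partition
        System.out.println("Messages in jdktest partition 0: " + (latest - earliest));
        consumer.close();
    }
}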