Consumer:
package com.zpark.kafkatest.one;

import java.io.*;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Collections;
import java.util.Properties;

import org.apache.hadoop.conf.Configuration;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class ConsumerDemo {
    public static void main(String[] args) {
        receive();
    }

    private static void receive() {
        Properties properties = new Properties();
        properties.put("bootstrap.servers", "hdp-3:9092");
        properties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        properties.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        properties.put("group.id", "yangk");
        properties.put("enable.auto.commit", "true");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(properties);
        consumer.subscribe(Collections.singleton("test"));

        // HDFS settings, prepared for the upload done later by HdfsUtils
        URI uri = null;
        Configuration conf = null;
        String user = "root";
        try {
            uri = new URI("hdfs://hdp-1:9000");
            conf = new Configuration();
            // dfs.replication: number of replicas kept by the distributed file system
            conf.set("dfs.replication", "2");
            // dfs.blocksize: block size of the distributed file system
            conf.set("dfs.blocksize", "64m");
        } catch (URISyntaxException e) {
            e.printStackTrace();
        }

        try {
            FileOutputStream fos = new FileOutputStream("f:/toupload.txt");
            OutputStreamWriter osw = new OutputStreamWriter(fos);
            // Create the writer once, not once per record
            BufferedWriter bw = new BufferedWriter(osw);
            try {
                while (true) {
                    ConsumerRecords<String, String> records = consumer.poll(100);
                    for (ConsumerRecord<String, String> record : records) {
                        String msg = "key:" + record.key() + ",value:" + record.value()
                                + ",offset:" + record.offset() + ",topic:" + record.topic() + "\r\n";
                        System.out.printf("key=%s,value=%s,offset=%s,topic=%s%n",
                                record.key(), record.value(), record.offset(), record.topic());
                        bw.write(msg);
                        bw.flush();
                    }
                }
            } catch (IOException e) {
                e.printStackTrace();
            }
        } catch (FileNotFoundException e) {
            e.printStackTrace();
        } finally {
            consumer.close();
        }
    }
}
Producer:
package com.zpark.kafkatest.one;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

import java.util.Properties;

public class ProducerDemo {
    public static void main(String[] args) {
        send();
    }

    private static void send() {
        Properties properties = new Properties();
        properties.put("bootstrap.servers", "hdp-2:9092");
        properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        KafkaProducer<String, String> producer = new KafkaProducer<String, String>(properties);
        ProducerRecord<String, String> record = new ProducerRecord<String, String>("test", "大美人");
        producer.send(record);
        producer.close();
    }
}
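Note that producer.send() is asynchronous: close() flushes pending records, but delivery errors only surface through the returned Future or a callback. A minimal sketch of checking the result with a callback (the sendWithCallback method is hypothetical; the topic and message come from the demo above):

private static void sendWithCallback(KafkaProducer<String, String> producer) {
    ProducerRecord<String, String> record = new ProducerRecord<String, String>("test", "大美人");
    // The callback runs once the broker acknowledges (or rejects) the record
    producer.send(record, (metadata, exception) -> {
        if (exception != null) {
            exception.printStackTrace();
        } else {
            System.out.println("partition=" + metadata.partition() + ", offset=" + metadata.offset());
        }
    });
}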
Timer:
package com.zpark.kafkatest.one;

import java.util.Timer;

public class Time {
    public static void main(String[] args) {
        Timer timer = new Timer();
        // Run SimpleTime after a 5 ms delay, then every 30 minutes
        timer.schedule(new SimpleTime(), 5, 30 * 60 * 1000L);
    }
}
package com.zpark.kafkatest.one;

import java.io.File;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.TimerTask;

public class SimpleTime extends TimerTask {
    public void run() {
        // HH (24-hour clock) instead of hh, so timestamps sort correctly
        SimpleDateFormat sdf = new SimpleDateFormat("yyyyMMddHHmmss");
        String format = sdf.format(new Date()); // timestamp string (not used below)
        HdfsUtils hdfsUtils = new HdfsUtils();
        // Path corrected to match the file the consumer writes
        hdfsUtils.toHdfs(new File("f:/toupload.txt"));
    }
}
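HdfsUtils is called above but not shown. A minimal sketch of what toHdfs might look like, assuming the hdfs://hdp-1:9000 address, root user, and dfs settings from ConsumerDemo; the class name and method signature come from the caller, and the destination path is an assumption:

package com.zpark.kafkatest.one;

import java.io.File;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HdfsUtils {
    public void toHdfs(File file) {
        try {
            Configuration conf = new Configuration();
            // Replication and block size assumed from ConsumerDemo's settings
            conf.set("dfs.replication", "2");
            conf.set("dfs.blocksize", "64m");
            // Connect to the NameNode from ConsumerDemo as user "root"
            FileSystem fs = FileSystem.get(new URI("hdfs://hdp-1:9000"), conf, "root");
            // Copy the local file into HDFS (destination path is an assumption)
            fs.copyFromLocalFile(new Path(file.getAbsolutePath()), new Path("/kafka/toupload.txt"));
            fs.close();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}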