生产者:
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import org.apache.hive.com.esotericsoftware.minlog.Log;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class Producer {
    // SLF4J logger for this class (use this, not the shaded minlog Log).
    private static final Logger log = LoggerFactory.getLogger(Producer.class);
    // Kafka producer client: Integer keys, String values.
    private final KafkaProducer<Integer, String> producer;
    // Destination topic.
    private final String topic;
    // true => async send with completion callback; false => block on each send.
    private final Boolean isAsync;
    // Producer client configuration: broker list and serializers.
    private final Properties properties = new Properties();
    // How many messages to produce.
    private final int messageNumToSend;

    /**
     * Creates a producer bound to the given topic.
     *
     * @param isAsync    true to send asynchronously with a completion callback,
     *                   false to block on each send via {@code Future.get()}
     * @param messageNum number of messages to send
     * @param topic      destination topic name
     */
    public Producer(Boolean isAsync, int messageNum, String topic) {
        properties.put("bootstrap.servers", "hadoop:9092");
        properties.put("key.serializer", "org.apache.kafka.common.serialization.IntegerSerializer");
        properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        this.topic = topic;
        this.isAsync = isAsync;
        this.messageNumToSend = messageNum;
        this.producer = new KafkaProducer<Integer, String>(properties);
    }

    /**
     * Sends {@code messageNumToSend} messages ("message0", "message1", ...),
     * keyed by their sequence number, then closes the producer.
     *
     * @throws InterruptedException if a blocking (sync) send is interrupted
     * @throws ExecutionException   if a sync send fails on the broker side
     */
    public void sendMessage() throws InterruptedException, ExecutionException {
        log.info("producer start");
        try {
            for (int number = 0; number < messageNumToSend; number++) {
                String value = "message" + number;
                // Typed record: key = sequence number, value = payload.
                ProducerRecord<Integer, String> record =
                        new ProducerRecord<Integer, String>(topic, number, value);
                if (!isAsync) {
                    // Synchronous send: block until the broker acknowledges.
                    producer.send(record).get();
                    log.info("send_sync\t{}\t{}", number + 1, value);
                } else {
                    // Asynchronous send: the callback logs the outcome later.
                    long startTime = System.currentTimeMillis();
                    producer.send(record, new AsyncCallback(startTime, number, value));
                    log.info("send_async\t{}\t{}", number + 1, value);
                }
            }
        } finally {
            // Flush buffered records and release client resources even if a send failed.
            producer.close();
        }
    }

    /**
     * Callback invoked by the Kafka I/O thread when an async send completes;
     * logs partition/offset and latency on success, or the error on failure.
     * Static nested class: no hidden reference to the enclosing Producer.
     */
    static class AsyncCallback implements Callback {
        private final long startTime; // send start time, for latency measurement
        private final int key;
        private final String message;

        AsyncCallback(long startTime, int key, String message) {
            this.startTime = startTime;
            this.key = key;
            this.message = message;
        }

        @Override
        public void onCompletion(RecordMetadata recordMetadata, Exception e) {
            // Elapsed time since the send was issued.
            long time = System.currentTimeMillis() - startTime;
            if (recordMetadata != null) {
                log.info("message({},{}) sent to partition({}), offset({}) in {} ms",
                        key, message, recordMetadata.partition(), recordMetadata.offset(), time);
            } else if (e != null) {
                // Pass the exception as the last argument so SLF4J keeps the stack trace.
                log.error("the exception occurred", e);
            }
        }
    }

    /** Demo entry point: sends 100 messages synchronously to topic "test2". */
    public static void main(String[] args) throws InterruptedException, ExecutionException {
        Producer producer = new Producer(false, 100, "test2");
        producer.sendMessage();
    }
}
结果:
消费者:
import java.util.Arrays;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;;
public class Comsumer {
    /**
     * Demo entry point: subscribes to topic "test2" and polls forever,
     * printing each record's value. Offsets are auto-committed
     * (enable.auto.commit = true). Runs until the process is killed.
     */
    public static void main(String[] args) {
        Properties properties = new Properties();
        properties.put("bootstrap.servers", "hadoop:9092");
        properties.put("key.deserializer", "org.apache.kafka.common.serialization.IntegerDeserializer");
        properties.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        properties.put("enable.auto.commit", true);
        properties.put("session.timeout.ms", 30000);
        properties.put("group.id", "kafka");
        // try-with-resources: closes the consumer (leaving the group cleanly)
        // if poll/deserialization throws, instead of leaking the client.
        try (KafkaConsumer<Integer, String> consumer =
                     new KafkaConsumer<Integer, String>(properties)) {
            // Subscribe to topic: test2.
            consumer.subscribe(Arrays.asList("test2"));
            while (true) {
                // Block up to 100 ms waiting for a batch of records.
                ConsumerRecords<Integer, String> consumerRecords = consumer.poll(100);
                for (ConsumerRecord<Integer, String> consumerRecord : consumerRecords) {
                    System.out.println("消费的数据为:" + consumerRecord.value());
                }
            }
        }
    }
}
结果: