一、版本信息
jdk: 1.8
kafka: kafka_2.11-0.10.2.1
Java生产者:
import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
public class Producer_API {
    /**
     * Sends 10000 string records to the "acctest" topic, one every 5 seconds.
     * Records carry the loop index as both key and value; with no explicit
     * partition, the default partitioner hashes the key so records spread
     * across the topic's partitions (the original pinned partition 0, which
     * contradicted its own "spread over the partitions" comment).
     */
    public static void main(String[] args) throws InterruptedException {
        Properties props = new Properties();
        // Broker address (host:port).
        props.put("bootstrap.servers", "10.255.0.61:9092");
        // acks=1: the leader acknowledges without waiting for all replicas.
        // ("all" would block on the full commit — slowest but most durable.)
        props.put("acks", "1");
        // Automatic retries on transient send failures; 0 disables retrying.
        props.put("retries", 0);
        // Per-partition buffer size (bytes) for batching unsent records.
        props.put("batch.size", 16384);
        // linger.ms: extra delay (ms) before sending a batch; default 0 sends immediately.
        // props.put("linger.ms", 1);
        // buffer.memory: total producer buffer; when exhausted, send() blocks up to
        // max.block.ms and then throws TimeoutException.
        // props.put("buffer.memory", 33554432);
        // Serializers that turn each ProducerRecord's String key/value into bytes.
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        // try-with-resources guarantees the producer is closed even if send()
        // or sleep() throws (the original leaked it on any exception).
        try (Producer<String, String> producer = new KafkaProducer<String, String>(props)) {
            for (int i = 0; i < 10000; i++) {
                System.out.println(i);
                System.out.println("begin");
                Thread.sleep(5000); // pacing for manual observation — TODO confirm still wanted
                // No explicit partition: let the key-hash partitioner distribute records.
                producer.send(new ProducerRecord<String, String>(
                        "acctest", Integer.toString(i), Integer.toString(i)));
            }
            System.out.println("end");
            producer.flush(); // push all buffered records out before closing
        }
    }
}
Java消费者:
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import net.sf.json.JSONObject;
public class Consumer_API extends Thread {
    // Connection to the Kafka cluster (old ZooKeeper-based high-level consumer).
    private final ConsumerConnector consumer;
    // Topic this consumer subscribes to.
    private final String topic;

    public Consumer_API(String topic) {
        consumer = kafka.consumer.Consumer
                .createJavaConsumerConnector(createConsumerConfig());
        this.topic = topic;
    }

    /**
     * Builds the old-consumer configuration. The old high-level consumer uses
     * ZooKeeper to store the offsets consumed per topic/partition for this
     * consumer group.
     */
    private static ConsumerConfig createConsumerConfig() {
        Properties props = new Properties();
        // Start from the earliest available offset when no committed offset exists.
        props.put("auto.offset.reset", "smallest");
        // ZooKeeper ensemble address (host:port) used for offsets and group membership.
        // props.put("zookeeper.connect","localhost:2181,10.XX.XX.XX:2181,10.XX.XX.XX:2181");
        props.put("zookeeper.connect", "10.255.0.167:2181");
        // props.put("metadata.broker.list", "127.0.0.1:9092");
        // Consumer group this process consumes on behalf of.
        props.put("group.id", "me");
        // How long (ms) to wait for ZooKeeper to answer a request before giving up.
        props.put("zookeeper.session.timeout.ms", "10000");
        // Max ms a ZooKeeper follower may lag the leader before an error occurs.
        props.put("zookeeper.sync.time.ms", "200");
        // How often (ms) consumed offsets are committed to ZooKeeper. Commits are
        // time-based, so a crash between commits can replay messages on restart.
        props.put("auto.commit.interval.ms", "1000");
        props.put("rebalance.max.retries", "6");
        props.put("rebalance.backoff.ms", "1200");
        return new ConsumerConfig(props);
    }

    /** Consumes messages from the topic forever, printing each payload as JSON. */
    @Override
    public void run() {
        Map<String, Integer> topicStreamCounts = new HashMap<String, Integer>();
        topicStreamCounts.put(topic, 1); // one stream (thread) for this topic
        Map<String, List<KafkaStream<byte[], byte[]>>> streamMap =
                consumer.createMessageStreams(topicStreamCounts);
        System.out.println(streamMap);
        KafkaStream<byte[], byte[]> stream = streamMap.get(topic).get(0);
        ConsumerIterator<byte[], byte[]> it = stream.iterator();
        System.out.println("*********Results********");
        // hasNext() blocks until a message arrives, so this loop never busy-spins.
        while (true) {
            if (it.hasNext()) {
                // Parse the raw payload as JSON and print it.
                JSONObject jsStr = JSONObject.fromObject(new String(it.next().message()));
                System.out.println(jsStr.toString());
            }
        }
    }

    public static void main(String[] args) {
        // BUG FIX: the original instantiated MyConsumer_Use, a class that does not
        // exist in this file (compile error); instantiate Consumer_API itself.
        Consumer_API consumerThread = new Consumer_API("halo"); // Kafka topic name
        consumerThread.start();
    }
}
附: kafka jar可以直接从kafka的安装lib里copy,一定要注意jar包的版本不要搞错,不然会出很多千奇百怪的问题。