1. pom.xml
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>1.1.0</version>
</dependency>
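For context, here is a minimal sketch of the full pom.xml this dependency would sit in. The project coordinates (cn.gov.zjport.demo:kafka-demo) are hypothetical; only the kafka-clients dependency comes from the original.

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <!-- hypothetical coordinates for this demo project -->
    <groupId>cn.gov.zjport.demo</groupId>
    <artifactId>kafka-demo</artifactId>
    <version>1.0-SNAPSHOT</version>
    <dependencies>
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-clients</artifactId>
            <version>1.1.0</version>
        </dependency>
    </dependencies>
</project>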
2. TestProducer.java
package cn.gov.zjport.demo.kafka;

import java.util.Properties;

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

public class TestProducer {

    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "192.168.3.9:9092,192.168.3.10:9092,192.168.3.18:9092");
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        // acks: -1 = wait for all in-sync replicas, 0 = don't wait for acknowledgement, 1 = leader only
        props.put("acks", "1");
        // number of retries on send failure
        props.put("retries", 1);
        // a batch is sent as soon as any of these three conditions is met
        props.put("batch.size", 16384);
        props.put("linger.ms", 1);
        props.put("buffer.memory", 33554432);
        // compression algorithm: snappy/gzip; gzip is CPU-intensive and slower, but compresses better
        props.put("compression.type", "snappy");

        int i = 0;
        Producer<String, String> producer = new KafkaProducer<String, String>(props);
        while (true) {
            try {
                Thread.sleep(1000);
                boolean sync = true; // send synchronously?
                if (sync) {
                    try {
                        producer.send(new ProducerRecord<String, String>("topicA", "Hello")).get();
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                } else {
                    producer.send(new ProducerRecord<String, String>("topicA", "Hello"));
                    //producer.send(new ProducerRecord<String, String>(TOPIC, KEY, VALUE));
                    // asynchronous send with callback
                    /*producer.send(new ProducerRecord<String, String>(TOPIC, VALUE), new Callback() {
                        @Override
                        public void onCompletion(RecordMetadata metadata, Exception e) {
                            if (e != null) {
                                e.printStackTrace();
                            } else {
                                System.out.println(metadata.toString());//org.apache.kafka.clients.producer.RecordMetadata@1d89e2b5
                                System.out.println(metadata.offset());//1
                            }
                        }
                    });*/
                }
                System.out.println("Success:" + i);
            } catch (Exception e) {
                e.printStackTrace();
            }
            i++;
        }
        /*producer.flush();
        producer.close();*/
    }
}
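Because the demo loops forever, flush() and close() stay commented out. For a bounded run, a minimal sketch of the asynchronous path with the callback actually wired up and a proper shutdown could look like the following; the class name, message count, and payloads are illustrative, while the broker list and topic are taken from the demo above.

package cn.gov.zjport.demo.kafka;

import java.util.Properties;

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

public class TestAsyncProducer {

    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "192.168.3.9:9092,192.168.3.10:9092,192.168.3.18:9092");
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("acks", "1");

        Producer<String, String> producer = new KafkaProducer<String, String>(props);
        try {
            for (int i = 0; i < 10; i++) { // bounded run; count is illustrative
                producer.send(new ProducerRecord<String, String>("topicA", "Hello-" + i), new Callback() {
                    @Override
                    public void onCompletion(RecordMetadata metadata, Exception e) {
                        if (e != null) {
                            e.printStackTrace();
                        } else {
                            System.out.println("partition=" + metadata.partition() + ", offset=" + metadata.offset());
                        }
                    }
                });
            }
        } finally {
            producer.flush(); // wait for buffered records to be sent
            producer.close(); // release network resources
        }
    }
}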
3. TestConsumer.java
package cn.gov.zjport.demo.kafka;

import java.util.Arrays;
import java.util.Properties;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class TestConsumer {

    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "192.168.3.9:9092,192.168.3.10:9092,192.168.3.18:9092");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        // consumers serving the same purpose form a group by sharing the same group.id
        props.setProperty("group.id", "TopicA-Consumers");
        // disable auto-commit; offsets are committed manually below
        props.setProperty("enable.auto.commit", "false");
        // if the group has no committed offset for a partition, read from the beginning
        props.setProperty("auto.offset.reset", "earliest");

        Consumer<String, String> consumer = new KafkaConsumer<String, String>(props);
        try {
            // multiple topics may be subscribed to at once
            consumer.subscribe(Arrays.asList("topicA"));
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(1000);
                if (records.count() > 0) {
                    for (ConsumerRecord<String, String> record : records) {
                        System.out.printf("offset = %d, key = %s, value = %s \r\n", record.offset(), record.key(), record.value());
                    }
                    // commit the offsets of the records just consumed
                    consumer.commitSync();
                }
            }
        } finally {
            consumer.close();
        }
    }
}
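The no-argument commitSync() above commits the latest offsets returned by the last poll() across all partitions. When you need finer control, for example committing as each partition's batch finishes processing, the overload taking an offset map can be used instead. Here is a sketch of that per-partition variant (class name is illustrative; configuration and subscription match the consumer above); note the committed offset is the last processed offset plus one, i.e. the position the group should resume from.

package cn.gov.zjport.demo.kafka;

import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Properties;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

public class TestPartitionCommitConsumer {

    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "192.168.3.9:9092,192.168.3.10:9092,192.168.3.18:9092");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.setProperty("group.id", "TopicA-Consumers");
        props.setProperty("enable.auto.commit", "false");
        props.setProperty("auto.offset.reset", "earliest");

        Consumer<String, String> consumer = new KafkaConsumer<String, String>(props);
        try {
            consumer.subscribe(Arrays.asList("topicA"));
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(1000);
                for (TopicPartition partition : records.partitions()) {
                    List<ConsumerRecord<String, String>> partitionRecords = records.records(partition);
                    for (ConsumerRecord<String, String> record : partitionRecords) {
                        System.out.printf("partition = %d, offset = %d, value = %s \r\n",
                                record.partition(), record.offset(), record.value());
                    }
                    // commit the position to resume from: last processed offset + 1
                    long lastOffset = partitionRecords.get(partitionRecords.size() - 1).offset();
                    consumer.commitSync(Collections.singletonMap(
                            partition, new OffsetAndMetadata(lastOffset + 1)));
                }
            }
        } finally {
            consumer.close();
        }
    }
}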