kafka新版本的生产者与消费者
0.11后的版本
生产者:KafkaProducer
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import java.util.Properties;
import java.util.concurrent.TimeUnit;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.errors.RetriableException;
/**
 * Builds a String-keyed/String-valued KafkaProducer for the local 3-broker cluster.
 * Fix: the original set "retires" (a typo); the real config key is "retries",
 * so the intended retry setting was silently ignored by the client.
 */
private KafkaProducer<String, String> createProducer() {
    Properties properties = new Properties();
    // Multiple brokers listed for fault tolerance.
    properties.put("bootstrap.servers", "127.0.0.1:9091,127.0.0.1:9092,127.0.0.1:9093");
    properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    properties.put("retries", 0); // was misspelled "retires", which Kafka ignores as an unknown key
    properties.put("batch.size", 16384);       // max bytes buffered per partition batch
    properties.put("linger.ms", 1);            // small delay to allow batching
    properties.put("buffer.memory", 33554432); // 32 MB total send buffer
    properties.put("acks", "-1");              // wait for all in-sync replicas to ack
    return new KafkaProducer<String, String>(properties);
}
@Override
public void run() {
    // try-with-resources guarantees the producer is flushed and closed;
    // the original never called close(), leaking the client and possibly
    // dropping records still buffered when the thread died.
    try (KafkaProducer<String, String> kafkaProducer = createProducer()) {
        int i = 0;
        while (true) {
            ProducerRecord<String, String> record =
                    new ProducerRecord<String, String>(topic, "key1", "生产到: " + i++);
            // Async send; the callback separates transient from fatal failures.
            kafkaProducer.send(record, new Callback() {
                @Override
                public void onCompletion(RecordMetadata recordMetadata, Exception e) {
                    if (e == null) {
                        // sent successfully
                    } else if (e instanceof RetriableException) {
                        // transient error: safe to retry / may resolve on its own
                    } else {
                        // non-retriable error: log or alert, do not retry
                    }
                }
            });
            TimeUnit.SECONDS.sleep(1);
        }
    } catch (InterruptedException e) {
        // Restore the interrupt flag instead of swallowing it.
        Thread.currentThread().interrupt();
    } catch (Exception e) {
        e.printStackTrace();
    }
}
消费者:KafkaConsumer
import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
/**
 * Builds a String/String KafkaConsumer in consumer group "1" with manual
 * offset commits (enable.auto.commit=false).
 * Fix: declares the parameterized return type instead of the raw KafkaConsumer,
 * so callers no longer need unchecked assignments.
 */
private KafkaConsumer<String, String> createConsumer() {
    Properties kafkaPropertie = new Properties();
    // Multiple broker addresses for fault tolerance.
    kafkaPropertie.put("bootstrap.servers", "127.0.0.1:9091,127.0.0.1:9092,127.0.0.1:9093");
    // String deserializers for both key and value.
    kafkaPropertie.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    kafkaPropertie.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    // Offsets are committed manually in the polling loop, not auto-committed.
    kafkaPropertie.put("enable.auto.commit", "false");
    kafkaPropertie.put("group.id", "1");
    return new KafkaConsumer<String, String>(kafkaPropertie);
}
@Override
public void run() {
    // try-with-resources closes the consumer (leaving its group cleanly);
    // the original never called close().
    try (KafkaConsumer<String, String> consumer = createConsumer()) {
        consumer.subscribe(Collections.singletonList("testTopic"));
        // Poll loop: fetch a batch roughly once per second.
        while (true) {
            // Fix: pass the Duration directly — .toMillis() routed this through
            // the deprecated poll(long) overload.
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
            for (ConsumerRecord<String, String> record : records) {
                System.out.println("partition: " + record.partition());
                System.out.println("topic: " + record.topic());
                System.out.println("offset: " + record.offset());
                System.out.println(record.key() + ":" + record.value());
            }
            consumer.commitAsync(); // async, non-blocking commit for all subscribed partitions
        }
    } catch (Exception e) {
        e.printStackTrace();
        callBack(); // presumably an error-recovery hook defined elsewhere — TODO confirm
    }
}
新版本的消费者不与zookeeper连接
自定义分区器
自定义对象转码器