Kafka提供了非常简单的客户端API。只需要引入一个Maven依赖即可:
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
<version>3.4.0</version>
</dependency>
生产者
使用kafka提供的Producer类,进行消息发送
构建Producer主要分为三个步骤
- 设置Producer核心属性:Producer的属性都是由ProducerConfig类管理。在ProducerConfig中,对于比较重要的属性,都配置了DOC属性进行说明。
- 消息构建:Kafka消息是Key-Value结构,key、value可以是任意类型的对象,key主要是用来进行Partition分区,value用来传输业务数据。
- 使用Producer发送消息:常用的消息发送方式有 单向发送、同步发送、异步发送 三种方式
/**
 * Demonstrates the three common ways to send messages with a Kafka producer:
 * fire-and-forget, synchronous send, and asynchronous send with a callback.
 */
public class MyProducer {
    private static final String KAFKA_SERVER = "server0:9092,server1:9092";
    public static final String TOPIC = "disTopic";
    public static final int messageSize = 3;
    public static void main(String[] args) throws Exception {
        /** 1. Configure the core producer properties */
        Properties properties = new Properties();
        // Kafka broker bootstrap addresses
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, KAFKA_SERVER);
        properties.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, "com.liyy.basic.MyInterceptor");
        // key/value serializer classes
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        /** 2. Create the producer (diamond operator avoids the raw-type warning) */
        Producer<String, String> producer = new KafkaProducer<>(properties);
        for (int i = 0; i < messageSize; i++) {
            /** 3. Build the message: key drives partitioning, value carries the payload */
            ProducerRecord<String, String> record = new ProducerRecord<>(TOPIC, String.valueOf(i), "producer-1-" + i);
            // 2.1 Fire-and-forget: do not wait for a broker acknowledgement
            producer.send(record);
            System.out.println("message "+i+" do send");
        }
        for (int i = 0; i < messageSize; i++) {
            /** 3. Build the message */
            ProducerRecord<String, String> record = new ProducerRecord<>(TOPIC, String.valueOf(i), "producer-2-" + i);
            // 2.2 Synchronous send: get() blocks the current thread until the broker responds
            RecordMetadata recordMetadata = producer.send(record).get();
            StringBuilder builder = new StringBuilder("");
            builder.append("message [").append(recordMetadata.toString())
                    .append("] send to topic [").append(recordMetadata.topic())
                    .append("];partition [").append(recordMetadata.partition())
                    .append("];offset [").append(recordMetadata.offset())
                    .append("]");
            System.out.println(builder.toString());
        }
        CountDownLatch latch = new CountDownLatch(messageSize);
        for (int i = 0; i < messageSize; i++) {
            /** 3. Build the message */
            ProducerRecord<String, String> record = new ProducerRecord<>(TOPIC, String.valueOf(i), "producer-2-" + i);
            // 2.3 Asynchronous send: returns immediately; the callback fires on broker response
            producer.send(record, new Callback() {
                @Override
                public void onCompletion(RecordMetadata recordMetadata, Exception e) {
                    if (null != e) {
                        System.out.println("消息发送失败," + e.getMessage());
                        e.printStackTrace();
                    } else {
                        StringBuilder builder = new StringBuilder("");
                        builder.append("message [").append(recordMetadata.toString())
                                .append("] send to topic [").append(recordMetadata.topic())
                                .append("];partition [").append(recordMetadata.partition())
                                .append("];offset [").append(recordMetadata.offset())
                                .append("]");
                        System.out.println(builder.toString());
                    }
                    latch.countDown();
                }
            });
        }
        // BUG FIX: await/close were inside the loop above, so the first iteration
        // blocked forever on a latch whose count could never reach zero and the
        // producer would have been closed mid-loop. Wait for all callbacks, then close.
        latch.await();
        producer.close();
    }
}
使用拦截器,可以对消息内容进行处理
public class MyInterceptor implements ProducerInterceptor {
//发送消息时触发
@Override
public ProducerRecord onSend(ProducerRecord producerRecord) {
System.out.println("onSend producerRecord:" + producerRecord.toString());
return producerRecord;
}
//收到服务端响应时触发
@Override
public void onAcknowledgement(RecordMetadata recordMetadata, Exception e) {
System.out.println("onAcknowledgement recordMetadata:" + recordMetadata);
}
@Override
public void close() {
System.out.println("producer close");
}
//整理配置项
@Override
public void configure(Map<String, ?> map) {
System.out.println("=====config start======");
for (Map.Entry<String, ?> entry : map.entrySet()) {
System.out.println("entry.key:" + entry.getKey() + ",entry.value:" + entry.getValue());
}
System.out.println("=====config end======");
}
消费者
使用Kafka提供的Consumer类,进行消息接收
构建Consumer同样主要分为3个步骤
- 设置Consumer核心属性:Consumer的可选属性可以由ConsumerConfig进行管理,在这个类中,对于大部分比较重要的属性,都配置了DOC文件进行说明
- 拉取消息:Kafka的消费者使用拉取消息的Pull模式,消费者从broker上拉取一批订阅的消息,然后对消息进行处理
- 提交点位(offset):消费者需要向broker提交偏移量offset,如果不提交offset,broker会认为消费者处理消息失败,再次拉取会拉取到重复消息
/**
 * Demonstrates the consumer loop: configure, subscribe, poll, process,
 * then commit offsets either synchronously or asynchronously.
 */
public class MyConsumer {
    public static final String KAFKA_SERVER = "server0:9092,server1:9092";
    public static final String TOPIC = "disTopic";
    public static void main(String[] args) {
        /** 1. Configure the core consumer properties */
        Properties properties = new Properties();
        // Kafka broker bootstrap addresses
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, KAFKA_SERVER);
        // consumer group this consumer belongs to
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, "test");
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        Consumer<String, String> consumer = new KafkaConsumer<String, String>(properties);
        // subscribe to the topic
        consumer.subscribe(Arrays.asList(TOPIC));
        // toggle between synchronous and asynchronous offset commits below
        boolean commitSynchronously = true;
        while (true) {
            /** 2. Poll a batch of messages; blocks up to 100 ms */
            // BUG FIX: the original used Duration.ofNanos(100) — effectively a 0 ms poll
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
            // process each message
            for (ConsumerRecord<String, String> record : records) {
                StringBuilder builder = new StringBuilder();
                builder.append("offset = ").append(record.offset())
                        .append(";key=").append(record.key())
                        .append(";value=").append(record.value());
                System.out.println(builder.toString());
            }
            /** 3. Commit offsets so the broker won't redeliver this batch */
            if (commitSynchronously) {
                // synchronous commit: blocks until the offset commit is acknowledged
                consumer.commitSync();
            } else {
                // asynchronous commit: returns immediately without waiting for the broker
                // BUG FIX: the original called commitSync() here, contradicting the comment
                consumer.commitAsync();
            }
        }
    }
}
Kafka客户端主要就是按照三个大步骤的方式运行。在具体使用过程中,最大的变化就是通过给生产者、消费者设置合适的属性,这些属性的设置会极大影响客户端程序的执行方式。