参考: https://www.orchome.com/451
Kafka集群的安装见上文,本文介绍使用Java API通过kafka发送和接收消息。
1. kafka客户端依赖
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.11</artifactId>
<version>1.0.1</version>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
<version>1.0.1</version>
</dependency>
2. Kafka消息生产者API
package kafka;
import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
public class ProducerDemo {
    /** Topic that the demo messages are published to. */
    private static final String MY_TOPIC = "my-topic";

    /**
     * Publishes 100 demo messages to {@code my-topic} and prints each one as it is handed
     * to the producer. Send failures are reported asynchronously via a callback.
     */
    public static void main(String[] args) {
        Properties properties = new Properties();
        // Kafka broker addresses (host:port, comma separated).
        properties.put("bootstrap.servers", "127.0.0.1:9092,127.0.0.1:9093");
        // Wait for the full set of in-sync replicas to acknowledge each record.
        properties.put("acks", "all");
        // No automatic retries; enabling retries can introduce duplicate messages.
        properties.put("retries", 0);
        properties.put("batch.size", 16384);
        // Even a partially filled batch is sent immediately by default; a linger.ms > 0
        // lets nearby records be grouped into fewer, larger requests.
        properties.put("linger.ms", 1);
        // Total memory available for buffering records awaiting transmission; producing
        // faster than the network drains this buffer eventually blocks send().
        properties.put("buffer.memory", 33554432);
        // Key/value serializers for String payloads.
        properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        // try-with-resources flushes and closes the producer on exit.
        try (Producer<String, String> producer = new KafkaProducer<>(properties)) {
            for (int i = 0; i < 100; i++) {
                String msg = "Message-index-" + i;
                // send() is asynchronous: without a callback, delivery failures are
                // silently lost. Report them explicitly instead.
                producer.send(new ProducerRecord<>(MY_TOPIC, msg), (metadata, exception) -> {
                    if (exception != null) {
                        System.err.println("Failed to send " + msg + ": " + exception);
                    }
                });
                System.out.println("Sent: " + msg);
            }
        }
    }
}
消息发送结果:
3. Kafka消息消费者API
package kafka;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import java.util.Collections;
import java.util.Properties;
public class ConsumerDemo {
    /** Topic that the demo subscribes to. */
    private static final String MY_TOPIC = "my-topic";

    /**
     * Polls {@code my-topic} in an endless loop, printing the offset, key, and value of
     * every record received. Runs until the process is killed.
     */
    public static void main(String[] args) {
        Properties properties = new Properties();
        // Kafka broker address (host:port).
        properties.put("bootstrap.servers", "127.0.0.1:9092");
        // Consumer group this consumer belongs to.
        properties.put("group.id", "group-1");
        // Commit offsets automatically; switch to manual commits for at-least-once control.
        properties.put("enable.auto.commit", true);
        // Interval between automatic offset commits.
        properties.put("auto.commit.interval.ms", "1000");
        // With no committed offset, start from the earliest available message.
        properties.put("auto.offset.reset", "earliest");
        // A consumer silent for longer than session.timeout.ms is considered dead and
        // its partitions are reassigned to other members of the group.
        properties.put("session.timeout.ms", "30000");
        // Key/value deserializers matching the producer's String serializers.
        properties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        properties.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        // try-with-resources guarantees the consumer is closed (leaving the group
        // cleanly) even if poll() throws; the original never closed it.
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties)) {
            // Subscribe to the my-topic topic.
            consumer.subscribe(Collections.singletonList(MY_TOPIC));
            // Poll and consume forever.
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(1000);
                System.out.println("records count: " + records.count());
                for (ConsumerRecord<String, String> record : records) {
                    // %n emits the platform line separator, same as println().
                    System.out.printf("offset = %d, key = %s, value = %s%n",
                            record.offset(), record.key(), record.value());
                }
            }
        }
    }
}
消息消费的结果如下: