步骤
- 安装配置一个可以运行的kafka服务器
- 创建java的maven项目
- 导入pom依赖
- 编写kafka生产者代码
- 编写kafka消费者代码
- 运行消费者代码
- 运行生产者代码
pom.xml
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
<version>2.7.0</version>
</dependency>
kafka 生产者
package com.my.study;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import java.util.Properties;
/**
 * Minimal Kafka producer example: sends 100 string records ("0".."99",
 * key == value) to topic "my-topic" and reports per-record success or
 * failure from the broker acknowledgment callback.
 */
public class KafkaProducerMain {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Kafka broker address.
        props.put("bootstrap.servers", "192.168.122.129:9092");
        // acks=all: the leader waits for the full set of in-sync replicas to
        // acknowledge each record. Strongest durability guarantee — a record is
        // not lost unless every replica holding it fails.
        props.put("acks", "all");
        // retries=0: the client will NOT resend on transient failures.
        // Set a value > 0 to enable automatic retries.
        props.put("retries", 0);
        // Wait up to 1 ms so records can be batched before sending.
        props.put("linger.ms", 1);
        // Serializers for record keys and values (plain strings here).
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        Producer<String, String> producer = new KafkaProducer<>(props);
        for (int i = 0; i < 100; i++) {
            final int seq = i; // effectively-final copy for the callback lambda
            // send() is asynchronous: report success only after the broker
            // acknowledges, instead of printing "success" unconditionally.
            producer.send(
                    new ProducerRecord<>("my-topic", Integer.toString(i), Integer.toString(i)),
                    (metadata, exception) -> {
                        if (exception == null) {
                            System.out.println("发送成功!" + seq);
                        } else {
                            System.err.println("发送失败!" + seq + ": " + exception.getMessage());
                        }
                    });
        }
        // Flushes any buffered records, waits for outstanding sends, then
        // releases network resources.
        producer.close();
    }
}
kafka 消费者
package com.my.study;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import java.time.Duration;
import java.util.Arrays;
import java.util.Properties;
/**
 * Minimal Kafka consumer example: subscribes to topic "my-topic" and
 * prints the offset, key and value of every record it receives, forever.
 */
public class KafkaConsumerMain {
    public static void main(String[] args) {
        Properties config = new Properties();
        // Kafka broker address.
        config.setProperty("bootstrap.servers", "192.168.122.129:9092");
        // Consumer group id. Offsets are tracked per group, so switching to a
        // fresh group id lets you consume the same records again without
        // re-producing them.
        config.setProperty("group.id", "test");
        // Commit offsets automatically in the background...
        config.setProperty("enable.auto.commit", "true");
        // ...every 1000 ms.
        config.setProperty("auto.commit.interval.ms", "1000");
        // Deserializers for record keys and values (plain strings here).
        config.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        config.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(config);
        consumer.subscribe(Arrays.asList("my-topic"));

        // Poll forever with a 100 ms timeout, printing each received record.
        while (true) {
            ConsumerRecords<String, String> batch = consumer.poll(Duration.ofMillis(100));
            for (ConsumerRecord<String, String> rec : batch) {
                System.out.printf("offset = %d, key = %s, value = %s%n",
                        rec.offset(), rec.key(), rec.value());
            }
        }
    }
}
先运行消费者,再运行生产者
成功!
参考文档
https://www.cnblogs.com/xuwujing/p/8371127.html
官方生产者api:http://kafka.apache.org/27/javadoc/index.html?org/apache/kafka/clients/producer/KafkaProducer.html
官方消费者api:http://kafka.apache.org/27/javadoc/index.html?org/apache/kafka/clients/consumer/KafkaConsumer.html
官方api文档:http://kafka.apache.org/documentation/#api