Maven dependencies
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka_2.11</artifactId>
    <version>2.0.0</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.kafka/kafka-clients -->
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>2.0.0</version>
</dependency>
- Three properties must be specified (see the minimal sketch after this list):
    - bootstrap.servers: the list of broker addresses
    - key.serializer / value.serializer: keys and values are handed to the producer as Java objects, but the broker expects byte arrays, so serializers are used to turn those objects into byte arrays
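A minimal sketch using only those three settings (the broker address single:9092 and StringSerializer are taken from the full examples below; adjust them to your environment):

Properties prop = new Properties();
// bootstrap.servers: the broker address list
prop.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "single:9092");
// key.serializer / value.serializer: turn String keys and values into byte arrays
prop.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
prop.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
KafkaProducer<String, String> producer = new KafkaProducer<>(prop);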
Producer: sending messages
package org.example.test;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.Properties;

public class ProducerDemo {
    public void writeMsg(String msg) {
        Properties prop = new Properties();
        // Kafka provides the ProducerConfig class with constants for these config keys.
        // List of broker addresses:
        prop.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "single:9092");
        /* acks=all:
         * the producer only receives a success response from the server once all
         * in-sync replicas have received the message; this is the safest mode. */
        prop.put(ProducerConfig.ACKS_CONFIG, "all");
        /* When a send hits a retriable error, the producer can retry;
         * this parameter sets the number of retries (0 = no retries). */
        prop.put(ProducerConfig.RETRIES_CONFIG, "0");
        // Serializers for keys and values
        prop.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        prop.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());

        KafkaProducer<String, String> producer = new KafkaProducer<>(prop);
        ProducerRecord<String, String> rec = new ProducerRecord<>("mydemo", msg);
        producer.send(rec);   // send() is asynchronous; close() flushes any pending records
        producer.close();
    }
}
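The demo above fires and forgets; passing a callback to send() surfaces broker errors without blocking the sending thread. A sketch that could replace the plain producer.send(rec) call in writeMsg (the log messages are illustrative):

producer.send(rec, (metadata, exception) -> {
    // onCompletion runs once the broker responds (or the send has finally failed)
    if (exception != null) {
        exception.printStackTrace();
    } else {
        System.out.println("sent to " + metadata.topic() + "-" + metadata.partition()
                + " @ offset " + metadata.offset());
    }
});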
Consumer: receiving messages
package org.example.test;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.ArrayList;
import java.util.Properties;

public class ConsumerDemo {
    public void readMsg() {
        Properties prop = new Properties();
        prop.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "single:9092");
        prop.put(ConsumerConfig.GROUP_ID_CONFIG, "sq");
        prop.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        prop.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        // Whether the consumer commits offsets automatically; the default is true
        prop.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
        // Commit once per second
        prop.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
        /* auto.offset.reset: what to do when a partition has no committed offset
         * or the committed offset is invalid:
         *   earliest: start reading the partition from the beginning
         *   latest:   start reading from the newest records */
        prop.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(prop);
        // Manually assign a specific partition of the topic: (topic, partition number)
        TopicPartition tp = new TopicPartition("mydemo", 0);
        ArrayList<TopicPartition> list = new ArrayList<>();
        list.add(tp);
        consumer.assign(list);
        // Keep polling Kafka forever
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
            records.forEach(rec ->
                    System.out.println(rec.topic() + ":" + rec.partition() + ":" + rec.key() + ":" + rec.value()));
        }
    }
}
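Note that assign() pins this consumer to partition 0 and bypasses consumer-group rebalancing, so group.id "sq" is only used for committing offsets. The more common pattern is subscribe(), where the group coordinator spreads partitions across all consumers sharing the same group id. A sketch of that variant, replacing the assign() block and the loop above (needs import java.util.Collections):

consumer.subscribe(Collections.singletonList("mydemo"));
while (true) {
    // Partitions of "mydemo" are assigned to this consumer by the group
    // coordinator and rebalanced as members of group "sq" join or leave.
    ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
    records.forEach(rec ->
            System.out.println(rec.topic() + ":" + rec.partition() + ":" + rec.key() + ":" + rec.value()));
}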