Kafka 生产者 / 消费者快速入门示例(普通 Kafka 客户端代码,可放入新建的 Spring Boot 项目中运行):
ProducerFastStart.java
public class ProducerFastStart {

    /** Kafka bootstrap server address (host:port); adjust to your environment. */
    private static final String BROKER_LIST = "192.168.131.129:9092";

    /** Topic the demo record is sent to. */
    private static final String TOPIC = "heilu";

    /**
     * Sends one demo record to Kafka and prints the broker-assigned metadata
     * (topic / partition / offset) from the asynchronous send callback.
     *
     * @param args unused command-line arguments
     */
    public static void main(String[] args) {
        Properties properties = new Properties();
        // Key and value serializers: both key and value are plain strings.
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        // Number of retries on transient send failures.
        properties.put(ProducerConfig.RETRIES_CONFIG, 10);
        // Cluster bootstrap address.
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, BROKER_LIST);
        // Custom interceptor applied to outgoing records.
        properties.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, ProducerInterceptorPrefix.class.getName());
        // acks semantics:
        // "0"  : the producer does not wait for any broker response; failures go
        //        unnoticed and messages can be lost, but throughput is maximal.
        // "1"  : the producer gets a success response once the partition leader has
        //        written the message. If the leader is unreachable (e.g. it crashed
        //        and a new one is not yet elected) an error response triggers a
        //        retry. Data can still be lost if the leader crashes after
        //        acknowledging but before followers replicate the message.
        // "-1" : (equivalent to "all") the producer waits until all in-sync
        //        replicas have the message — safest: more than one server has it.
        properties.put(ProducerConfig.ACKS_CONFIG, "-1");

        ProducerRecord<String, String> record =
                new ProducerRecord<>(TOPIC, "kafka-demo", "hello,ww!");

        // try-with-resources guarantees the producer is closed (flushing any
        // buffered records) even if send() throws.
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(properties)) {
            // Synchronous variant, kept for reference:
            // RecordMetadata recordMetadata = producer.send(record).get();
            // System.out.println("topic: " + recordMetadata.topic());
            // System.out.println("partition: " + recordMetadata.partition());
            // System.out.println("offset: " + recordMetadata.offset());

            // Asynchronous send: the callback runs once the broker responds.
            producer.send(record, new Callback() {
                @Override
                public void onCompletion(RecordMetadata metadata, Exception exception) {
                    if (exception == null) {
                        System.out.println("topic: " + metadata.topic());
                        System.out.println("partition: " + metadata.partition());
                        System.out.println("offset: " + metadata.offset());
                    } else {
                        // Surface send failures instead of silently dropping them.
                        exception.printStackTrace();
                    }
                }
            });
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
ConsumerFastStart.java
public class ConsumerFastStart {

    /** Kafka bootstrap server address (host:port); adjust to your environment. */
    private static final String BROKER_LIST = "192.168.131.129:9092";

    /** Topic to subscribe to. */
    private static final String TOPIC = "heilu";

    /** Consumer group id; consumers sharing it split the topic's partitions. */
    private static final String GROUP_ID = "group.demo";

    /**
     * Subscribes to the demo topic and prints every record received, polling
     * forever until the process is terminated.
     *
     * @param args unused command-line arguments
     */
    public static void main(String[] args) {
        Properties properties = new Properties();
        // Key and value deserializers: both key and value are plain strings.
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, BROKER_LIST);
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, GROUP_ID);

        // try-with-resources ensures the consumer (its group membership and
        // network resources) is released if poll() or deserialization throws.
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties)) {
            consumer.subscribe(Collections.singletonList(TOPIC));
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(3000));
                for (ConsumerRecord<String, String> record : records) {
                    System.out.println(record);
                }
            }
        }
    }
}