消息的发送与接收
生产者主要的对象有: KafkaProducer , ProducerRecord 。
其中 KafkaProducer 是用于发送消息的类, ProducerRecord 类用于封装Kafka的消息。
KafkaProducer 的创建需要指定的参数和含义
其他参数可以从 org.apache.kafka.clients.producer.ProducerConfig 中找到。我们后面的内
容会介绍到。
生产者生产消息后,需要broker端的确认,可以同步确认,也可以异步确认。
同步确认效率低,异步确认效率高,但是需要设置回调对象
生产者:
package com.liu.kafka;

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.apache.kafka.common.serialization.IntegerSerializer;
import org.apache.kafka.common.serialization.StringSerializer;

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
/**
 * Demo producer: sends one message (with a custom header) to partition 0 of
 * {@code topic_1} and prints the delivery metadata from the asynchronous
 * acknowledgement callback.
 */
public class MyProducer1 {
    public static void main(String[] args) throws ExecutionException, InterruptedException {
        Map<String, Object> configs = new HashMap<>();
        // Broker address used for the initial bootstrap connection.
        configs.put("bootstrap.servers", "192.168.181.140:9092");
        // Serializer class for the record key.
        configs.put("key.serializer", IntegerSerializer.class);
        // Serializer class for the record value.
        configs.put("value.serializer", StringSerializer.class);

        List<Header> headers = new ArrayList<>();
        // Explicit charset: getBytes() without one depends on the platform default.
        headers.add(new RecordHeader("biz.name", "producer.demo".getBytes(StandardCharsets.UTF_8)));
        ProducerRecord<Integer, String> record = new ProducerRecord<>(
                "topic_1",      // topic
                0,              // partition
                0,              // key
                "hello kafka ", // value
                headers);

        // try-with-resources guarantees the producer is closed (and any
        // buffered records flushed) even if send(...) throws.
        try (KafkaProducer<Integer, String> producer = new KafkaProducer<>(configs)) {
            // Asynchronous acknowledgement: the callback fires once the broker
            // confirms (exception == null) or rejects (exception != null) the record.
            producer.send(record, new Callback() {
                @Override
                public void onCompletion(RecordMetadata metadata, Exception exception) {
                    if (exception == null) {
                        System.out.println("消息的主题:" + metadata.topic());
                        System.out.println("消息的分区号:" + metadata.partition());
                        System.out.println("消息的偏移量:" + metadata.offset());
                    } else {
                        System.out.println("异常消息:" + exception.getMessage());
                    }
                }
            });
        }
    }
}
消费者
spring boot
spring.application.name=springboot-kafka01
server.port=8080
#bootstrap
spring.kafka.bootstrap-servers=192.168.181.140:9092
# producer
spring.kafka.producer.key-serializer=org.apache.kafka.common.serialization.IntegerSerializer
spring.kafka.producer.value-serializer=org.apache.kafka.common.serialization.StringSerializer
#一个批次最多放多少消息
spring.kafka.producer.batch-size=16384
#生产者总的缓冲区大小32M
spring.kafka.producer.buffer-memory=33554432
# consumer
spring.kafka.consumer.key-deserializer=org.apache.kafka.common.serialization.IntegerDeserializer
spring.kafka.consumer.value-deserializer=org.apache.kafka.common.serialization.StringDeserializer
spring.kafka.consumer.group-id=springboot-consumer02
#如果找不到已提交的消费位移,就从最早的消息开始消费
spring.kafka.consumer.auto-offset-reset=earliest
#自动提交
spring.kafka.consumer.enable-auto-commit=true
spring.kafka.consumer.auto-commit-interval=1000
#一次 poll 最多拉取多少条消息(Kafka 默认值为 500;原文此处值为空,会导致属性绑定失败)
spring.kafka.consumer.max-poll-records=500
config
package com.liu.kafka.kafkaspringboot.config;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.core.KafkaAdmin;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;
import java.util.HashMap;
import java.util.Map;
/**
 * Kafka infrastructure beans: two topics created on startup via KafkaAdmin,
 * plus a KafkaTemplate that overrides the auto-configured batch size.
 */
@Configuration
public class KafkaConfig {

    /** Topic "nptc-01": 3 partitions, replication factor 1. */
    @Bean
    public NewTopic topic1() {
        return new NewTopic("nptc-01", 3, (short) 1);
    }

    /** Topic "nptc-02": 5 partitions, replication factor 1. */
    @Bean
    public NewTopic topic2() {
        return new NewTopic("nptc-02", 5, (short) 1);
    }

    /**
     * Admin client Spring uses to create the NewTopic beans on the broker
     * at application startup.
     */
    @Bean
    public KafkaAdmin kafkaAdmin() {
        Map<String, Object> configs = new HashMap<>();
        configs.put("bootstrap.servers", "192.168.181.140:9092");
        return new KafkaAdmin(configs);
    }

    /**
     * KafkaTemplate with a per-template batch-size override.
     * Note: {@code @Autowired} is redundant on {@code @Bean} method
     * parameters — Spring injects them automatically — so it was removed.
     */
    @Bean
    public KafkaTemplate<Integer, String> kafkaTemplate(ProducerFactory<Integer, String> producerFactory) {
        // Override settings inherited from the auto-configured ProducerFactory.
        Map<String, Object> configsOverride = new HashMap<>();
        configsOverride.put(ProducerConfig.BATCH_SIZE_CONFIG, 200);
        return new KafkaTemplate<>(producerFactory, configsOverride);
    }
}
生产者1
package com.liu.kafka.kafkaspringboot.controller;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.SendResult;
import org.springframework.util.concurrent.ListenableFuture;
import org.springframework.util.concurrent.ListenableFutureCallback;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;
import java.util.concurrent.ExecutionException;
/**
 * REST endpoint demonstrating an asynchronous (callback-based) Kafka send.
 */
@RestController
public class KafkaASyncController {

    @Autowired
    private KafkaTemplate<Integer, String> template;

    /**
     * Sends {message} to partition 0 of topic-spring-01 with key 1 and
     * registers a callback that logs the outcome; returns immediately.
     * (Method name kept for compatibility — it is a copy-paste of the sync
     * controller; the endpoint itself is the async one.)
     * Removed the unused {@code throws ExecutionException, InterruptedException}:
     * nothing in the asynchronous path throws either exception.
     */
    @RequestMapping("/send/async/{message}")
    public String sendSyncMessage(@PathVariable("message") String message) {
        ListenableFuture<SendResult<Integer, String>> future =
                template.send("topic-spring-01", 0, 1, message);
        // Asynchronous confirmation: the callback runs when the broker responds.
        future.addCallback(new ListenableFutureCallback<SendResult<Integer, String>>() {
            @Override
            public void onFailure(Throwable throwable) {
                System.err.println("发送消息失败: " + throwable);
            }

            @Override
            public void onSuccess(SendResult<Integer, String> result) {
                final RecordMetadata metadata = result.getRecordMetadata();
                System.out.println("发送消息成功: ");
                System.out.println("RecordMetadata topic : " + metadata.topic());
                System.out.println("RecordMetadata partition : " + metadata.partition());
                System.out.println("RecordMetadata offset : " + metadata.offset());
            }
        });
        return "success";
    }
}
生产者2
package com.liu.kafka.kafkaspringboot.controller;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.protocol.types.Field;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.SendResult;
import org.springframework.util.concurrent.ListenableFuture;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;
import java.util.concurrent.ExecutionException;
/**
 * REST endpoint demonstrating a synchronous (blocking) Kafka send.
 */
@RestController
public class KafkaSyncController {

    @Autowired
    private KafkaTemplate<Integer, String> template;

    /**
     * Sends {message} to partition 0 of topic-spring-01, blocks until the
     * broker acknowledges, logs the returned metadata, and replies "success".
     */
    @RequestMapping("/send/sync/{message}")
    public String sendSyncMessage(@PathVariable("message") String message) throws ExecutionException, InterruptedException {
        // Synchronous confirmation: block on the future until the send completes.
        final SendResult<Integer, String> sendResult =
                template.send("topic-spring-01", 0, message).get();
        final RecordMetadata meta = sendResult.getRecordMetadata();
        System.out.println("RecordMetadata topic : " + meta.topic());
        System.out.println("RecordMetadata partition : " + meta.partition());
        System.out.println("RecordMetadata offset : " + meta.offset());
        return "success";
    }
}
消费者
package com.liu.kafka.kafkaspringboot.consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.stereotype.Component;
/**
 * Listener that logs every record received from topic-spring-01
 * (topic, partition, key, value, offset — tab-separated).
 */
@Component
public class MyConsumer {

    @KafkaListener(topics = "topic-spring-01")
    public void onMessage(ConsumerRecord<Integer, String> record) {
        final StringBuilder line = new StringBuilder("收到消息: ");
        line.append(record.topic()).append('\t')
            .append(record.partition()).append('\t')
            .append(record.key()).append('\t')
            .append(record.value()).append('\t')
            .append(record.offset());
        System.out.println(line);
    }
}