JAVA Spring Boot集成kafka的使用 - 生产与消费消息
这里使用的是spring boot集成+kafka
1.生产消息
1.1 传统的消息生产者
传统的方式是需要手动创建Properties的配置对象去加入各种参数然后使用生产对象去发送消息
传统的方式需要在send方法中实现callback接口来获取发送成功或失败的状态
public class Producer {
static Logger log = Logger.getLogger(Producer.class);
private static final String TOPIC = "pikachu";
private static final String BROKER_LIST = "http://localhost/:9092";
private static KafkaProducer<String,String> producer = null;
/*
初始化生产者
*/
static {
Properties configs = initConfig();
producer = new KafkaProducer<String, String>(configs);
}
/*
初始化配置
*/
private static Properties initConfig(){
Properties properties = new Properties();
properties.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,BROKER_LIST);
properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,BROKER_LIST);
properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,"org.apache.kafka.common.serialization.StringSerializer");
properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,"org.apache.kafka.common.serialization.StringSerializer");
properties.put("client. id", "producer. client. id. demo");//加上groupid
return properties;
}
public static void main(String[] args) throws InterruptedException {
//消息实体
ProducerRecord<String , String> record = null;
record = new ProducerRecord<String, String>(TOPIC, "value:testValue";
//发送消息,实现callback接口判断是否成功或失败
producer.send(record, new Callback() {
@Override
public void onCompletion(RecordMetadata recordMetadata, Exception e) {
if (null != e){
log.info("发送失败" + e.getMessage());
}else {
log.info("发送成功"+String.format("offset:%s,partition:%s",recordMetadata.offset(),recordMetadata.partition()));
}
}
});
producer.close();
}
}
1.2 集成的消息生产者
集成的方式没有直接使用kafka的生产对象去发送消息,而是使用spring下的KafkaTemplate类发送消息,可以直接用监听返回对象获取发送后的状态
@Component
@Slf4j
public class JinkxProducer {
@Autowired
private KafkaTemplate<String, Object> kafkaTemplate; //spring framework封装的kafka的包
//自定义topic
public static final String TOPIC_TEST = "topic.test";
public void send(Object obj) {
String obj2String = JSONObject.toJSONString(obj);
log.info("准备发送消息为:{}", obj2String);
//发送消息,监听返回对象包装 实现成功跟失败后的接口
ListenableFuture<SendResult<String, Object>> future = kafkaTemplate.send(TOPIC_TEST, obj);
future.addCallback(new ListenableFutureCallback<SendResult<String, Object>>() {
@Override
public void onFailure(Throwable throwable) {
//失败的处理
log.info(TOPIC_TEST + " - 生产者 发送消息失败:" + throwable.getMessage());
}
@Override
public void onSuccess(SendResult<String, Object> stringObjectSendResult) {
//成功的处理
log.info(TOPIC_TEST + " - 生产者 发送消息成功:" + stringObjectSendResult.toString());
}
});
}
}
2.消费消息
2.1 集成的消息消费
从任意来源获取到kafka的主题后,利用kafka监听的注解来监听数据从而进行消费,注意监听的主题topic一定要正确
/**
 * Manually-acknowledged listeners for {@code topic.test}.
 * NOTE(review): the {@code Acknowledgment} parameter requires the listener container's
 * ack-mode to be MANUAL/MANUAL_IMMEDIATE — confirm against the container factory config.
 *
 * @author huhan
 * @Date 2022/1/12
 * @apiNote
 */
@Component
@Slf4j
public class KafkaConsumer {

    // Subscribed topic — must exactly match the topic the producer publishes to.
    public static final String TOPIC_TEST = "topic.test";

    @KafkaListener(topics = TOPIC_TEST)
    public void topic_test(ConsumerRecord<?, ?> record, Acknowledgment ack,
                           @Header(KafkaHeaders.RECEIVED_TOPIC) String topic) {
        consume(record, ack, topic);
    }

    // @Header extracts the topic name from the message headers via the KafkaHeaders constants.
    @KafkaListener(topics = TOPIC_TEST)
    public void topic_test1(ConsumerRecord<?, ?> record, Acknowledgment ack,
                            @Header(KafkaHeaders.RECEIVED_TOPIC) String topic) {
        consume(record, ack, topic);
    }

    /** Shared handling: log a non-null payload and manually acknowledge the offset. */
    private void consume(ConsumerRecord<?, ?> record, Acknowledgment ack, String topic) {
        Optional<?> message = Optional.ofNullable(record.value());
        if (message.isPresent()) {
            Object msg = message.get();
            log.info("消费者 消费了: Topic:" + topic + ",Message:" + msg);
            ack.acknowledge();
        }
    }
}
未完待续…