【Spring Cloud Alibaba】消息驱动 Kafka RocketMQ RabbitMQ Stream

本文详细介绍了如何在Spring Cloud Stream框架下集成Kafka、RocketMQ和RabbitMQ,包括环境搭建、Spring Boot集成、消息生产与消费。同时,文章探讨了Spring Cloud Stream的基本概念,如Topic、Producer和Consumer,并展示了自定义通信信道和策略的实现。
摘要由CSDN通过智能技术生成

SpringCloud Stream是一个用来为微服务应用构建消息驱动能力的框架。通过使用SpringCloud Stream,可以有效简化开发人员对消息中间件的使用复杂度,让开发人员有更多的精力来关注核心业务

1 Kafka

Apache Kafka 是一个开源分布式事件流平台,被数千家公司用于高性能数据管道、流分析、数据集成和任务关键型应用程序。
在这里插入图片描述

环境搭建参考 6.4 Kafka数据传输

1.1 Spring Boot 集成

单独创建模块(仅演示demo)

<dependencies>
    <dependency>
        <groupId>cn.flowboot.e.commerce</groupId>
        <artifactId>e-commerce-common</artifactId>
    </dependency>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-web</artifactId>
    </dependency>
    <dependency>
        <groupId>org.springframework.kafka</groupId>
        <artifactId>spring-kafka</artifactId>
    </dependency>
</dependencies>

配置可以直接写在配置文件中,也可以通过代码进行配置
yaml配置如下

server:
  port: 9191

spring:
  kafka:
    bootstrap-servers: 127.0.0.1:9092

代码方式配置(注:非必要,可直接在yml配置)

package cn.flowboot.e.commerce.config;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.*;

import java.util.HashMap;
import java.util.Map;

/**
 * <h1>Programmatic Kafka configuration</h1>
 * <h2>Alternative to the yml-based setup — not required</h2>
 *
 * <p>Kept disabled on purpose (the {@code @Configuration} annotation is commented
 * out) because the same settings are provided via {@code application.yml}.
 * Uncomment it to switch to code-based configuration.</p>
 *
 * @version 1.0
 * @author: Vincent Vic
 * @since: 2022/03/10
 */
//@Configuration
public class KafkaConfig {

    /** Broker list, injected from {@code spring.kafka.bootstrap-servers}. */
    @Value("${spring.kafka.bootstrap-servers}")
    private String bootstrapServers;

    /**
     * Producer factory using String serialization for both key and value.
     *
     * @return the {@link ProducerFactory} backing {@link #kafkaTemplate()}
     */
    @Bean
    public ProducerFactory<String, String> producerFactory() {
        Map<String, Object> producerProps = new HashMap<>();
        producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        return new DefaultKafkaProducerFactory<>(producerProps);
    }

    /**
     * Client used to publish messages to Kafka.
     *
     * @return a {@link KafkaTemplate} built on {@link #producerFactory()}
     */
    @Bean
    public KafkaTemplate<String, String> kafkaTemplate() {
        return new KafkaTemplate<>(producerFactory());
    }

    /**
     * Consumer factory using String deserialization for both key and value.
     * Polls at most 50 records per {@code poll()} call.
     *
     * @return the {@link ConsumerFactory} backing the listener container factory
     */
    @Bean
    public ConsumerFactory<String, String> consumerFactory() {
        Map<String, Object> consumerProps = new HashMap<>();
        consumerProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        consumerProps.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 50);
        consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        return new DefaultKafkaConsumerFactory<>(consumerProps);
    }

    /**
     * Container factory for {@code @KafkaListener} methods, running three
     * concurrent consumer threads per listener.
     *
     * @return the configured {@link ConcurrentKafkaListenerContainerFactory}
     */
    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, String> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> listenerFactory =
                new ConcurrentKafkaListenerContainerFactory<>();
        // Number of concurrent KafkaMessageListenerContainers (consumer threads).
        listenerFactory.setConcurrency(3);
        listenerFactory.setConsumerFactory(consumerFactory());
        return listenerFactory;
    }

}

启动类略

消息VO

package cn.flowboot.e.commerce.vo;

import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;

/**
 * <h1>Custom message object carried through Kafka</h1>
 *
 * <p>Serialized to/from JSON by the producer and consumer demos.</p>
 *
 * @version 1.0
 * @author: Vincent Vic
 * @since: 2022/03/10
 */
@AllArgsConstructor
@NoArgsConstructor
@Data
public class KafkaMessage {
   
    // Message identifier.
    private Integer id;
    // Name of the project that produced the message.
    private String projectName;
}

Kafka生产者

package cn.flowboot.e.commerce.kafka;

import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.SendResult;
import org.springframework.stereotype.Component;
import org.springframework.util.concurrent.ListenableFuture;

import java.util.concurrent.TimeUnit;

/**
 * <h1>Kafaka生产者</h1>
 *
 * @version 1.0
 * @author: Vincent Vic
 * @since: 2022/03/10
 */
@Slf4j
@RequiredArgsConstructor
@Component
public class KafkaProducer {
   

    private final KafkaTemplate<String,String> kafkaTemplate;

    /**
     * <h2> sendMessage - 发送Kafka消息<h2>
     * version: 1.0 - 2022/3/10
     * @param key 键
     * @param value 值
     * @param topic 主题
     */
    public void sendMessage(String key,String value,String topic){
   
        if (StringUtils.isBlank(value) || StringUtils.isBlank(topic)){
   
            throw new IllegalArgumentException("value or topic is null or empty");
        }
        ListenableFuture<SendResult<String, String>> future = StringUtils.isBlank(key)?
                kafkaTemplate.send(topic, value):kafkaTemplate.send(topic,key, value);

        // 异步回调的方式获取通知
        future.addCallback(
            success ->{
   
                assert null != success && null != success.getRecordMetadata();
                // 发送到 kafka 的 topic
                String _topic = success.getRecordMetadata().topic();
                // 消息发送到的分区
                int partition = success.getRecordMetadata().partition();
                // 消息在分区内的 offset
                long offset = success.getRecordMetadata().offset();

                log.info("send kafka message success: [{}], [{}], [{}]", _topic, partition, offset);
            },
            failure ->{
   
                log.error("send kafka message failure: [{}], [{}], [{}]", key, value, topic);
            }
        );

        // 同步等待的方式获取通知
        try {
   
            //SendResult<String, String> sendResult = future.get();
            SendResult<String, String> sendResult = future.get(5, TimeUnit.SECONDS);

            // 发送到 kafka 的 topic
            String _topic = sendResult.getRecordMetadata().topic();
            // 消息发送到的分区
            int partition = sendResult.getRecordMetadata().partition();
            // 消息在分区内的 offset
            long offset = sendResult.getRecordMetadata().offset();

            log.info("send kafka message success: [{}], [{}], [{}]",
                    _topic, partition, offset);
        } catch (Exception ex) {
   
            log.error("send kafka message failure: [{}], [{}], [{}]",
                    key, value, topic);
        }

    }
}

Kafka消费者

package cn.flowboot.e.commerce.kafka;

import cn.flowboot.e.commerce.vo.KafkaMessage;
import com.fasterxml.jackson.databind.ObjectMapper;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.stereotype.Component;

import java.util.Optional;

/**
 * <h1>Kafka消费者</h1>
 *
 * @version 1.0
 * @author: Vincent Vic
 * @since: 2022/03/10
 */
@Slf4j
@RequiredArgsConstructor
@Component
public class KafkaConsumer {
   

    private final ObjectMapper mapper;



    /**
     * <h2>Listens on the kafka-springboot topic and consumes each record:
     * deserializes the JSON payload into a {@link KafkaMessage} and logs it.</h2>
     * */
    @KafkaListener(topics = {"kafka-springboot"}, groupId = "springboot-kafka")
    public void listener01(ConsumerRecord<String, String> record) throws Exception {

        String recordKey = record.key();
        String payload = record.value();

        // Parse the JSON payload, then re-serialize it for structured logging.
        KafkaMessage message = mapper.readValue(payload, KafkaMessage.class);
        log.info("in listener01 consume kafka message: [{}], [{}]",
                recordKey, mapper.writeValueAsString(message));
    }

    /**
     * <h2>监听 Kafka 消息并消费</h2>
     * */
    @KafkaListener(topics = {
   "kafka-springboot"}, groupId = 
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值