Kafka发布订阅举例

摘要

kafka的发布订阅模式本人认为可以通过两种示例来表达:
(1)在微服务内部实现发布,在需要使用该信息的微服务组件中订阅。这个方法能较好地实现各个服务之间的解耦。
(2)单独做成一个微服务,暴露出一个发布的接口,当每个微服务有发布需求的时候直接调用这个发布接口就好。但是这就使得运维成本加大,不过服务使用方便,最关键的是开发人员在发布消息的时候需要编写的代码较少,能较好的保证编译通过。

第一种模式

1、配置

1.1 pom依赖

<!-- Kafka broker/server classes (Scala 2.13 build, matching clients 2.4.1).
     NOTE: the old kafka_2.11:0.8.2.1 dependency was removed — having two builds of
     the same "kafka" artifact on the classpath ships conflicting copies of the same
     classes and makes runtime behavior unpredictable. -->
<dependency>

 <groupId>org.apache.kafka</groupId>

 <artifactId>kafka_2.13</artifactId>

 <version>2.4.1</version>

 <exclusions>

  <!-- Exclude the bundled SLF4J binding so it cannot clash with the
       application's own logging backend (e.g. logback). -->
  <exclusion>

   <groupId>org.slf4j</groupId>

   <artifactId>slf4j-log4j12</artifactId>

  </exclusion>

 </exclusions>

</dependency>

<!-- Pure-Java client library used by producers and consumers. -->
<dependency>

 <groupId>org.apache.kafka</groupId>

 <artifactId>kafka-clients</artifactId>

 <version>2.4.1</version>

 <scope>compile</scope>

</dependency>

<!-- Required by the configuration/listener code below, which imports
     org.springframework.kafka.* (KafkaTemplate, @KafkaListener, ...).
     2.4.x of spring-kafka pairs with kafka-clients 2.4.x. -->
<dependency>

 <groupId>org.springframework.kafka</groupId>

 <artifactId>spring-kafka</artifactId>

 <version>2.4.5.RELEASE</version>

</dependency>

1.2 yml配置

kafka:
  servers: ${kafka-address}  # broker address list, injected per deployment environment
  consumer:
    auto:
      commit:
        interval:
          ms: 100  # offset auto-commit interval (ms)
    bootstrap:
      servers: ${kafka-address}
    enable:
      auto:
        commit: true  # offsets are committed automatically
    group:
      id: xxxB  # NOTE(review): the listener code uses groupId "xxxA" — confirm which is intended
    key:
      deserializer: org.apache.kafka.common.serialization.StringDeserializer
    maxPollRecords: 10000  # max records returned by a single poll()
    session:
      timeout:
        ms: 30000  # consumer session timeout (ms)
    value:
      deserializer: org.apache.kafka.common.serialization.StringDeserializer
  producer:
    acks: all  # wait for all in-sync replicas to acknowledge each send
    batch:
      size: 4096  # producer batch size, bytes
    bootstrap:
      servers: ${kafka-address}
    buffer:
      memory: 40960  # total producer buffer memory, bytes
    defaultTopic: Detail
    key:
      serializer: org.apache.kafka.common.serialization.StringSerializer
    linger:
      ms: 10  # wait up to 10 ms to fill a batch before sending
    retries: 3  # resend attempts on transient failures
    value:
      serializer: org.apache.kafka.common.serialization.StringSerializer
topics: Detail  # topic name; note the admin config below hard-codes "baeldung" — confirm

1.3 生产者配置处理代码

import java.util.HashMap;

import java.util.Map;

import org.apache.kafka.clients.producer.ProducerConfig;

import org.apache.kafka.common.serialization.StringSerializer;

import org.springframework.beans.factory.annotation.Value;

import org.springframework.context.annotation.Bean;

import org.springframework.context.annotation.Configuration;

import org.springframework.kafka.core.DefaultKafkaProducerFactory;

import org.springframework.kafka.core.KafkaTemplate;

import org.springframework.kafka.core.ProducerFactory;

/**
 * Spring configuration for the Kafka producer side: a {@link ProducerFactory}
 * backed by String serializers, plus the {@link KafkaTemplate} used to publish.
 */
@Configuration
public class KafkaProducerConfig {

  /** Broker address list, injected from {@code kafka.producer.bootstrap.servers}. */
  @Value(value = "${kafka.producer.bootstrap.servers}")
  private String bootstrapAddress;

  /**
   * Builds the producer factory with String key/value serializers bound to the
   * configured brokers.
   *
   * @return factory that creates Kafka producers for String records
   */
  @Bean
  public ProducerFactory<String, String> producerFactory() {
    Map<String, Object> settings = new HashMap<>();
    settings.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapAddress);
    settings.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    settings.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    return new DefaultKafkaProducerFactory<>(settings);
  }

  /**
   * High-level send API; delegates producer creation to {@link #producerFactory()}.
   *
   * @return template for String-keyed, String-valued records
   */
  @Bean
  public KafkaTemplate<String, String> kafkaTemplate() {
    return new KafkaTemplate<>(producerFactory());
  }
}

1.4 消费者配置处理代码

import java.util.HashMap;

import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerConfig;

import org.apache.kafka.common.serialization.StringDeserializer;

import org.springframework.beans.factory.annotation.Value;

import org.springframework.context.annotation.Bean;

import org.springframework.context.annotation.Configuration;

import org.springframework.kafka.annotation.EnableKafka;

import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;

import org.springframework.kafka.core.ConsumerFactory;

import org.springframework.kafka.core.DefaultKafkaConsumerFactory;

/**
 * Spring configuration for the Kafka consumer side: a {@link ConsumerFactory}
 * with String deserializers and the listener-container factory consumed by
 * {@code @KafkaListener}-annotated methods.
 */
@EnableKafka
@Configuration
public class KafkaConsumerConfig {

  /** Broker address list (note: shares the producer's property key). */
  @Value(value = "${kafka.producer.bootstrap.servers}")
  private String bootstrapAddress;

  /** Consumer group id, injected from {@code kafka.consumer.group.id}. */
  @Value(value = "${kafka.consumer.group.id}")
  private String groupId;

  /**
   * Builds the consumer factory with String key/value deserializers.
   *
   * @return factory that creates Kafka consumers for String records
   */
  @Bean
  public ConsumerFactory<String, String> consumerFactory() {
    Map<String, Object> settings = new HashMap<>();
    settings.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapAddress);
    settings.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
    settings.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    settings.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    return new DefaultKafkaConsumerFactory<>(settings);
  }

  /**
   * Listener-container factory used by the framework to run {@code @KafkaListener}
   * methods; backed by {@link #consumerFactory()}.
   *
   * @return concurrent container factory for String records
   */
  @Bean
  public ConcurrentKafkaListenerContainerFactory<String, String> kafkaListenerContainerFactory() {
    ConcurrentKafkaListenerContainerFactory<String, String> factory =
        new ConcurrentKafkaListenerContainerFactory<>();
    factory.setConsumerFactory(consumerFactory());
    return factory;
  }
}

1.5 topic配置处理代码

import java.util.HashMap;

import java.util.Map;

import org.apache.kafka.clients.admin.AdminClientConfig;

import org.apache.kafka.clients.admin.NewTopic;

import org.springframework.beans.factory.annotation.Value;

import org.springframework.context.annotation.Bean;

import org.springframework.context.annotation.Configuration;

import org.springframework.kafka.core.KafkaAdmin;

/**
 * Kafka admin configuration: registers a {@link KafkaAdmin} so that every
 * {@link NewTopic} bean is auto-created on the broker at startup.
 */
@Configuration
public class KafkaTopicConfig {

  /** Broker address list (note: shares the producer's property key). */
  @Value(value = "${kafka.producer.bootstrap.servers}")
  private String bootstrapAddress;

  /**
   * Topic to auto-create. Defaults to the previously hard-coded "baeldung", but can
   * now be driven by the yml "topics" property (e.g. "Detail") so code and
   * configuration agree.
   */
  @Value(value = "${topics:baeldung}")
  private String topicName;

  /**
   * Admin client used by Spring to create the declared topics.
   *
   * @return admin bound to the configured brokers
   */
  @Bean
  public KafkaAdmin kafkaAdmin() {
    Map<String, Object> configs = new HashMap<>();
    configs.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapAddress);
    return new KafkaAdmin(configs);
  }

  /** Single-partition, replication-factor-1 topic — suitable for demos only. */
  @Bean
  public NewTopic topic1() {
    return new NewTopic(topicName, 1, (short) 1);
  }
}

2 发布订阅

2.1生产者发布处理代码

发布模式采用异步发送回调处理的方式

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.serializer.SerializerFeature;
import com.cbest.bee.kafka.api.dto.dupower.kafka.KafkaCommonMessageDTO;
import com.cbest.bee.kafka.server.dupower.kafka.KafkaSendMessageService;
import lombok.extern.slf4j.Slf4j;
import org.apache.poi.ss.formula.functions.T;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.SendResult;
import org.springframework.stereotype.Service;
import org.springframework.util.concurrent.ListenableFuture;
import org.springframework.util.concurrent.ListenableFutureCallback;

/**
 * @author chen
 * @data 2020/4/14 14:12
 *
 * 发布消息到Kafka  ServiceImpl implements KafkaSendMessageService
 */
@Slf4j
@Service
public class KafkaSendMessage {
  @Autowired
  private KafkaTemplate<String, String> kafkaTemplate;
  
  protected static String toJSONString(Object jsonObj) {
    return JSON.toJSONStringWithDateFormat(jsonObj, "yyyy-MM-dd HH:mm:ss",
        SerializerFeature.DisableCircularReferenceDetect, SerializerFeature.WriteNullListAsEmpty,
        SerializerFeature.WriteNullStringAsEmpty, SerializerFeature.WriteNullBooleanAsFalse);
  }

  @Override
  public void kafkaSendMessage(KafkaCommonMessageDTO<String> data) {
      String SendDataJSON=toJSONString(data.getData());
      ListenableFuture<SendResult<String, String>> future = kafkaTemplate.send(data.getTopic(),SendDataJSON);
      future.addCallback(new ListenableFutureCallback<SendResult<String, String>>() {
        @Override
        public void onSuccess(SendResult<String, String> result) {
          log.info("消息发布成功,消息体:"+SendDataJSON);
        }
        @Override
        public void onFailure(Throwable ex) {
          log.info("消息发布失败,请重发息:"+SendDataJSON);
        }
      });

  }
}

2.2 消费者订阅处理代码

订阅是实时订阅

import com.cbest.bee.ict.server.dupower.service.ShdbPushInfoService;

import com.cbest.bee.res.api.vo.dupower.ConnectorStatusNotificationVO;

import lombok.extern.slf4j.Slf4j;

import org.springframework.beans.factory.annotation.Autowired;

import org.springframework.kafka.annotation.KafkaListener;

import org.springframework.stereotype.Component;

/**
 * Real-time Kafka subscriber: forwards each raw JSON message to the push-info service.
 */
@Slf4j
@Component
public class KafkaMessageListener {

  @Autowired
  private ShdbPushInfoService shdbPushInfoService;

  /**
   * Consumes messages from topic "Topic" as consumer group "xxxA".
   * NOTE(review): the topic and group here differ from the yml values
   * ("Detail" / "xxxB") — confirm which configuration is authoritative.
   *
   * @param gunJSON raw JSON payload received from Kafka
   */
  @KafkaListener(topics = "Topic", groupId = "xxxA")
  public void kafkaGunListener(String gunJSON) {
    shdbPushInfoService.shdbPushNotificationStationInfo(gunJSON);
    // Parameterized SLF4J logging instead of eager string concatenation.
    log.info("kafka订阅的订单数据:{}", gunJSON);
  }
}

    

第二种模式

这种方式是单独的微服务实现的
项目结构如图:
这是API结构
(图:API 项目结构示意图 —— 原文图片缺失,此处为占位说明)
配置采用yml文件的配置方式,并引入变量,这样便于在不同环境间修改使用。
下面是本人自定的消息公共类,各位可以参考:

import com.fasterxml.jackson.annotation.JsonProperty;
import java.io.Serializable;
import lombok.Data;


/**
 * Generic envelope for messages published to Kafka: carries the payload together
 * with its destination topic and consumer group id.
 *
 * @author chen
 * @date 2020/4/14 14:06
 * @param <T> type of the payload carried in {@code data}
 */
@Data
public class KafkaCommonMessageDTO<T> implements Serializable {

  private static final long serialVersionUID = -7872749328405692998L;
  @JsonProperty("Data")
  private T data;// payload of the published message
  @JsonProperty("Topic")
  private String topic;// destination Kafka topic
  @JsonProperty("GroupId")
  private String groupId;// consumer group id, used by subscribers
}

注意:pom文件配置不要出错,接口大家自己写一个就好,我就不贴出了,有不懂的可以留言问我,编译不通过请留言,我给你解释下。
本文采用的是异步回调的发送模式。

  • 6
    点赞
  • 12
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值