Configuring Kafka in a Spring Boot project

1. First, add the Kafka-related settings to the application configuration file (application.properties)
# Kafka broker address (replace ip with your broker host)
spring.kafka.bootstrap-servers=ip:9092

# Kafka consumer settings
spring.kafka.consumer.enable-auto-commit=true
spring.kafka.consumer.group-id=test-consumer-group
spring.kafka.consumer.auto-commit-interval=1000
spring.kafka.consumer.session.timeout.ms=1500

# Kafka producer settings
spring.kafka.producer.acks=all
spring.kafka.producer.retries=0
spring.kafka.producer.batch-size=16384
spring.kafka.producer.linger.ms=1
spring.kafka.producer.buffer-memory=33554432
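Most of these keys match Spring Boot's own spring.kafka.* binding; the dotted linger.ms and session.timeout.ms entries above are custom keys that are only picked up by the @Value lookups in the classes below. With spring-kafka on the classpath, the bound values could also be consumed through the auto-configured KafkaProperties bean instead of one @Value field per setting (and when no KafkaTemplate bean is defined at all, Spring Boot's autoconfiguration already creates one from these properties). A minimal sketch of that alternative, meant to replace rather than sit alongside the manual producer factory in step 2; the class name is illustrative:

package com.multi.store.manage.kafka.config;

import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;

/**
 * Sketch only: build the producer from the spring.kafka.* properties that
 * Spring Boot already binds to KafkaProperties.
 */
@Configuration
public class KafkaPropertiesExampleConfig {

    @Bean
    public KafkaTemplate<String, String> kafkaTemplate(KafkaProperties kafkaProperties) {
        // buildProducerProperties() merges bootstrap servers, acks, retries,
        // batch-size, buffer-memory and the (default String) serializers.
        DefaultKafkaProducerFactory<String, String> factory =
                new DefaultKafkaProducerFactory<>(kafkaProperties.buildProducerProperties());
        return new KafkaTemplate<>(factory);
    }
}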

2. Create the Kafka producer configuration

package com.multi.store.manage.kafka.config;

import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;

import java.util.HashMap;
import java.util.Map;

/**
 * Kafka producer configuration
 * @date 2018/8/21 9:17.
 */
@Configuration
@EnableKafka
public class KafkaProducerConfig {

    @Value("${spring.kafka.bootstrap-servers}")
    private String servers;
    @Value("${spring.kafka.producer.acks}")
    private String acks;
    @Value("${spring.kafka.producer.retries}")
    private String retries;
    @Value("${spring.kafka.producer.batch-size}")
    private String batchSize;
    @Value("${spring.kafka.producer.linger.ms}")
    private String lingerMs;
    @Value("${spring.kafka.producer.buffer-memory}")
    private String bufferMemory;

    public Map<String, Object> producerConfigs() {
        Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
        props.put(ProducerConfig.ACKS_CONFIG, acks);
        props.put(ProducerConfig.RETRIES_CONFIG, retries);
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize);
        props.put(ProducerConfig.LINGER_MS_CONFIG, lingerMs);
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory);
        // message keys and values are sent as plain strings
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        return props;
    }

    public ProducerFactory<String, String> producerFactory() {
        return new DefaultKafkaProducerFactory<>(producerConfigs());
    }

    @Bean
    public KafkaTemplate<String, String> kafkaTemplate() {
        return new KafkaTemplate<String, String>(producerFactory());
    }
}
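KafkaTemplate.send() is asynchronous, so nothing in this configuration reports whether the broker actually accepted a record. Below is a minimal sketch of a sender component that logs the acknowledgement or the failure; it assumes a spring-kafka 2.x release, where send() returns a ListenableFuture (recent 3.x versions return a CompletableFuture instead), and the class name is illustrative:

package com.multi.store.manage.kafka;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.SendResult;
import org.springframework.stereotype.Component;
import org.springframework.util.concurrent.ListenableFuture;

/**
 * Sketch only: wraps the KafkaTemplate defined above and logs the send outcome.
 */
@Component
public class KafkaMessageSender {

    private static final Logger LOGGER = LoggerFactory.getLogger(KafkaMessageSender.class);

    private final KafkaTemplate<String, String> kafkaTemplate;

    public KafkaMessageSender(KafkaTemplate<String, String> kafkaTemplate) {
        this.kafkaTemplate = kafkaTemplate;
    }

    public void send(String topic, String message) {
        // send() returns immediately; the callback fires once the broker
        // acknowledges the record or the send fails.
        ListenableFuture<SendResult<String, String>> future = kafkaTemplate.send(topic, message);
        future.addCallback(
                result -> LOGGER.info("Sent to {}-{} at offset {}",
                        result.getRecordMetadata().topic(),
                        result.getRecordMetadata().partition(),
                        result.getRecordMetadata().offset()),
                ex -> LOGGER.error("Failed to send message to {}", topic, ex));
    }
}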

3. Create the Kafka consumer configuration

package com.multi.store.manage.kafka.config;

import com.multi.store.manage.kafka.KafkaMessageListener;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;

import java.util.HashMap;
import java.util.Map;


/**
 * Kafka consumer configuration
 * @date 2018/8/20 18:20.
 */
@Configuration
@EnableKafka
public class KafkaConsumerConfig {
    @Value("${spring.kafka.bootstrap-servers}")
    private String servers;
    @Value("${spring.kafka.consumer.group-id}")
    private String groupId;
    @Value("${spring.kafka.consumer.enable-auto-commit}")
    private String enableAutoCommit;
    @Value("${spring.kafka.consumer.auto-commit-interval}")
    private String autoCommitInterval;
    @Value("${spring.kafka.consumer.session.timeout.ms}")
    private String sessionTimeoutMs;

    @Bean
    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        factory.getContainerProperties().setPollTimeout(1500);
        return factory;
    }

    public ConsumerFactory<String, String> consumerFactory() {
        return new DefaultKafkaConsumerFactory<>(consumerConfigs());
    }


    public Map<String, Object> consumerConfigs() {
        Map<String, Object> propsMap = new HashMap<>();
        propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
        propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit);
        propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, autoCommitInterval);
        // left commented out: 1500 ms is below the broker's default
        // group.min.session.timeout.ms (6000 ms) and would be rejected
//        propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeoutMs);
        propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        return propsMap;
    }

    @Bean
    public KafkaMessageListener listener() {
        return new KafkaMessageListener();
    }
}
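If the listened topic has several partitions, the container factory can also run more than one consumer thread. A sketch of the kafkaListenerContainerFactory() method from the class above as a drop-in replacement with concurrency added (the value 3 is illustrative and only pays off when the topic has at least that many partitions):

    @Bean
    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        // run three listener containers inside the concurrent container
        factory.setConcurrency(3);
        factory.getContainerProperties().setPollTimeout(1500);
        return factory;
    }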

4. Create the Kafka listener

package com.multi.store.manage.kafka;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.kafka.annotation.KafkaListener;

/**
 * Kafka listener
 * @date 2018/8/20 18:31.
 */
public class KafkaMessageListener {
   private static final Logger LOGGER = LoggerFactory.getLogger(KafkaMessageListener.class);
   /**
    * Listen on test-topics and log each received payload.
    * @createDate:2018/8/20 18:40
    */
   @KafkaListener(topics = {"test-topics"})
   public void listenTestTopic(ConsumerRecord<?, ?> record) {
      LOGGER.info("test-topics --> value received by the Kafka listener: {}", record.value().toString());
   }
}
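With enable-auto-commit=true, offsets are committed on a timer whether or not a record was processed successfully. A sketch of the manual-commit alternative, under the assumptions that spring.kafka.consumer.enable-auto-commit is changed to false, that the container factory's ack mode is set to MANUAL_IMMEDIATE via factory.getContainerProperties().setAckMode(...) (the AckMode enum lives on ContainerProperties in spring-kafka 2.3+ and on AbstractMessageListenerContainer in earlier 2.x releases), and that the class is registered as a bean like the listener above:

package com.multi.store.manage.kafka;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.Acknowledgment;

/**
 * Sketch only: commits the offset explicitly after the record has been handled,
 * so a failure during processing leads to redelivery instead of a lost message.
 */
public class ManualAckMessageListener {

    private static final Logger LOGGER = LoggerFactory.getLogger(ManualAckMessageListener.class);

    @KafkaListener(topics = {"test-topics"})
    public void listen(ConsumerRecord<?, ?> record, Acknowledgment ack) {
        LOGGER.info("processing value: {}", record.value());
        // commit only after successful processing
        ack.acknowledge();
    }
}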

5. Create a test controller

package com.multi.store.manage.utils.controller;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;

/**
 * Test controller that publishes a message to Kafka.
 * @date 2018/9/18 9:52.
 */
@RestController
@RequestMapping(value = "kafka")
public class KafkaController {
    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;


    @RequestMapping(value = "sendMessage")
    public String sendMessage(@RequestBody String msgJson) {
        // send to the same topic the listener in step 4 subscribes to
        kafkaTemplate.send("test-topics", msgJson);
        return "success";
    }
}
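With the application running, a POST request with any string body to /kafka/sendMessage should make the listener from step 4 log the same payload a moment later, which is a quick way to confirm that both the producer and the consumer configuration are wired correctly.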

 

Supplementary note: using Kafka from Spring Boot requires the relevant Kafka settings in the configuration file. A simple example configuration:

```
spring.kafka.bootstrap-servers=localhost:9092
spring.kafka.consumer.group-id=my-group
spring.kafka.consumer.auto-offset-reset=earliest
spring.kafka.consumer.key-deserializer=org.apache.kafka.common.serialization.StringDeserializer
spring.kafka.consumer.value-deserializer=org.apache.kafka.common.serialization.StringDeserializer
spring.kafka.producer.key-serializer=org.apache.kafka.common.serialization.StringSerializer
spring.kafka.producer.value-serializer=org.apache.kafka.common.serialization.StringSerializer
```

What these settings mean:

- `spring.kafka.bootstrap-servers`: the address of the Kafka cluster; separate multiple addresses with commas.
- `spring.kafka.consumer.group-id`: the ID of the consumer group the consumer belongs to.
- `spring.kafka.consumer.auto-offset-reset`: where the consumer starts reading when it has no initial offset; here it is set to the earliest offset.
- `spring.kafka.consumer.key-deserializer`: the deserializer used for message keys.
- `spring.kafka.consumer.value-deserializer`: the deserializer used for message values.
- `spring.kafka.producer.key-serializer`: the serializer used for message keys.
- `spring.kafka.producer.value-serializer`: the serializer used for message values.

Note: choose serializers and deserializers that match your payload. For JSON messages you can use `org.springframework.kafka.support.serializer.JsonSerializer` and `org.springframework.kafka.support.serializer.JsonDeserializer`.
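As a rough Java counterpart to the JSON note above, a template whose values go through spring-kafka's JsonSerializer could look like the sketch below; the class and bean names are illustrative, and Jackson must be on the classpath:

package com.multi.store.manage.kafka.config;

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.serializer.JsonSerializer;

/**
 * Sketch only: a second template that serializes payload objects to JSON.
 */
@Configuration
public class KafkaJsonProducerConfig {

    @Value("${spring.kafka.bootstrap-servers}")
    private String servers;

    @Bean
    public KafkaTemplate<String, Object> jsonKafkaTemplate() {
        Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        // JsonSerializer turns the payload object into a JSON byte array via Jackson
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, JsonSerializer.class);
        DefaultKafkaProducerFactory<String, Object> factory = new DefaultKafkaProducerFactory<>(props);
        return new KafkaTemplate<>(factory);
    }
}

Because this bean has a different value type than the String template from step 2, injecting the parameterized type KafkaTemplate<String, Object> (or using a qualifier) selects it.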
