Configuring Kafka in Spring Boot (with SASL authentication)

1. pom.xml dependencies

<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
</dependency>
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
</dependency>
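
With spring-boot-starter-parent (or the Spring Boot dependency BOM), both artifacts resolve to managed versions, so no <version> tags are needed; spring-kafka also pulls in kafka-clients transitively, so the second dependency only matters when you want to pin a specific client version. A sketch of pinning explicit versions (the version numbers here are placeholders, pick ones matching your Spring Boot release and broker):

<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
    <version>2.8.11</version> <!-- placeholder: align with your Spring Boot version -->
</dependency>
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>3.1.2</version> <!-- placeholder: align with your broker version -->
</dependency>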

2. application.properties / application.yml

Note: if your Kafka brokers do not require SASL username/password authentication, then

kafka.inner.security-protocol, kafka.inner.sasl-mechanism, kafka.producer.sasl-jaas-config, kafka.consumer.sasl-jaas-config

can all be omitted; the configuration class below checks whether these values are set before applying them.

#kafka
kafka.inner.bootstrap-servers=xxx.xx.xx.1:29092, xxx.xx.xx.2:29092, xxx.xx.xx.3:29092
kafka.inner.security-protocol=SASL_PLAINTEXT
kafka.inner.sasl-mechanism=PLAIN
#=============== producer  =======================
# Number of retries on a failed send. When a leader node goes down, a replica is promoted
# to leader, and writes can fail during the transition. With retries=0 the producer never
# resends (failed records may be lost); with retries>0 it resends once the new leader is in place.
kafka.producer.retries=0
# Producer JAAS credentials
kafka.producer.sasl-jaas-config=org.apache.kafka.common.security.plain.PlainLoginModule required username=\"producer\" password=\"123456\";
# Batch size in bytes; the producer accumulates records up to this size and sends them in one request
kafka.producer.batch-size=16384
# Total memory (bytes) the producer may use to buffer records waiting to be sent
kafka.producer.buffer-memory=33554432
kafka.producer.linger-ms=5
#=============== consumer  =======================
# Default consumer group id --> within one group, each message is delivered to only one
# consumer; group.id names the group
kafka.consumer.group-id=kafkaGroup
# Consumer JAAS credentials
kafka.consumer.sasl-jaas-config=org.apache.kafka.common.security.plain.PlainLoginModule required username=\"consumer\" password=\"123456\";
# Where to start when there is no committed offset: earliest reads from the beginning of the
# partition, latest reads from the end of the log. earliest is the usual choice.
kafka.consumer.auto-offset-reset=earliest
# enable.auto.commit=true --> commit offsets automatically
kafka.consumer.enable-auto-commit=true
# If enable.auto.commit is true, how often (in ms) offsets are committed to Kafka; default 5000
kafka.consumer.auto-commit-interval=100
kafka.consumer.max-poll-records=1000
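
For teams that prefer YAML, the same settings can be written in application.yml. A sketch mirroring the properties above (these are the custom keys read by @Value in the configuration class below, not Spring Boot's built-in spring.kafka.* settings; note the JAAS quotes need no backslash escaping in YAML):

kafka:
  inner:
    bootstrap-servers: xxx.xx.xx.1:29092, xxx.xx.xx.2:29092, xxx.xx.xx.3:29092
    security-protocol: SASL_PLAINTEXT
    sasl-mechanism: PLAIN
  producer:
    retries: 0
    sasl-jaas-config: org.apache.kafka.common.security.plain.PlainLoginModule required username="producer" password="123456";
    batch-size: 16384
    buffer-memory: 33554432
    linger-ms: 5
  consumer:
    group-id: kafkaGroup
    sasl-jaas-config: org.apache.kafka.common.security.plain.PlainLoginModule required username="consumer" password="123456";
    auto-offset-reset: earliest
    enable-auto-commit: true
    auto-commit-interval: 100
    max-poll-records: 1000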

3. KafkaConfiguration.java

@Value("${kafka.inner.bootstrap-servers}")
    private String bootstrapServers;
    @Value("${kafka.inner.security-protocol}")
	private String kafkaSecurityProtocol;
	@Value("${kafka.inner.sasl-mechanism}")
	private String kafkaSASLMechanism;

    @Value("${kafka.consumer.group-id}")
    private String groupId;
    @Value("${kafka.consumer.sasl-jaas-config}")
    private String kafkaConsumerSASLJaasConfig;
    @Value("${kafka.consumer.auto-offset-reset}")
    private String autoOffsetReset;
    @Value("${kafka.consumer.max-poll-records}")
    private String maxPollRecords;
    @Value("${kafka.producer.retries}")
    private String producerRetries;
    @Value("${kafka.producer.sasl-jaas-config}")
    private String kafkaProducerSASLJaasConfig;
    @Value("${kafka.producer.batch-size}")
    private String producerBatchSize;
    @Value("${kafka.producer.linger-ms}")
    private String producerLingerMs;
    @Value("${kafka.producer.buffer-memory}")
    private String bufferMemory;


    @Bean
    public KafkaListenerContainerFactory<?> batchFactory(){
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new
                ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(new DefaultKafkaConsumerFactory<>(consumerConfigs()));
        factory.setBatchListener(true); // enable batch listening (the listener then receives a List of records)
        return factory;
    }

    @Bean
    public Map<String, Object> consumerConfigs() {
        Map<String, Object> config = new HashMap<>();
        // Kafka broker addresses
        config.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        // Consumer group id
        config.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        config.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
        config.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
        // Auto-commit offsets every 100 ms (matches the properties above)
        config.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
        config.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "100");
        config.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "15000");
        config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        // Apply the SASL settings only when they are actually configured
        if (!StringUtils.isEmpty(kafkaSecurityProtocol) && !StringUtils.isEmpty(kafkaSASLMechanism)
                && !StringUtils.isEmpty(kafkaConsumerSASLJaasConfig)) {
            config.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, kafkaSecurityProtocol);
            config.put(SaslConfigs.SASL_MECHANISM, kafkaSASLMechanism);
            config.put(SaslConfigs.SASL_JAAS_CONFIG, kafkaConsumerSASLJaasConfig);
        }
        return config;
    }

    @Bean
    public ProducerFactory<String, String> producerFactory() {
        Map<String, Object> properties = new HashMap<>();
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        // Retries; 0 disables resending on failure
        properties.put(ProducerConfig.RETRIES_CONFIG, producerRetries);
        // Batch size in bytes; default 16384
        properties.put(ProducerConfig.BATCH_SIZE_CONFIG, producerBatchSize);
        // Wait up to 5 ms so records can be batched; fewer requests, higher throughput; default 0
        properties.put(ProducerConfig.LINGER_MS_CONFIG, producerLingerMs);
        // Total memory (bytes) the producer may use to buffer records awaiting send; default 33554432
        properties.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory);
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        // Apply the SASL settings only when they are actually configured
        if (!StringUtils.isEmpty(kafkaSecurityProtocol) && !StringUtils.isEmpty(kafkaSASLMechanism)
                && !StringUtils.isEmpty(kafkaProducerSASLJaasConfig)) {
            properties.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, kafkaSecurityProtocol);
            properties.put(SaslConfigs.SASL_MECHANISM, kafkaSASLMechanism);
            properties.put(SaslConfigs.SASL_JAAS_CONFIG, kafkaProducerSASLJaasConfig);
        }
        return new DefaultKafkaProducerFactory<>(properties);
    }

    @Bean
    public KafkaTemplate<String, String> kafkaTemplate() {
        return new KafkaTemplate<>(producerFactory());
    }
}
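
One caveat: batchFactory is the only listener container factory defined above, and the single-record @KafkaListener in step 5 falls back to the default kafkaListenerContainerFactory bean, which Spring Boot auto-configures from its own spring.kafka.* properties rather than from the custom keys here, so it would not carry the SASL settings. A sketch of a bean you could add inside KafkaConfiguration to cover that case (kafkaListenerContainerFactory is the bean name @KafkaListener looks up by default):

    @Bean
    public KafkaListenerContainerFactory<?> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        // Reuse the same consumer settings (including SASL) as the batch factory,
        // but leave batch listening off for single-record listeners
        factory.setConsumerFactory(new DefaultKafkaConsumerFactory<>(consumerConfigs()));
        return factory;
    }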

4. Kafka producer

// Inject the KafkaTemplate built in the configuration class above
@Autowired
private KafkaTemplate<String, String> kafkaTemplate;

// Send directly with the send methods; the sendDefault variants require a default
// topic to have been set on the template first (kafkaTemplate.setDefaultTopic(...))

kafkaTemplate.send(topic, message);
kafkaTemplate.send(topic, key, message);
kafkaTemplate.sendDefault(message);
kafkaTemplate.sendDefault(key, message);
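
Putting that together, a minimal producer service sketch (the class name KafkaProducerService is made up for illustration; the blocking get(...) works whether send returns a ListenableFuture in spring-kafka 2.x or a CompletableFuture in 3.x, since both implement Future):

import java.util.concurrent.TimeUnit;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.stereotype.Service;

@Service
public class KafkaProducerService {

    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;

    public void sendMessage(String topic, String key, String message) {
        try {
            // Block until the broker acknowledges, so send failures surface here
            kafkaTemplate.send(topic, key, message).get(10, TimeUnit.SECONDS);
        } catch (Exception e) {
            throw new IllegalStateException("Failed to send message to topic " + topic, e);
        }
    }
}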

5. Kafka consumer

// Receiving records one at a time
@KafkaListener(topics = "topic")
public void onMessage(String message){
   logger.info("Received message: {}", message);
}

// Receiving records in batches; this requires containerFactory to point at the
// batchFactory bean defined in the configuration class above
@KafkaListener(topics = "topic", containerFactory = "batchFactory")
public void onBatchMessage(List<ConsumerRecord<?, ?>> records){
   for (ConsumerRecord<?, ?> record : records) {
       logger.info("Received: " + record);
       Optional<?> kafkaMessage = Optional.ofNullable(record.value());
       if (kafkaMessage.isPresent()) {
            Object message = record.value();
            logger.info("Received message: {}", message);
       }
    }
}
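
For completeness, the fragments above assume a surrounding Spring bean and a logger; a self-contained sketch (the class name KafkaConsumerService and the topic name "topic" are placeholders):

import java.util.List;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.stereotype.Component;

@Component
public class KafkaConsumerService {

    private static final Logger logger = LoggerFactory.getLogger(KafkaConsumerService.class);

    // Batch listener wired to the batchFactory bean from KafkaConfiguration
    @KafkaListener(topics = "topic", containerFactory = "batchFactory")
    public void onBatchMessage(List<ConsumerRecord<String, String>> records) {
        for (ConsumerRecord<String, String> record : records) {
            logger.info("Received message from partition {} offset {}: {}",
                    record.partition(), record.offset(), record.value());
        }
    }
}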

 
