Verifying the Kafka SSL certificate

Add the Kafka client dependency to pom.xml

	  <dependency>
		  <groupId>org.apache.kafka</groupId>
		  <artifactId>kafka-clients</artifactId>
		  <version>3.1.2</version>
	  </dependency>
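
Before wiring the JKS files into the Kafka client, it can help to confirm that the keystore and truststore can actually be opened with the certificate password. Below is a minimal sketch using the JDK KeyStore API; the paths and password are the same example values used in the test code further down and are assumptions to be replaced with your own.

import java.io.FileInputStream;
import java.security.KeyStore;
import java.util.Collections;

public class KeystoreCheck {

    public static void main(String[] args) throws Exception {
        // Example values only (assumed); replace with your actual password and paths
        String certPassword = "zhozhozhozhozhozho";
        String[] stores = {
                "D:\\SSL\\zhou02\\zhou02.keystore.jks",
                "D:\\SSL\\zhou02\\zhou02.truststore.jks"
        };

        for (String path : stores) {
            KeyStore keyStore = KeyStore.getInstance("JKS");
            try (FileInputStream in = new FileInputStream(path)) {
                // load() throws if the file is missing, corrupt, or the password is wrong
                keyStore.load(in, certPassword.toCharArray());
            }
            System.out.println(path + " loaded, aliases: " + Collections.list(keyStore.aliases()));
        }
    }
}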

Test code

import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.config.SslConfigs;

import java.time.Duration;
import java.util.ArrayList;
import java.util.Properties;


public class ConsumerManage {

    private static KafkaConsumer<String, String> kafkaConsumer;

    public static KafkaConsumer<String, String> getKafkaConsumer()  {
        // Test configuration values below; replace them with your actual settings
        String groupId = "group-zhou02";
        String bootstrapServers = "192.168.1.176:9093";
        String certPassword = "zhozhozhozhozhozho";
        String sslKeystore = "D:\\SSL\\zhou02\\zhou02.keystore.jks";
        String sslTruststore = "D:\\SSL\\zhou02\\zhou02.truststore.jks";
        if (kafkaConsumer == null) {
            Properties properties=new Properties();
            properties.put(CommonClientConfigs.GROUP_ID_CONFIG, groupId);
            properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
            properties.put(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, sslKeystore);
            properties.put(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, certPassword);
            properties.put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG,sslTruststore);
            properties.put(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, certPassword);
            properties.put(SslConfigs.SSL_KEY_PASSWORD_CONFIG, certPassword);
            properties.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SSL");
            properties.put(SslConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG, "");

            // Whether offsets are committed automatically
            properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
            // Interval in milliseconds between automatic offset commits
            properties.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
            properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
            /**
             * If the consumer does not send a heartbeat to the group coordinator within this
             * interval, it is considered dead and the coordinator triggers a rebalance,
             * reassigning its partitions to other consumers.
             */
            properties.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "30000");
            properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
            properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
            kafkaConsumer = new KafkaConsumer<>(properties);
        }
        return kafkaConsumer;
    }

    public void consume(String topic){
        kafkaConsumer = getKafkaConsumer();

        // Subscribe to the topic
        ArrayList<String> topics = new ArrayList<>();
        topics.add(topic);
        kafkaConsumer.subscribe(topics);

        // Consume data
        while (true){
            ConsumerRecords<String, String> consumerRecords = kafkaConsumer.poll(Duration.ofSeconds(1));
            for (ConsumerRecord<String, String> consumerRecord : consumerRecords) {
                System.out.println("开始消费");
                System.out.println(consumerRecord.value());
                // Business processing logic goes here
                // To commit offsets manually, set enable.auto.commit above to false and uncomment the line below
                //kafkaConsumer.commitSync();
            }
        }
    }

    public static void main(String[] args) {
       new ConsumerManage().consume("topic-zhou02");
    }


}
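
For an end-to-end verification of the certificate, the consumer above needs messages to read. The sketch below is a matching SSL producer that sends a few test records to the same topic; it reuses the example bootstrap address, certificate paths, password, and topic name from the consumer configuration, all of which are assumptions to replace with your own values.

import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.config.SslConfigs;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.Properties;

public class ProducerManage {

    public static void main(String[] args) {
        // Same example SSL settings as the consumer above (assumed); replace with your own
        String bootstrapServers = "192.168.1.176:9093";
        String certPassword = "zhozhozhozhozhozho";
        String sslKeystore = "D:\\SSL\\zhou02\\zhou02.keystore.jks";
        String sslTruststore = "D:\\SSL\\zhou02\\zhou02.truststore.jks";

        Properties properties = new Properties();
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        properties.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SSL");
        properties.put(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, sslKeystore);
        properties.put(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, certPassword);
        properties.put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, sslTruststore);
        properties.put(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, certPassword);
        properties.put(SslConfigs.SSL_KEY_PASSWORD_CONFIG, certPassword);
        // Hostname verification disabled only for testing, matching the consumer config
        properties.put(SslConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG, "");
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(properties)) {
            for (int i = 0; i < 5; i++) {
                producer.send(new ProducerRecord<>("topic-zhou02", "test-message-" + i));
            }
            // flush() blocks until the batch is sent, so an SSL handshake failure
            // surfaces here as an exception rather than being dropped silently
            producer.flush();
            System.out.println("Test messages sent over SSL");
        }
    }
}

Running this producer first and then ConsumerManage should print the test messages, which confirms that the keystore and truststore are accepted on both sides of the SSL connection.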



 
