Kafka Consumer over SSL: Getting the Offset for a Given group.id


import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.config.SaslConfigs;
import org.apache.kafka.common.config.SslConfigs;

import java.io.IOException;
import java.io.InputStream;
import java.time.Duration;
import java.util.Arrays;
import java.util.Properties;

public class KafkaSslConsumer {

    private static volatile boolean isRunning = true;

    public static void main(String[] args) throws IOException {
        Properties environment = new Properties();
        // Load the connection settings from the classpath
        InputStream is = KafkaSslConsumer.class.getClassLoader().getResourceAsStream("config/application.properties");
        environment.load(is);

        // Configure SASL/SSL: point the JVM at the JAAS login file if it is not already set
        if (null == System.getProperty("java.security.auth.login.config")) {
            System.setProperty("java.security.auth.login.config", environment.getProperty("java.security.auth.login.config"));
        }

        Properties kafkaProps = new Properties();
        kafkaProps.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_SSL");
        kafkaProps.put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, environment.getProperty("ssl.truststore.location"));
        // Disable hostname verification; the broker certificate may not match the advertised hostname
        kafkaProps.put(SslConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG, "");
        kafkaProps.put(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, "KafkaOnsClient");
        kafkaProps.put(SaslConfigs.SASL_MECHANISM, "PLAIN");

        kafkaProps.put("bootstrap.servers", "h1:9093,h2:9093,h3:9093");
        kafkaProps.put("enable.auto.commit", false);  //注意这里设置为手动提交方式
        kafkaProps.put("group.id", "data-stream-group");
        kafkaProps.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        kafkaProps.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        String topic = "h3yun-bi-web-active-topic";

        Consumer<String, String> consumer = new KafkaConsumer<>(kafkaProps);
        consumer.subscribe(Arrays.asList(topic));

        try {
            while (isRunning) {
                ConsumerRecords<String, String> consumerRecords = consumer.poll(Duration.ofSeconds(1));
                if (!consumerRecords.isEmpty()) {
                    for (ConsumerRecord<String, String> consumerRecord : consumerRecords) {
                        System.out.println(
                                "TopicName: " + consumerRecord.topic() +
                                " Partition:" + consumerRecord.partition() +
                                " Offset:" + consumerRecord.offset());

                        String json = consumerRecord.value();
                        JSONObject p = JSON.parseObject(json);
                        String time = p.getString("event_timestamp");
                        System.out.println("-----receive---->" + time);
                        // business logic goes here
                    }
                    consumer.commitAsync(); // asynchronous commit after each processed batch
                }
            }
        } catch (Exception e) {
            // handle/log exceptions
            e.printStackTrace();
        } finally {
            try {
                // Commit synchronously on shutdown so the last offsets are not lost before close()
                consumer.commitSync();
            } finally {
                consumer.close();
            }
        }

    }
}
<!-- Kafka-related dependency; spark-streaming-kafka-0-10 transitively brings in kafka-clients -->
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-streaming-kafka-0-10_2.12</artifactId>
            <version>3.1.1</version>
            <scope>compile</scope>
        </dependency>
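
The listing above assumes two external pieces of configuration: a config/application.properties on the classpath and the JAAS login file it points to. A minimal sketch of both follows; every path and credential below is a placeholder, not a value from the original setup.

# config/application.properties (placeholder paths)
java.security.auth.login.config=/path/to/kafka_client_jaas.conf
ssl.truststore.location=/path/to/kafka.client.truststore.jks

// kafka_client_jaas.conf: SASL/PLAIN login module; username/password are placeholders
KafkaClient {
    org.apache.kafka.common.security.plain.PlainLoginModule required
    username="your_username"
    password="your_password";
};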

When the broker does not require SSL authentication, the Kafka data can be consumed directly, as in the listing below. Here Duration.ofSeconds(100) can be set to a fairly long value, so the poll is given enough time to actually return data.

package com.fairyproof.utils;

import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.config.SaslConfigs;
import org.apache.kafka.common.config.SslConfigs;

import java.io.IOException;
import java.io.InputStream;
import java.time.Duration;
import java.util.Arrays;
import java.util.Properties;

public class SetKafkaPos {

    private static final String KAFKA_SERVER = "127.0.0.1:9092";
    private static final String GROUP_ID = "your_groupID";
    private static final String TOPIC = "your_topic";

    private static volatile boolean isRunning = true;
    public static void main(String[] args) {
        Properties kafkaProps = new Properties();

        kafkaProps.put("bootstrap.servers", KAFKA_SERVER);
        kafkaProps.put("enable.auto.commit", false);  //注意这里设置为手动提交方式
        kafkaProps.put("group.id", groupId);
        kafkaProps.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        kafkaProps.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        Consumer<String, String> consumer = new KafkaConsumer<>(kafkaProps);
        consumer.subscribe(Arrays.asList(TOPIC));

        try {
            while (isRunning) {
                ConsumerRecords<String, String> consumerRecords = consumer.poll(Duration.ofSeconds(100));
                System.out.println("==count()==>"+consumerRecords.count());
                if (!consumerRecords.isEmpty()) {
                    for (ConsumerRecord<String, String> consumerRecord : consumerRecords) {
                        System.out.println(
                                " TopicName: " + consumerRecord.topic() +
                                " Partition:" + consumerRecord.partition() +
                                " Offset:" + consumerRecord.offset());
                        String json = consumerRecord.value();
                        System.out.println("-----receive---->" + json);
                        // business logic goes here
                    }
                    //consumer.commitAsync(); // asynchronous commit
                    break;  // stop after the first non-empty batch
                }
            }
        } catch (Exception e) {
            // handle/log exceptions
            System.out.println(e);
        } finally {
            try {
                // Commit synchronously on shutdown so the last offsets are not lost before close()
                consumer.commitSync();
            } finally {
                consumer.close();
            }
        }
    }
}
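
Neither listing actually reads back the offset that the group.id has committed, which is what the title is after. The sketch below shows one way to do it with the standard consumer API: committed() returns the group's committed offset per partition, and endOffsets() gives the log end, so the remaining lag can be printed too. The class name ShowGroupOffsets and the connection constants are placeholders, and committed(Set) requires kafka-clients 2.4 or later.

package com.fairyproof.utils;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Properties;

public class ShowGroupOffsets {

    public static void main(String[] args) {
        Properties kafkaProps = new Properties();
        kafkaProps.put("bootstrap.servers", "127.0.0.1:9092");  // placeholder
        kafkaProps.put("group.id", "your_groupID");             // the group whose offsets we want
        kafkaProps.put("enable.auto.commit", false);
        kafkaProps.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        kafkaProps.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        try (Consumer<String, String> consumer = new KafkaConsumer<>(kafkaProps)) {
            // Collect every partition of the topic
            List<TopicPartition> partitions = new ArrayList<>();
            for (PartitionInfo info : consumer.partitionsFor("your_topic")) {
                partitions.add(new TopicPartition(info.topic(), info.partition()));
            }
            // Offsets this group.id has committed (entry is null if nothing committed yet)
            Map<TopicPartition, OffsetAndMetadata> committed = consumer.committed(new HashSet<>(partitions));
            // Log-end offsets, so the remaining lag can be computed
            Map<TopicPartition, Long> endOffsets = consumer.endOffsets(partitions);
            for (TopicPartition tp : partitions) {
                OffsetAndMetadata om = committed.get(tp);
                long committedOffset = (om == null) ? 0 : om.offset();
                System.out.println(tp + " committed=" + (om == null ? "none" : om.offset())
                        + " end=" + endOffsets.get(tp)
                        + " lag=" + (endOffsets.get(tp) - committedOffset));
            }
        }
    }
}

To move the group's position instead of just reading it, the same consumer could assign() the partitions, seek() each one to the desired offset, and then commitSync() the new positions.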

 
