java获取kafka的元数据信息

1,java代码获取kafka的基础信息,查出所有的topic,然后遍历信息
摘要由CSDN通过智能技术生成

1,java代码获取kafka的基础信息,查出所有的topic,然后遍历信息

package kafkamonitor;

import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.google.common.collect.Lists;

import com.google.common.collect.Maps;

import kafka.api.PartitionOffsetRequestInfo;

import kafka.common.TopicAndPartition;

import kafka.javaapi.OffsetResponse;

import kafka.javaapi.PartitionMetadata;

import kafka.javaapi.TopicMetadata;

import kafka.javaapi.TopicMetadataRequest;

import kafka.javaapi.consumer.SimpleConsumer;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.slf4j.Logger;

import org.slf4j.LoggerFactory;
 

import java.util.*;



public class KafkaOffset_test {

    private static final Logger LOGGER = LoggerFactory.getLogger(KafkaOffset_test.class);

    /**
     * Fetches a single offset for one topic partition via the legacy
     * {@code SimpleConsumer} offset API.
     *
     * @param consumer   SimpleConsumer connected to the partition's leader broker
     * @param whichTime  timestamp selector passed to {@link PartitionOffsetRequestInfo}
     *                   (e.g. {@code kafka.api.OffsetRequest.LatestTime()})
     * @param clientName client id reported to the broker in the request
     * @return the first offset returned by the broker, or 0 when the broker
     *         reports an error or returns no offsets
     */
    public static long getLastOffset(SimpleConsumer consumer, String topic, int partition, long whichTime, String clientName) {
        TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);

        // Typed map instead of a raw one; request exactly one offset entry.
        Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<>();
        requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));

        kafka.javaapi.OffsetRequest request =
                new kafka.javaapi.OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName);
        OffsetResponse response = consumer.getOffsetsBefore(request);

        if (response.hasError()) {
            // Parameterized logging avoids building the message when the level is disabled.
            LOGGER.error("Error fetching offset data from the broker. Reason: {}", response.errorCode(topic, partition));
            return 0;
        }

        long[] offsets = response.offsets(topic, partition);
        // Guard against an empty result so we never throw ArrayIndexOutOfBoundsException.
        if (offsets.length == 0) {
            LOGGER.warn("Broker returned no offsets for {}-{}", topic, partition);
            return 0;
        }
        return offsets[0];
    }

    /**
     * Queries each broker in turn with a legacy TopicMetadataRequest to collect
     * partition metadata for {@code topic}.
     *
     * NOTE(review): this method is truncated in the captured source — the final
     * statement is cut off mid-token, so the remainder of the loop body (metadata
     * handling, consumer cleanup, return) is not visible here and must be
     * recovered from the original article before this code can compile.
     *
     * @param brokers broker addresses, each expected in "host:port" form
     * @param topic   topic whose partition metadata is being looked up
     * @return map of partition id to {@link PartitionMetadata}
     */

    public static Map findLeader(List<String> brokers, String topic) {
        Map<Integer,PartitionMetadata> map = Maps.newHashMap();

        for (String broker : brokers) {
            SimpleConsumer consumer = null;

            try {
                // Split "host:port" into connection parameters.
                String[] hostAndPort = broker.split(":");

                // 100000 and 64 * 1024 are presumably the socket timeout (ms) and
                // receive buffer size — TODO confirm against the SimpleConsumer API.
                // The client id is made unique with a timestamp suffix.
                consumer = new SimpleConsumer(hostAndPort[0], Integer.parseInt(hostAndPort[1]), 100000, 64 * 1024, "leaderLookup" + new Date().getTime());

                List topics = Lists.newArrayList(topic);

                TopicMetadataRequest req = new TopicMetadata
  • 0
    点赞
  • 4
    收藏
    觉得还不错? 一键收藏
  • 1
    评论
可以使用 Kafka Java API 中的 AdminClient 类来获取 Kafka topic 存储数据的元数据信息。具体实现代码如下:

```java
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutionException;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.DescribeTopicsResult;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.TopicPartitionInfo;

public class KafkaTopicMetadata {
    public static void main(String[] args) throws InterruptedException, ExecutionException {
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "localhost:9092");
        try (AdminClient adminClient = AdminClient.create(props)) {
            String topicName = "test-topic";
            DescribeTopicsResult describeTopicsResult = adminClient.describeTopics(Collections.singleton(topicName));
            KafkaFuture<Map<String, TopicDescription>> kafkaFuture = describeTopicsResult.all();
            Map<String, TopicDescription> topicDescriptionMap = kafkaFuture.get();
            TopicDescription topicDescription = topicDescriptionMap.get(topicName);
            List<TopicPartitionInfo> topicPartitionInfoList = topicDescription.partitions();
            System.out.println("Topic: " + topicName);
            System.out.println("Partitions: " + topicPartitionInfoList.size());
            for (TopicPartitionInfo topicPartitionInfo : topicPartitionInfoList) {
                Node leader = topicPartitionInfo.leader();
                List<Node> replicas = topicPartitionInfo.replicas();
                List<Node> isr = topicPartitionInfo.isr();
                System.out.println("Partition: " + topicPartitionInfo.partition());
                System.out.println("Leader: " + leader.host() + ":" + leader.port());
                System.out.println("Replicas: " + replicas);
                System.out.println("Isr: " + isr);
            }
        }
    }
}
```

以上代码中,我们首先创建一个 AdminClient 对象,然后调用其 describeTopics 方法获取指定 topic 的元数据信息。接着,我们从返回的 Map 中取出对应的 TopicDescription 对象,进而获取其包含的 Partition 信息。最后,我们遍历 Partition 信息获取各个 Partition 的 Leader、Replicas 和 Isr 等信息。(注:原文代码缺少 java.util.Collections、java.util.List 和 java.util.Map 的 import,上面已补齐。)
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值