Group coordinator cdhtest002.sendinfo.com:9092 (id: 2147483581 rack: null) is unavailable or invalid

今天我在Windows下写了一个scala代码,用spark去消费kafka的数据, 然后报Group coordinator cdhtest002.sendinfo.com:9092 (id: 2147483581 rack: null) is unavailable or invalid, will attempt rediscovery 这个错误。原因是客户端所在机器无法解析 Kafka 集群返回的 coordinator 主机名(hostname)对应的 IP,需要在本机配置 ip 和 hostname 的映射关系,解决办法如下:

Windows下修改C:\Windows\System32\drivers\etc\hosts文件,添加ip hostname

Linux 下修改/etc/hosts文件,添加ip hostname(注意是 /etc/hosts,不是 /etc/hostname;后者只存放本机主机名,不存放 ip 映射)

你可以使用 Kafka 的 Java API 来实现这个命令,具体实现如下: ```java import org.apache.kafka.clients.admin.AdminClient; import org.apache.kafka.clients.admin.AdminClientConfig; import org.apache.kafka.clients.admin.ConsumerGroupDescription; import org.apache.kafka.clients.admin.ConsumerGroupListing; import org.apache.kafka.clients.admin.ListConsumerGroupOffsetsResult; import org.apache.kafka.clients.admin.ListConsumerGroupsResult; import org.apache.kafka.clients.admin.ListOffsetsResult; import org.apache.kafka.clients.admin.OffsetSpec; import org.apache.kafka.clients.admin.TopicDescription; import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.KafkaConsumer; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.serialization.StringDeserializer; import java.time.Duration; import java.util.Collections; import java.util.Map; import java.util.Properties; import java.util.Set; import java.util.concurrent.ExecutionException; public class KafkaConsumerGroups { public static void main(String[] args) throws ExecutionException, InterruptedException { Properties adminProps = new Properties(); adminProps.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); AdminClient adminClient = AdminClient.create(adminProps); // List Consumer Groups ListConsumerGroupsResult consumerGroupsResult = adminClient.listConsumerGroups(); Set<ConsumerGroupListing> consumerGroups = consumerGroupsResult.all().get(); for (ConsumerGroupListing group : consumerGroups) { System.out.println("Consumer Group: " + group.groupId()); // Describe Consumer Group ConsumerGroupDescription consumerGroupDescription = adminClient.describeConsumerGroups(Collections.singleton(group.groupId())).all().get().get(group.groupId()); System.out.println("State: " + consumerGroupDescription.state()); System.out.println("Coordinator: " + consumerGroupDescription.coordinator().toString()); System.out.println("Members: " + consumerGroupDescription.members().size()); 
System.out.println("Topic Partitions:"); Map<String, TopicDescription> topicDescriptions = adminClient.describeTopics(consumerGroupDescription.members().stream().map(member -> member.assignment().topicPartitions().iterator().next().topic()).distinct().toArray(String[]::new)).all().get(); for (TopicDescription topicDescription : topicDescriptions.values()) { for (TopicPartition partition : topicDescription.partitions()) { System.out.println("\t" + topicDescription.name() + "-" + partition.partition() + ": " + consumerGroupDescription.assignment().partitionsForTopic(topicDescription.name()).contains(partition)); } } // List Consumer Group Offsets ListConsumerGroupOffsetsResult consumerGroupOffsetsResult = adminClient.listConsumerGroupOffsets(group.groupId()); Map<TopicPartition, Long> consumerGroupOffsets = consumerGroupOffsetsResult.partitionsToOffsetAndMetadata().get(); System.out.println("Consumer Group Offsets:"); for (Map.Entry<TopicPartition, Long> entry : consumerGroupOffsets.entrySet()) { System.out.println("\t" + entry.getKey().topic() + "-" + entry.getKey().partition() + ": " + entry.getValue()); } // List Latest Offsets Properties consumerProps = new Properties(); consumerProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); consumerProps.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false"); consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, group.groupId()); consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); KafkaConsumer<String, String> consumer = new KafkaConsumer<>(consumerProps); Map<TopicPartition, Long> latestOffsets = consumer.endOffsets(consumerGroupOffsets.keySet(), Duration.ofSeconds(5)); System.out.println("Latest Offsets:"); for (Map.Entry<TopicPartition, Long> entry : latestOffsets.entrySet()) { System.out.println("\t" + entry.getKey().topic() + "-" + entry.getKey().partition() + ": " + 
entry.getValue()); } // List Earliest Offsets Map<TopicPartition, Long> earliestOffsets = consumer.beginningOffsets(consumerGroupOffsets.keySet(), Duration.ofSeconds(5)); System.out.println("Earliest Offsets:"); for (Map.Entry<TopicPartition, Long> entry : earliestOffsets.entrySet()) { System.out.println("\t" + entry.getKey().topic() + "-" + entry.getKey().partition() + ": " + entry.getValue()); } } } } ``` 这个程序使用 Kafka 的 AdminClient 来获取消费者组信息、消费者组偏移量以及主题分区的最早和最新偏移量。你可以根据自己的需求修改程序,例如只获取特定的消费者组信息等。
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值