I'm using Kafka version 0.10.1.
Method 1: KafkaConsumer.listTopics()
public class KafkaConsumer<K, V> implements Consumer<K, V> {

    @Override
    public Map<String, List<PartitionInfo>> listTopics() {
        acquire();
        try {
            return fetcher.getAllTopicMetadata(requestTimeoutMs);
        } finally {
            release();
        }
    }
}
KafkaConsumer is the entry class of the consumer API: construct one and call its listTopics() method, shown above. I won't dwell on how to configure the consumer here.
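Still, for reference, here is a minimal sketch of this approach (the bootstrap address, group id, and deserializers are placeholder choices; adapt them to your cluster):

import java.util.List;
import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.PartitionInfo;

public class ListTopicsDemo {

    public static void main(String[] args) {
        Properties props = new Properties();
        // change the bootstrap address to your own broker
        props.put("bootstrap.servers", "localhost:9092");
        props.put("group.id", "list-topics-demo");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        try {
            // topic name -> partition metadata, for every topic in the cluster
            Map<String, List<PartitionInfo>> topics = consumer.listTopics();
            for (String topic : topics.keySet()) {
                System.out.println(topic);
            }
        } finally {
            consumer.close();
        }
    }
}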
Method 2: the kafka-topics script
You can list all of Kafka's topics from the shell with the kafka-topics script. The entry class behind this command is kafka.admin.TopicCommand, whose main method looks like this:
def main(args: Array[String]): Unit = {
  val opts = new TopicCommandOptions(args)

  if(args.length == 0)
    CommandLineUtils.printUsageAndDie(opts.parser, "Create, delete, describe, or change a topic.")

  // should have exactly one action
  val actions = Seq(opts.createOpt, opts.listOpt, opts.alterOpt, opts.describeOpt, opts.deleteOpt).count(opts.options.has _)
  if(actions != 1)
    CommandLineUtils.printUsageAndDie(opts.parser, "Command must include exactly one action: --list, --describe, --create, --alter or --delete")

  opts.checkArgs()

  val zkUtils = ZkUtils(opts.options.valueOf(opts.zkConnectOpt),
                        30000,
                        30000,
                        JaasUtils.isZkSecurityEnabled())
  var exitCode = 0
  try {
    if(opts.options.has(opts.createOpt))
      createTopic(zkUtils, opts)
    else if(opts.options.has(opts.alterOpt))
      alterTopic(zkUtils, opts)
    else if(opts.options.has(opts.listOpt))
      listTopics(zkUtils, opts)
    else if(opts.options.has(opts.describeOpt))
      describeTopic(zkUtils, opts)
    else if(opts.options.has(opts.deleteOpt))
      deleteTopic(zkUtils, opts)
  } catch {
    case e: Throwable =>
      println("Error while executing topic command : " + e.getMessage)
      error(Utils.stackTrace(e))
      exitCode = 1
  } finally {
    zkUtils.close()
    System.exit(exitCode)
  }
}
It should be straightforward to adapt this class and retrieve the topic list programmatically.
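The script itself is normally invoked as bin/kafka-topics.sh --zookeeper localhost:2181 --list. To do the same from Java, here is a minimal sketch via ZkUtils (an assumption on my part, not from the original post: it needs Kafka's core jar and the Scala runtime on the classpath; the ZooKeeper connect string is a placeholder and ZooKeeper security is hard-coded off):

import java.util.List;

import kafka.utils.ZkUtils;
import scala.collection.JavaConversions;

public class ZkListTopics {

    public static void main(String[] args) {
        // 30s session/connection timeouts, ZooKeeper security disabled
        ZkUtils zkUtils = ZkUtils.apply("localhost:2181", 30000, 30000, false);
        try {
            // essentially the same lookup the --list action performs internally
            List<String> topics = JavaConversions.seqAsJavaList(zkUtils.getAllTopics());
            for (String topic : topics) {
                System.out.println(topic);
            }
        } finally {
            zkUtils.close();
        }
    }
}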
Method 3: speaking the Kafka protocol directly
I've recently been reading the book Apache Kafka源码剖析. Kafka defines its own binary protocol, and any language that follows it can communicate with Kafka, so there is also a protocol message for querying cluster metadata. I originally wanted to reuse Kafka's NetworkClient for the exchange, but it is overly complex, so I wrote a simple class with plain Java NIO that fetches the full cluster metadata.
package org.apache.kafka.clients.producer;

import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.network.NetworkReceive;
import org.apache.kafka.common.network.Send;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ProtoUtils;
import org.apache.kafka.common.protocol.types.Struct;
import org.apache.kafka.common.requests.MetadataResponse;

import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;
import java.nio.channels.SocketChannel;
import java.util.Iterator;
import java.util.Set;

public class TopicRequestHandle implements Runnable {

    private String host;
    private int port;
    private NetworkReceive receive;
    private Send send;
    Cluster cluster = null;
    private Selector selector;
    private SocketChannel socketChannel;

    public TopicRequestHandle(String host, int port) {
        this.host = host == null ? "127.0.0.1" : host;
        this.port = port;
        try {
            selector = Selector.open();
            socketChannel = SocketChannel.open();
            socketChannel.configureBlocking(false);
        } catch (IOException e) {
            e.printStackTrace();
            System.exit(1);
        }
    }
    @Override
    public void run() {
        try {
            doConnect();
        } catch (IOException e) {
            e.printStackTrace();
            System.exit(1);
        }
        // poll the selector until the metadata response has been parsed into cluster
        while (cluster == null) {
            try {
                selector.select(1000);
                Set<SelectionKey> selectedKeys = selector.selectedKeys();
                Iterator<SelectionKey> it = selectedKeys.iterator();
                SelectionKey key = null;
                while (it.hasNext()) {
                    key = it.next();
                    it.remove();
                    try {
                        handleInput(key);
                    } catch (Exception e) {
                        e.printStackTrace();
                        if (key != null) {
                            key.cancel();
                            if (key.channel() != null)
                                key.channel().close();
                        }
                    }
                }
            } catch (Exception e) {
                e.printStackTrace();
                System.exit(1);
            }
        }
        // Once the selector is closed, every Channel and Pipe registered on it is
        // deregistered and closed automatically, so no further cleanup is needed.
        if (selector != null) {
            try {
                selector.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }
    private void handleInput(SelectionKey key) throws IOException {
        if (key.isValid()) {
            SocketChannel sc = (SocketChannel) key.channel();
            // check whether the connection attempt has completed
            if (key.isConnectable()) {
                if (sc.finishConnect()) {
                    sc.register(selector, SelectionKey.OP_READ | SelectionKey.OP_WRITE);
                } else {
                    System.exit(1); // connection failed, exit the process
                }
            }
            if (key.isReadable()) {
                if (receive == null) {
                    receive = new NetworkReceive(-1, "-1");
                }
                receive.readFrom(socketChannel);
                if (receive.complete()) {
                    ByteBuffer payload = receive.payload();
                    payload.rewind();
                    // parse the payload as a Metadata response of the latest schema version
                    short apiKey = ApiKeys.METADATA.id;
                    short apiVer = ProtoUtils.latestVersion(apiKey);
                    Struct responseBody = ProtoUtils.responseSchema(apiKey, apiVer).read(receive.payload());
                    MetadataResponse response = new MetadataResponse(responseBody);
                    cluster = response.cluster();
                }
            }
            if (key.isWritable()) {
                send.writeTo(socketChannel);
                if (send.completed()) {
                    // request fully written: stop watching for write readiness
                    key.interestOps(key.interestOps() & (~SelectionKey.OP_WRITE));
                }
            }
        }
    }
    public void doConnect() throws IOException {
        // Non-blocking connect: if the connection completes immediately, register the
        // channel for read/write so the request is sent and the response is read;
        // otherwise register for OP_CONNECT and finish the handshake in handleInput()
        if (socketChannel.connect(new InetSocketAddress(host, port))) {
            socketChannel.register(selector, SelectionKey.OP_READ | SelectionKey.OP_WRITE);
        } else {
            socketChannel.register(selector, SelectionKey.OP_CONNECT);
        }
    }

    public NetworkReceive getReceive() {
        return receive;
    }

    public void setReceive(NetworkReceive receive) {
        this.receive = receive;
    }

    public Send getSend() {
        return send;
    }

    public void setSend(Send send) {
        this.send = send;
    }
}
The main function:
package org.apache.kafka.clients.producer;

import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.requests.MetadataRequest;
import org.apache.kafka.common.requests.RequestHeader;
import org.apache.kafka.common.requests.RequestSend;

public class QueryTopics {

    public static void main(String[] args) {
        // 1. Connect to a broker (change the host and port to your own)
        TopicRequestHandle topicRequestHandle = new TopicRequestHandle("localhost", 9092);
        // 2. Build a metadata request for all topics
        MetadataRequest request = MetadataRequest.allTopics();
        RequestHeader header = new RequestHeader(ApiKeys.METADATA.id, "", 0);
        RequestSend send = new RequestSend("-1", header, request.toStruct());
        // 3. Send the request
        topicRequestHandle.setSend(send);
        topicRequestHandle.run();
        // 4. Read the result
        Cluster cluster = topicRequestHandle.cluster;
        System.out.println(cluster);
    }
}
This Cluster object is the same abstraction the producer and consumer APIs use internally to hold the cluster's topic metadata.
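Cluster also exposes the topic names directly via its public topics() and partitionsForTopic() accessors; appending something like this to the end of the main method above should print every topic with its partition metadata:

// iterate over every topic name the broker reported
for (String topic : cluster.topics()) {
    // partitionsForTopic returns the PartitionInfo list (leader, replicas, ISR) for one topic
    System.out.println(topic + " -> " + cluster.partitionsForTopic(topic));
}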