记录最近遇到一个需求中踩到的不少坑,现在整理一下做个汇总
使用JavaAPI写一个Kafka topic创建及修改
目前发现有两种方法:
一种是通过注册 ZooKeeper 来管理 Kafka,这是一种很老的方法,这里不再赘述
另一种是adminclient
说明:在Kafka0.11.0.0版本之后,多了一个AdminClient,这个是在kafka-client包下的,这是一个抽象类,具体的实现是org.apache.kafka.clients.admin.KafkaAdminClient。这个类可以实现相关的功能,当然必须kafka服务端支持这样的操作才可以。
依赖:kafka-clients的版本必须要在0.11.0.0之后
这里贴上参考的原帖
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
<version>2.4.0</version>
<exclusions>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
</exclusion>
</exclusions>
</dependency>
import avro.shaded.com.google.common.collect.Lists;
import avro.shaded.com.google.common.collect.Sets;
import org.apache.kafka.clients.admin.*;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.config.ConfigResource;
import java.util.*;
import java.util.concurrent.ExecutionException;
可能会遇到的BUG一:
这里如果直接贴代码,会出现maven把jar包下载了却没有导入的情况,这时需要到Project Structure的libraries中,点击第二栏上面的“+”号选择from Maven,手动输入需要添加的jar包
创建Topic
/**
* 创建topic
*
* @param bootstrapServers kafka集群地址 多个地址用;隔开
* @param topicName topic的名称
* @param partitions 分区数
* @param replication 副本数
*/
private static boolean createTopic(String bootstrapServers, String topicName, int partitions, int replication) {
Properties properties = new Properties();
properties.put("bootstrap.servers", bootstrapServers);
properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
AdminClient adminClient = null;
try {
adminClient = KafkaAdminClient.create(properties);
NewTopic newTopic = new NewTopic(topicName, partitions, (short) replication);
CreateTopicsResult createTopicsResult = adminClient.createTopics(Lists.newArrayList(newTopic));
createTopicsResult.all().get();
if (createTopicsResult.all().isDone()){
System.out.println("done");
}
} catch (Exception e) {
e.printStackTrace();
return false;
}finally {
if(adminClient != null){
adminClient.close();
}
}
return true;
}
删除topic
/**
* 删除topic
*
* @param bootstrapServers kafka集群地址 多个地址用;隔开
* @param topicName 删除的topic的名称
*/
public boolean deleteTopic(String bootstrapServers, String topicName){
Properties properties = new Properties();
properties.put("bootstrap.servers", bootstrapServers);
properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
AdminClient adminClient = null;
try {
adminClient = KafkaAdminClient.create(properties);
adminClient.deleteTopics(Arrays.asList(topicName));
return true;
} catch (Exception e){
e.printStackTrace();
}finally {
if(adminClient != null){
adminClient.close();
}
}
return false;
}
查询所有的topic
/**
* 查询所有的topic
*
* @param bootstrapServers kafka集群地址 多个地址用;隔开
* @return
*/
private Set<String> listTopic(String bootstrapServers){
Properties properties = new Properties();
properties.put("bootstrap.servers", bootstrapServers);
properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
AdminClient adminClient = null;
try {
adminClient = KafkaAdminClient.create(properties);
ListTopicsResult result = adminClient.listTopics();
KafkaFuture<Set<String>> names = result.names();
System.out.println(names.get());
return names.get();
} catch (InterruptedException | ExecutionException e) {
e.printStackTrace();
}finally {
if(adminClient != null){
adminClient.close();
}
}
return Sets.newHashSet();
}
修改 topic分区
/**
* 修改topic分区
*
* @param bootstrapServers kafka集群地址 多个地址用;隔开
* @return
*/
public static void increTopicPartitions(String bootstrapServers, String topic, int partitions){
Properties props = new Properties();
props.put("bootstrap.servers", bootstrapServers);
props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
AdminClient adminClient = null;
try {
adminClient = KafkaAdminClient.create(props);
Map<String,NewPartitions> newPartitions =new HashMap<>();
//给Map存入Topic名字和想要增加到的partition数量。参数传入多少就是增加到多少
newPartitions.put(topic,NewPartitions.increaseTo(partitions));
//拿到结果
CreatePartitionsResult result = adminClient.createPartitions(newPartitions);
//执行阻塞方法,等待结果完成
result.all().get();
System.out.println("done");
} catch (InterruptedException | ExecutionException e) {
e.printStackTrace();
}finally {
if(adminClient != null){
adminClient.close();
}
}
}