Kafka虚拟机操作
- 开启 zookeeper
bin/zookeeper-server-start.sh config/zookeeper.properties
- 开启 kafka
bin/kafka-server-start.sh config/server.properties
3.创建topic
bin/kafka-topics.sh --create --zookeeper 172.20.10.9:2181 --replication-factor 1 --partitions 1 --topic test1
4.查看所有topic列表
bin/kafka-topics.sh --zookeeper 172.20.10.9:2181 --list
5.查看指定topic信息
bin/kafka-topics.sh --zookeeper 172.20.10.9:2181 --describe --topic test1
6.增加topic分区
bin/kafka-topics.sh --zookeeper 172.20.10.9:2181 --alter --topic test1 --partitions 10
7.删除topic
bin/kafka-topics.sh --delete --topic test --zookeeper localhost:2181
8.producer创造消息
bin/kafka-console-producer.sh --broker-list 172.20.10.9:9092 --topic test1
9.consumer消费消息
bin/kafka-console-consumer.sh --bootstrap-server 172.20.10.9:9092 --topic test1 --from-beginning
代码实现
主题的增删改查
0.工具类
ZkUtils zk = ZkUtils.apply("172.20.10.9:2181", 30000, 30000, JaasUtils.isZkSecurityEnabled());
1.增
AdminUtils.createTopic(zk, "t1", 1 /* partitions */, 1 /* replicationFactor */, new Properties(), RackAwareMode.Enforced$.MODULE$);
2.删
AdminUtils.deleteTopic(zk,topicName);
3.改
Properties prop = AdminUtils.fetchEntityConfig(zk, ConfigType.Topic(), topicName);
//增加topic级别属性
prop.put("min.cleanable.dirty.ratio", "0.3");
//删除topic级别属性
prop.remove("max.message.bytes");
prop.put("retention.ms", "1000");
AdminUtils.changeTopicConfig(zk,topicName,prop);
4.查
a.查询全部
List&lt;String&gt; list = JavaConversions.seqAsJavaList(zk.getAllTopics());
b.查询全部及配置信息
Map<String, Properties> stringPropertiesMap = AdminUtils.fetchAllTopicConfigs(zk);
c.根据名字查询topic配置信息
Properties prop = AdminUtils.fetchEntityConfig(zk, ConfigType.Topic(), topicName);
生产者消费者的实现
一.生产者
/**
 * Example producer thread: sends "helloworld&lt;N&gt;" messages to the given topic
 * every 200 ms until the thread is interrupted.
 */
public class MyProducer extends Thread {
    private final String topic;
    private final KafkaProducer<String, String> producer;

    /**
     * Configures a String/String producer against {@link KafkaProperties#broker}.
     *
     * @param topic the Kafka topic to publish to
     */
    public MyProducer(String topic) {
        this.topic = topic;
        Properties prop = new Properties();
        prop.put("bootstrap.servers", KafkaProperties.broker);
        prop.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        prop.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        producer = new KafkaProducer<>(prop);
    }

    @Override
    public void run() {
        int messageNo = 1;
        try {
            // Fix: the original swallowed InterruptedException inside an infinite
            // loop, which made this thread impossible to stop and the producer
            // impossible to close. Exit the loop on interrupt instead.
            while (!Thread.currentThread().isInterrupted()) {
                String message = "helloworld" + messageNo;
                System.err.println("发送 =" + message);
                producer.send(new ProducerRecord<>(topic, message));
                messageNo++;
                try {
                    Thread.sleep(200);
                } catch (InterruptedException e) {
                    // Restore the interrupt flag so the loop condition sees it.
                    Thread.currentThread().interrupt();
                }
            }
        } finally {
            // Flush buffered records and release network resources.
            producer.close();
        }
    }

    public static void main(String[] args) {
        new MyProducer(KafkaProperties.topic).start();
    }
}
// Shared connection constants used by the producer example above.
public class KafkaProperties {
// Kafka broker address as host:port.
public static final String broker="172.20.10.9:9092";
// Default topic the example producer publishes to.
public static final String topic ="test1";
}
二.消费者
/**
 * Example consumer thread: polls the given topic forever and prints each
 * record value to stderr.
 */
public class MyConsumer extends Thread {
    private final KafkaConsumer<Integer, String> consumer;

    /**
     * Configures the consumer and subscribes it to {@code topic}.
     *
     * @param topic the Kafka topic to consume from
     */
    public MyConsumer(String topic) {
        Properties props = new Properties();
        // Kafka cluster address (host:port).
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,
                "172.20.10.9:9092");
        // Consumer group id.
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "test-consumer-group");
        // Commit offsets automatically after consuming.
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
        // Auto-commit interval in milliseconds.
        props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, 1000);
        // Key deserializer. NOTE(review): the matching producer example sends no
        // keys (null), which is the only reason IntegerDeserializer works here
        // against a String-key producer — confirm if keys are ever set.
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.IntegerDeserializer");
        // Value deserializer.
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringDeserializer");
        consumer = new KafkaConsumer<>(props);
        // Bug fix: the original subscribed to the hard-coded "test1" and
        // silently ignored the constructor's topic parameter.
        consumer.subscribe(Arrays.asList(topic));
    }

    @Override
    public void run() {
        while (true) {
            // Block up to 1000 ms waiting for new records.
            ConsumerRecords<Integer, String> records = consumer.poll(1000);
            // Iterating an empty batch is a no-op, so no isEmpty() guard is needed.
            for (ConsumerRecord<Integer, String> record : records) {
                System.err.println("消息为" + record.value());
            }
        }
    }

    public static void main(String[] args) {
        new MyConsumer("test1").start();
    }
}
配置文件
// NOTE(review): this class is listed twice in these notes with identical
// content — only one definition can exist per package in a real project.
public class KafkaProperties {
// Kafka broker address as host:port.
public static final String broker="172.20.10.9:9092";
// Default topic used by the examples.
public static final String topic ="test1";
}