kafka_2.11-2.0.0 生产消费创建主题
1、使用kafka创建主题,获取所有主题
/**
 * Lists all topic names in the Kafka cluster via the AdminClient API.
 *
 * <p>Note: AdminClient talks to the brokers directly; it does NOT use ZooKeeper,
 * so a "zookeeper.connect" property would be ignored (with an "unknown config"
 * warning) and has been removed.
 */
public class CreateTopic {
    public static void main(String[] args) throws ExecutionException, InterruptedException {
        Properties properties = new Properties();
        properties.put("bootstrap.servers", "master:9092");
        // AdminClient is AutoCloseable; try-with-resources guarantees close()
        // even if names.get() below throws.
        try (AdminClient adminClient = AdminClient.create(properties)) {
            ListTopicsResult listTopicsResult = adminClient.listTopics();
            KafkaFuture<Set<String>> names = listTopicsResult.names();
            // names.get() blocks until the broker responds (may throw ExecutionException).
            Set<String> set = names.get();
            for (String topic : set) {
                System.out.println(topic);
            }
        }
    }
}
2、kafka生产者
/**
 * Sends 100 string records (key = value = "0".."99") to the "test" topic.
 *
 * <p>Fixes over the original: the producer is typed ({@code KafkaProducer<String, String>}
 * instead of a raw type) and wrapped in try-with-resources, so close() — which flushes
 * any records still buffered by batch.size/linger.ms — runs even if send() throws.
 */
public class ProducerDemo {
    public static void main(String[] args) {
        Properties properties = new Properties();
        properties.put("bootstrap.servers", "master:9092");
        // "all": wait for the full ISR to acknowledge each record (strongest durability).
        properties.put("acks", "all");
        properties.put("retries", 0);
        properties.put("batch.size", 16384);
        properties.put("linger.ms", 1);
        properties.put("buffer.memory", 33554432);
        properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        try (KafkaProducer<String, String> kafkaProducer = new KafkaProducer<>(properties)) {
            for (int i = 0; i < 100; i++) {
                // send() is asynchronous; close() at the end of the try block flushes.
                kafkaProducer.send(new ProducerRecord<>("test", Integer.toString(i), Integer.toString(i)));
            }
        }
    }
}
3、kafka消费者
/**
 * Continuously polls the "test" topic and prints each record's offset, key and value.
 *
 * <p>Fixes over the original: the consumer is typed ({@code KafkaConsumer<String, String>}
 * instead of a raw type), placed in try-with-resources so it is closed (leaving the
 * group cleanly) if poll() throws, and uses {@code poll(Duration)} — {@code poll(long)}
 * is deprecated as of Kafka 2.0, which this example targets.
 */
public class ConsumerDemo {
    public static void main(String[] args) {
        Properties properties = new Properties();
        properties.put("bootstrap.servers", "master:9092");
        properties.put("group.id", "test");
        // Offsets are auto-committed every second.
        properties.put("enable.auto.commit", "true");
        properties.put("auto.commit.interval.ms", "1000");
        properties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        properties.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        try (KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(properties)) {
            kafkaConsumer.subscribe(Arrays.asList("test"));
            while (true) {
                // Block up to 100 ms waiting for records; fully-qualified Duration
                // avoids adding an import to the snippet.
                ConsumerRecords<String, String> records =
                        kafkaConsumer.poll(java.time.Duration.ofMillis(100));
                for (ConsumerRecord<String, String> record : records) {
                    System.out.printf("offset = %d, key = %s, value = %s%n",
                            record.offset(), record.key(), record.value());
                }
            }
        }
    }
}
4、使用docker 启动kafka的命令(自己记录怕忘记)
启动kafka容器
# Start a single Kafka broker (wurstmeister/kafka image) in the background.
# Assumes ZooKeeper is already reachable at 192.168.204.128:2181.
# KAFKA_LISTENERS binds the broker to all container interfaces on 9092;
# KAFKA_ADVERTISED_LISTENERS is the address handed to clients, so it must be
# the host IP (192.168.204.128) that producers/consumers can actually reach.
docker run -d --name kafka \
-p 9092:9092 \
-e KAFKA_BROKER_ID=0 \
-e KAFKA_ZOOKEEPER_CONNECT=192.168.204.128:2181 \
-e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://192.168.204.128:9092 \
-e KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9092 -t wurstmeister/kafka