创建topic
sh /usr/hdp/3.1.0.0-78/kafka/bin/kafka-topics.sh --create --zookeeper cluster1.hadoop:2181 --replication-factor 1 --partitions 1 --topic KafkaTest
删除指定名称的topic(假删除,数据还在)
delete.topic.enable=true
sh /usr/hdp/3.1.0.0-78/kafka/bin/kafka-topics.sh --delete --zookeeper sc-slave1:2181 --topic TestKafka
consumer从指定topic消费消息(--from-beginning表示从头开始消费)
sh /usr/hdp/3.1.0.0-78/kafka/bin/kafka-console-consumer.sh --bootstrap-server cluster1.hadoop:6667 --topic KafkaTest --from-beginning
作为⽣产者向指定Broker的topic发送消息
sh /usr/hdp/3.1.0.0-78/kafka/bin/kafka-console-producer.sh --broker-list cluster0.hadoop:6667 --topic KafkaTest
Java实现
导入依赖
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
<version>2.0.0</version>
</dependency>
<!--kafka的⽇志组件依赖包 -->
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-simple</artifactId>
<version>1.7.25</version>
</dependency>
<!-- all in one 打包 -->
<build>
<plugins>
<plugin>
<artifactId>maven-compiler-plugin</artifactId>
<version>2.3.2</version>
<configuration>
<source>1.8</source>
<target>1.8</target>
<encoding>UTF-8</encoding>
</configuration>
</plugin>
<plugin>
<artifactId>maven-assembly-plugin</artifactId>
<configuration>
<descriptorRefs>
<descriptorRef>jar-with-dependencies</descriptorRef>
</descriptorRefs>
</configuration>
<executions>
<execution>
<id>make-assembly</id>
<phase>package</phase>
<goals>
<goal>single</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
Java实现生产者
// Producer handle, public so callers can invoke send() directly
public KafkaProducer<String, String> producer;

/**
 * Builds a String-key/String-value producer connected to the given brokers.
 *
 * @param brokerList broker addresses as host:port, comma-separated for a cluster
 */
public KafkaProducerUtil(String brokerList) {
    Properties config = new Properties();
    // Bootstrap servers: host:port, comma-separated for a cluster
    config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList);
    // Both key and value are plain strings
    config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    producer = new KafkaProducer<String, String>(config);
}
/** Flushes any buffered records and releases the producer's resources. */
public void close() {
    producer.close();
}
/**
 * Demo entry point: publishes a single message to topic TestKafka,
 * then closes the producer (close() flushes pending sends).
 */
public static void main(String[] args) {
    // Broker list: host:port, comma-separated
    String brokerList = "cluster1.hadoop:6667,cluster0.hadoop:6667";
    String topic = "TestKafka";
    KafkaProducerUtil util = new KafkaProducerUtil(brokerList);
    // Value-only record (no key); send() is asynchronous
    util.producer.send(new ProducerRecord<String, String>(topic, "hello,天亮教育!"));
    util.close();
    System.out.println("done!");
}
Java实现消费者
// Consumer handle, public so callers can poll() directly
public KafkaConsumer<String, String> kafkaConsumer;

/**
 * Builds a String-key/String-value consumer subscribed to the given topic.
 *
 * @param brokerList broker addresses as host:port, comma-separated for a cluster
 * @param topic      topic to subscribe to
 */
public KafkaConsumerUtil(String brokerList, String topic) {
    Properties config = new Properties();
    // Bootstrap servers: host:port, comma-separated for a cluster
    config.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList);
    // Consumers sharing a group id split the topic's partitions among themselves
    config.put(ConsumerConfig.GROUP_ID_CONFIG, "TestTL");
    // Commit offsets automatically (default true), once per second
    config.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true);
    config.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, 1000);
    // Deserializers must mirror the producer's serializers or decoding breaks
    config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    kafkaConsumer = new KafkaConsumer<>(config);
    // Subscribe to the single requested topic
    kafkaConsumer.subscribe(Arrays.asList(topic));
}
/** Closes the consumer, leaving offsets to the auto-commit settings. */
public void close() {
    this.kafkaConsumer.close();
}
/**
 * Demo entry point: continuously consumes from topic TestKafka and prints
 * each record's key, offset and value. The loop runs until the process is
 * terminated; the consumer is now closed via finally even if poll() throws.
 */
public static void main(String[] args) {
    // Broker list: host:port, comma-separated
    String brokerList = "cluster0.hadoop:6667,cluster1.hadoop:6667";
    String topic = "TestKafka";
    KafkaConsumerUtil kafkaConsumerUtil = new KafkaConsumerUtil(brokerList, topic);
    boolean runnable = true;
    try {
        while (runnable) {
            // NOTE(review): poll(long) is deprecated since kafka-clients 2.0;
            // prefer poll(Duration.ofMillis(100)) once java.time.Duration is imported.
            ConsumerRecords<String, String> records =
                    kafkaConsumerUtil.kafkaConsumer.poll(100);
            for (ConsumerRecord<String, String> record : records) {
                System.out.printf("key = %s, offset = %d, value = %s",
                        record.key(), record.offset(), record.value());
                System.out.println();
            }
        }
    } finally {
        // Bug fix: 'runnable' was never cleared, so close() below was dead
        // code and the consumer leaked whenever poll() threw (e.g. wakeup,
        // interrupt). The finally block guarantees cleanup on every exit path.
        kafkaConsumerUtil.close();
        System.out.println("done!");
    }
}