Client
All operations below are run from the Kafka installation directory. (Startup problems are logged to /opt/module/kafka/nohup.out, which can be checked for troubleshooting.)
Start the broker and detach it into the background (producing and consuming only work once the broker is up):
nohup bin/kafka-server-start.sh config/server.properties &
List the topics currently in Kafka
bin/kafka-topics.sh --zookeeper hadoop101:2181 --list
Create a topic
On the specified host and port, create a topic named first with 3 replicas and 1 partition:
bin/kafka-topics.sh --zookeeper hadoop101:2181 --create --replication-factor 3 --partitions 1 --topic first
Option descriptions:
--topic defines the topic name
--replication-factor defines the number of replicas
--partitions defines the number of partitions
Delete a topic
bin/kafka-topics.sh --zookeeper hadoop101:2181 --delete --topic first
This requires delete.topic.enable=true to be set in server.properties.
Describe a topic
bin/kafka-topics.sh --zookeeper hadoop101:2181 --describe --topic second
Send messages
Press Enter to start typing; every message entered shows up on the receiving side.
bin/kafka-console-producer.sh --broker-list hadoop101:9092 --topic second
Receive messages
bin/kafka-console-consumer.sh --zookeeper hadoop101:2181 --from-beginning --topic second
Using the API
First start a console consumer to simulate the receiving side,
then build a producer in code and send data to it.
- Start a consumer:
bin/kafka-console-consumer.sh --zookeeper hadoop101:2181 --from-beginning --topic second
package com.uu;

import java.util.Properties;

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

/**
 * Created by Administrator on 2019/10/27.
 */
public class KafkaTest {
    // Build a producer and send messages
    public static void main(String[] args) {
        Properties props = new Properties();
        // Host name and port of the Kafka broker
        props.put("bootstrap.servers", "hadoop101:9092");
        // Wait for acknowledgement from all replicas
        props.put("acks", "all");
        // Maximum number of send retries
        props.put("retries", 0);
        // Size of the send buffer, in bytes
        props.put("buffer.memory", 33554432);
        // Key serializer
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        // Value serializer
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        Producer<String, String> producer = new KafkaProducer<String, String>(props);
        for (int i = 0; i < 100; i++) {
            // Plain send: topic, key, value
            producer.send(new ProducerRecord<String, String>("second", Integer.toString(i), Integer.toString(i)));
            // The same send again, this time with a callback
            producer.send(new ProducerRecord<String, String>("second", Integer.toString(i), "hello world-" + i), new Callback() {
                public void onCompletion(RecordMetadata metadata, Exception exception) {
                    // If there was no exception, print the record metadata
                    if (exception == null) {
                        System.out.println(metadata);
                    }
                }
            });
        }
        // Close the producer after the loop, not inside it
        producer.close();
    }
}
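Both sends above are asynchronous: fire-and-forget, or with a callback. For comparison, a send can also be made synchronous by blocking on the Future that send() returns. Below is a minimal sketch reusing the broker address and topic from the example above; the class name SyncSendTest is made up for illustration.

package com.uu;

import java.util.Properties;
import java.util.concurrent.Future;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

public class SyncSendTest {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "hadoop101:9092");
        props.put("acks", "all");
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        Producer<String, String> producer = new KafkaProducer<>(props);
        // send() returns a Future<RecordMetadata>; calling get() blocks
        // until the broker acknowledges the record, making the send synchronous.
        Future<RecordMetadata> future =
                producer.send(new ProducerRecord<>("second", "key", "sync hello"));
        RecordMetadata metadata = future.get(); // throws if the send failed
        System.out.println("partition=" + metadata.partition() + ", offset=" + metadata.offset());
        producer.close();
    }
}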
Custom partitioning
Implement the Partitioner interface.
The value returned by partition() decides which partition each record goes to.
In the producer code, point the partitioner property at this class in the configuration (see partitioner.class below).
package com.bigdata.kafka;

import java.util.Map;

import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.common.Cluster;

public class CustomPartitioner implements Partitioner {

    @Override
    public void configure(Map<String, ?> configs) {
    }

    @Override
    public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
        // Control the partition: every record goes to partition 0
        return 0;
    }

    @Override
    public void close() {
    }
}
Producer code
package com.bigdata.kafka;

import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;

public class PartitionerProducer {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Host name and port of the Kafka broker
        props.put("bootstrap.servers", "hadoop103:9092");
        // Wait for acknowledgement from all replicas
        props.put("acks", "all");
        // Maximum number of send retries
        props.put("retries", 0);
        // Size of the send buffer, in bytes
        props.put("buffer.memory", 33554432);
        // Key serializer
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        // Value serializer
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        // Custom partitioner
        props.put("partitioner.class", "com.bigdata.kafka.CustomPartitioner");

        Producer<String, String> producer = new KafkaProducer<>(props);
        producer.send(new ProducerRecord<String, String>("second", "1", "bigdata"));
        producer.close();
    }
}
As a result, every record is stored in the partition returned by partition(), in this case partition 0.
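The fixed return 0 above is the simplest case. As an illustrative variation (not from the original notes), partition() can compute its result from the record, for example spreading keyed records across the topic's partitions by key hash; the class name KeyHashPartitioner below is made up.

package com.bigdata.kafka;

import java.util.Map;

import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.common.Cluster;

// Illustrative variant: route records by key hash instead of a fixed partition.
public class KeyHashPartitioner implements Partitioner {

    @Override
    public void configure(Map<String, ?> configs) {
    }

    @Override
    public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
        int numPartitions = cluster.partitionsForTopic(topic).size();
        // Records with no key go to partition 0; keyed records spread by hash.
        if (key == null) {
            return 0;
        }
        // Mask off the sign bit so the result is always non-negative.
        return (key.hashCode() & Integer.MAX_VALUE) % numPartitions;
    }

    @Override
    public void close() {
    }
}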
The consumer API:
package com.bigdata.kafka.consume;

import java.util.Arrays;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class CustomNewConsumer {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Address of the Kafka cluster; not every broker has to be listed
        props.put("bootstrap.servers", "hadoop101:9092");
        // Specify the consumer group
        props.put("group.id", "test");
        // Whether offsets are committed automatically
        props.put("enable.auto.commit", "true");
        // Interval between automatic offset commits
        props.put("auto.commit.interval.ms", "1000");
        // Key deserializer class
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        // Value deserializer class
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        // Create the consumer
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        // Topics to subscribe to; several can be subscribed at once
        consumer.subscribe(Arrays.asList("first", "second", "third"));
        while (true) {
            // Poll for data with a timeout of 100 ms
            ConsumerRecords<String, String> records = consumer.poll(100);
            for (ConsumerRecord<String, String> record : records)
                System.out.printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value());
        }
    }
}
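This consumer relies on automatic offset commits. If more control over offsets is wanted, auto-commit can be disabled and offsets committed by hand after each batch. Below is a minimal sketch under the same broker and topic assumptions as above; the group id test-manual and the class name ManualCommitConsumer are made up.

package com.bigdata.kafka.consume;

import java.util.Arrays;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class ManualCommitConsumer {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "hadoop101:9092");
        props.put("group.id", "test-manual"); // assumed group id
        // Disable auto-commit so offsets only advance when we say so
        props.put("enable.auto.commit", "false");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Arrays.asList("second"));
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(100);
            for (ConsumerRecord<String, String> record : records) {
                System.out.printf("offset = %d, value = %s%n", record.offset(), record.value());
            }
            // Commit synchronously after the batch has been processed,
            // so a crash before this line replays the batch (at-least-once).
            consumer.commitSync();
        }
    }
}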