1. Basic Kafka API (AdminClient)
1.1 Creating a KafkaAdminClient
Properties prop = new Properties();
prop.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG,"CentOSA:9092,CentOSB:9092,CentOSC:9092");
KafkaAdminClient client = (KafkaAdminClient) KafkaAdminClient.create(prop);
1.2 Creating topics asynchronously
CreateTopicsResult topic01 = client.createTopics(Arrays.asList(new NewTopic("topic01", 3, (short) 3)));
1.3 Creating topics synchronously
CreateTopicsResult topic02 = client.createTopics(Arrays.asList(new NewTopic("topic02", 3, (short) 3)));
topic02.all().get();
1.4 Listing topics
ListTopicsResult topics = client.listTopics();
Set<String> names = topics.names().get();
for (String name : names) {
System.out.println(name);
}
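By default listTopics() hides internal topics such as __consumer_offsets. A short sketch, not from the original notes, that includes them via ListTopicsOptions (in org.apache.kafka.clients.admin):
// Include internal topics in the listing
ListTopicsResult allTopics = client.listTopics(new ListTopicsOptions().listInternal(true));
System.out.println(allTopics.names().get());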
1.5 Deleting topics asynchronously
DeleteTopicsResult deleteTopics = client.deleteTopics(Arrays.asList("topic01", "topic02"));
1.6 Deleting topics synchronously
deleteTopics.all().get();
1.7 Describing topic details
DescribeTopicsResult dtr = client.describeTopics(Arrays.asList("topic01"));
Map<String, TopicDescription> topicDescriptionMap = dtr.all().get();
for (Map.Entry<String, TopicDescription> entry : topicDescriptionMap.entrySet()) {
System.out.println(entry.getKey()+"\t"+entry.getValue());
}
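TopicDescription also exposes per-partition detail (leader, replicas, ISR). A short sketch, not from the original notes, that prints it for topic01 (requires importing org.apache.kafka.common.TopicPartitionInfo):
for (TopicPartitionInfo p : topicDescriptionMap.get("topic01").partitions()) {
// leader, replica and in-sync-replica information for each partition
System.out.println("partition " + p.partition() + " leader=" + p.leader() + " replicas=" + p.replicas() + " isr=" + p.isr());
}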
1.8 Closing the client
client.close();
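Putting the pieces together: a minimal, self-contained sketch, not from the original notes (the class name KafkaAdminQuickStart and its package are made up), that creates a topic synchronously, lists topic names, and lets try-with-resources close the client:
package com.baron.quickStart;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;
import java.util.Arrays;
import java.util.Properties;
public class KafkaAdminQuickStart {
public static void main(String[] args) throws Exception {
Properties props = new Properties();
props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "CentOSA:9092,CentOSB:9092,CentOSC:9092");
// try-with-resources closes the client automatically
try (AdminClient client = AdminClient.create(props)) {
// create the topic and block until the broker confirms it
client.createTopics(Arrays.asList(new NewTopic("topic03", 3, (short) 3))).all().get();
// list all topic names
System.out.println(client.listTopics().names().get());
}
}
}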
2. Producer and Consumer
2.1 Producer:
package com.baron.quickStart;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;
import java.util.Properties;
/**
* @author baron
* @description
* @date 2021/9/6 15:24
*/
public class KafkaProducerQuickStart {
public static void main(String[] args) {
// 1. Create the KafkaProducer
Properties props = new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "CentOSA:9092,CentOSB:9092,CentOSC:9092");
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
KafkaProducer<String, String> producer = new KafkaProducer<String, String>(props);
// 2. Produce messages
for(int i=0; i<30; i++) {
ProducerRecord<String, String> record =
new ProducerRecord<>("topic01", "key" + i, "value" + i);
// new ProducerRecord<>("topic01", "value" + i);
// Send the record to the broker
producer.send(record);
}
// 3. Close the producer
producer.close();
}
}
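send() is asynchronous: it only appends the record to the producer's buffer and returns a Future<RecordMetadata>. To observe the delivery result per record, a Callback can be passed as the second argument. A minimal sketch, not from the original notes, assuming the same producer and record as above:
// Asynchronous send with a callback reporting success or failure
producer.send(record, (metadata, exception) -> {
if (exception != null) {
exception.printStackTrace(); // delivery failed
} else {
System.out.println("sent to " + metadata.topic() + "-" + metadata.partition() + "@" + metadata.offset());
}
});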
2.2 Consumer:
package com.baron.quickStart;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;
import java.time.Duration;
import java.util.Iterator;
import java.util.Properties;
import java.util.regex.Pattern;
/**
* @author baron
* @description
* @date 2021/9/6 15:24
*/
public class KafkaConsumerQuickStart_1 {
public static void main(String[] args) {
// 1. Create the KafkaConsumer
Properties props = new Properties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "CentOSA:9092,CentOSB:9092,CentOSC:9092");
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
// Manage this consumer as part of a consumer group
props.put(ConsumerConfig.GROUP_ID_CONFIG, "g2");
KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
// 2. Subscribe to the relevant topics
consumer.subscribe(Pattern.compile("^topic.*"));
// 3. Poll and iterate over the records
while (true) {
ConsumerRecords<String, String> consumerRecords = consumer.poll(Duration.ofSeconds(1));
if (!consumerRecords.isEmpty()) { // this poll returned data
Iterator<ConsumerRecord<String, String>> it = consumerRecords.iterator();
while (it.hasNext()) {
// Read one record
ConsumerRecord<String, String> record = it.next();
String topic = record.topic();
int partition = record.partition();
long offset = record.offset();
String key = record.key();
String value = record.value();
long timestamp = record.timestamp();
System.out.println(topic + "\t" + partition + "," + offset + "\t" + key + "\t" + value + "\t" + timestamp);
}
}
}
}
}
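With the configuration above, offsets are committed automatically (enable.auto.commit defaults to true). To control exactly when offsets are committed, auto commit can be switched off and commitSync() called after processing. A sketch, not from the original notes, reusing the consumer setup above:
// Disable auto commit (set before creating the KafkaConsumer)
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
// Inside the poll loop, commit only after the batch has been processed
ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
for (ConsumerRecord<String, String> record : records) {
// process the record here
}
if (!records.isEmpty()) {
consumer.commitSync(); // synchronously commit the offsets returned by this poll
}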
2.3 Consumer (manually specifying the starting offset):
package com.baron.quickStart;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;
import java.time.Duration;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.Properties;
/**
* @author baron
* @description
* @date 2021/9/6 15:24
*/
public class KafkaConsumerQuickStart_2 {
public static void main(String[] args) {
// 1. Create the KafkaConsumer
Properties props = new Properties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "CentOSA:9092,CentOSB:9092,CentOSC:9092");
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
// group.id is not required here because partitions are assigned manually below
// props.put(ConsumerConfig.GROUP_ID_CONFIG, "g2");
KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
// 2. Manually assign partitions instead of subscribing; this gives up group management
List<TopicPartition> partitions = Arrays.asList(new TopicPartition("topic01", 0));
consumer.assign(partitions);
// Seek to the beginning of partition 0 of topic01 (offset 0)
// consumer.seekToBeginning(partitions);
// Seek to a specific offset in partition 0 of topic01
consumer.seek(new TopicPartition("topic01", 0), 1);
// 3. Poll and iterate over the records
while (true) {
// Poll once per second
ConsumerRecords<String, String> consumerRecords = consumer.poll(Duration.ofSeconds(1));
if (!consumerRecords.isEmpty()) { // this poll returned data
Iterator<ConsumerRecord<String, String>> it = consumerRecords.iterator();
while (it.hasNext()) {
// Read one record
ConsumerRecord<String, String> record = it.next();
String topic = record.topic();
int partition = record.partition();
long offset = record.offset();
String key = record.key();
String value = record.value();
long timestamp = record.timestamp();
System.out.println(topic + "\t" + partition + "," + offset + "\t" + key + "\t" + value + "\t" + timestamp);
}
}
}
}
}
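Besides seek(partition, offset) with a hard-coded offset, the consumer can look up the offset closest to a timestamp with offsetsForTimes() and seek there. A sketch, not from the original notes, for partition 0 of topic01 (requires importing org.apache.kafka.clients.consumer.OffsetAndTimestamp, java.util.Collections and java.util.Map):
// Find the earliest offset whose timestamp is >= one hour ago, then seek to it
TopicPartition tp = new TopicPartition("topic01", 0);
long oneHourAgo = System.currentTimeMillis() - 60 * 60 * 1000L;
Map<TopicPartition, OffsetAndTimestamp> offsets = consumer.offsetsForTimes(Collections.singletonMap(tp, oneHourAgo));
OffsetAndTimestamp found = offsets.get(tp);
if (found != null) {
consumer.seek(tp, found.offset());
}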
3. Custom Partitioning
Default partitioning strategy: if a message has a key, the partition is chosen by hashing the key; if it has no key, partitions are chosen round-robin. The custom partitioner below implements the same behavior.
3.1 Custom partitioner:
package com.baron.partitioner;
import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.utils.Utils;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
/**
* Custom partitioning strategy
* @author baron
* @description
* @date 2021/9/7 9:57
*/
public class UserDefinePartitioner implements Partitioner {
private AtomicInteger counter = new AtomicInteger(0);
/**
* Returns the partition number
* @param topic
* @param key
* @param keyBytes
* @param value
* @param valueBytes
* @param cluster
* @return
*/
@Override
public int partition(String topic,
Object key,
byte[] keyBytes,
Object value,
byte[] valueBytes,
Cluster cluster) {
// Get all partitions of the topic
List<PartitionInfo> partitions = cluster.partitionsForTopic(topic);
int numPartitions = partitions.size();
if (keyBytes == null) {
// Round-robin when the record has no key
int andIncrement = counter.getAndIncrement();
return Utils.toPositive(andIncrement) % numPartitions;
} else {
// Hash the key to pick a partition
return Utils.toPositive(Utils.murmur2(keyBytes)) % numPartitions;
}
}
@Override
public void close() {
System.out.println("close");
}
@Override
public void configure(Map<String, ?> configs) {
System.out.println("configure");
}
}
3.2 Producing messages with the custom partitioner:
package com.baron.partitioner;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;
import java.util.Properties;
/**
* @author baron
* @description
* @date 2021/9/6 15:24
*/
public class KafkaProducerPartitioner {
public static void main(String[] args) {
// 1. Create the KafkaProducer
Properties props = new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "CentOSA:9092,CentOSB:9092,CentOSC:9092");
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
/*
* Partitioning strategy:
* 1. If the record names a partition, the message always goes to that partition.
* 2. If no partition is named but the record has a key, the key is hashed to pick a partition.
* 3. If there is neither a partition nor a key, partitions are picked round-robin.
*/
props.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, UserDefinePartitioner.class.getName());
KafkaProducer<String, String> producer = new KafkaProducer<String, String>(props);
// 2. Produce messages
for(int i=0; i<6; i++) {
ProducerRecord<String, String> record =
new ProducerRecord<>("topic01", "key" + i, "value" + i);
// new ProducerRecord<>("topic01", "value" + i);
// Send the record to the broker
producer.send(record);
}
// 3. Close the producer
producer.close();
}
}
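For completeness, case 1 of the strategy described above: if the ProducerRecord names a partition explicitly, the message is pinned there and the configured partitioner is not consulted. A one-line sketch, not from the original notes, assuming the same producer:
// Send directly to partition 2 of topic01; the partitioner is bypassed
producer.send(new ProducerRecord<>("topic01", 2, "key-pinned", "value-pinned"));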
4. Serialization
4.1 User entity class:
package com.baron.serializer;
import java.io.Serializable;
import java.util.Date;
/**
* @author baron
* @description
* @date 2021/9/7 14:50
*/
public class User implements Serializable {
private Integer id;
private String name;
private Date birthday;
public User() {
}
public User(Integer id, String name, Date birthday) {
this.id = id;
this.name = name;
this.birthday = birthday;
}
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public Date getBirthday() {
return birthday;
}
public void setBirthday(Date birthday) {
this.birthday = birthday;
}
@Override
public String toString() {
return "User{" +
"id=" + id +
", name='" + name + '\'' +
", birthday=" + birthday +
'}';
}
}
4.2 Custom serializer for User:
package com.baron.serializer;
import org.apache.commons.lang3.SerializationUtils;
import org.apache.kafka.common.serialization.Serializer;
import java.io.Serializable;
import java.util.Map;
/**
* @author baron
* @description
* @date 2021/9/7 14:47
*/
public class UserDefineSerializer implements Serializer<Object> {
@Override
public void configure(Map<String, ?> configs, boolean isKey) {
System.out.println("configure");
}
@Override
public byte[] serialize(String topic, Object data) {
// Java-serialize the object (User implements Serializable)
return SerializationUtils.serialize((Serializable) data);
}
@Override
public void close() {
System.out.println("close");
}
}
4.3 Custom deserializer for User:
package com.baron.serializer;
import org.apache.commons.lang3.SerializationUtils;
import org.apache.kafka.common.serialization.Deserializer;
import java.util.Map;
/**
* @author baron
* @description
* @date 2021/9/7 14:47
*/
public class UserDefineDeserializer implements Deserializer<Object> {
@Override
public void configure(Map<String, ?> configs, boolean isKey) {
System.out.println("configure");
}
@Override
public Object deserialize(String topic, byte[] data) {
return SerializationUtils.deserialize(data);
}
@Override
public void close() {
System.out.println("close");
}
}
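The notes only show the consuming side for the User type. A minimal sketch of the matching producer, not from the original notes (the class name KafkaProducerUser is made up), which registers the custom serializer from 4.2 for the record value and writes User objects to topic02:
package com.baron.serializer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;
import java.util.Date;
import java.util.Properties;
public class KafkaProducerUser {
public static void main(String[] args) {
Properties props = new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "CentOSA:9092,CentOSB:9092,CentOSC:9092");
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
// serialize the User value with the custom serializer
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, UserDefineSerializer.class.getName());
KafkaProducer<String, User> producer = new KafkaProducer<>(props);
for (int i = 0; i < 5; i++) {
producer.send(new ProducerRecord<>("topic02", "key" + i, new User(i, "user" + i, new Date())));
}
producer.close();
}
}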
4.4 Consumer:
package com.baron.serializer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;
import java.time.Duration;
import java.util.Arrays;
import java.util.Iterator;
import java.util.Properties;
import java.util.regex.Pattern;
/**
* @author baron
* @description
* @date 2021/9/6 15:24
*/
public class KafkaConsumerUser {
public static void main(String[] args) {
// 1. Create the KafkaConsumer
Properties props = new Properties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "CentOSA:9092,CentOSB:9092,CentOSC:9092");
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, UserDefineDeserializer.class.getName());
// Manage this consumer as part of a consumer group
props.put(ConsumerConfig.GROUP_ID_CONFIG, "g1");
KafkaConsumer<String, User> consumer = new KafkaConsumer<>(props);
// 2. Subscribe to the relevant topics
// consumer.subscribe(Pattern.compile("^topic.*"));
consumer.subscribe(Arrays.asList("topic02"));
// 3. Poll and iterate over the records
while (true) {
ConsumerRecords<String, User> consumerRecords = consumer.poll(Duration.ofSeconds(1));
if (!consumerRecords.isEmpty()) { // this poll returned data
Iterator<ConsumerRecord<String, User>> it = consumerRecords.iterator();
while (it.hasNext()) {
// Read one record
ConsumerRecord<String, User> record = it.next();
String topic = record.topic();
int partition = record.partition();
long offset = record.offset();
String key = record.key();
User value = record.value();
long timestamp = record.timestamp();
System.out.println(topic + "\t" + partition + "," + offset + "\t" + key + "\t" + value + "\t" + timestamp);
}
}
}
}
}
5. Interceptors
5.1 Custom interceptor:
package com.baron.interceptors;
import org.apache.kafka.clients.producer.ProducerInterceptor;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import java.util.Map;
/**
* @author baron
* @description
* @date 2021/9/7 15:15
*/
public class UserDefineProducerInterceptors implements ProducerInterceptor<String, String> {
@Override
public ProducerRecord<String, String> onSend(ProducerRecord<String, String> record) {
// Append a marker to the value before the record is serialized and sent
return new ProducerRecord<>(record.topic(), record.key(), record.value() + " --- baron");
}
@Override
public void onAcknowledgement(RecordMetadata metadata, Exception exception) {
System.out.println("metadata:" + metadata + ",exception:" + exception);
}
@Override
public void close() {
}
@Override
public void configure(Map<String, ?> configs) {
}
}
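interceptor.classes is a list-type config, so several interceptors can be chained and they run in the order listed. A sketch, not from the original notes (the second class name is a hypothetical placeholder):
// Chain two interceptors; the record returned by the first is passed to the second
props.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG,
"com.baron.interceptors.UserDefineProducerInterceptors,com.baron.interceptors.AnotherInterceptor");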
5.2 Producer:
package com.baron.interceptors;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;
import java.util.Properties;
/**
* @author baron
* @description
* @date 2021/9/6 15:24
*/
public class KafkaProducerInterceptors {
public static void main(String[] args) {
// 1. Create the KafkaProducer
Properties props = new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "CentOSA:9092,CentOSB:9092,CentOSC:9092");
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
props.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, UserDefineProducerInterceptors.class.getName());
KafkaProducer<String, String> producer = new KafkaProducer<String, String>(props);
// 2. Produce messages
for(int i=0; i<8; i++) {
ProducerRecord<String, String> record =
new ProducerRecord<>("topic01", "key" + i, "value" + i);
// new ProducerRecord<>("topic01", "value" + i);
// Send the record to the broker
producer.send(record);
}
// 3. Close the producer
producer.close();
}
}
5.3 Consumer:
package com.baron.interceptors;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;
import java.time.Duration;
import java.util.Arrays;
import java.util.Iterator;
import java.util.Properties;
import java.util.regex.Pattern;
/**
* @author baron
* @description
* @date 2021/9/6 15:24
*/
public class KafkaConsumerInterceptors {
public static void main(String[] args) {
// 1. Create the KafkaConsumer
Properties props = new Properties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "CentOSA:9092,CentOSB:9092,CentOSC:9092");
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
// Manage this consumer as part of a consumer group
props.put(ConsumerConfig.GROUP_ID_CONFIG, "g1");
KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
// 2. Subscribe to the relevant topics
// consumer.subscribe(Pattern.compile("^topic.*"));
consumer.subscribe(Arrays.asList("topic01"));
// 3. Poll and iterate over the records
while (true) {
ConsumerRecords<String, String> consumerRecords = consumer.poll(Duration.ofSeconds(1));
if (!consumerRecords.isEmpty()) { // this poll returned data
Iterator<ConsumerRecord<String, String>> it = consumerRecords.iterator();
while (it.hasNext()) {
// Read one record
ConsumerRecord<String, String> record = it.next();
String topic = record.topic();
int partition = record.partition();
long offset = record.offset();
String key = record.key();
String value = record.value();
long timestamp = record.timestamp();
System.out.println(topic + "\t" + partition + "," + offset + "\t" + key + "\t" + value + "\t" + timestamp);
}
}
}
}
}