producer
package com.soul.kafka.level07;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;
import java.util.Properties;
public class _13KafkaProducerOffset {
public static void main(String[] args) {
//1. Create the Kafka connection properties
Properties props = new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "kafka01:9092,kafka02:9092,kafka03:9092");
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
//2. Create the producer
KafkaProducer<String, String> producer = new KafkaProducer<String, String>(props);
//3. Build and send the records
for (int i = 0; i < 5; i++) {
//Note: the topic must already exist
ProducerRecord<String, String> record = new ProducerRecord<>("topic02", "K" + i, "V" + i);
producer.send(record);
}
producer.close();
}
}
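The send() calls above are fire-and-forget. For verifying the offset experiments below, it can help to attach a delivery callback that prints the partition and offset the broker assigned to each record. This is a minimal sketch, not part of the original code: the class name KafkaProducerWithCallbackSketch is made up, while the broker list and topic02 are reused from above.
package com.soul.kafka.level07;
import org.apache.kafka.clients.producer.*;
import org.apache.kafka.common.serialization.StringSerializer;
import java.util.Properties;
public class KafkaProducerWithCallbackSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "kafka01:9092,kafka02:9092,kafka03:9092");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        KafkaProducer<String, String> producer = new KafkaProducer<>(props);
        for (int i = 0; i < 5; i++) {
            ProducerRecord<String, String> record = new ProducerRecord<>("topic02", "K" + i, "V" + i);
            // the callback fires once the broker acknowledges (or rejects) the record
            producer.send(record, new Callback() {
                @Override
                public void onCompletion(RecordMetadata metadata, Exception exception) {
                    if (exception != null) {
                        exception.printStackTrace();
                    } else {
                        System.out.println("sent to partition:" + metadata.partition()
                                + ", offset:" + metadata.offset());
                    }
                }
            });
        }
        producer.close(); // flushes pending records; callbacks run before close() returns
    }
}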
consumer: latest policy
package com.soul.kafka.level07;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.serialization.StringDeserializer;
import java.time.Duration;
import java.util.Iterator;
import java.util.Properties;
import java.util.regex.Pattern;
/**
 * When a Kafka consumer group has no committed offset for the partitions it reads
 * (i.e., the broker has stored no consumption record for this group),
 * the consumer falls back to its first-time consumption policy, which defaults to latest:
 * auto.offset.reset=latest
 *   earliest - automatically reset the offset to the earliest offset
 *   latest   - automatically reset the offset to the latest offset
 *   none     - throw an exception to the consumer if no previous offset is found for the consumer's group
 */
public class _14KafkaConsumerOffsetLatest {
// The latest policy for offsets:
// 1. Kafka has no recorded offset for a newly joined consumer group:
//    the consumer has never consumed these partitions, so no offset was ever committed.
//    At startup Kafka hands the consumer the latest offset in the partition's log.
//    After consuming, the consumer commits the processed offsets back to Kafka for later use.
//    If the consumer later crashes and restarts, it resumes from the last committed offset;
//    the latest policy plays no role in that case.
// Conclusion: the latest policy only takes effect when a new consumer group joins.
//Test:
//1. Start _13KafkaProducerOffset and send 5 messages.
//2. Start _14KafkaConsumerOffsetLatest: the previously sent messages are NOT consumed.
//3. Start _13KafkaProducerOffset again and send 5 more messages.
//4. _14KafkaConsumerOffsetLatest consumes the 5 messages _13KafkaProducerOffset just sent.
public static void main(String[] args) {
//1. Create the Kafka connection properties
Properties props = new Properties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "kafka01:9092,kafka02:9092,kafka03:9092");
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
//consume under a new consumer group: group02
props.put(ConsumerConfig.GROUP_ID_CONFIG, "group02");
// The default setting is latest:
// messages the producer sent to the broker before this "new consumer" started are not processed;
// by default Kafka takes the consumer's start position as the group's first offset.
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
//2. Create the topic consumer
KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
//3. Subscribe to all topics whose names start with "topic"
consumer.subscribe(Pattern.compile("^topic.*$"));
while (true) {
ConsumerRecords<String, String> consumerRecords = consumer.poll(Duration.ofSeconds(1));
Iterator<ConsumerRecord<String, String>> recordIterator = consumerRecords.iterator();
while (recordIterator.hasNext()) {
ConsumerRecord<String, String> record = recordIterator.next();
String key = record.key();
String value = record.value();
long offset = record.offset();
int partition = record.partition();
System.out.println("key:" + key + ", value:" + value
+ ", partition:" + partition + ", offset:" + offset);
}
}
}
}
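auto.offset.reset only matters while the group has no committed offset. If a specific starting point is needed regardless of what was committed, the consumer can bypass the policy entirely with assign() plus seek(). A sketch under assumptions: the class name, the group id group02seek, and reading only partition 0 of topic02 are illustrative choices, not from the original.
package com.soul.kafka.level07;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;
import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
public class KafkaConsumerSeekSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "kafka01:9092,kafka02:9092,kafka03:9092");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "group02seek"); // assumed group id
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false); // inspect only, commit nothing
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        // assign() takes explicit partitions instead of joining a group subscription,
        // so auto.offset.reset never applies: the position is set directly with seek
        TopicPartition tp = new TopicPartition("topic02", 0); // partition 0 only, for illustration
        consumer.assign(Collections.singletonList(tp));
        consumer.seekToBeginning(Collections.singletonList(tp)); // or consumer.seek(tp, someOffset)
        while (true) {
            for (ConsumerRecord<String, String> record : consumer.poll(Duration.ofSeconds(1))) {
                System.out.println("partition:" + record.partition() + ", offset:" + record.offset()
                        + ", key:" + record.key() + ", value:" + record.value());
            }
        }
    }
}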
consumer: earliest policy
package com.soul.kafka.level07;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;
import java.time.Duration;
import java.util.Iterator;
import java.util.Properties;
import java.util.regex.Pattern;
public class _15KafkaConsumerEarliest {
// The earliest policy for offsets:
// 1. Kafka has no recorded offset for a newly joined consumer group:
//    the consumer has never consumed these partitions, so no offset was ever committed.
//    Kafka hands the consumer the earliest offset in the partition's log.
//    After consuming, the consumer commits the processed offsets back to Kafka for later use.
//    If the consumer later crashes and restarts, it resumes from the last committed offset;
//    the earliest policy plays no role in that case.
// Conclusion: the earliest policy, too, only takes effect when a new consumer group joins.
//Test:
//1. Start _13KafkaProducerOffset and send 5 messages.
//2. Start _15KafkaConsumerEarliest: it consumes the 5 messages _13KafkaProducerOffset already sent.
public static void main(String[] args) {
//1. Create the Kafka connection properties
Properties props = new Properties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "kafka01:9092,kafka02:9092,kafka03:9092");
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
//consume under a new consumer group: group03
props.put(ConsumerConfig.GROUP_ID_CONFIG, "group03");
// The default setting is latest;
// earliest starts consuming from the earliest unconsumed offset of each partition.
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
//2. Create the topic consumer
KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
//3. Subscribe to all topics whose names start with "topic"
consumer.subscribe(Pattern.compile("^topic.*$"));
while (true) {
ConsumerRecords<String, String> consumerRecords = consumer.poll(Duration.ofSeconds(1));
Iterator<ConsumerRecord<String, String>> recordIterator = consumerRecords.iterator();
while (recordIterator.hasNext()) {
ConsumerRecord<String, String> record = recordIterator.next();
String key = record.key();
String value = record.value();
long offset = record.offset();
int partition = record.partition();
System.out.println("key:" + key + ", value:" + value
+ ", partition:" + partition + ", offset:" + offset);
}
}
}
}
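The third value from the auto.offset.reset list, none, refuses to guess: if the group has no committed offset, poll() fails with NoOffsetForPartitionException and the application must decide where to start. A hedged sketch, not from the original; the class name and the group id group06 are assumptions.
package com.soul.kafka.level07;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.serialization.StringDeserializer;
import java.time.Duration;
import java.util.Properties;
import java.util.regex.Pattern;
public class KafkaConsumerNoneSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "kafka01:9092,kafka02:9092,kafka03:9092");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "group06"); // assumed group id
        // none: fail fast instead of silently jumping to earliest/latest
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "none");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Pattern.compile("^topic.*$"));
        while (true) {
            try {
                for (ConsumerRecord<String, String> record : consumer.poll(Duration.ofSeconds(1))) {
                    System.out.println("offset:" + record.offset() + ", value:" + record.value());
                }
            } catch (NoOffsetForPartitionException e) {
                // no committed offset for this group: decide explicitly rather than guess
                System.err.println("no committed offset for: " + e.partitions());
                consumer.seekToBeginning(e.partitions());
            }
        }
    }
}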
consumer: auto-commit policy
package com.soul.kafka.level07;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.serialization.StringDeserializer;
import java.time.Duration;
import java.util.Iterator;
import java.util.Properties;
import java.util.regex.Pattern;
/**
 * By default the Kafka consumer periodically commits the offsets it has consumed,
 * which guarantees that every message is consumed at least once.
 * Two parameters control this behavior:
 * enable.auto.commit = true (default)
 * auto.commit.interval.ms = 5000 (default)
 * To manage offsets yourself, disable auto-commit and commit the offsets manually.
 * Note that the committed offset must always be the offset of the record just consumed + 1,
 * because the committed offset is the position from which the consumer fetches next.
 */
public class _16KafkaConsumerOffsetAutoCommit {
//Test:
//1. Start _13KafkaProducerOffset and send 5 messages.
//2. Start _16KafkaConsumerOffsetAutoCommit: it consumes the 5 messages just sent.
//3. Shut down _16KafkaConsumerOffsetAutoCommit within 10s (before the auto-commit fires).
//4. Restart _16KafkaConsumerOffsetAutoCommit: it consumes the same 5 messages again; wait 10s, then shut down.
//5. Restart _16KafkaConsumerOffsetAutoCommit: nothing is consumed this time.
//6. Start _13KafkaProducerOffset and send 5 more messages.
//7. _16KafkaConsumerOffsetAutoCommit consumes only the new data, i.e. consumption moves
//   forward only once the offset has been successfully committed to Kafka.
public static void main(String[] args) {
//1. Create the Kafka connection properties
Properties props = new Properties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "kafka01:9092,kafka02:9092,kafka03:9092");
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
//consume under a new consumer group: group04
props.put(ConsumerConfig.GROUP_ID_CONFIG, "group04");
// The default setting is latest;
// earliest starts consuming from the earliest unconsumed offset of each partition.
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
//lengthen the auto-commit interval to 10s
props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, 10000);
//default is true: auto-commit enabled
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true);
//2. Create the topic consumer
KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
//3. Subscribe to all topics whose names start with "topic"
consumer.subscribe(Pattern.compile("^topic.*$"));
while (true) {
ConsumerRecords<String, String> consumerRecords = consumer.poll(Duration.ofSeconds(1));
Iterator<ConsumerRecord<String, String>> recordIterator = consumerRecords.iterator();
while (recordIterator.hasNext()) {
ConsumerRecord<String, String> record = recordIterator.next();
String key = record.key();
String value = record.value();
long offset = record.offset();
int partition = record.partition();
System.out.println("key:" + key + ", value:" + value
+ ", partition:" + partition + ", offset:" + offset);
}
}
}
}
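The loop above never calls consumer.close(), which is why killing the process inside the 10s window loses the uncommitted offsets. When auto-commit is enabled, close() performs a final synchronous commit, so a clean shutdown narrows that repeat window. Below is the common wakeup pattern, sketched with an assumed shutdown-hook wiring that is not part of the original code.
package com.soul.kafka.level07;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.errors.WakeupException;
import org.apache.kafka.common.serialization.StringDeserializer;
import java.time.Duration;
import java.util.Properties;
import java.util.regex.Pattern;
public class KafkaConsumerGracefulCloseSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "kafka01:9092,kafka02:9092,kafka03:9092");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "group04");
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true);
        final KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        final Thread mainThread = Thread.currentThread();
        // on Ctrl+C / SIGTERM: interrupt the blocked poll(), then wait for close() to finish
        Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
            @Override
            public void run() {
                consumer.wakeup(); // wakeup() is the one thread-safe KafkaConsumer method
                try {
                    mainThread.join();
                } catch (InterruptedException ignored) {
                }
            }
        }));
        consumer.subscribe(Pattern.compile("^topic.*$"));
        try {
            while (true) {
                for (ConsumerRecord<String, String> record : consumer.poll(Duration.ofSeconds(1))) {
                    System.out.println("offset:" + record.offset() + ", value:" + record.value());
                }
            }
        } catch (WakeupException e) {
            // expected during shutdown
        } finally {
            consumer.close(); // with auto-commit enabled, this commits current offsets synchronously
        }
    }
}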
consumer: manual commit policy
package com.soul.kafka.level07;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;
import java.time.Duration;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Properties;
import java.util.regex.Pattern;
/**
 * Same background as _16KafkaConsumerOffsetAutoCommit: to manage offsets yourself,
 * disable auto-commit (enable.auto.commit = false) and commit the offsets manually.
 * The committed offset must always be the offset of the record just consumed + 1,
 * because the committed offset is the position from which the consumer fetches next.
 */
public class _17KafkaConsumerOffsetUDFCommit {
public static void main(String[] args) {
//1. Create the Kafka connection properties
Properties props = new Properties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "kafka01:9092,kafka02:9092,kafka03:9092");
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
//consume under a new consumer group: group05
props.put(ConsumerConfig.GROUP_ID_CONFIG, "group05");
//disable auto-commit
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
//2. Create the topic consumer
KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
//3. Subscribe to all topics whose names start with "topic"
consumer.subscribe(Pattern.compile("^topic.*$"));
while (true) {
ConsumerRecords<String, String> consumerRecords = consumer.poll(Duration.ofSeconds(1));
//this poll returned message data
if (!consumerRecords.isEmpty()) {
Iterator<ConsumerRecord<String, String>> recordIterator = consumerRecords.iterator();
while (recordIterator.hasNext()) {
ConsumerRecord<String, String> record = recordIterator.next();
String key = record.key();
String value = record.value();
//the committed offset must always be the consumed offset + 1,
//otherwise the consumer re-reads the last record and consumes it again on every restart
long offset = record.offset();
int partition = record.partition();
//offsets records, per partition, the position to commit once this record is processed
Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<TopicPartition, OffsetAndMetadata>();
//commit metadata for this partition, i.e. the offset to commit
offsets.put(new TopicPartition(record.topic(), partition),
//the committed offset must always be the consumed offset + 1 !!!
//otherwise the consumer re-reads the last record and consumes it again on every restart
new OffsetAndMetadata(offset + 1));
//commit the consumer offset asynchronously
consumer.commitAsync(offsets, new OffsetCommitCallback() {
@Override
public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) {
//commit completion callback: surface failures instead of ignoring them
if (exception != null) {
exception.printStackTrace();
} else {
System.out.println("commit completed for record offset: " + offset);
}
}
});
System.out.println("key:" + key + ", value:" + value
+ ", partition:" + partition + ", offset:" + offset);
}
}
}
}
}
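Committing once per record, as above, maximizes commit traffic. A common alternative is one synchronous commit per poll() batch: the no-argument commitSync() commits "last consumed offset + 1" for every partition returned by the last poll(), so the +1 bookkeeping is handled by the client. A sketch follows; the class name and the group id group07 are assumptions.
package com.soul.kafka.level07;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.serialization.StringDeserializer;
import java.time.Duration;
import java.util.Properties;
import java.util.regex.Pattern;
public class KafkaConsumerBatchCommitSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "kafka01:9092,kafka02:9092,kafka03:9092");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "group07"); // assumed group id
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Pattern.compile("^topic.*$"));
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
            if (records.isEmpty()) {
                continue;
            }
            for (ConsumerRecord<String, String> record : records) {
                System.out.println("partition:" + record.partition()
                        + ", offset:" + record.offset() + ", value:" + record.value());
            }
            // no-arg commitSync() commits the positions from the last poll() and
            // blocks until the commit succeeds or fails
            consumer.commitSync();
        }
    }
}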
pom.xml
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.soul</groupId>
<artifactId>kafka</artifactId>
<version>0.0.1</version>
<name>kafka</name>
<properties>
<java.version>1.8</java.version>
</properties>
<dependencies>
<!-- kafka begin -->
<!--https://mvnrepository.com/artifact/org.apache.kafka/kafka-clients-->
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
<version>2.2.0</version>
</dependency>
<!-- https://mvnrepository.com/artifact/log4j/log4j -->
<dependency>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
<version>1.2.17</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.slf4j/slf4j-api -->
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<version>1.7.25</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.slf4j/slf4j-log4j12 -->
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
<version>1.7.25</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.commons/commons-lang3 -->
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
<version>3.9</version>
</dependency>
<!-- kafka end -->
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<configuration>
<source>8</source>
<target>8</target>
</configuration>
</plugin>
</plugins>
</build>
</project>
log4j.properties
log4j.rootLogger = info,console
log4j.appender.console = org.apache.log4j.ConsoleAppender
log4j.appender.console.Target = System.out
log4j.appender.console.layout = org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern = %p %d{yyyy-MM-dd HH:mm:ss} %c - %m%n