Consumer
Automatic and Manual Offset Commits
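The two snippets below assume a Properties object that is already configured. A minimal sketch, using the same settings that appear in the multi-threaded example later in this article (KAFKA_SERVER and TOPIC_NAME are constants defined elsewhere):

Properties props = new Properties();
props.setProperty("bootstrap.servers", KAFKA_SERVER);
props.setProperty("group.id", "test");
props.setProperty("auto.commit.interval.ms", "1000"); // interval between automatic commits
props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

With enable.auto.commit=true the client commits offsets in the background every auto.commit.interval.ms; with false nothing is committed until you call commitSync()/commitAsync() yourself.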
props.setProperty("enable.auto.commit", "true");
KafkaConsumer<String,String> consumer = new KafkaConsumer(props);
// 消费订阅哪一个Topic或者几个Topic
consumer.subscribe(Arrays.asList(TOPIC_NAME));
while (true) {
ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(10000));
for (ConsumerRecord<String, String> record : records)
System.out.printf("patition = %d , offset = %d, key = %s, value = %s%n",
record.partition(),record.offset(), record.key(), record.value());
}
props.setProperty("enable.auto.commit", "false");
KafkaConsumer<String, String> consumer = new KafkaConsumer(props);
// 消费订阅哪一个Topic或者几个Topic
consumer.subscribe(Arrays.asList(TOPIC_NAME));
while (true) {
ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(10000));
for (ConsumerRecord<String, String> record : records) {
// 想把数据保存到数据库,成功就成功,不成功...
// TODO record 2 db
System.out.printf("patition = %d , offset = %d, key = %s, value = %s%n",
record.partition(), record.offset(), record.key(), record.value());
// 如果失败,则回滚, 不要提交offset
}
// 如果成功,手动通知offset提交
consumer.commitAsync();
}
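Note that commitAsync() does not retry failed commits. A minimal sketch of its callback variant, assuming only that you want to log failures (the message text is illustrative):

consumer.commitAsync((offsets, exception) -> {
    if (exception != null) {
        // The commit failed; after a restart or rebalance these records may be redelivered
        System.err.println("Offset commit failed for " + offsets + ": " + exception);
    }
});

commitSync(), by contrast, blocks and retries until the commit succeeds or an unrecoverable error occurs, which is why the per-partition examples below use it.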
Per-Partition Offset Commits
Commit the offset separately for each partition, which allows finer-grained recovery when a single partition fails:
props.setProperty("enable.auto.commit", "false");
KafkaConsumer<String, String> consumer = new KafkaConsumer(props);
// 消费订阅哪一个Topic或者几个Topic
consumer.subscribe(Arrays.asList(TOPIC_NAME));
while (true) {
ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(10000));
// 每个partition单独处理
for(TopicPartition partition : records.partitions()){
List<ConsumerRecord<String, String>> pRecord = records.records(partition);
for (ConsumerRecord<String, String> record : pRecord) {
System.out.printf("patition = %d , offset = %d, key = %s, value = %s%n",
record.partition(), record.offset(), record.key(), record.value());
}
//这个一次消费单个partition,poll到的消息最后的一个消息的offset
long lastOffset = pRecord.get(pRecord.size() -1).offset();
// 单个partition中的offset,并且进行提交
Map<TopicPartition, OffsetAndMetadata> offset = new HashMap<>();
offset.put(partition,new OffsetAndMetadata(lastOffset+1));
// 提交offset
consumer.commitSync(offset);
System.out.println("=============partition - "+ partition +" end================");
}
Manually Assigning One or More Partitions
With assign(), the consumer takes the listed partitions directly; it does not join a consumer group or participate in rebalancing.
TopicPartition p0 = new TopicPartition(TOPIC_NAME, 0);
TopicPartition p1 = new TopicPartition(TOPIC_NAME, 1);
// Assign specific partition(s) of a topic (only p0 here; add p1 to consume both)
consumer.assign(Arrays.asList(p0));
while (true) {
    ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(10000));
    // Handle each partition separately
    for (TopicPartition partition : records.partitions()) {
        List<ConsumerRecord<String, String>> pRecord = records.records(partition);
        for (ConsumerRecord<String, String> record : pRecord) {
            System.out.printf("partition = %d, offset = %d, key = %s, value = %s%n",
                    record.partition(), record.offset(), record.key(), record.value());
        }
        long lastOffset = pRecord.get(pRecord.size() - 1).offset();
        // Build the offset map for this single partition and commit it
        Map<TopicPartition, OffsetAndMetadata> offset = new HashMap<>();
        offset.put(partition, new OffsetAndMetadata(lastOffset + 1));
        // Commit the offset
        consumer.commitSync(offset);
        System.out.println("=============partition - " + partition + " end================");
    }
}
Multi-threaded Concurrent Consumption
The Kafka producer is thread-safe.
The Kafka consumer is NOT thread-safe.
Approach 1: the classic pattern. Each thread creates its own KafkaConsumer, which guarantees thread safety. This approach suits cases with strict data-consistency requirements: it gives fine-grained control over partitions and makes it easy to roll back and reprocess on failure.
public static void main(String[] args) throws InterruptedException {
    KafkaConsumerRunner r1 = new KafkaConsumerRunner();
    Thread t1 = new Thread(r1);
    t1.start();
    Thread.sleep(15000);
    r1.shutdown();
}

public static class KafkaConsumerRunner implements Runnable {
    private final AtomicBoolean closed = new AtomicBoolean(false);
    private final KafkaConsumer<String, String> consumer;

    public KafkaConsumerRunner() {
        Properties props = new Properties();
        props.put("bootstrap.servers", KAFKA_SERVER);
        props.put("group.id", "test");
        props.put("enable.auto.commit", "false");
        props.put("auto.commit.interval.ms", "1000");
        props.put("session.timeout.ms", "30000");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        consumer = new KafkaConsumer<>(props);
        TopicPartition p0 = new TopicPartition(TOPIC_NAME, 0);
        TopicPartition p1 = new TopicPartition(TOPIC_NAME, 1);
        consumer.assign(Arrays.asList(p0, p1));
    }

    public void run() {
        try {
            while (!closed.get()) {
                // Process messages
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(10000));
                for (TopicPartition partition : records.partitions()) {
                    List<ConsumerRecord<String, String>> pRecord = records.records(partition);
                    // Handle this partition's messages
                    for (ConsumerRecord<String, String> record : pRecord) {
                        System.out.printf("partition = %d, offset = %d, key = %s, value = %s%n",
                                record.partition(), record.offset(), record.key(), record.value());
                    }
                    // Report the new offset back to Kafka
                    long lastOffset = pRecord.get(pRecord.size() - 1).offset();
                    // Note: commit lastOffset + 1, i.e. the position of the next record to read
                    consumer.commitSync(Collections.singletonMap(partition, new OffsetAndMetadata(lastOffset + 1)));
                }
            }
        } catch (WakeupException e) {
            // wakeup() interrupts a blocking poll(); ignore the exception if we are shutting down
            if (!closed.get()) {
                throw e;
            }
        } finally {
            consumer.close();
        }
    }

    // Safe shutdown from another thread: set the flag, then wake up the blocked poll()
    public void shutdown() {
        closed.set(true);
        consumer.wakeup();
    }
}
Approach 2: poll the records on a single thread, then hand each record to a thread pool for processing.
With this approach the polling thread never learns whether a worker succeeded or failed, so offsets cannot be committed manually. Per-partition ordering is also lost, since records are processed by arbitrary threads.
It suits streaming data where processing correctness is not critical, e.g. non-business systems: whatever is pushed gets processed, and individual failures are acceptable.
Example: GPS location data uploads.
public static void main(String[] args) throws InterruptedException {
    String brokerList = KAFKA_SERVER;
    String groupId = "test";
    int workerNum = 5;
    // Create a CunsumerExecutor, which wraps the consumer and a worker thread pool
    CunsumerExecutor consumers = new CunsumerExecutor(brokerList, groupId, TOPIC_NAME);
    consumers.execute(workerNum);
    Thread.sleep(1000000);
    consumers.shutdown();
}

// Consumer wrapper: polls records and dispatches them to workers
public static class CunsumerExecutor {
    private final KafkaConsumer<String, String> consumer;
    private ExecutorService executors;

    public CunsumerExecutor(String brokerList, String groupId, String topic) {
        Properties props = new Properties();
        props.put("bootstrap.servers", brokerList);
        props.put("group.id", groupId);
        props.put("enable.auto.commit", "true");
        props.put("auto.commit.interval.ms", "1000");
        props.put("session.timeout.ms", "30000");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Arrays.asList(topic));
    }

    public void execute(int workerNum) {
        executors = new ThreadPoolExecutor(workerNum, workerNum, 0L, TimeUnit.MILLISECONDS,
                new ArrayBlockingQueue<>(1000), new ThreadPoolExecutor.CallerRunsPolicy());
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(200));
            for (final ConsumerRecord<String, String> record : records) {
                executors.submit(new ConsumerRecordWorker(record));
            }
        }
    }

    public void shutdown() {
        if (consumer != null) {
            consumer.close();
        }
        if (executors != null) {
            executors.shutdown();
        }
        try {
            if (!executors.awaitTermination(10, TimeUnit.SECONDS)) {
                System.out.println("Timeout.... Ignore for this case");
            }
        } catch (InterruptedException ignored) {
            System.out.println("Other thread interrupted this shutdown, ignore for this case.");
            Thread.currentThread().interrupt();
        }
    }
}

// Worker: processes a single record
public static class ConsumerRecordWorker implements Runnable {
    private final ConsumerRecord<String, String> record;

    public ConsumerRecordWorker(ConsumerRecord<String, String> record) {
        this.record = record;
    }

    @Override
    public void run() {
        // e.g. write the record to a database
        System.out.println("Thread - " + Thread.currentThread().getName());
        System.err.printf("partition = %d, offset = %d, key = %s, value = %s%n",
                record.partition(), record.offset(), record.key(), record.value());
    }
}
Controlling the Starting Offset
KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
// jiangzh-topic has two partitions: 0 and 1
TopicPartition p0 = new TopicPartition(TOPIC_NAME, 0);
// Assign a specific partition of the topic
consumer.assign(Arrays.asList(p0));
while (true) {
    // Manually set the starting offset
    /*
        1. Control the starting offset yourself
        2. If the program fails, the batch is simply consumed again
    */
    /*
        A typical pattern:
        1. The first time, consume from offset 0
        2. After consuming, say, 100 records, store offset 101 in Redis
        3. Before each poll, read the latest offset from Redis
        4. seek() to that position and consume from there
    */
    // Hardcoded to 700 here for demonstration; normally this value comes from external storage
    consumer.seek(p0, 700);
    ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(10000));
    // Handle each partition separately
    for (TopicPartition partition : records.partitions()) {
        List<ConsumerRecord<String, String>> pRecord = records.records(partition);
        for (ConsumerRecord<String, String> record : pRecord) {
            System.err.printf("partition = %d, offset = %d, key = %s, value = %s%n",
                    record.partition(), record.offset(), record.key(), record.value());
        }
        long lastOffset = pRecord.get(pRecord.size() - 1).offset();
        // Build the offset map for this single partition and commit it
        Map<TopicPartition, OffsetAndMetadata> offset = new HashMap<>();
        offset.put(partition, new OffsetAndMetadata(lastOffset + 1));
        // Commit the offset
        consumer.commitSync(offset);
        System.out.println("=============partition - " + partition + " end================");
    }
}
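A minimal sketch of the Redis-backed variant described in the comments above, assuming the Jedis client and a hypothetical key layout "offset:<topic>:<partition>" (the Redis address and key name are illustrative, not from the original):

Jedis jedis = new Jedis("localhost", 6379); // assumed Redis address
String key = "offset:" + TOPIC_NAME + ":0"; // hypothetical key layout
while (true) {
    // Read the latest committed position from Redis before each poll
    String saved = jedis.get(key);
    long start = (saved == null) ? 0L : Long.parseLong(saved);
    consumer.seek(p0, start);
    ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(10000));
    long next = start;
    for (ConsumerRecord<String, String> record : records.records(p0)) {
        // TODO process record
        next = record.offset() + 1; // position of the next record to read
    }
    // Persist the new position; ideally atomically with the processing result
    jedis.set(key, String.valueOf(next));
}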
Consumer Rate Limiting
KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
TopicPartition p0 = new TopicPartition(TOPIC_NAME, 0);
TopicPartition p1 = new TopicPartition(TOPIC_NAME, 1);
// Assign specific partitions of the topic
consumer.assign(Arrays.asList(p0, p1));
long totalNum = 40;
while (true) {
    ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(10000));
    // Handle each partition separately
    for (TopicPartition partition : records.partitions()) {
        List<ConsumerRecord<String, String>> pRecord = records.records(partition);
        long num = 0;
        for (ConsumerRecord<String, String> record : pRecord) {
            System.out.printf("partition = %d, offset = %d, key = %s, value = %s%n",
                    record.partition(), record.offset(), record.key(), record.value());
            /*
                Token-bucket idea:
                1. After receiving a record, try to take a token from the bucket
                2. If a token is acquired, continue with business processing
                3. If not, pause() the partition and wait for tokens
                4. Once the bucket has enough tokens again, resume() the partition
            */
            // Crude demonstration: pause p0 after totalNum records from partition 0,
            // and resume it once partition 1 has delivered totalNum records
            num++;
            if (record.partition() == 0) {
                if (num >= totalNum) {
                    consumer.pause(Arrays.asList(p0));  // pause consumption
                }
            }
            if (record.partition() == 1) {
                if (num == totalNum) {
                    consumer.resume(Arrays.asList(p0)); // resume consumption
                }
            }
        }
        long lastOffset = pRecord.get(pRecord.size() - 1).offset();
        // Build the offset map for this single partition and commit it
        Map<TopicPartition, OffsetAndMetadata> offset = new HashMap<>();
        offset.put(partition, new OffsetAndMetadata(lastOffset + 1));
        // Commit the offset
        consumer.commitSync(offset);
        System.out.println("=============partition - " + partition + " end================");
    }
}
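The comments above describe a token bucket. A minimal sketch of that idea using Guava's RateLimiter (Guava is an assumed dependency, not used in the original; the 100 records/second figure is illustrative):

// Allow roughly 100 records per second across the whole consumer
RateLimiter limiter = RateLimiter.create(100.0);
while (true) {
    ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
    for (ConsumerRecord<String, String> record : records) {
        limiter.acquire(); // blocks until a token is available
        // TODO business processing
    }
    consumer.commitSync();
}

If acquire() can block for long, the consumer must still call poll() within max.poll.interval.ms or the broker will evict it from the group; that is where pause()/resume(), as in the snippet above, come in.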
Consumer Rebalance Explained
Ideally the number of consumers in a group matches the total number of partitions across the subscribed topics; if consumers outnumber partitions, the extra consumers sit idle and receive no messages. A rebalance is triggered when:
1. A new consumer joins the consumer group
2. A consumer leaves the consumer group
   - abnormally (e.g. crash or session timeout)
   - voluntarily (clean shutdown)
A known Kafka caveat: if an offset commit fails, the same messages may be delivered again after a rebalance or restart, i.e. duplicate consumption. See the listener sketch below.
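To reduce duplicates around rebalances, offsets can be committed just before partitions are revoked. A minimal sketch using the standard ConsumerRebalanceListener (currentOffsets is an assumed map maintained by the processing loop):

Map<TopicPartition, OffsetAndMetadata> currentOffsets = new HashMap<>();

consumer.subscribe(Arrays.asList(TOPIC_NAME), new ConsumerRebalanceListener() {
    @Override
    public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
        // Commit what has been processed so far, before the partitions are reassigned
        consumer.commitSync(currentOffsets);
    }

    @Override
    public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
        // Nothing special needed; consumption resumes from the committed offsets
    }
});

while (true) {
    ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
    for (ConsumerRecord<String, String> record : records) {
        // TODO process record
        currentOffsets.put(new TopicPartition(record.topic(), record.partition()),
                new OffsetAndMetadata(record.offset() + 1));
    }
    consumer.commitAsync(currentOffsets, null);
}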