Kafka消费者模式

一、单线程消费者模式

package nj.zb.kb23.kafka;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

/*
* 单线程
*/
/**
 * Single-threaded Kafka consumer example: subscribes to topic "kb23",
 * polls in an endless loop, prints each record, and commits offsets manually.
 */
public class MyConsumer {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.91.11:9092");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        /*
         * earliest: resume from the committed offset; if the group has none, read from the beginning
         * latest:   resume from the committed offset; if the group has none, read only new messages
         * none:     resume from the committed offset; if the group has none, throw an exception
         */
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        /*
         * ENABLE_AUTO_COMMIT_CONFIG: false = commit offsets manually, true = auto-commit.
         * AUTO_COMMIT_INTERVAL_MS_CONFIG: auto-commit period (1000 ms); only used when auto-commit is on.
         */
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
        // Consumer group id.
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "group1");

        // Single consumer instance, single thread.
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Collections.singleton("kb23"));
        while (true) {
            ConsumerRecords<String, String> batch = consumer.poll(Duration.ofMillis(100));
            for (ConsumerRecord<String, String> rec : batch) {
                System.out.println("topic:" + rec.topic()
                        + " partition:" + rec.partition()
                        + " 偏移量:" + rec.offset()
                        + " value:" + rec.value()
                        + " 时间戳:" + rec.timestamp());
            }
            // Manual async commit; with earliest/latest the group then resumes
            // after the committed offset unless new messages arrive.
            consumer.commitAsync();
        }
    }
}

二、多线程消费者模式

package nj.zb.kb23.kafka;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

/*
* 多线程
*/
/**
 * Multi-threaded Kafka consumer example: three worker threads, each owning its
 * own KafkaConsumer (KafkaConsumer is NOT thread-safe, so instances must not be
 * shared). All threads join the same group, so the topic's partitions are
 * distributed among them.
 */
public class MyConsumer2 {
    public static void main(String[] args) {
        Properties properties = new Properties();
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,"192.168.91.11:9092");
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,StringDeserializer.class);
        // earliest: read from the beginning when the group has no committed offset.
        properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG,"earliest");
        // Auto-commit disabled -> offsets must be committed manually (interval is then unused).
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG,"false");
        properties.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG,"1000");
        // Consumer group id shared by all threads.
        properties.put(ConsumerConfig.GROUP_ID_CONFIG,"心心威武");
        // 3 threads, as documented (the original loop used i<=3 and started 4).
        for (int i = 0; i < 3; i++) {
            new Thread(new Runnable() {
                @Override
                public void run() {
                    KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(properties);
                    kafkaConsumer.subscribe(Collections.singleton("kb23"));
                    while(true){
                        ConsumerRecords<String, String> records = kafkaConsumer.poll(Duration.ofMillis(100));
                        for (ConsumerRecord<String, String> record : records) {
                            System.out.println(Thread.currentThread().getName()+
                                    " topic: "+record.topic()+
                                    " partition: "+record.partition()+
                                    " 偏移量: "+record.offset()+
                                    " value: "+record.value()+
                                    " 时间戳: "+record.timestamp());
                        }
                        // Auto-commit is off, so commit manually; otherwise the group
                        // would re-read every record on the next run.
                        kafkaConsumer.commitAsync();
                    }
                }
            }).start();
        }
    }
}
"C:\Program Files\Java\jdk1.8.0_144\bin\java.exe" "-javaagent:D:\Program Files\JetBrains\IntelliJ IDEA 。。。。。。。。。。。。。。。。。。。。。。。。
SLF4J: Failed to load class "org.slf4j.impl.StaticLoggerBinder".
SLF4J: Defaulting to no-operation (NOP) logger implementation
SLF4J: See http://www.slf4j.org/codes.html#StaticLoggerBinder for further details.
Thread-3 topic: kb23 partition: 0 偏移量: 0 value: hello java 时间戳: 1695173593009
Thread-3 topic: kb23 partition: 0 偏移量: 1 value: hello c== 时间戳: 1695173606546
Thread-2 topic: kb23 partition: 1 偏移量: 0 value: dufiudhifch 时间戳: 1695174679229
Thread-1 topic: kb23 partition: 2 偏移量: 0 value: hel 时间戳: 1695173599314
Thread-3 topic: kb23 partition: 0 偏移量: 2 value: djfhjsjkhfk 时间戳: 1695174683054
Thread-1 topic: kb23 partition: 2 偏移量: 1 value: hello world 时间戳: 1695173611446
Thread-2 topic: kb23 partition: 1 偏移量: 1 value: hsdakhskfhak 时间戳: 1695174686318
Thread-1 topic: kb23 partition: 2 偏移量: 2 value: hshcdshcdskc 时间戳: 1695174681057
Thread-3 topic: kb23 partition: 0 偏移量: 3 value: jkfdsajklfjalds 时间戳: 1695174689058
Thread-1 topic: kb23 partition: 2 偏移量: 3 value: dhjfhkshkf 时间戳: 1695174684802

三、消费者模式seek方法

package nj.zb.kb23.kafka;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Properties;
import java.util.Set;

/*
* seek指定开始消费的位置
*/
/**
 * Kafka consumer example using seek(): after partitions are assigned, each
 * partition's read position is forced to an explicit starting offset before
 * the normal poll loop begins.
 */
public class MyConsumerSeek {
    public static void main(String[] args) {
        Properties properties = new Properties();
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,"192.168.91.11:9092");
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,StringDeserializer.class);
        /*
         * earliest: resume from the committed offset; if the group has none, read from the beginning
         * latest:   resume from the committed offset; if the group has none, read only new messages
         * none:     resume from the committed offset; if the group has none, throw an exception
         */
        properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG,"earliest");
        /*
         * ENABLE_AUTO_COMMIT_CONFIG: false = commit offsets manually, true = auto-commit.
         * AUTO_COMMIT_INTERVAL_MS_CONFIG: auto-commit period (1000 ms); only used when auto-commit is on.
         */
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG,"false");
        properties.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG,"1000");
        // Consumer group id.
        properties.put(ConsumerConfig.GROUP_ID_CONFIG,"group3");
        // Single-threaded consumer.
        KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(properties);
        kafkaConsumer.subscribe(Collections.singleton("kb23"));

        // Poll until the coordinator has actually assigned partitions to this
        // consumer; records returned by these warm-up polls are discarded.
        Set<TopicPartition> assignment = new HashSet<>();
        while (assignment.isEmpty()) {
            kafkaConsumer.poll(Duration.ofMillis(1000));
            assignment = kafkaConsumer.assignment();
        }
        // Desired starting offset per partition of topic kb23: p0->4, p1->5, p2->4.
        Map<Integer, Long> startOffsets = new HashMap<>();
        startOffsets.put(0, 4L);
        startOffsets.put(1, 5L);
        startOffsets.put(2, 4L);
        for (TopicPartition topicPartition : assignment) {
            System.out.println(topicPartition.topic()+"\t"+topicPartition.partition());
            Long offset = startOffsets.get(topicPartition.partition());
            if (offset != null) {
                kafkaConsumer.seek(topicPartition, offset);
            }
        }
        while (true){
            ConsumerRecords<String, String> records = kafkaConsumer.poll(Duration.ofMillis(100));
            for (ConsumerRecord<String, String> record : records) {
                System.out.println("topic:"+record.topic()
                        +" partition:"+record.partition()
                        +" 偏移量:"+record.offset()
                        +" value:"+record.value()
                        +" 时间戳:"+record.timestamp());
            }
        }
    }
}

"C:\Program Files\Java\jdk1.8.0_144\bin\java.exe" "-javaagent:D:\Program Files\JetBrains\IntelliJ IDEA.。。。。。。。。。。。。。
SLF4J: See http://www.slf4j.org/codes.html#StaticLoggerBinder for further details.
kb23    2
kb23    1
kb23    0
topic:kb23 partition:2 偏移量:4 value:sjhkdksahkdah 时间戳:1695174687827
topic:kb23 partition:2 偏移量:5 value:hhh1 时间戳:1695175898301
topic:kb23 partition:2 偏移量:6 value:2222 时间戳:1695176003767
topic:kb23 partition:2 偏移量:7 value:444 时间戳:1695176010084
topic:kb23 partition:2 偏移量:8 value:ppp 时间戳:1695177956251
topic:kb23 partition:2 偏移量:9 value:ppp1 时间戳:1695178017439
topic:kb23 partition:2 偏移量:10 value:ppp3 时间戳:1695178021374
topic:kb23 partition:2 偏移量:11 value:ananaq 时间戳:1695179560702
topic:kb23 partition:1 偏移量:5 value:qqq 时间戳:1695175970133

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值