Using the Kafka API to produce, consume, and fetch partition state

1. Producer: CustomProducer


import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;


import java.util.Properties;


public class CustomProducer {

    public CustomProducer() {}

    public static void main(String[] args) {
        new CustomProducer().ProducerTest1();
    }




    public void ProducerTest1(){
        Properties props = new Properties();
        // Kafka broker host names and ports
        //props.put("bootstrap.servers", "10.139.12.149:9092,10.139.12.15:9092,10.139.12.150:9092");
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "10.139.12.149:9092,10.139.12.15:9092,10.139.12.150:9092");
        // Wait for acknowledgement from all replicas
        // props.put("acks", "-1");
        // props.put(ProducerConfig.ACKS_CONFIG, "-1");
        // Maximum number of retries for a failed send
        // props.put("retries", 0);
        // Batch size for a batch of messages
        // props.put("batch.size", 16384);
        // props.put("group-id", "test");   // note: group.id is a consumer setting and does not apply to a producer

        // Linger time before a request is sent
        // props.put("linger.ms", 1);
        // Size of the send buffer
        // props.put("buffer.memory", 33554432);
        // Key serializer
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        // Value serializer
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        KafkaProducer<String, String> producer = new KafkaProducer<>(props);

        // Send 50 messages to the topic "bigdata2"
        //producer.send(new ProducerRecord<String, String>("bigdata2", Integer.toString(i), "hello world-" + i));
        for (int i = 0; i < 50; i++) {
            producer.send(new ProducerRecord<String, String>("bigdata2",  "hello -111122222222" + i));
        }
        producer.close();
    }

}
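
The commented-out settings above (acks, retries, batch.size, linger.ms, buffer.memory) control the producer's delivery guarantees and batching. Below is a minimal sketch, assuming the same brokers and topic, of the producer with those settings switched on and a send callback added so failures are visible; the concrete values are simply the ones from the comments, not tuned recommendations.

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;

import java.util.Properties;

public class ReliableProducerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "10.139.12.149:9092,10.139.12.15:9092,10.139.12.150:9092");
        // Wait for acknowledgement from all in-sync replicas
        props.put(ProducerConfig.ACKS_CONFIG, "all");
        // Retry a failed send a few times (illustrative value)
        props.put(ProducerConfig.RETRIES_CONFIG, 3);
        // Collect up to 16 KB of records per partition into one batch
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
        // Wait up to 1 ms for more records before sending a batch
        props.put(ProducerConfig.LINGER_MS_CONFIG, 1);
        // Total memory for buffering records that have not yet been sent
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");

        KafkaProducer<String, String> producer = new KafkaProducer<>(props);
        for (int i = 0; i < 50; i++) {
            // The callback reports per-record delivery failures
            producer.send(new ProducerRecord<>("bigdata2", Integer.toString(i), "hello world-" + i),
                    (metadata, exception) -> {
                        if (exception != null) {
                            exception.printStackTrace();
                        }
                    });
        }
        producer.close();
    }
}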

2. Consumer: MyConsumer

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import java.text.SimpleDateFormat;
import java.util.Arrays;
import java.util.Properties;

public class MyConsumer extends Thread {
    private String topic;
    KafkaConsumer<String, String> consumer;

    public MyConsumer() {
        Properties properties = new Properties();
        // properties.setProperty("bootstrap.servers", "kafka1:9092,kafka2:9092,kafka3:9092");
        properties.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "kafka1:9092,kafka2:9092,kafka3:9092");
        properties.setProperty("group.id", "test");
        // auto.offset.reset (default "latest", i.e. only messages published after the consumer starts are readable)
        // only takes effect when there is no usable committed offset:
        // 1. the same group keeps consuming but its previously committed offset no longer exists (e.g. removed by retention), or
        // 2. the topic is consumed with a brand-new group
        // properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        // Disable automatic offset commits
        properties.put("enable.auto.commit", "false");
        properties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        properties.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        consumer = new KafkaConsumer<String, String>(properties);
        // Subscribe to the topic(s)
        // consumer.subscribe(Arrays.asList("bigdata2", "bigdata"));
        consumer.subscribe(Arrays.asList("bigdata2"));

        // https://blog.csdn.net/u011669700/article/details/80023160
        // Without changing the committed offset, read from the beginning of the given topic partition.
        // assign() attaches the consumer to specific topic partitions.
        //consumer.assign(Arrays.asList(new TopicPartition("bigdata2", 0)));
        //consumer.seekToBeginning(Arrays.asList(new TopicPartition("bigdata2", 0)));

        // Consume starting from a specific offset
/*      consumer.assign(Arrays.asList(new TopicPartition("bigdata2", 0)));
        consumer.seek(new TopicPartition("bigdata2", 0), 442);*/
    }

    @Override
    public void run() {
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(100);
            for (ConsumerRecord<String, String> record : records) {

                SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
                String dateString = formatter.format(record.timestamp());

                System.out.println("timestamp= " + dateString + "       partition=" + record.partition() + "         recordOffset = " + record.offset() + "   " + "recordValue = " + record.value());
            }
        }
    }

    public void setConsumerTest() {
        Properties props = new Properties();
        props.put("bootstrap.servers", "kafka1:9092,kafka2:9092,kafka3:9092");
        props.put("group.id", "test");
        // Auto-commit offsets every 1000 ms
/*        props.put("enable.auto.commit", "true");
        props.put("auto.commit.interval.ms", "1000");*/
        // Commit offsets manually instead
        props.put("enable.auto.commit", "false");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        // Subscribe to the topic(s)
        //consumer.subscribe(Arrays.asList("bigdata2", "bigdata"));
        consumer.subscribe(Arrays.asList("bigdata2"));
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(100);
            for (ConsumerRecord<String, String> record : records)
                System.out.printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value());


        }
    }


    public static void main(String[] args) {
        new MyConsumer().start();
        // new MyConsumer().setConsumerTest();
    }
}
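
MyConsumer sets enable.auto.commit to false but never commits offsets, so its group position is never persisted. A minimal sketch of the same poll loop with manual commits is shown below, assuming the same brokers, group, and topic; committing synchronously once per processed batch is just one reasonable strategy.

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

import java.util.Arrays;
import java.util.Properties;

public class ManualCommitConsumerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "kafka1:9092,kafka2:9092,kafka3:9092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "test");
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Arrays.asList("bigdata2"));
        try {
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(100);
                for (ConsumerRecord<String, String> record : records) {
                    System.out.printf("partition = %d, offset = %d, value = %s%n",
                            record.partition(), record.offset(), record.value());
                }
                // Commit the offsets returned by this poll only after the batch has been
                // processed, so a crash re-delivers at most the current batch
                if (!records.isEmpty()) {
                    consumer.commitSync();
                }
            }
        } finally {
            consumer.close();
        }
    }
}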


3. Getting partition state data

Get the earliest and latest offset of each partition.
Get the total number of consumable messages in a topic.



import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.stream.Collectors;

import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.LongDeserializer;
import org.apache.kafka.common.serialization.StringDeserializer;

@Slf4j
public class KafkaConsumerDemo {
    private final static String TOPIC = "bigdata2";
    private final static String BOOTSTRAP_SERVERS = "kafka1:9092,kafka2:9092,kafka3:9092";

    private static Consumer<Long, String> createConsumer() {
        final Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, BOOTSTRAP_SERVERS);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "KafkaExampleConsumer");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, LongDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

        final Consumer<Long, String> consumer = new KafkaConsumer<>(props);

        return consumer;
    }

    private void partitionUpdateTime(){

        Properties props = new Properties();
        // Kafka broker host names and ports

        //props.put("bootstrap.servers", "10.139.12.149:9092,10.139.12.15:9092,10.139.12.150:9092");
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "10.139.12.149:9092,10.139.12.15:9092,10.139.12.150:9092");
        // Wait for acknowledgement from all replicas
        // props.put("acks", "-1");
        props.put(ProducerConfig.ACKS_CONFIG, "all");
        // Maximum number of retries for a failed send
        //props.put("retries", 0);
        // Batch size for a batch of messages
        // props.put("batch.size", 16384);
        // props.put("group-id", "test");   // note: group.id is a consumer setting and does not apply to a producer

        // Linger time before a request is sent
        // props.put("linger.ms", 1);
        // Size of the send buffer
        // props.put("buffer.memory", 33554432);
        // Key serializer
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        // Value serializer
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        KafkaProducer<String, String> producer = new KafkaProducer<>(props);

        List<PartitionInfo> list = producer.partitionsFor(TOPIC);
        for(PartitionInfo pi : list){
            System.out.println(pi);
            System.out.println("====================");
        }

    }


    // Get all partitions of a topic and the latest offset of each partition
    public static void getPartitionsForTopic() {
        final Consumer<Long, String> consumer = createConsumer();

        Collection<PartitionInfo> partitionInfos = consumer.partitionsFor(TOPIC);
        System.out.println("Get the partition info as below:");
        List<TopicPartition> tp =new ArrayList<TopicPartition>();
        partitionInfos.forEach(str -> {
            System.out.println("Partition Info:");
            System.out.println(str);


            tp.add(new TopicPartition(TOPIC,str.partition()));
            consumer.assign(tp);
            consumer.seekToEnd(tp);

            System.out.println("Partition " + str.partition() + " 's latest offset is '" + consumer.position(new TopicPartition(TOPIC, str.partition())));
        });
    }

    public void getOffset(){
        final Consumer<Long, String> consumer = createConsumer();
        System.out.println("=====================================");
        //Collection<PartitionInfo> partitionInfos = consumer.partitionsFor(TOPIC);
        TopicPartition partitions0 =  new TopicPartition(TOPIC,0);
        TopicPartition partitions1 =  new TopicPartition(TOPIC,1);
        Collection<TopicPartition> topicPartitionCollection = new ArrayList<>();
        topicPartitionCollection.add(partitions0);
        topicPartitionCollection.add(partitions1);

        // Earliest (beginning) offsets of the two partitions
        Map<TopicPartition, Long> beginningOffsets =  consumer.beginningOffsets(topicPartitionCollection);
       // System.out.println(JSON.toJSONString(beginningOffsets));
//        System.out.println(beginningOffsets);
//        System.out.println("=====================================");

        Map<TopicPartition, Long> endOffsets = consumer.endOffsets(topicPartitionCollection);
        System.out.println(beginningOffsets+"   "+endOffsets);
        System.out.println("======================================");

        // Total number of consumable messages in the topic:
        // the sum of (endOffset - beginningOffset) over all partitions,
        // i.e. the sum of the endOffsets minus the sum of the beginningOffsets


/*
        Long beginningOffsetsSum = null;
        Long endOffsetsSum = (long) 0;
        for (Long value : endOffsets.values()) {
            endOffsetsSum += value;
        }
*/

        // Turn the map values into a List via a stream + lambda, then sum them
        List<Long> endOffsetsToList = endOffsets.entrySet().stream().map(x -> x.getValue()).collect(Collectors.toList());
        Long endOffsetsSum = endOffsetsToList.stream().mapToLong(x -> x).sum();
        System.out.println("endOffsetsSum = " + endOffsetsSum);

        List<Long> beginningOffsetsToList = beginningOffsets.entrySet().stream().map(x -> x.getValue()).collect(Collectors.toList());
        Long beginningOffsetsSum = beginningOffsetsToList.stream().mapToLong(x -> x).sum();
        //log.info("beginningOffsetsSum {}", beginningOffsetsSum);
        System.out.println("beginningOffsetsSum = " + beginningOffsetsSum);

        if (endOffsetsSum > beginningOffsetsSum) {
            System.out.println("messageSum = " + (endOffsetsSum - beginningOffsetsSum));
        }



 /*       Map<String, List<PartitionInfo>> listTopics = consumer.listTopics();

        System.out.println(JSON.toJSONString(listTopics));*/


    }
    // List all topics and their partitions. Note that this method only lists the topics;
    // computing the total number of consumable messages per topic is shown in the sketch below.
    public void getMessagesTotalNumberOfTopic(){
        final Consumer<Long, String> consumer1 = createConsumer();

        Map<String, List<PartitionInfo>> topicList =  consumer1.listTopics();

        topicList.forEach((topic,list) -> {
            System.out.println(topic+ "   " +list);
        });


    }
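
    // A minimal sketch (not part of the original post) that actually computes the total number of
    // consumable messages per topic: combine listTopics() with beginningOffsets()/endOffsets() and,
    // for each topic, sum (endOffset - beginningOffset) over its partitions.
    // The method name and the use of a fresh consumer are illustrative choices.
    public void printMessagesTotalNumberPerTopic() {
        final Consumer<Long, String> consumer = createConsumer();
        Map<String, List<PartitionInfo>> topics = consumer.listTopics();
        topics.forEach((topic, partitions) -> {
            List<TopicPartition> tps = partitions.stream()
                    .map(p -> new TopicPartition(topic, p.partition()))
                    .collect(Collectors.toList());
            Map<TopicPartition, Long> begin = consumer.beginningOffsets(tps);
            Map<TopicPartition, Long> end = consumer.endOffsets(tps);
            long total = tps.stream()
                    .mapToLong(tp -> end.get(tp) - begin.get(tp))
                    .sum();
            System.out.println(topic + " consumable messages = " + total);
        });
    }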

    //PartitionOffsetRequestInfo
    // Continuously consume data
    public static void run() throws InterruptedException {
        final Consumer<Long, String> consumer = createConsumer();
        consumer.subscribe(Collections.singletonList(TOPIC));
        final int giveUp = 100; int noRecordsCount = 0;

        while(true){
            final ConsumerRecords<Long, String> consumerRecords = consumer.poll(1000);

            if(consumerRecords.count()==0){
                noRecordsCount++;
                if(noRecordsCount > giveUp) break;
                else continue;
            }

            // int i = 0;
            consumerRecords.forEach(record -> {
                // i = i + 1;
                System.out.printf("Consumer Record:(%d, %s, %d, %d)\n",
                        record.key(), record.value(),
                        record.partition(), record.offset());
            });

            // System.out.println("Consumer Records " + i);
            consumer.commitAsync();
        }

        consumer.close();
        System.out.println("Kafka Consumer Exited");
    }


    public static void main(String[] args) {
        KafkaConsumerDemo kafkaConsumerDemo =  new KafkaConsumerDemo();
       // kafkaConsumerDemo.getMessagesTotalNumberOfTopic();
       // kafkaConsumerDemo.getPartitionsForTopic();
       kafkaConsumerDemo.getOffset();
        //kafkaConsumerDemo.partitionUpdateTime();
    }
}