Consuming the Latest N Messages from a Kafka Topic

The approach: use AdminClient to enumerate the topic's partitions, read each partition's begin/end offsets through the consumer, commit (end offset - count) as the consumer group's position, then subscribe and poll. Because the consumer resumes from the committed offsets, the first records returned are the last count messages of each partition.

package com.cloudera.kafkademo;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.DescribeTopicsResult;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.TopicPartitionInfo;

import java.util.*;
import java.util.logging.Logger;
public class ReceiveLatestMessageUtils {

    private static final Logger logger = Logger.getLogger(ReceiveLatestMessageUtils.class.getName());

    public static void receiveLatestMessage(Map<String, Object> kafkaParams, String topic, Integer count) {
        logger.info("create KafkaConsumer");

        final KafkaConsumer<String, String> consumer = new KafkaConsumer<>(kafkaParams);

        AdminClient adminClient = AdminClient.create(kafkaParams);

        try {
            DescribeTopicsResult topicResult = adminClient.describeTopics(Arrays.asList(topic));

            Map<String, KafkaFuture<TopicDescription>> descMap = topicResult.values();

            Iterator<Map.Entry<String, KafkaFuture<TopicDescription>>> itr = descMap.entrySet().iterator();

            while (itr.hasNext()) {
                Map.Entry<String, KafkaFuture<TopicDescription>> entry = itr.next();
                logger.info("key: " + entry.getKey());
                List<TopicPartitionInfo> topicPartitionInfoList = entry.getValue().get().partitions();

                for (TopicPartitionInfo topicPartitionInfo : topicPartitionInfoList) {
                    consumerAction(topicPartitionInfo, consumer, topic, count);
                }
            }
            // Subscribing after the per-partition commits above makes the group
            // resume from the committed offsets, i.e. the last `count` messages
            // of each partition come back first. The loop below then keeps
            // polling for new messages indefinitely.
            consumer.subscribe(Arrays.asList(topic));
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(100);
                for (ConsumerRecord<String, String> record : records) {
                    System.out.printf("read offset=%d, key=%s, value=%s, partition=%d%n",
                            record.offset(), record.key(), record.value(), record.partition());
                }
            }
        } catch (Exception ex) {
            ex.printStackTrace();
            logger.severe("error while calling Kafka: " + ex.getMessage());
        } finally {
            adminClient.close();
            consumer.close();
        }
    }

    private static void consumerAction(TopicPartitionInfo topicPartitionInfo, KafkaConsumer<String, String> consumer, String topic, Integer count) {

        int partitionId = topicPartitionInfo.partition();

        Node node = topicPartitionInfo.leader();

        TopicPartition topicPartition = new TopicPartition(topic, partitionId);

        // beginningOffsets/endOffsets each return a single-entry map here,
        // because Arrays.asList(topicPartition) contains exactly one partition.
        Map<TopicPartition, Long> mapBeginning = consumer.beginningOffsets(Arrays.asList(topicPartition));
        long beginOffset = mapBeginning.get(topicPartition);

        Map<TopicPartition, Long> mapEnd = consumer.endOffsets(Arrays.asList(topicPartition));
        long lastOffset = mapEnd.get(topicPartition);

        // Position the group `count` messages before the end of the partition,
        // clamped to the first offset that is still available.
        long expectedOffSet = Math.max(lastOffset - count, beginOffset);
        logger.info("Leader of partitionId: " + partitionId + " is " + node + ". expectedOffSet: " + expectedOffSet
                + ", beginOffset: " + beginOffset + ", lastOffset: " + lastOffset);

        consumer.commitSync(Collections.singletonMap(topicPartition, new OffsetAndMetadata(expectedOffSet)));
    }

    public static void main(String... args) throws Exception {
        Map<String, Object> kafkaParams = new HashMap<String, Object>();
        kafkaParams.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "cdh01:9092,cdh02:9092,cdh03:9092");
        kafkaParams.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        kafkaParams.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        kafkaParams.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
        kafkaParams.put(ConsumerConfig.GROUP_ID_CONFIG, "my-consumer");
        kafkaParams.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        kafkaParams.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 10);

//        To reduce the risk of consumer-group rebalances:
//        1. decrease max.poll.records
//        2. increase session.timeout.ms
//        3. decrease auto.commit.interval.ms
        // max.poll.interval.ms must comfortably exceed the gap between poll() calls;
        // a value as low as 500 ms would cause constant rebalances.
        kafkaParams.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, "300000");
        kafkaParams.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
        kafkaParams.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "10000");

        receiveLatestMessage(kafkaParams, "test", 5);

    }
}
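
An alternative to committing offsets and then subscribing is to assign() the partitions and seek() each one directly, which reads the last messages without touching the group's committed offsets. Below is a minimal sketch under the same 0.11.0.2 dependency; the class name, the use of partitionsFor() instead of AdminClient, and the single bounded poll are illustrative choices, not part of the original code:

```java
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.TopicPartition;

import java.util.*;
import java.util.stream.Collectors;

public class SeekLatestMessageSketch {

    public static void main(String[] args) {
        Map<String, Object> kafkaParams = new HashMap<>();
        kafkaParams.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "cdh01:9092,cdh02:9092,cdh03:9092");
        kafkaParams.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        kafkaParams.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        kafkaParams.put(ConsumerConfig.GROUP_ID_CONFIG, "my-consumer");
        kafkaParams.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);

        String topic = "test";
        int count = 5;

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(kafkaParams)) {
            // partitionsFor() returns the topic's partition metadata, no AdminClient needed
            List<TopicPartition> partitions = consumer.partitionsFor(topic).stream()
                    .map(info -> new TopicPartition(topic, info.partition()))
                    .collect(Collectors.toList());

            // assign() instead of subscribe(): no group rebalancing, no committed offsets involved
            consumer.assign(partitions);

            Map<TopicPartition, Long> beginOffsets = consumer.beginningOffsets(partitions);
            Map<TopicPartition, Long> endOffsets = consumer.endOffsets(partitions);
            for (TopicPartition tp : partitions) {
                // position `count` messages before the end, clamped to the first available offset
                long target = Math.max(endOffsets.get(tp) - count, beginOffsets.get(tp));
                consumer.seek(tp, target);
            }

            // one bounded poll for the sketch; a real reader would loop until the lag is drained
            for (ConsumerRecord<String, String> record : consumer.poll(1000)) {
                System.out.printf("offset=%d, partition=%d, value=%s%n",
                        record.offset(), record.partition(), record.value());
            }
        }
    }
}
```

Because nothing is committed, this variant leaves the group's stored offsets untouched, which makes it convenient for ad-hoc inspection of recent messages.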

pom.xml

<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <!-- 0.10.2.0 does not ship AdminClient -->
    <!-- <version>0.10.2.0</version> -->
    <version>0.11.0.2</version>
</dependency>
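
If you are pinned to a kafka-clients version that predates AdminClient (such as the 0.10.2.0 commented out above), the partition metadata used in consumerAction can also be fetched from the consumer itself with partitionsFor(), which is available in 0.10.x as well. A minimal sketch, assuming a consumer built with the same kafkaParams as in the main class:

```java
// Hypothetical fragment: list each partition and its leader without AdminClient.
// `consumer` is assumed to be a KafkaConsumer<String, String> created from kafkaParams.
for (org.apache.kafka.common.PartitionInfo info : consumer.partitionsFor("test")) {
    System.out.printf("partition=%d, leader=%s%n", info.partition(), info.leader());
}
```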
The Kafka Consumer API can also be used to consume messages from multiple topics. An example implementation in C, using librdkafka:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <librdkafka/rdkafka.h>

int main(int argc, char **argv) {
    rd_kafka_t *rk;          /* Kafka consumer instance handle */
    rd_kafka_conf_t *conf;   /* Temporary configuration object */
    char errstr[512];        /* librdkafka API error reporting buffer */
    char *brokers;           /* Kafka broker(s) */
    char *topics;            /* Comma-separated topic list to consume from */
    char *topic;             /* Single topic name while parsing the list */
    rd_kafka_topic_partition_list_t *topic_list; /* List of topics to subscribe to */
    rd_kafka_resp_err_t err; /* librdkafka API error code */

    /* Check arguments */
    if (argc != 3) {
        fprintf(stderr, "Usage: %s <broker> <topic1,topic2,...>\n", argv[0]);
        exit(1);
    }
    brokers = argv[1];
    topics = argv[2];

    /* Create Kafka client configuration place-holder */
    conf = rd_kafka_conf_new();

    /* Set bootstrap broker(s) as a comma-separated list of
     * host or host:port (default port is 9092).
     * librdkafka will use the bootstrap brokers to acquire the full
     * set of brokers from the cluster. */
    if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers,
                          errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
        fprintf(stderr, "%s\n", errstr);
        exit(1);
    }

    /* subscribe() requires a consumer group id */
    if (rd_kafka_conf_set(conf, "group.id", "multi-topic-consumer",
                          errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
        fprintf(stderr, "%s\n", errstr);
        exit(1);
    }

    /* Create Kafka consumer instance */
    rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));
    if (!rk) {
        fprintf(stderr, "Failed to create Kafka consumer: %s\n", errstr);
        exit(1);
    }

    /* Route all partition message queues to the consumer queue
     * so that rd_kafka_consumer_poll() serves them */
    rd_kafka_poll_set_consumer(rk);

    /* Create topic list */
    topic_list = rd_kafka_topic_partition_list_new(1);
    if (!topic_list) {
        fprintf(stderr, "Failed to create topic list\n");
        exit(1);
    }

    /* Parse the comma-separated topic list */
    for (topic = strtok(topics, ","); topic; topic = strtok(NULL, ","))
        rd_kafka_topic_partition_list_add(topic_list, topic, RD_KAFKA_PARTITION_UA);

    /* Subscribe to topic list */
    err = rd_kafka_subscribe(rk, topic_list);
    if (err) {
        fprintf(stderr, "Failed to subscribe to topic list: %s\n",
                rd_kafka_err2str(err));
        exit(1);
    }

    /* Consume messages */
    while (1) {
        rd_kafka_message_t *msg;

        /* Poll for new messages */
        msg = rd_kafka_consumer_poll(rk, 100);
        if (!msg)
            continue;

        /* Errors (e.g. reaching the end of a partition) are also delivered as messages */
        if (msg->err) {
            fprintf(stderr, "Consume error: %s\n", rd_kafka_message_errstr(msg));
            rd_kafka_message_destroy(msg);
            continue;
        }

        /* Print message */
        printf("Received message on topic %s (partition %d) at offset %ld:\n",
               rd_kafka_topic_name(msg->rkt), msg->partition, (long)msg->offset);
        printf("%.*s\n", (int)msg->len, (char *)msg->payload);

        /* Free message */
        rd_kafka_message_destroy(msg);
    }

    /* Destroy topic list and consumer instance (not reached with the loop above) */
    rd_kafka_topic_partition_list_destroy(topic_list);
    rd_kafka_consumer_close(rk);
    rd_kafka_destroy(rk);
    return 0;
}
```

The code above consumes messages from multiple topics in the following steps:

1. Create a Kafka client configuration place-holder.
2. Set the bootstrap broker(s).
3. Create the Kafka consumer instance.
4. Create the topic list.
5. Parse the topic list.
6. Subscribe to the topic list.
7. Consume messages.
8. Destroy the topic list and the consumer instance.

Note: this code is for reference only; adapt it to your actual setup before use.