Demo: consuming a single Kafka partition and writing it to a file (minor tweaks to someone else's code, saved for reference)

Saved for reference; not my original work. The demo uses the legacy SimpleConsumer ("low-level consumer") API: it locates the partition leader with a TopicMetadataRequest, starts fetching at the latest offset, and appends each message payload to a local file, one message per line.
import kafka.api.FetchRequest;
import kafka.api.FetchRequestBuilder;
import kafka.api.PartitionOffsetRequestInfo;
import kafka.common.ErrorMapping;
import kafka.common.TopicAndPartition;
import kafka.javaapi.*;
import kafka.javaapi.consumer.SimpleConsumer;
import kafka.message.MessageAndOffset;

import java.io.FileWriter;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class KafkaSinglePartitionToFile {

    public static void main(String[] args) {
        KafkaSinglePartitionToFile example = new KafkaSinglePartitionToFile();
        List<String> brokers = new ArrayList<>();
        brokers.add("kafkabroker1");

        int port = 9092;

        long maxReads = 500000;

        String topicName = "log-src";

        int partitionNumber = 0;

        String filePath = "/data/tmp/kafka-t0.txt";

        try {
            example.run(maxReads, topicName, partitionNumber, brokers, port, filePath);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    private List<String> replicaBrokers;

    public KafkaSinglePartitionToFile() {
        replicaBrokers = new ArrayList<>();
    }

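    // Main loop: fetch batches from the partition leader, append each message payload
    // to the file, and fail over to a new leader on errors, until logLimit messages
    // have been read. (Note: fw and consumer are not closed if an exception escapes;
    // wrap them in try/finally for production use.)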
    public void run(long logLimit, String topicName, int partitionNumber, List<String> brokers, int port, String filePath) throws Exception {

        FileWriter fw = new FileWriter(filePath, false);

        PartitionMetadata metadata = findLeader(brokers, port, topicName, partitionNumber);
        
        if (metadata == null) {
            System.out.println("Can't find metadata for Topic and Partition. Exiting");
            return;
        }
        if (metadata.leader() == null) {
            System.out.println("Can't find Leader for Topic and Partition. Exiting");
            return;
        }
        
        String leadBroker = metadata.leader().host();
        String clientName = "Client_" + topicName + "_" + partitionNumber;

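        // Legacy constructor: SimpleConsumer(host, port, socketTimeoutMs, bufferSizeBytes, clientId)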
        SimpleConsumer consumer = new SimpleConsumer(leadBroker, port, 100000, 64 * 1024, clientName);
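        // LatestTime() starts at the tail of the log, so only messages produced after
        // startup are captured; use kafka.api.OffsetRequest.EarliestTime() to read the
        // partition from the beginning instead.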
        long readOffset = getLastOffset(consumer, topicName, partitionNumber, kafka.api.OffsetRequest.LatestTime(), clientName);
        
        int numErrors = 0;
        while (logLimit > 0) {
            if (consumer == null) {
                consumer = new SimpleConsumer(leadBroker, port, 100000, 64 * 1024, clientName);
            }
            
            FetchRequest req = new FetchRequestBuilder()
                    .clientId(clientName)
                    .addFetch(topicName, partitionNumber, readOffset, 10000) // note: this fetchSize of 10000 bytes may need to be increased if large message batches are written to Kafka
                    .build();
            FetchResponse fetchResponse = consumer.fetch(req);

            if (fetchResponse.hasError()) {
                numErrors++;
                short code = fetchResponse.errorCode(topicName, partitionNumber);
                System.out.println("Error fetching data from the Broker:" + leadBroker + " Reason: " + code);
                if (numErrors > 3) break;
                if (code == ErrorMapping.OffsetOutOfRangeCode()) {
                    readOffset = getLastOffset(consumer, topicName, partitionNumber, kafka.api.OffsetRequest.LatestTime(), clientName);
                    continue;
                }
                consumer.close();
                consumer = null;
                leadBroker = findNewLeader(leadBroker, topicName, partitionNumber, port);
                continue;
            }
            
            numErrors = 0;

            long numRead = 0;
            for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(topicName, partitionNumber)) {
                long currentOffset = messageAndOffset.offset();
                if (currentOffset < readOffset) {
                    System.out.println("Found an old offset: " + currentOffset + " Expecting: " + readOffset);
                    continue;
                }
                readOffset = messageAndOffset.nextOffset();
                ByteBuffer payload = messageAndOffset.message().payload();

                byte[] bytes = new byte[payload.limit()];
                payload.get(bytes);

                String str = new String(bytes, "UTF-8");
                fw.write(str);
                fw.write(System.lineSeparator());

                if (logLimit % 1000 == 0) { // every 1000 messages, report progress and flush
                    System.out.println("Messages remaining: " + logLimit);
                    fw.flush();
                }

                numRead++;
                logLimit--;
            }

            if (numRead == 0) {
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException ie) {
                }
            }
        }

        fw.close();
        consumer.close();
    }

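    // Asks the broker for the partition offset at the given time (LatestTime() or
    // EarliestTime()); used to initialize readOffset and to recover from
    // OffsetOutOfRange errors.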
    public static long getLastOffset(SimpleConsumer consumer, String topic, int partition, long whichTime, String clientName) {
        TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
        
        Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<>();
        requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
        
        kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName);
        
        OffsetResponse response = consumer.getOffsetsBefore(request);

        if (response.hasError()) {
            System.out.println("Error fetching data Offset Data the Broker. Reason: " + response.errorCode(topic, partition));
            return 0;
        }
        
        long[] offsets = response.offsets(topic, partition);
        return offsets[0];
    }

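    // After a fetch error, looks for a new partition leader among the replica brokers
    // recorded by findLeader(); throws if the leader has not moved off the old broker.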
    private String findNewLeader(String oldLeader, String topicName, int partitionNumber, int port) throws Exception {
        PartitionMetadata metadata = findLeader(replicaBrokers, port, topicName, partitionNumber);
        if (metadata != null && metadata.leader() != null && !oldLeader.equalsIgnoreCase(metadata.leader().host())){
            return metadata.leader().host();
        } else {
            System.out.println("Unable to find new leader after Broker failure. Exiting");
            throw new Exception("Unable to find new leader after Broker failure. Exiting");
        }
    }

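    // Queries each broker with a TopicMetadataRequest until one returns metadata for
    // the target partition, and records that partition's replica hosts for failover.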
    private PartitionMetadata findLeader(List<String> brokers, int port, String topicName, int partitionNumber) {
        PartitionMetadata returnMetaData = null;
        loop:
        for (String broker : brokers) {
            SimpleConsumer consumer = null;
            try {
                consumer = new SimpleConsumer(broker, port, 100000, 64 * 1024, "leaderLookup");
                
                List<String> topics = Collections.singletonList(topicName);
                TopicMetadataRequest req = new TopicMetadataRequest(topics);
                kafka.javaapi.TopicMetadataResponse resp = consumer.send(req);

                List<TopicMetadata> metaData = resp.topicsMetadata();
                for (TopicMetadata item : metaData) {
                    for (PartitionMetadata part : item.partitionsMetadata()) {
                        if (part.partitionId() == partitionNumber) {
                            returnMetaData = part;
                            break loop;
                        }
                    }
                }
            } catch (Exception e) {
                System.out.println("Error communicating with Broker [" + broker + "] to find Leader for [" + topicName
                        + ", " + partitionNumber + "] Reason: " + e);
            } finally {
                if (consumer != null) consumer.close();
            }
        }
        if (returnMetaData != null) {
            replicaBrokers.clear();
            for (kafka.cluster.Broker replica : returnMetaData.replicas()) {
                replicaBrokers.add(replica.host());
            }
        }
        return returnMetaData;
    }
}
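
The class above depends on the legacy SimpleConsumer API, which was deprecated in Kafka 0.9 and removed in Kafka 2.0. For comparison, here is a minimal sketch of the same task on the modern KafkaConsumer API, assuming a 2.x or newer client (for poll(Duration)); the class name is mine, and the broker, topic, partition, and file path are the same placeholder values used above:

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

import java.io.FileWriter;
import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

public class NewApiSinglePartitionToFile {

    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "kafkabroker1:9092");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        // No group.id: assign() below bypasses consumer-group management entirely.

        long remaining = 500000;
        TopicPartition tp = new TopicPartition("log-src", 0);

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
             FileWriter fw = new FileWriter("/data/tmp/kafka-t0.txt", false)) {
            consumer.assign(Collections.singletonList(tp));     // pin to the one partition
            consumer.seekToEnd(Collections.singletonList(tp));  // same "start at latest" behavior as the demo

            while (remaining > 0) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
                for (ConsumerRecord<String, String> record : records) {
                    fw.write(record.value());
                    fw.write(System.lineSeparator());
                    if (--remaining % 1000 == 0) fw.flush();
                    if (remaining == 0) break;
                }
            }
        }
    }
}

Leader discovery, failover, and offset bookkeeping, which make up most of the legacy class, are handled by the client library here; assign() pins the consumer to the single partition without joining a consumer group.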