JAVA通过时间来重置kafka的偏移量

配置文件:
groupId = new
setGroupId = asd
topics = kafkaTest1
servers = IP:port,IP:port
time = 20200714230000

import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndTimestamp;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import ys.bigdata.config.Property;

import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.*;
public class SetOffset {

    private static String groupId;

    private static String topics;

    private static String servers;

    private static String setTime;

    /**
     * Resets the committed offsets of a consumer group to the offsets that
     * correspond to a wall-clock time, for every partition of every configured topic.
     *
     * Specify this class as the main class when packaging the jar.
     *
     * Reads from the property file:
     *   groupId - consumer group whose offsets are reset
     *   topics  - comma-separated topic list
     *   servers - bootstrap servers (host:port,host:port)
     *   time    - target time, format yyyyMMddHHmmss (24-hour clock)
     */
    public static void main(String[] args) throws InterruptedException {
        groupId = Property.getProperty("groupId");
        topics = Property.getProperty("topics");
        servers = Property.getProperty("servers");
        setTime = Property.getProperty("time");

        long timestamp = getTime(setTime);
        for (String topic : topics.split(",")) {
            int partitionCount = getPartitionNum(topic);
            System.out.println(topic + ":" + partitionCount);
            setOffset(timestamp, topic, partitionCount);
            System.out.println(topic + ":finish");
            // Brief pause between topics so broker-side group coordination settles.
            Thread.sleep(3000);
        }
    }

    /**
     * Parses a timestamp string of the form yyyyMMddHHmmss into epoch milliseconds.
     *
     * Fixes two defects of the original version:
     *  - the pattern used "hh" (12-hour clock), which mis-parses any hour >= 13
     *    (e.g. the documented config value 20200714230000);
     *  - a ParseException was swallowed and 0 was returned, which would silently
     *    reset the group to the earliest offsets. Now it fails fast instead.
     *
     * @param time timestamp string, e.g. "20200714230000"
     * @return epoch milliseconds of the given local time
     * @throws IllegalArgumentException if the string does not match the pattern
     */
    private static long getTime(String time) {
        SimpleDateFormat format = new SimpleDateFormat("yyyyMMddHHmmss");
        // Strict parsing: reject values like month 13 instead of rolling them over.
        format.setLenient(false);
        try {
            return format.parse(time).getTime();
        } catch (ParseException e) {
            throw new IllegalArgumentException("Invalid time value: " + time
                    + " (expected yyyyMMddHHmmss)", e);
        }
    }

    /**
     * Seeks every partition of {@code topic} to the first offset whose timestamp is
     * at or after {@code time}, then commits those positions for the configured group.
     *
     * Uses {@code assign()} rather than {@code subscribe()} + {@code poll(0)}:
     * seek() requires a current assignment, and manual assignment provides one
     * immediately without the deprecated poll(long) hack or a group rebalance.
     *
     * @param time           target epoch milliseconds
     * @param topic          topic to reset
     * @param partitionCount number of partitions in the topic
     */
    private static void setOffset(long time, String topic, int partitionCount) {
        try (KafkaConsumer<String, String> consumer = createConsumer()) {
            List<TopicPartition> partitions = new ArrayList<>(partitionCount);
            Map<TopicPartition, Long> searchByTime = new HashMap<>();
            for (int i = 0; i < partitionCount; i++) {
                TopicPartition tp = new TopicPartition(topic, i);
                partitions.add(tp);
                searchByTime.put(tp, time);
            }
            consumer.assign(partitions);

            Map<TopicPartition, OffsetAndTimestamp> offsetsByPartition =
                    consumer.offsetsForTimes(searchByTime);
            for (Map.Entry<TopicPartition, OffsetAndTimestamp> entry : offsetsByPartition.entrySet()) {
                if (entry.getValue() == null) {
                    // No message at or after the target time in this partition:
                    // seek to the log end. (The original hard-coded offset 99 here,
                    // which is wrong for any partition whose log end differs from 99.)
                    consumer.seekToEnd(Collections.singleton(entry.getKey()));
                } else {
                    consumer.seek(entry.getKey(), entry.getValue().offset());
                }
            }
            // Persist the seeked positions as the group's committed offsets.
            consumer.commitSync();
        }
    }

    /**
     * Builds a throwaway consumer for the configured group and servers.
     * Auto-commit is disabled: offsets are committed explicitly via commitSync().
     */
    private static KafkaConsumer<String, String> createConsumer() {
        Properties props = new Properties();
        props.put("bootstrap.servers", servers);
        props.put("group.id", groupId);
        props.put("enable.auto.commit", "false");
        props.put("session.timeout.ms", "30000");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        return new KafkaConsumer<>(props);
    }

    /**
     * Fetches the partition count of {@code topic} from broker metadata.
     * The consumer is now closed via try-with-resources (the original leaked it).
     *
     * @throws IllegalStateException if the broker returns no metadata for the topic
     */
    private static int getPartitionNum(String topic) {
        try (KafkaConsumer<String, String> consumer = createConsumer()) {
            List<PartitionInfo> partitionInfos = consumer.partitionsFor(topic);
            if (partitionInfos == null || partitionInfos.isEmpty()) {
                throw new IllegalStateException("No partition metadata for topic: " + topic);
            }
            return partitionInfos.size();
        }
    }

}

  • 0
    点赞
  • 3
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值