配置文件 (configuration file, key = value):
groupId = new
setGroupId = asd
topics = kafkaTest1
servers =IP:port,IP:port
time=20200714230000
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.time.LocalDateTime;
import java.time.ZoneId;
import java.time.format.DateTimeFormatter;
import java.time.format.DateTimeParseException;
import java.util.*;

import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndTimestamp;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;

import ys.bigdata.config.Property;
/**
 * Command-line tool that rewinds a consumer group's committed offsets to the
 * first message at or after a configured timestamp, for every partition of the
 * configured topics.
 *
 * <p>Configuration is read through {@code Property} (keys: {@code groupId},
 * {@code topics}, {@code servers}, {@code time} in "yyyyMMddHHmmss" form).
 */
public class SetOffset {

    /** Partition count of the topic currently being processed. */
    private static int partitionNum;
    /** Consumer group whose committed offsets are rewritten. */
    private static String groupId;
    /** Comma-separated topic names to reset. */
    private static String topics;
    /** Kafka bootstrap servers, "host:port,host:port". */
    private static String servers;
    /** Target timestamp string in "yyyyMMddHHmmss" form. */
    private static String setTime;

    /* Specify this class as the main class when packaging the jar. */
    public static void main(String[] args) throws InterruptedException {
        groupId = Property.getProperty("groupId");
        topics = Property.getProperty("topics");
        servers = Property.getProperty("servers");
        setTime = Property.getProperty("time");
        String[] splitTopics = topics.split(",");
        long time = getTime(setTime);
        for (String topic : splitTopics) {
            partitionNum = getPartitionNum(topic);
            System.out.println(topic+":"+partitionNum);
            setOffset(time,topic);
            System.out.println(topic + ":finish");
            // Brief pause between topics; presumably to let the broker settle —
            // TODO confirm whether this is still needed.
            Thread.sleep(3000);
        }
        // getOffset();
        // getPartition();
    }

    /**
     * Converts a "yyyyMMddHHmmss" timestamp string into epoch milliseconds,
     * interpreted in the JVM's default time zone.
     *
     * @param time timestamp string, e.g. "20200714230000"
     * @return epoch milliseconds, or 0 when the string cannot be parsed
     */
    private static long getTime(String time) {
        // HH (0-23), not hh (1-12): the configured value uses 24-hour time, and
        // the previous SimpleDateFormat("yyyyMMddhhmmss") silently mis-parsed
        // afternoon/evening hours. java.time is also thread-safe, unlike
        // SimpleDateFormat.
        DateTimeFormatter format = DateTimeFormatter.ofPattern("yyyyMMddHHmmss");
        try {
            return LocalDateTime.parse(time, format)
                    .atZone(ZoneId.systemDefault())
                    .toInstant()
                    .toEpochMilli();
        } catch (DateTimeParseException e) {
            e.printStackTrace();
        }
        // Fall back to epoch 0: offsetsForTimes() then resolves to the earliest
        // available offset of each partition.
        return 0;
    }

    /**
     * Looks up, for every partition of {@code topic}, the offset of the first
     * message whose timestamp is {@code >= time}, seeks the consumer there and
     * commits the new positions for the configured group.
     *
     * @param time  target timestamp in epoch milliseconds
     * @param topic topic whose committed offsets are rewound
     */
    private static void setOffset(long time, String topic) {
        // try-with-resources: the original leaked the consumer whenever any call
        // before close() threw.
        try (KafkaConsumer<String, String> consumer = getConSumer()) {
            consumer.subscribe(Collections.singletonList(topic));
            Map<TopicPartition, Long> partitionMap = new HashMap<>();
            for (int i = 0; i < partitionNum; i++) {
                partitionMap.put(new TopicPartition(topic, i), time);
            }
            Map<TopicPartition, OffsetAndTimestamp> timeOffsetMap =
                    consumer.offsetsForTimes(partitionMap);
            /*
             * seek() requires the consumer to already hold its partition
             * assignment, which is only established inside poll(); calling
             * seek() before the first poll throws IllegalStateException
             * "No current assignment for partition xxx".
             */
            ConsumerRecords<String, String> poll = consumer.poll(0);
            System.out.println("poll:"+poll.isEmpty());
            for (Map.Entry<TopicPartition, OffsetAndTimestamp> entry : timeOffsetMap.entrySet()) {
                TopicPartition tp = entry.getKey();
                if (entry.getValue() == null) {
                    // No message at/after the target timestamp exists in this
                    // partition. NOTE(review): 99 is an arbitrary fallback offset
                    // kept from the original code — seekToEnd() is the usual
                    // choice here; confirm the intended behavior.
                    consumer.seek(tp, 99);
                } else {
                    consumer.seek(tp, entry.getValue().offset());
                }
            }
            // Commits the positions set by seek() for all assigned partitions.
            consumer.commitSync();
        }
    }

    /**
     * Builds a consumer with auto-commit disabled so that the only commit is
     * the explicit one in {@link #setOffset(long, String)}.
     *
     * @return a new consumer; callers are responsible for closing it
     */
    private static KafkaConsumer<String, String> getConSumer() {
        Properties props = new Properties();
        props.put("bootstrap.servers", servers);
        props.put("group.id",groupId);
        props.put("enable.auto.commit","false");
        props.put("auto.commit.interval.ms","1000");
        props.put("session.timeout.ms","30000");
        props.put("key.deserializer","org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer","org.apache.kafka.common.serialization.StringDeserializer");
        return new KafkaConsumer<>(props);
    }

    /**
     * Returns the number of partitions of {@code topic}.
     *
     * @param topic topic name
     * @return partition count, or 0 when the topic metadata is unavailable
     */
    private static int getPartitionNum(String topic) {
        // try-with-resources: the original never closed this consumer, leaking
        // one network connection per topic.
        try (KafkaConsumer<String, String> consumer = getConSumer()) {
            Collection<PartitionInfo> partitionInfos = consumer.partitionsFor(topic);
            // partitionsFor() may return null for a nonexistent topic.
            return partitionInfos == null ? 0 : partitionInfos.size();
        }
    }
}