Business scenario
In the IoT domain, suppose users open a page that only needs data from the current moment and does not care about a device's historical data.
The approach is therefore to fetch the latest offset of each partition from the Kafka broker, and have the consumer start from that latest offset so it only reads the most recent data.
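As a side note before the two implementations: if the project can use kafka-clients 0.10.1 or newer, the consumer API already exposes endOffsets(), which returns the latest offset of every partition in a single call. Below is only a minimal sketch of that shortcut; the class name is made up for illustration, and the broker address and topic are the same placeholders used in the examples that follow.

import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.stream.Collectors;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

public class LatestOffsetSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Placeholder broker address and topic, same as in the examples below
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.1.109:9092");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            // Collect the TopicPartitions of the topic from its partition metadata
            List<TopicPartition> partitions = consumer.partitionsFor("TOPIC_SPARKSTREAMING10").stream()
                    .map(p -> new TopicPartition(p.topic(), p.partition()))
                    .collect(Collectors.toList());
            // endOffsets() returns the latest offset of each partition without assigning or seeking
            Map<TopicPartition, Long> endOffsets = consumer.endOffsets(partitions);
            endOffsets.forEach((tp, offset) -> System.out.println(tp.partition() + "---" + offset));
        }
    }
}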
1. Simple API (SimpleConsumer) implementation
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

import kafka.api.PartitionOffsetRequestInfo;
import kafka.common.TopicAndPartition;
import kafka.javaapi.OffsetResponse;
import kafka.javaapi.PartitionMetadata;
import kafka.javaapi.TopicMetadata;
import kafka.javaapi.TopicMetadataRequest;
import kafka.javaapi.consumer.SimpleConsumer;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class GetOffsetShellWrap {

    private static Logger log = LoggerFactory.getLogger(GetOffsetShellWrap.class);

    private String topic;
    private int port;
    private String host;
    private int time;

    public GetOffsetShellWrap(String topic, int port, String host, int time) {
        this.topic = topic;
        this.port = port;
        this.host = host;
        this.time = time;
    }

    public Map<String, String> getEveryPartitionMaxOffset() {
        // 1. Fetch all partitions of the topic and their metadata => Map<partitionId, PartitionMetadata>
        TreeMap<Integer, PartitionMetadata> partitionIdAndMeta = findTopicEveryPartition();
        Map<String, String> map = new HashMap<String, String>();
        for (Map.Entry<Integer, PartitionMetadata> entry : partitionIdAndMeta.entrySet()) {
            int leaderPartitionId = entry.getKey();
            // 2. From the partition metadata, find the host of the partition's leader
            String leadBroker = entry.getValue().leader().host();
            String clientName = "Client_" + topic + "_" + leaderPartitionId;
            SimpleConsumer consumer = new SimpleConsumer(leadBroker, port, 100000, 64 * 1024, clientName);
            // 3. Ask the leader for the partition's latest offset
            long readOffset = getLastOffset(consumer, topic, leaderPartitionId, clientName);
            map.put(String.valueOf(leaderPartitionId), String.valueOf(readOffset));
            if (consumer != null) consumer.close();
        }
        return map;
    }

    private TreeMap<Integer, PartitionMetadata> findTopicEveryPartition() {
        TreeMap<Integer, PartitionMetadata> map = new TreeMap<Integer, PartitionMetadata>();
        SimpleConsumer consumer = null;
        try {
            consumer = new SimpleConsumer(host, port, 100000, 64 * 1024, "leaderLookup" + new Date().getTime());
            List<String> topics = Collections.singletonList(topic);
            TopicMetadataRequest req = new TopicMetadataRequest(topics);
            kafka.javaapi.TopicMetadataResponse resp = consumer.send(req);
            List<TopicMetadata> metaData = resp.topicsMetadata();
            if (metaData != null && !metaData.isEmpty()) {
                TopicMetadata item = metaData.get(0);
                for (PartitionMetadata part : item.partitionsMetadata()) {
                    map.put(part.partitionId(), part);
                }
            }
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            if (consumer != null) consumer.close();
        }
        return map;
    }

    private long getLastOffset(SimpleConsumer consumer, String topic, int leaderPartitionId, String clientName) {
        TopicAndPartition topicAndPartition = new TopicAndPartition(topic, leaderPartitionId);
        Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
        // time = -1 (kafka.api.OffsetRequest.LatestTime()) asks for the latest offset; maxNumOffsets = 1
        requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(time, 1));
        kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName);
        OffsetResponse response = consumer.getOffsetsBefore(request);
        if (response.hasError()) {
            log.error("Error fetching offset data from the broker. Reason: " + response.errorCode(topic, leaderPartitionId));
            return 0;
        }
        long[] offsets = response.offsets(topic, leaderPartitionId);
        return offsets[0];
    }
}
Main method
import java.util.Map;

public class GetOffsetShellWrapJavaTest {
    public static void main(String[] args) {
        int port = 9092;
        String topic = "TOPIC_SPARKSTREAMING10";
        // -1 corresponds to kafka.api.OffsetRequest.LatestTime(), i.e. request the latest offset
        int time = -1;
        GetOffsetShellWrap offsetSearch = new GetOffsetShellWrap(topic, port, "192.168.1.109", time);
        Map<String, String> map = offsetSearch.getEveryPartitionMaxOffset();
        for (String key : map.keySet()) {
            System.out.println(key + "---" + map.get(key));
        }
    }
}
Result: the current offset of partition 0
2. High-level API (KafkaConsumer) implementation
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Properties;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.LongDeserializer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class KafkaConsumerDemo {

    private final static String TOPIC = "TOPIC_SPARKSTREAMING10";
    private final static String BOOTSTRAP_SERVERS = "192.168.1.109:9092";

    private static Consumer<Long, String> createConsumer() {
        final Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, BOOTSTRAP_SERVERS);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "KafkaExampleConsumer");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, LongDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        return new KafkaConsumer<>(props);
    }

    // Get all partitions of the topic and the latest offset of each partition
    public static void getPartitionsForTopic() {
        final Consumer<Long, String> consumer = createConsumer();
        Collection<PartitionInfo> partitionInfos = consumer.partitionsFor(TOPIC);
        System.out.println("Get the partition info as below:");
        List<TopicPartition> tp = new ArrayList<TopicPartition>();
        partitionInfos.forEach(info -> {
            System.out.println("Partition Info:");
            System.out.println(info);
            tp.add(new TopicPartition(TOPIC, info.partition()));
        });
        // Assign all partitions once, seek to the end of each, then read back the positions
        consumer.assign(tp);
        consumer.seekToEnd(tp);
        tp.forEach(partition ->
                System.out.println("Partition " + partition.partition() + "'s latest offset is " + consumer.position(partition)));
        consumer.close();
    }

    // Continuously consume data
    public static void run() throws InterruptedException {
        final Consumer<Long, String> consumer = createConsumer();
        consumer.subscribe(Collections.singletonList(TOPIC));
        final int giveUp = 100;
        int noRecordsCount = 0;
        while (true) {
            final ConsumerRecords<Long, String> consumerRecords = consumer.poll(1000);
            if (consumerRecords.count() == 0) {
                noRecordsCount++;
                if (noRecordsCount > giveUp) break;
                else continue;
            }
            consumerRecords.forEach(record -> {
                System.out.printf("Consumer Record:(%d, %s, %d, %d)\n",
                        record.key(), record.value(),
                        record.partition(), record.offset());
            });
            consumer.commitAsync();
        }
        consumer.close();
        System.out.println("Kafka Consumer Exited");
    }

    public static void main(String[] args) {
        getPartitionsForTopic();
    }
}
Result: the current offset of partition 0
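To close the loop on the business scenario (only consume data produced from now on, ignoring history), the pieces above can be combined: assign the topic's partitions, seek to the end of each, then poll. The sketch below is only an illustration under the same placeholder broker and topic; the class name and group id are made up for the example. Subscribing with a brand-new group.id and auto.offset.reset=latest would achieve a similar effect without manual seeking.

import java.util.List;
import java.util.Properties;
import java.util.stream.Collectors;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.LongDeserializer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class LatestOnlyConsumerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Placeholder broker, group id and topic for illustration only
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.1.109:9092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "LatestOnlyConsumer");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, LongDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        try (KafkaConsumer<Long, String> consumer = new KafkaConsumer<>(props)) {
            List<TopicPartition> partitions = consumer.partitionsFor("TOPIC_SPARKSTREAMING10").stream()
                    .map(p -> new TopicPartition(p.topic(), p.partition()))
                    .collect(Collectors.toList());
            // Manually assign all partitions and jump to their current end,
            // so the loop below only ever sees records produced after this point
            consumer.assign(partitions);
            consumer.seekToEnd(partitions);
            while (true) {
                ConsumerRecords<Long, String> records = consumer.poll(1000);
                records.forEach(record ->
                        System.out.printf("Consumer Record:(%d, %s, %d, %d)%n",
                                record.key(), record.value(), record.partition(), record.offset()));
            }
        }
    }
}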