Introduction
This article mainly covers the differences between Kafka's low-level API, high-level API, and new API, and how to use the auto.offset.reset parameter.
The diagram shown at the start of the article is there to let the reader see clearly how the three concepts Broker, Partition, and replica relate to one another.
Differences between the low-level API, high-level API, and new API
Characteristics of the low-level API
Advantages
● Developers control the offset themselves and can start reading from wherever they choose.
● The application manages its own connections to partitions, so it can implement custom load balancing across them.
● Reduced dependence on ZooKeeper (the offset does not have to live in ZK; the application can store it itself, for example in a file or in memory; see the sketch after this list).
Disadvantages
● Considerably more complex: the application has to manage offsets itself, decide which partition to connect to, locate the partition leader, and so on, as the SimpleConsumer example below demonstrates.
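To make the third advantage concrete: since the low-level API leaves offset management entirely to the application, the committed offset can be kept in a plain local file instead of ZooKeeper. The following is a minimal sketch of that idea; the class name FileOffsetStore and the /tmp path are my own illustration, not from the article.

import java.nio.file.*;

public class FileOffsetStore {
    private final Path path;

    public FileOffsetStore(String topic, int partition) {
        // One file per topic-partition; the location is arbitrary.
        this.path = Paths.get("/tmp/offsets-" + topic + "-" + partition);
    }

    // Persist the next offset to read, replacing any previous value.
    public void commit(long offset) throws Exception {
        Files.write(path, String.valueOf(offset).getBytes());
    }

    // Read back the stored offset, falling back to 0 for a fresh start.
    public long read() throws Exception {
        if (!Files.exists(path)) return 0L;
        return Long.parseLong(new String(Files.readAllBytes(path)).trim());
    }
}

With that in mind, here is the full low-level (SimpleConsumer) example, which stores its offsets in ZooKeeper: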
package com.csdn.kafka.consumer;

import kafka.api.FetchRequestBuilder;
import kafka.api.PartitionOffsetRequestInfo;
import kafka.common.TopicAndPartition;
import kafka.javaapi.*;
import kafka.javaapi.consumer.SimpleConsumer;
import kafka.javaapi.message.ByteBufferMessageSet;
import kafka.message.Message;
import kafka.message.MessageAndOffset;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import util.ZkUtil;

import java.nio.ByteBuffer;
import java.util.*;

/**
 * Created by ag on 2020/5/9.
 */
public class SimpleConsumerAPI {

    public static void main(String[] args) throws Exception {
        String zkString = "192.168.1.115:2181,192.168.1.116:2181,192.168.1.117:2181";
        String broker = "192.168.1.115";
        int port = 9092;
        int buffersize = 64 * 1024;
        String clientId = "clientId";
        String topic = "test";
        // Earliest available offset; used by the offset-lookup helpers (not shown in this listing).
        long whichTime = kafka.api.OffsetRequest.EarliestTime();
        int timeout = 6000;

        // Connect to ZooKeeper so we can enumerate the topic's partitions ourselves.
        ZooKeeper zk = new ZooKeeper(zkString, timeout, new Watcher() {
            public void process(WatchedEvent watchedEvent) {
                System.out.println(watchedEvent);
            }
        });
        List<String> partitions = zk.getChildren("/brokers/topics/" + topic + "/partitions", true);
        System.out.println(partitions);

        // With the low-level API we must find each partition's leader and track offsets by hand.
        for (String p : partitions) {
            int partition = Integer.valueOf(p);
            String leader = getLeader(timeout, broker, port, partition,
                    buffersize, clientId, topic);
            // The last committed offset for this partition is kept under our own ZK path.
            byte[] data = ZkUtil.getData("/consumers/test/testgroup/" + partition);
            long readOffset = Long.valueOf(new String(data).trim());
            System.out.println(readOffset);
            // One reader thread per partition, starting from the stored offset.
            new Thread(new ReadDataTask(timeout, port, partition, buffersize,
                    clientId, topic, leader, readOffset, 14110)).start();
        }
    }

    private static void fetchData(int timeout, int port, int partition, int buffersize,
                                  String clientId, String topic, String leader, long readOffset) {
        // Connect directly to the partition leader and fetch messages starting at readOffset.
        SimpleConsumer simpleConsumer = new SimpleConsumer(leader, port, timeout, buffersize, clientId);
        kafka.api.FetchRequest request = new FetchRequestBuilder()
                .addFetch(topic, partition, readOffset, 100000)
                .clientId(clientId)
                .build();
        FetchResponse fetch = simpleConsumer.fetch(request);
        ByteBufferMessageSet messageAndOffsets = fetch.messageSet(topic, partition);

        Iterator<MessageAndOffset> iterator = messageAndOffsets.iterator();
        while (iterator.hasNext()) {
            MessageAndOffset next = iterator.next();
            long offset = next.offset();
            // nextOffset is what a caller would commit and use as the start of the next fetch.
            long nextoffset = next.nextOffset();
            Message message = next.message();
            ByteBuffer payload = message.payload();
            // Copy the payload out of the ByteBuffer and print it alongside its offset.
            byte[] bytes = new byte[payload.limit()];
            payload.get(bytes);
            System.out.println(offset + ": " + new String(bytes));
        }
        simpleConsumer.close();
    }
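The listing above calls two helpers that do not appear in this excerpt: getLeader, which looks up a partition's leader broker, and ReadDataTask, a Runnable that drives fetchData. For reference, a getLeader in the style of the classic SimpleConsumer metadata lookup might look like the following; treat it as a sketch under that assumption, not necessarily the author's exact code.

    private static String getLeader(int timeout, String broker, int port, int partition,
                                    int buffersize, String clientId, String topic) {
        // Ask a known broker for the topic's metadata, then pick out this partition's leader host.
        SimpleConsumer consumer = new SimpleConsumer(broker, port, timeout, buffersize, clientId);
        try {
            TopicMetadataRequest request = new TopicMetadataRequest(Collections.singletonList(topic));
            TopicMetadataResponse response = consumer.send(request);
            for (TopicMetadata topicMetadata : response.topicsMetadata()) {
                for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
                    if (partitionMetadata.partitionId() == partition) {
                        return partitionMetadata.leader().host();
                    }
                }
            }
            return null; // leader not found
        } finally {
            consumer.close();
        }
    }

ReadDataTask would be a small Runnable that repeatedly calls fetchData and commits the new offset back to ZooKeeper; its implementation is not part of this excerpt.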