import java.io.UnsupportedEncodingException;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.*;
import kafka.api.FetchRequest;
import kafka.api.FetchRequestBuilder;
import kafka.cluster.BrokerEndPoint;
import kafka.javaapi.*;
import kafka.javaapi.consumer.SimpleConsumer;
import kafka.javaapi.message.ByteBufferMessageSet;
import kafka.message.MessageAndOffset;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
// CommonUtils is a small utility class (object-to-string helper); provide your own implementation.
/**
 * Fetches messages from Kafka for a given topic, partition and offset
 * using the legacy low-level (SimpleConsumer) API.
 *
 * @version 1.0
 * @author yujianrong
 * @date: 2020/2/16 15:48
 */
public class LowerConsumer {
    /** Class logger. */
    private static final Logger log = LoggerFactory.getLogger(LowerConsumer.class);

    /**
     * Finds the leader endpoint for the partition described by the given
     * condition, by querying each broker in turn for topic metadata.
     *
     * @param conditionDTO carries the broker list, port, topic and partition
     * @return the leader {@link BrokerEndPoint}, or {@code null} when no broker
     *         reports a leader for the requested partition
     */
    public BrokerEndPoint findLeader(KafkaConsumeConditionDTO conditionDTO) {
        for (String broker : conditionDTO.getBrokerList()) {
            // Short-lived consumer used only to request topic metadata from this broker.
            SimpleConsumer getLeader = new SimpleConsumer(broker, conditionDTO.getPort(), 1000, 1024 * 4, "getLeader");
            try {
                // Ask the broker for metadata about the requested topic.
                TopicMetadataRequest topicMetadataRequest =
                        new TopicMetadataRequest(Collections.singletonList(conditionDTO.getTopic()));
                TopicMetadataResponse metadataResponse = getLeader.send(topicMetadataRequest);
                // Scan the returned topic/partition metadata for our partition id.
                for (TopicMetadata topicMetadatum : metadataResponse.topicsMetadata()) {
                    for (PartitionMetadata partitionMetadatum : topicMetadatum.partitionsMetadata()) {
                        if (conditionDTO.getPartition() == partitionMetadatum.partitionId()) {
                            return partitionMetadatum.leader();
                        }
                    }
                }
            } finally {
                // Always release the broker connection, even on the early return above
                // (the original code leaked every SimpleConsumer it opened).
                getLeader.close();
            }
        }
        return null;
    }

    /**
     * Fetches messages for the given topic/partition starting at the given offset.
     *
     * @param conditionDTO carries the broker list, port, topic, partition and offset
     * @return a map with keys {@code "topic"}, {@code "offset"} (offset of the last
     *         message read, or the requested offset when nothing was read),
     *         {@code "data"} (UTF-8 decoded payloads) and {@code "dataSize"};
     *         an empty map when no partition leader could be found
     * @throws UnsupportedEncodingException retained for backward compatibility;
     *         no longer thrown since decoding uses {@link StandardCharsets#UTF_8}
     */
    public Map<String, Object> getData(KafkaConsumeConditionDTO conditionDTO) throws UnsupportedEncodingException {
        Map<String, Object> result = new HashMap<>();
        // Entry trace (was log.error with string concatenation); parameterized and demoted to debug.
        log.debug("进入LowerConsumer.getData方法开始获取数据,conditionDTO=\n{}", CommonUtils.objectToString(conditionDTO));
        // Locate the partition leader; without one nothing can be fetched.
        BrokerEndPoint leader = findLeader(conditionDTO);
        if (leader == null) {
            return result;
        }
        // Consumer connected to the leader, used for the actual fetch.
        SimpleConsumer getData = new SimpleConsumer(leader.host(), conditionDTO.getPort(), 1000, 1024 * 4, "getData");
        try {
            FetchRequest fetchRequest = new FetchRequestBuilder()
                    .addFetch(conditionDTO.getTopic(), conditionDTO.getPartition(), conditionDTO.getOffset(), 1024 * 4)
                    .build();
            FetchResponse fetchResponse = getData.fetch(fetchRequest);
            ByteBufferMessageSet messageAndOffsets =
                    fetchResponse.messageSet(conditionDTO.getTopic(), conditionDTO.getPartition());
            long finalOffset = conditionDTO.getOffset();
            List<String> data = new ArrayList<>();
            for (MessageAndOffset messageAndOffset : messageAndOffsets) {
                // Copy the payload bytes out of the message's buffer.
                ByteBuffer payload = messageAndOffset.message().payload();
                byte[] bytes = new byte[payload.limit()];
                payload.get(bytes);
                finalOffset = messageAndOffset.offset();
                // Decode once (the original decoded the same bytes twice);
                // StandardCharsets.UTF_8 cannot throw UnsupportedEncodingException.
                String message = new String(bytes, StandardCharsets.UTF_8);
                data.add(message);
                log.debug("{}---{}", finalOffset, message);
            }
            result.put("topic", conditionDTO.getTopic());
            result.put("offset", finalOffset);
            result.put("data", data);
            result.put("dataSize", data.size());
            return result;
        } finally {
            // Always release the network connection to the leader.
            getData.close();
        }
    }
}
// Kafka low-level consumer (SimpleConsumer) Java API example.