KafkaApi.handleFetchRequest
// Pull (fetch) the data
replicaManager.fetchMessages
// Read the log data from the local disk
val logReadResults = readFromLocalLog
def readFromLocalLog(fetchOnlyFromLeader: Boolean,
readOnlyCommitted: Boolean,
fetchMaxBytes: Int,
hardMaxBytesLimit: Boolean,
readPartitionInfo: Seq[(TopicAndPartition, PartitionFetchInfo)],
quota: ReplicaQuota): Seq[(TopicAndPartition, LogReadResult)] = {
// Partitions are read one at a time
readPartitionInfo.foreach { case (tp, fetchInfo) =>
//TODO Call the read method
val readResult = read(tp, fetchInfo, limitBytes, minOneMessage)
val messageSetSize = readResult.info.messageSet.sizeInBytes
// Once we read from a non-empty partition, we stop ignoring request and partition level size limits
if (messageSetSize > 0)
minOneMessage = false
limitBytes = math.max(0, limitBytes - messageSetSize)
result += (tp -> readResult)
}
// Call the read method
-> val readResult = read(tp, fetchInfo, limitBytes, minOneMessage)
def read(tp: TopicAndPartition, fetchInfo: PartitionFetchInfo, limitBytes: Int, minOneMessage: Boolean): LogReadResult = {
val TopicAndPartition(topic, partition) = tp
val PartitionFetchInfo(offset, fetchSize) = fetchInfo
BrokerTopicStats.getBrokerTopicStats(topic).totalFetchRequestRate.mark()
BrokerTopicStats.getBrokerAllTopicsStats().totalFetchRequestRate.mark()
try {
trace(s"Fetching log segment for partition $tp, offset ${offset}, partition fetch size ${fetchSize}, " +
s"remaining response limit ${limitBytes}" +
(if (minOneMessage) s", ignoring response/partition size limits" else ""))
// decide whether to only fetch from leader
// Obtain the leader partition (local replica)
val localReplica = if (fetchOnlyFromLeader)
getLeaderReplicaIfLocal(topic, partition)
else
getReplicaOrException(topic, partition)
// decide whether to only fetch committed data (i.e. messages below high watermark)
val maxOffsetOpt = if (readOnlyCommitted)
Some(localReplica.highWatermark.messageOffset)
else
None
/* Read the LogOffsetMetadata prior to performing the read from the log.
* We use the LogOffsetMetadata to determine if a particular replica is in-sync or not.
* Using the log end offset after performing the read can lead to a race condition
* where data gets appended to the log immediately after the replica has consumed from it
* This can cause a replica to always be out of sync.
*/
val initialLogEndOffset = localReplica.logEndOffset
val logReadInfo = localReplica.log match {
case Some(log) =>
val adjustedFetchSize = math.min(fetchSize, limitBytes)
// Try the read first, this tells us whether we need all of adjustedFetchSize for this partition
//TODO Read the data through the Log object
val fetch = log.read(offset, adjustedFetchSize, maxOffsetOpt, minOneMessage)
//TODO Read the data through the Log object
--> val fetch = log.read(offset, adjustedFetchSize, maxOffsetOpt, minOneMessage)
//TODO Core code: read the data on disk through the segment
val fetchInfo = entry.getValue.read(startOffset, maxOffset, maxLength, maxPosition, minOneMessage)
---> //TODO log.read
FetchDataInfo(offsetMetadata, log.read(startPosition.position, length),
firstMessageSetIncomplete = adjustedMaxSize < messageSetSize)
/**
 * Return a [[FileMessageSet]] view over a sub-range of this message set.
 *
 * @param position the starting byte offset of the slice, relative to this set's start
 * @param size the maximum number of bytes in the slice
 * @return a FileMessageSet sharing this set's file and channel, restricted to
 *         [start + position, min(start + position + size, sizeInBytes()))
 * @throws IllegalArgumentException if position or size is negative
 */
def read(position: Int, size: Int): FileMessageSet = {
  if (position < 0)
    throw new IllegalArgumentException("Invalid position: " + position)
  if (size < 0)
    throw new IllegalArgumentException("Invalid size: " + size)
  // Absolute start offset of the slice within the underlying file
  val sliceStart = this.start + position
  // Requested absolute end of the slice; with Int arithmetic this can wrap
  // negative on overflow, in which case we clamp to the full set size
  val requestedEnd = sliceStart + size
  val sliceEnd =
    if (requestedEnd < 0) sizeInBytes()
    else math.min(requestedEnd, sizeInBytes())
  // The returned object is a window onto the same file/channel — no data is copied
  new FileMessageSet(file, channel, start = sliceStart, end = sliceEnd)
}
Kafka replica synchronization — how the leader handles fetch requests
Latest recommended article published 2024-06-08 16:41:55