// handleOffsetCommitRequest processes OffsetCommitRequests, i.e. offsets committed by consumers.
/**
 * Handle an OffsetCommitRequest: validate group/topic authorization, filter out
 * unknown topics, then either write offsets to ZooKeeper (api version 0) or hand
 * them to the GroupCoordinator (api version >= 1). The response is always sent
 * asynchronously through `sendResponseCallback`.
 */
def handleOffsetCommitRequest(request: RequestChannel.Request) {
  val header = request.header
  val offsetCommitRequest = request.body.asInstanceOf[OffsetCommitRequest]

  // reject the request if not authorized to the group
  if (!authorize(request.session, Read, new Resource(Group, offsetCommitRequest.groupId))) {
    // group authorization failed: answer every partition with the same error code
    val errorCode = new JShort(Errors.GROUP_AUTHORIZATION_FAILED.code)
    val results = offsetCommitRequest.offsetData.keySet.asScala.map { topicPartition =>
      (topicPartition, errorCode)
    }.toMap
    val responseHeader = new ResponseHeader(header.correlationId)
    val responseBody = new OffsetCommitResponse(results.asJava)
    requestChannel.sendResponse(new RequestChannel.Response(request, new ResponseSend(request.connectionId, responseHeader, responseBody)))
  } else {
    // filter non-existent topics
    // drop offset data for topics that are unknown to the MetadataCache
    val invalidRequestsInfo = offsetCommitRequest.offsetData.asScala.filter { case (topicPartition, _) =>
      !metadataCache.contains(topicPartition.topic)
    }
    val filteredRequestInfo = offsetCommitRequest.offsetData.asScala.toMap -- invalidRequestsInfo.keys

    // split the remaining partitions by topic-level Read authorization
    val (authorizedRequestInfo, unauthorizedRequestInfo) = filteredRequestInfo.partition {
      case (topicPartition, offsetMetadata) => authorize(request.session, Read, new Resource(Topic, topicPartition.topic))
    }

    // the callback for sending an offset commit response
    // Builds the OffsetCommitResponse (merging per-partition error codes for
    // unauthorized and unknown topics) and enqueues it on the RequestChannel.
    def sendResponseCallback(commitStatus: immutable.Map[TopicPartition, Short]) {
      // log failed commits, then merge the status maps for the response
      val mergedCommitStatus = commitStatus ++ unauthorizedRequestInfo.mapValues(_ => Errors.TOPIC_AUTHORIZATION_FAILED.code)

      mergedCommitStatus.foreach { case (topicPartition, errorCode) =>
        if (errorCode != Errors.NONE.code) {
          debug(s"Offset commit request with correlation id ${header.correlationId} from client ${header.clientId} " +
            s"on partition $topicPartition failed due to ${Errors.forCode(errorCode).exceptionName}")
        }
      }
      val combinedCommitStatus = mergedCommitStatus.mapValues(new JShort(_)) ++ invalidRequestsInfo.map(_._1 -> new JShort(Errors.UNKNOWN_TOPIC_OR_PARTITION.code))

      val responseHeader = new ResponseHeader(header.correlationId)
      val responseBody = new OffsetCommitResponse(combinedCommitStatus.asJava)
      requestChannel.sendResponse(new RequestChannel.Response(request, new ResponseSend(request.connectionId, responseHeader, responseBody)))
    }

    if (authorizedRequestInfo.isEmpty)
      sendResponseCallback(Map.empty)
    else if (header.apiVersion == 0) {
      // legacy path for old (pre-0.9) consumers
      // for version 0 always store offsets to ZK
      val responseInfo = authorizedRequestInfo.map {
        case (topicPartition, partitionData) =>
          val topicDirs = new ZKGroupTopicDirs(offsetCommitRequest.groupId, topicPartition.topic)
          try {
            if (!metadataCache.hasTopicMetadata(topicPartition.topic))
              (topicPartition, Errors.UNKNOWN_TOPIC_OR_PARTITION.code)
            else if (partitionData.metadata != null && partitionData.metadata.length > config.offsetMetadataMaxSize)
              (topicPartition, Errors.OFFSET_METADATA_TOO_LARGE.code)
            else {
              zkUtils.updatePersistentPath(s"${topicDirs.consumerOffsetDir}/${topicPartition.partition}", partitionData.offset.toString)
              (topicPartition, Errors.NONE.code)
            }
          } catch {
            case e: Throwable => (topicPartition, Errors.forException(e).code)
          }
      }
      sendResponseCallback(responseInfo)
    } else {
      // for version 1 and beyond store offsets in offset manager

      // compute the retention time based on the request version:
      // if it is v1 or not specified by user, we can use the default retention
      val offsetRetention =
        if (header.apiVersion <= 1 ||
          // request did not override retention: use the broker's configured default
          offsetCommitRequest.retentionTime == OffsetCommitRequest.DEFAULT_RETENTION_TIME)
          coordinator.offsetConfig.offsetsRetentionMs
        else
          // retention explicitly supplied in the request
          offsetCommitRequest.retentionTime

      // commit timestamp is always set to now.
      // "default" expiration timestamp is now + retention (and retention may be overridden if v2)
      // expire timestamp is computed differently for v1 and v2.
      //   - If v1 and no explicit commit timestamp is provided we use default expiration timestamp.
      //   - If v1 and explicit commit timestamp is provided we calculate retention from that explicit commit timestamp
      //   - If v2 we use the default expiration timestamp
      val currentTimestamp = SystemTime.milliseconds
      val defaultExpireTimestamp = offsetRetention + currentTimestamp

      val partitionData = authorizedRequestInfo.mapValues { partitionData =>
        val metadata = if (partitionData.metadata == null) OffsetMetadata.NoMetadata else partitionData.metadata;
        new OffsetAndMetadata(// wrap the committed offset plus bookkeeping timestamps
          offsetMetadata = OffsetMetadata(partitionData.offset, metadata),
          commitTimestamp = currentTimestamp,
          expireTimestamp = {
            if (partitionData.timestamp == OffsetCommitRequest.DEFAULT_TIMESTAMP)
              defaultExpireTimestamp
            else
              offsetRetention + partitionData.timestamp
          }
        )
      }

      // call coordinator to handle commit offset
      // delegate the actual storage (and the eventual response) to the GroupCoordinator
      coordinator.handleCommitOffsets(
        offsetCommitRequest.groupId,
        offsetCommitRequest.memberId,
        offsetCommitRequest.generationId,
        partitionData,
        sendResponseCallback)
    }
  }
}
/**
 * Validate an offset commit against the coordinator and group state, then either
 * reply immediately with a per-partition error code or schedule the offsets to be
 * appended to the group's __consumer_offsets partition.
 *
 * @param groupId          consumer group the offsets belong to
 * @param memberId         id of the committing group member
 * @param generationId     generation the member believes it is part of; negative
 *                         for groups not using Kafka-based partition management
 * @param offsetMetadata   per-partition offsets (with metadata and timestamps) to store
 * @param responseCallback invoked exactly once with a per-partition error code map
 */
def handleCommitOffsets(groupId: String,
                        memberId: String,
                        generationId: Int,
                        offsetMetadata: immutable.Map[TopicPartition, OffsetAndMetadata],
                        responseCallback: immutable.Map[TopicPartition, Short] => Unit) {
  var delayedOffsetStore: Option[DelayedStore] = None

  if (!isActive.get) {
    // coordinator has not been started (or has been shut down)
    responseCallback(offsetMetadata.mapValues(_ => Errors.GROUP_COORDINATOR_NOT_AVAILABLE.code))
  } else if (!isCoordinatorForGroup(groupId)) {
    // this broker is not the coordinator responsible for the group
    // NOTE: the original source had this check's explanatory text pasted after the
    // brace without a comment marker, which would not compile; converted to a comment.
    responseCallback(offsetMetadata.mapValues(_ => Errors.NOT_COORDINATOR_FOR_GROUP.code))
  } else if (isCoordinatorLoadingInProgress(groupId)) {
    // the group's __consumer_offsets partition is still being loaded by this coordinator
    responseCallback(offsetMetadata.mapValues(_ => Errors.GROUP_LOAD_IN_PROGRESS.code))
  } else {
    // if there is no GroupMetadata and generationId < 0, the coordinator does not
    // manage this group's partition assignment and only records the committed offsets
    val group = groupManager.getGroup(groupId)
    if (group == null) {
      if (generationId < 0)
        // the group is not relying on Kafka for partition management, so allow the commit
        delayedOffsetStore = Some(groupManager.prepareStoreOffsets(groupId, memberId, generationId, offsetMetadata,
          responseCallback))
      else
        // the group has failed over to this coordinator (which will be handled in KAFKA-2017),
        // or this is a request coming from an older generation. either way, reject the commit
        responseCallback(offsetMetadata.mapValues(_ => Errors.ILLEGAL_GENERATION.code))
    } else {
      group synchronized {
        if (group.is(Dead)) {
          responseCallback(offsetMetadata.mapValues(_ => Errors.UNKNOWN_MEMBER_ID.code))
        } else if (group.is(AwaitingSync)) {
          responseCallback(offsetMetadata.mapValues(_ => Errors.REBALANCE_IN_PROGRESS.code))
        } else if (!group.has(memberId)) {
          responseCallback(offsetMetadata.mapValues(_ => Errors.UNKNOWN_MEMBER_ID.code))
        } else if (generationId != group.generationId) {
          responseCallback(offsetMetadata.mapValues(_ => Errors.ILLEGAL_GENERATION.code))
        } else {
          // member and generation check out: schedule the offset messages to be
          // appended to the corresponding __consumer_offsets partition
          val member = group.get(memberId)
          completeAndScheduleNextHeartbeatExpiration(group, member)
          delayedOffsetStore = Some(groupManager.prepareStoreOffsets(groupId, memberId, generationId,
            offsetMetadata, responseCallback))
        }
      }
    }
  }

  // store the offsets without holding the group lock
  delayedOffsetStore.foreach(groupManager.store)
}