KafkaController.onControllerFailover
//通过这儿注册了一个监听器,感知到集群里面有新的broker进来了。
replicaStateMachine.registerListeners()
-> registerBrokerChangeListener
// 对/brokers/ids 目录设置了监听器
zkUtils.zkClient.subscribeChildChanges(ZkUtils.BrokerIdsPath, brokerChangeListener)
-> brokerChangeListener = new BrokerChangeListener()
// Listener registered on the /brokers/ids path; ZkClient fires it whenever the set of
// registered broker znodes changes (i.e. a broker starts up or its ephemeral node goes away).
class BrokerChangeListener() extends IZkChildListener with Logging {
this.logIdent = "[BrokerChangeListener on Controller " + controller.config.brokerId + "]: "
// Callback invoked with the current children of /brokers/ids. Diffs that list against the
// controller's cached view to detect newly started and dead brokers, then triggers the
// corresponding controller callbacks.
def handleChildChange(parentPath : String, currentBrokerList : java.util.List[String]) {
info("Broker change listener fired for path %s with children %s".format(parentPath, currentBrokerList.sorted.mkString(",")))
inLock(controllerContext.controllerLock) {
if (hasStarted.get) {
ControllerStats.leaderElectionTimer.time {
try {
// Resolve each id currently under /brokers/ids to its full Broker metadata;
// flatMap drops ids whose registration znode disappeared between the event and this read.
val curBrokers = currentBrokerList.map(_.toInt).toSet.flatMap(zkUtils.getBrokerInfo)
// IDs of all brokers currently registered in ZooKeeper
val curBrokerIds = curBrokers.map(_.id)
// Brokers the controller currently considers alive (or in controlled shutdown)
val liveOrShuttingDownBrokerIds = controllerContext.liveOrShuttingDownBrokerIds
// Present in ZK but not in our cache => newly registered brokers
val newBrokerIds = curBrokerIds -- liveOrShuttingDownBrokerIds
// Present in our cache but no longer in ZK => dead brokers
val deadBrokerIds = liveOrShuttingDownBrokerIds -- curBrokerIds
val newBrokerIds = curBrokerIds -- liveOrShuttingDownBrokerIds
val newBrokers = curBrokers.filter(broker => newBrokerIds(broker.id))
// Replace the cached live-broker set BEFORE invoking the callbacks below,
// so they observe the up-to-date cluster view.
controllerContext.liveBrokers = curBrokers
val newBrokerIdsSorted = newBrokerIds.toSeq.sorted
val deadBrokerIdsSorted = deadBrokerIds.toSeq.sorted
val liveBrokerIdsSorted = curBrokerIds.toSeq.sorted
info("Newly added brokers: %s, deleted brokers: %s, all live brokers: %s"
.format(newBrokerIdsSorted.mkString(","), deadBrokerIdsSorted.mkString(","), liveBrokerIdsSorted.mkString(",")))
// Open/close the controller-to-broker request channels before firing the
// state-change callbacks that will send requests over them.
newBrokers.foreach(controllerContext.controllerChannelManager.addBroker)
deadBrokerIds.foreach(controllerContext.controllerChannelManager.removeBroker)
// Non-empty => at least one new broker joined the cluster; let the controller
// bring its replicas online and propagate updated metadata.
if(newBrokerIds.nonEmpty)
controller.onBrokerStartup(newBrokerIdsSorted)
// Non-empty => at least one broker died; let the controller move leadership
// off it and mark its replicas offline.
if(deadBrokerIds.nonEmpty)
controller.onBrokerFailure(deadBrokerIdsSorted)
} catch {
// NOTE(review): catching Throwable keeps the ZkClient event thread alive on any
// error, but also swallows fatal JVM errors — consider scala.util.control.NonFatal.
case e: Throwable => error("Error while handling broker changes", e)
}
}
}
}
}
}
//TODO 这儿就是注册上来了broker,然后我们看一下如何处理的?
-> controller.onBrokerStartup(newBrokerIdsSorted)
//TODO 发送一个元数据更新的请求
sendUpdateMetadataRequest(controllerContext.liveOrShuttingDownBrokerIds.toSeq)
//TODO 发元数据更新的请求给其他所有的broker
brokerRequestBatch.sendRequestsToBrokers(epoch)
//TODO 发送请求 ApiKeys.UPDATE_METADATA_KEY
controller.sendRequest(broker, ApiKeys.UPDATE_METADATA_KEY, Some(version), updateMetadataRequest, null)
=> KafkaApis. //TODO 每一台broker都会接收到元数据更新的请求
case ApiKeys.UPDATE_METADATA_KEY => handleUpdateMetadataRequest(request)
//TODO 对接收到的请求【元数据变化】进行处理
replicaManager.maybeUpdateMetadataCache(correlationId, updateMetadataRequest, metadataCache)
//TODO broker更新自己的元数据信息了。
metadataCache.updateCache(correlationId, updateMetadataRequest)
def updateCache(correlationId: Int, updateMetadataRequest: UpdateMetadataRequest) {
inWriteLock(partitionMetadataLock) {
controllerId = updateMetadataRequest.controllerId match {
case id if id < 0 => None
case id => Some(id)
}
//把以前的对应的数据结构清空
aliveNodes.clear()
aliveBrokers.clear()
//获取到 发送过来请求的里面的元数据
updateMetadataRequest.liveBrokers.asScala.foreach { broker =>
val nodes = new EnumMap[SecurityProtocol, Node](classOf[SecurityProtocol])
val endPoints = new EnumMap[SecurityProtocol, EndPoint](classOf[SecurityProtocol])
broker.endPoints.asScala.foreach { case (protocol, ep) =>
endPoints.put(protocol, EndPoint(ep.host, ep.port, protocol))
nodes.put(protocol, new Node(broker.id, ep.host, ep.port))
}
//用新的数据信息更新之前的过时的数据结构
aliveBrokers(broker.id) = Broker(broker.id, endPoints.asScala, Option(broker.rack))
aliveNodes(broker.id) = nodes.asScala
}
kafka 集群管理-如何感知新注册进来的broker
最新推荐文章于 2023-04-09 17:36:13 发布