Spark-LiveListenerBus 源码解析
LiveListenerBus
这个是Spark的 事件总线,定义了事件与监听器的映射关系,并不直接处理事件。而是把事件传递给专门的监听器处理。
class LiveListenerBus
和这个类交互的主要有SparkListenerInterface和SparkListenerEvent这2个接口和实现类。这2个接口分别是 监听器实体和监听事件的实体 接口。
外部使用这个对象 注册监听器 的时候,主要有5个方法可以使用:
1.addToSharedQueue
2.addToManagementQueue
3.addToStatusQueue
4.addToEventLogQueue
5.addToQueue
这5个方法都是在 增加 监听器(addToQueue 是向指定名称的队列增加监听器,前4个方法都是它的便捷封装);发布事件则使用 post 方法。
下面是这个类的源码解读。
/**
 * Asynchronous event bus for the driver. Events posted here are routed to one or more named
 * [[AsyncEventQueue]]s, each of which dispatches them to its registered
 * [[SparkListenerInterface]] listeners on its own thread. This class only routes events;
 * it never processes them itself.
 */
private[spark] class LiveListenerBus(conf: SparkConf) {
import LiveListenerBus._
private var sparkContext: SparkContext = _
// Codahale metrics source (event counter plus per-listener timers); registered with the
// MetricsSystem in start().
private[spark] val metrics = new LiveListenerBusMetrics(conf)
// Indicate if `start()` is called
private val started = new AtomicBoolean(false)
// Indicate if `stop()` is called
private val stopped = new AtomicBoolean(false)
/** A counter for dropped events. It will be reset every time we log it. */
private val droppedEventsCounter = new AtomicLong(0L)
/** When `droppedEventsCounter` was logged last time in milliseconds. */
@volatile private var lastReportTimestamp = 0L
// One AsyncEventQueue per queue name. The standard names are shared, executorManagement,
// appStatus and eventLog (see the companion object), but addToQueue accepts any name.
private val queues = new CopyOnWriteArrayList[AsyncEventQueue]()
// Visible for testing.
// Buffers events posted before start(); start() replays them into every queue and then sets
// this to null, which post() uses as a cheap lock-free "bus already started" check.
@volatile private[scheduler] var queuedEvents = new mutable.ListBuffer[SparkListenerEvent]()
/**
* The four standard queue names used by the convenience methods below:
* shared, executorManagement, appStatus, eventLog.
*
*/
/** Add a listener to queue shared by all non-internal listeners. */
def addToSharedQueue(listener: SparkListenerInterface): Unit = {
addToQueue(listener, SHARED_QUEUE)
}
/** Add a listener to the executor management queue. */
def addToManagementQueue(listener: SparkListenerInterface): Unit = {
addToQueue(listener, EXECUTOR_MANAGEMENT_QUEUE)
}
/** Add a listener to the application status queue. */
def addToStatusQueue(listener: SparkListenerInterface): Unit = {
addToQueue(listener, APP_STATUS_QUEUE)
}
/** Add a listener to the event log queue. */
def addToEventLogQueue(listener: SparkListenerInterface): Unit = {
addToQueue(listener, EVENT_LOG_QUEUE)
}
/**
* Add a listener to a specific queue, creating a new queue if needed. Queues are independent
* of each other (each one uses a separate thread for delivering events), allowing slower
* listeners to be somewhat isolated from others.
*/
private[spark] def addToQueue(
listener: SparkListenerInterface,
queue: String): Unit = synchronized {
// Registration is allowed both before and after start(); only a stopped bus rejects it.
if (stopped.get()) {
throw new IllegalStateException("LiveListenerBus is stopped.")
}
queues.asScala.find(_.name == queue) match {
case Some(queue) => // queue with this name already exists: just register the listener
queue.addListener(listener)
case None => // no queue with this name yet: create it, register the listener, start the
// queue if the bus is already running, and remember it
val newQueue = new AsyncEventQueue(queue, conf, metrics, this)
newQueue.addListener(listener) // stored in ListenerBus.listenersPlusTimers
if (started.get()) {
newQueue.start(sparkContext)
}
queues.add(newQueue)
}
}
// Remove a listener from every queue, stopping and discarding queues that become empty.
def removeListener(listener: SparkListenerInterface): Unit = synchronized {
// Remove listener from all queues it was added to, and stop queues that have become empty.
queues.asScala
.filter { queue =>
queue.removeListener(listener)
queue.listeners.isEmpty()
}
.foreach { toRemove =>
if (started.get() && !stopped.get()) {
toRemove.stop()
}
queues.remove(toRemove)
}
}
/** Post an event to all queues. */
def post(event: SparkListenerEvent): Unit = {
if (stopped.get()) {
return
}
// Count every event produced by the application, delivered or not.
metrics.numEventsPosted.inc()
// If the event buffer is null, it means the bus has been started and we can avoid
// synchronization and post events directly to the queues. This should be the most
// common case during the life of the bus.
if (queuedEvents == null) {
postToQueues(event)
return
}
// Otherwise, need to synchronize to check whether the bus is started, to make sure the thread
// calling start() picks up the new event.
synchronized {
if (!started.get()) {
queuedEvents += event // not started yet: buffer until start() replays the events
return
}
}
// If the bus was already started when the check above was made, just post directly to the
// queues.
postToQueues(event)
}
// Deliver one event to every registered queue.
private def postToQueues(event: SparkListenerEvent): Unit = {
val it = queues.iterator()
while (it.hasNext()) {
it.next().post(event) // each AsyncEventQueue buffers and dispatches asynchronously
}
}
/**
* Start sending events to attached listeners.
*
* This first sends out all buffered events posted before this listener bus has started, then
* listens for any additional events asynchronously while the listener bus is still running.
* This should only be called once.
*
* @param sc Used to stop the SparkContext in case the listener thread dies.
*/
def start(sc: SparkContext, metricsSystem: MetricsSystem): Unit = synchronized {
if (!started.compareAndSet(false, true)) {
throw new IllegalStateException("LiveListenerBus already started.")
}
this.sparkContext = sc
queues.asScala.foreach { q =>
q.start(sc) // spawns the queue's dispatch thread
queuedEvents.foreach(q.post) // replay events buffered before start() into every queue
}
queuedEvents = null // signals post() that the buffering phase is over
metricsSystem.registerSource(metrics) // expose the bus metrics via the MetricsSystem
}
/**
* For testing only. Wait until there are no more events in the queue, or until the specified
* time has elapsed. Throw `TimeoutException` if the specified time elapsed before the queue
* emptied.
* Exposed for testing.
*/
@throws(classOf[TimeoutException])
def waitUntilEmpty(timeoutMillis: Long): Unit = {
val deadline = System.currentTimeMillis + timeoutMillis
queues.asScala.foreach { queue =>
if (!queue.waitUntilEmpty(deadline)) {
throw new TimeoutException(s"The event queue is not empty after $timeoutMillis ms.")
}
}
}
/**
* Stop the listener bus. It will wait until the queued events have been processed, but drop the
* new events after stopping.
*/
def stop(): Unit = {
if (!started.get()) {
throw new IllegalStateException(s"Attempted to stop bus that has not yet started!")
}
if (!stopped.compareAndSet(false, true)) {
return
}
synchronized {
queues.asScala.foreach(_.stop())
queues.clear()
}
}
// For testing only.
private[spark] def findListenersByClass[T <: SparkListenerInterface : ClassTag](): Seq[T] = {
queues.asScala.flatMap { queue => queue.findListenersByClass[T]() }
}
// For testing only.
// Returns every listener registered with any queue.
private[spark] def listeners: JList[SparkListenerInterface] = {
queues.asScala.flatMap(_.listeners.asScala).asJava
}
// For testing only.
// Returns the names of all currently-registered queues.
private[scheduler] def activeQueues(): Set[String] = {
queues.asScala.map(_.name).toSet
}
}
object LiveListenerBus
这个伴生对象主要 设置了一些常量。
/**
 * Companion object: holds the well-known queue names and a thread-local flag used to detect
 * `stop()` calls made from inside a listener's own dispatch thread.
 */
private[spark] object LiveListenerBus {
// Allows for Context to check whether stop() call is made within listener thread
val withinListenerThread: DynamicVariable[Boolean] = new DynamicVariable[Boolean](false)
// Names of the four standard queues managed by the bus.
private[scheduler] val SHARED_QUEUE = "shared"
private[scheduler] val EXECUTOR_MANAGEMENT_QUEUE = "executorManagement"
private[scheduler] val APP_STATUS_QUEUE = "appStatus"
private[scheduler] val EVENT_LOG_QUEUE = "eventLog"
}
LiveListenerBusMetrics
这个类主要是封装了一下MetricRegistry类。
/**
 * Metrics source for the listener bus: a posted-event counter plus a lazily-created,
 * per-listener-class processing-time timer, all backed by a single MetricRegistry.
 */
private[spark] class LiveListenerBusMetrics(conf: SparkConf)
extends Source with Logging {
override val sourceName: String = "LiveListenerBus"
override val metricRegistry: MetricRegistry = new MetricRegistry
/**
* The total number of events posted to the LiveListenerBus. This is a count of the total number
* of events which have been produced by the application and sent to the listener bus, NOT a
* count of the number of events which have been processed and delivered to listeners (or dropped
* without being delivered).
*/
val numEventsPosted: Counter = metricRegistry.counter(MetricRegistry.name("numEventsPosted"))
// Guarded by synchronization.
// Cache of per-listener-class timers, capped at the configured maximum.
private val perListenerClassTimers = mutable.Map[String, Timer]()
/**
* Returns a timer tracking the processing time of the given listener class.
* events processed by that listener. This method is thread-safe.
*/
def getTimerForListenerClass(cls: Class[_ <: SparkListenerInterface]): Option[Timer] = synchronized {
val className = cls.getName
// spark.scheduler.listenerbus.metrics.maxListenerClassesTimed, default 128
val maxTimed: Int = conf.get(LISTENER_BUS_METRICS_MAX_LISTENER_CLASSES_TIMED)
perListenerClassTimers.get(className) match {
case existing @ Some(_) =>
existing
case None if perListenerClassTimers.size == maxTimed =>
// Cap reached: refuse to time any further listener classes.
logError(s"Not measuring processing time for listener class $className because a " +
s"maximum of $maxTimed listener classes are already timed.")
None
case None =>
// Register a new timer in the registry and remember it for subsequent lookups.
val timer = metricRegistry.timer(MetricRegistry.name("listenerProcessingTime", className))
perListenerClassTimers(className) = timer
Some(timer)
}
}
}
AsyncEventQueue
这个类的作用是 保存event事件 和 listener 监听器对象,开启另外一个线程完成 事件投递到listener 监听器、完成事件的处理。
class AsyncEventQueue
这个类继承了接口 SparkListenerBus,而SparkListenerBus 又继承了ListenerBus这个接口。SparkListenerBus 中主要重写了 doPostEvent 方法,完成了"事件Event需要交给哪一个Listener回调处理"的分发功能;而ListenerBus接口的主要作用是保存Listener对象并把Event事件投递给它们处理。
/**
 * A named, bounded event queue with its own daemon dispatch thread. Events posted to it are
 * buffered (dropped if the buffer is full) and delivered asynchronously to the listeners
 * registered with this queue, via the inherited ListenerBus machinery.
 */
private class AsyncEventQueue(
val name: String,
conf: SparkConf,
metrics: LiveListenerBusMetrics,
bus: LiveListenerBus)
extends SparkListenerBus
with Logging {
import AsyncEventQueue._
// Cap the capacity of the queue so we get an explicit error (rather than an OOM exception) if
// it's perpetually being added to more quickly than it's being drained.
private val eventQueue = new LinkedBlockingQueue[SparkListenerEvent](
conf.get(LISTENER_BUS_EVENT_QUEUE_CAPACITY)) // spark.scheduler.listenerbus.eventqueue.capacity, default 10000
// Keep the event count separately, so that waitUntilEmpty() can be implemented properly;
// this allows that method to return only when the events in the queue have been fully
// processed (instead of just dequeued).
private val eventCount = new AtomicLong()
/** A counter for dropped events. It will be reset every time we log it. */
private val droppedEventsCounter = new AtomicLong(0L)
/** When `droppedEventsCounter` was logged last time in milliseconds. */
@volatile private var lastReportTimestamp = 0L
// Ensures the "dropping events" error is logged at most once for this queue.
private val logDroppedEvent = new AtomicBoolean(false)
private var sc: SparkContext = null
private val started = new AtomicBoolean(false)
private val stopped = new AtomicBoolean(false)
// Metric counter for dropped events, named queue.<name>.numDroppedEvents.
private val droppedEvents = metrics.metricRegistry.counter(s"queue.$name.numDroppedEvents")
// Metric timer measuring how long this queue spends delivering each event.
private val processingTime = metrics.metricRegistry.timer(s"queue.$name.listenerProcessingTime")
// Remove the queue size gauge first, in case it was created by a previous incarnation of
// this queue that was removed from the listener bus.
metrics.metricRegistry.remove(s"queue.$name.size")
metrics.metricRegistry.register(s"queue.$name.size", new Gauge[Int] { // gauge exposing the current buffered-event count
override def getValue: Int = eventQueue.size()
})
// Daemon thread that drains eventQueue and delivers each event to this queue's listeners.
private val dispatchThread = new Thread(s"spark-listener-group-$name") {
setDaemon(true)
override def run(): Unit = Utils.tryOrStopSparkContext(sc) {
dispatch()
}
}
// Dispatch loop: take events one at a time and fan them out via postToAll until the
// POISON_PILL sentinel (enqueued by stop()) is seen.
private def dispatch(): Unit = LiveListenerBus.withinListenerThread.withValue(true) {
var next: SparkListenerEvent = eventQueue.take()
while (next != POISON_PILL) { // POISON_PILL marks the stop request
val ctx: Timer.Context = processingTime.time()
try {
super.postToAll(next) // deliver to every listener registered with this queue
} finally {
ctx.stop()
}
eventCount.decrementAndGet()
next = eventQueue.take()
}
eventCount.decrementAndGet() // account for the POISON_PILL increment made in stop()
}
override protected def getTimer(listener: SparkListenerInterface): Option[Timer] = {
metrics.getTimerForListenerClass(listener.getClass.asSubclass(classOf[SparkListenerInterface]))
}
/**
* Start an asynchronous thread to dispatch events to the underlying listeners.
*
* @param sc Used to stop the SparkContext in case the async dispatcher fails.
*/
// Called from LiveListenerBus.start() (or addToQueue() for queues created after start).
private[scheduler] def start(sc: SparkContext): Unit = {
if (started.compareAndSet(false, true)) {
this.sc = sc
dispatchThread.start() // begin draining the queue
} else {
throw new IllegalStateException(s"$name already started!")
}
}
/**
* Stop the listener bus. It will wait until the queued events have been processed, but new
* events will be dropped.
*/
private[scheduler] def stop(): Unit = {
if (!started.get()) {
throw new IllegalStateException(s"Attempted to stop $name that has not yet started!")
}
if (stopped.compareAndSet(false, true)) {
eventCount.incrementAndGet() // matched by the final decrement in dispatch()
eventQueue.put(POISON_PILL)
}
// this thread might be trying to stop itself as part of error handling -- we can't join
// in that case.
if (Thread.currentThread() != dispatchThread) {
dispatchThread.join()
}
}
// Post an event to this queue's buffer; drops (and records) the event if the buffer is full.
def post(event: SparkListenerEvent): Unit = {
if (stopped.get()) {
return
}
eventCount.incrementAndGet() // optimistically count the event as pending
if (eventQueue.offer(event)) { // non-blocking: returns false when the buffer is full
return
}
eventCount.decrementAndGet() // buffer full: undo the optimistic count
droppedEvents.inc() // metric counter for dropped events
droppedEventsCounter.incrementAndGet() // counter used for the periodic warning below
if (logDroppedEvent.compareAndSet(false, true)) {
// Only log the following message once to avoid duplicated annoying logs.
logError(s"Dropping event from queue $name. " +
"This likely means one of the listeners is too slow and cannot keep up with " +
"the rate at which tasks are being started by the scheduler.")
}
logTrace(s"Dropping event $event")
val droppedCount = droppedEventsCounter.get
if (droppedCount > 0) {
// Don't log too frequently
// Report aggregated drop counts at most once every 60 seconds.
if (System.currentTimeMillis() - lastReportTimestamp >= 60 * 1000) {
// There may be multiple threads trying to decrease droppedEventsCounter.
// Use "compareAndSet" to make sure only one thread can win.
// And if another thread is increasing droppedEventsCounter, "compareAndSet" will fail and
// then that thread will update it.
if (droppedEventsCounter.compareAndSet(droppedCount, 0)) {
val prevLastReportTimestamp = lastReportTimestamp
lastReportTimestamp = System.currentTimeMillis()
val previous = new java.util.Date(prevLastReportTimestamp)
logWarning(s"Dropped $droppedCount events from $name since $previous.")
}
}
}
}
/**
* For testing only. Wait until there are no more events in the queue.
*
* @return true if the queue is empty.
*/
def waitUntilEmpty(deadline: Long): Boolean = {
while (eventCount.get() != 0) {
if (System.currentTimeMillis > deadline) {
return false
}
Thread.sleep(10)
}
true
}
override def removeListenerOnError(listener: SparkListenerInterface): Unit = {
// the listener failed in an unrecoverably way, we want to remove it from the entire
// LiveListenerBus (potentially stopping a queue if it is empty)
bus.removeListener(listener)
}
}
object AsyncEventQueue
这个伴生对象只是定义了一个特殊的SparkListenerEvent对象POISON_PILL,用来停止 dispatcher 线程。即 dispatcher 线程在队列中取到 POISON_PILL 事件的时候,会自动退出,从而停止该线程。
private object AsyncEventQueue {
// Sentinel event enqueued by stop(); the dispatch thread exits its loop when it takes it.
val POISON_PILL = new SparkListenerEvent() { }
}
SparkListenerBus
AsyncEventQueue继承了SparkListenerBus这个接口,同时SparkListenerBus这个接口又继承了ListenerBus的接口。SparkListenerBus对于自己的父接口ListenerBus来说,只是重写这个doPostEvent这个方法,这个方法的作用是 把Event事件 投递给 对应的 Listener 来处理。
/**
 * Specializes ListenerBus for Spark's listener/event types: doPostEvent pattern-matches on the
 * concrete event class and invokes the corresponding SparkListenerInterface callback. Events
 * with no dedicated callback fall through to onOtherEvent.
 */
private[spark] trait SparkListenerBus
extends ListenerBus[SparkListenerInterface, SparkListenerEvent] {
protected override def doPostEvent(
listener: SparkListenerInterface,
event: SparkListenerEvent): Unit = {
event match {
case stageSubmitted: SparkListenerStageSubmitted =>
listener.onStageSubmitted(stageSubmitted)
case stageCompleted: SparkListenerStageCompleted =>
listener.onStageCompleted(stageCompleted)
case jobStart: SparkListenerJobStart =>
listener.onJobStart(jobStart)
case jobEnd: SparkListenerJobEnd =>
listener.onJobEnd(jobEnd)
case taskStart: SparkListenerTaskStart =>
listener.onTaskStart(taskStart)
case taskGettingResult: SparkListenerTaskGettingResult =>
listener.onTaskGettingResult(taskGettingResult)
case taskEnd: SparkListenerTaskEnd =>
listener.onTaskEnd(taskEnd)
case environmentUpdate: SparkListenerEnvironmentUpdate =>
listener.onEnvironmentUpdate(environmentUpdate)
case blockManagerAdded: SparkListenerBlockManagerAdded =>
listener.onBlockManagerAdded(blockManagerAdded)
case blockManagerRemoved: SparkListenerBlockManagerRemoved =>
listener.onBlockManagerRemoved(blockManagerRemoved)
case unpersistRDD: SparkListenerUnpersistRDD =>
listener.onUnpersistRDD(unpersistRDD)
case applicationStart: SparkListenerApplicationStart =>
listener.onApplicationStart(applicationStart)
case applicationEnd: SparkListenerApplicationEnd =>
listener.onApplicationEnd(applicationEnd)
case metricsUpdate: SparkListenerExecutorMetricsUpdate =>
listener.onExecutorMetricsUpdate(metricsUpdate)
case executorAdded: SparkListenerExecutorAdded =>
listener.onExecutorAdded(executorAdded)
case executorRemoved: SparkListenerExecutorRemoved =>
listener.onExecutorRemoved(executorRemoved)
case executorBlacklisted: SparkListenerExecutorBlacklisted =>
listener.onExecutorBlacklisted(executorBlacklisted)
case executorUnblacklisted: SparkListenerExecutorUnblacklisted =>
listener.onExecutorUnblacklisted(executorUnblacklisted)
case nodeBlacklisted: SparkListenerNodeBlacklisted =>
listener.onNodeBlacklisted(nodeBlacklisted)
case nodeUnblacklisted: SparkListenerNodeUnblacklisted =>
listener.onNodeUnblacklisted(nodeUnblacklisted)
case blockUpdated: SparkListenerBlockUpdated =>
listener.onBlockUpdated(blockUpdated)
case speculativeTaskSubmitted: SparkListenerSpeculativeTaskSubmitted =>
listener.onSpeculativeTaskSubmitted(speculativeTaskSubmitted)
// Custom / unrecognized events get the generic callback.
case _ => listener.onOtherEvent(event)
}
}
}
ListenerBus
这个接口是Spark事件总线的总接口,主要保存 Listener 对象,规范了 Event 事件投递到 Listener 的处理流程,并且面向接口编程,预留对应的方法供子类实现。
/**
 * Generic event bus: stores (listener, optional timer) pairs and delivers each posted event to
 * every listener, timing the delivery and removing listeners that are interrupted while
 * handling an event. Subclasses decide how an event maps to a listener callback (doPostEvent).
 */
private[spark] trait ListenerBus[L <: AnyRef, E] extends Logging {
// Backing store for all registered listeners (each paired with its optional metrics timer);
// CopyOnWriteArrayList makes add/remove thread-safe while postToAll iterates.
private[this] val listenersPlusTimers: CopyOnWriteArrayList[(L, Option[Timer])] = new CopyOnWriteArrayList[(L, Option[Timer])]
// Marked `private[spark]` for access in tests.
private[spark] def listeners = listenersPlusTimers.asScala.map(_._1).asJava
/**
* Returns a CodaHale metrics Timer for measuring the listener's event processing time.
* This method is intended to be overridden by subclasses.
*/
protected def getTimer(listener: L): Option[Timer] = None
/**
* Add a listener to listen events. This method is thread-safe and can be called in any thread.
*/
final def addListener(listener: L): Unit = {
listenersPlusTimers.add((listener, getTimer(listener)))
}
/**
* Remove a listener and it won't receive any events. This method is thread-safe and can be called
* in any thread.
*/
final def removeListener(listener: L): Unit = {
// Reference equality (eq): remove exactly the registered instance, not an equal copy.
listenersPlusTimers.asScala.find(_._1 eq listener).foreach { listenerAndTimer =>
listenersPlusTimers.remove(listenerAndTimer)
}
}
/**
* This can be overriden by subclasses if there is any extra cleanup to do when removing a
* listener. In particular AsyncEventQueues can clean up queues in the LiveListenerBus.
*/
def removeListenerOnError(listener: L): Unit = {
removeListener(listener)
}
/**
* Post the event to all registered listeners. The `postToAll` caller should guarantee calling
* `postToAll` in the same thread for all events.
*/
def postToAll(event: E): Unit = {
// JavaConverters can create a JIterableWrapper if we use asScala.
// However, this method will be called frequently. To avoid the wrapper cost, here we use
// Java Iterator directly.
val iter = listenersPlusTimers.iterator
while (iter.hasNext) {
val listenerAndMaybeTimer = iter.next()
val listener = listenerAndMaybeTimer._1
val maybeTimer = listenerAndMaybeTimer._2
// Start the per-listener-class timer, if one was assigned at registration time.
val maybeTimerContext = if (maybeTimer.isDefined) {
maybeTimer.get.time()
} else {
null
}
try {
doPostEvent(listener, event) // subclass (e.g. SparkListenerBus) maps event -> callback
if (Thread.interrupted()) {
// We want to throw the InterruptedException right away so we can associate the interrupt
// with this listener, as opposed to waiting for a queue.take() etc. to detect it.
throw new InterruptedException()
}
} catch {
case ie: InterruptedException =>
logError(s"Interrupted while posting to ${Utils.getFormattedClassName(listener)}. " +
s"Removing that listener.", ie)
removeListenerOnError(listener)
case NonFatal(e) =>
// One failing listener must not prevent delivery to the remaining listeners.
logError(s"Listener ${Utils.getFormattedClassName(listener)} threw an exception", e)
} finally {
if (maybeTimerContext != null) {
maybeTimerContext.stop()
}
}
}
}
/**
* Post an event to the specified listener. `onPostEvent` is guaranteed to be called in the same
* thread for all listeners.
*/
// Implemented by subclasses to route an event to the right listener callback.
protected def doPostEvent(listener: L, event: E): Unit
// Returns all registered listeners whose runtime class is exactly T.
private[spark] def findListenersByClass[T <: L : ClassTag](): Seq[T] = {
val c = implicitly[ClassTag[T]].runtimeClass
listeners.asScala.filter(_.getClass == c).map(_.asInstanceOf[T]).toSeq
}
}
SparkContext 中LiveListenerBus的使用
//实例化对象
_listenerBus = new LiveListenerBus(_conf)
_statusStore = AppStatusStore.createLiveStore(conf)
listenerBus.addToStatusQueue(_statusStore.listener.get)
//SparkEnv中
val blockManagerMaster = new BlockManagerMaster(
registerOrLookupEndpoint(
BlockManagerMaster.DRIVER_ENDPOINT_NAME, // BlockManagerMaster.DRIVER_ENDPOINT_NAME = BlockManagerMaster
new BlockManagerMasterEndpoint(rpcEnv, isLocal, conf, listenerBus)),
conf, isDriver)
_eventLogger =
if (isEventLogEnabled) {
val logger =
new EventLoggingListener(_applicationId, _applicationAttemptId, _eventLogDir.get,
_conf, _hadoopConfiguration)
logger.start()
listenerBus.addToEventLogQueue(logger)
Some(logger)
} else {
None
}
val dynamicAllocationEnabled = Utils.isDynamicAllocationEnabled(_conf)
_executorAllocationManager =
if (dynamicAllocationEnabled) {
schedulerBackend match {
case b: ExecutorAllocationClient =>
Some(new ExecutorAllocationManager(
schedulerBackend.asInstanceOf[ExecutorAllocationClient], listenerBus, _conf,
_env.blockManager.master))
case _ =>
None
}
} else {
None
}
_executorAllocationManager.foreach(_.start())
setupAndStartListenerBus
listenerBus.post(SparkListenerApplicationStart(appName, Some(applicationId),
startTime, sparkUser, applicationAttemptId, schedulerBackend.getDriverLogUrls))
listenerBus.post(SparkListenerApplicationEnd(System.currentTimeMillis))
val environmentUpdate = SparkListenerEnvironmentUpdate(environmentDetails)
listenerBus.post(environmentUpdate)
总结
LiveListenerBus 消息总线 是应用 Listener和Event的模型,完成过程事件的一个统一的调度和处理。具体的有哪些过程可以参考 SparkListenerBus 部分。通过以上代码的解读可以知道 每一个Event在每一个Listener中都会有 对应的处理,只不过有的处理是 空处理而已。