1. During SparkContext initialization the following runs:
// Create and start the scheduler. The master string was resolved in SparkSubmit.main: spark://luyl152:7077,luyl153:7077,luyl154:7077
// If the cluster manager is standalone, this method returns (SparkDeploySchedulerBackend, TaskSchedulerImpl)
val (sched, ts) = SparkContext.createTaskScheduler(this, master)
2. The cluster manager here is standalone, so the master URL is matched by regex and the method returns (SparkDeploySchedulerBackend, TaskSchedulerImpl):
/**
 * Create a task scheduler based on a given master URL.
 * Return a 2-tuple of the scheduler backend and the task scheduler.
 * Called during SparkContext initialization; it creates the TaskScheduler and returns the SchedulerBackend along with it.
 * master: spark://luyl152:7077,luyl153:7077,luyl154:7077
 * In standalone mode this method returns (SparkDeploySchedulerBackend, TaskSchedulerImpl).
 */
private def createTaskScheduler(
sc: SparkContext,
master: String): (SchedulerBackend, TaskScheduler) = {
import SparkMasterRegex._
// When running locally, don't try to re-execute tasks on failure.
val MAX_LOCAL_TASK_FAILURES = 1
master match {
...
// standalone mode matches this pattern: """spark://(.*)""".r
case SPARK_REGEX(sparkUrl) =>
// sparkUrl is the master string without the scheme: luyl152:7077,luyl153:7077,luyl154:7077
/**
 * How the extractor works: pattern is """spark://(.*)""".r and s is the string being matched:
 *   val m = pattern matcher s
 *   if (runMatcher(m)) Some((1 to m.groupCount).toList map m.group)
 * runMatcher(m) simply calls Matcher.matches(). groupCount() returns the number of capture groups in the
 * regex (group 0, the whole match, is not counted); indices start at 0, m.group(0) is the entire string and
 * m.group(1) is the first capture group.
 * "1 to 1" is a range closed on both ends, so the first argument of SPARK_REGEX(sparkUrl) binds to the first
 * group, a second argument would bind to the second group, and so on (see the standalone sketch after this
 * code listing).
 */
val scheduler = new TaskSchedulerImpl(sc)
// masterUrls: Array("spark://luyl152:7077", "spark://luyl153:7077", "spark://luyl154:7077")
val masterUrls = sparkUrl.split(",").map("spark://" + _)
// In standalone mode the scheduling backend is CoarseGrainedSchedulerBackend; its subclass SparkDeploySchedulerBackend does the actual scheduling
val backend = new SparkDeploySchedulerBackend(scheduler, sc, masterUrls)
scheduler.initialize(backend)
(backend, scheduler)
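As an aside, here is a minimal, self-contained sketch of the extractor behavior described in the comment above. The regex literal matches the one SparkContext uses; the object and variable names are illustrative only.

object RegexExtractorDemo {
  // Same pattern SparkContext uses to recognize standalone master URLs.
  val SPARK_REGEX = """spark://(.*)""".r

  def main(args: Array[String]): Unit = {
    val master = "spark://luyl152:7077,luyl153:7077,luyl154:7077"
    master match {
      // Regex.unapplySeq binds the first (and only) capture group to sparkUrl.
      case SPARK_REGEX(sparkUrl) =>
        println(sparkUrl) // luyl152:7077,luyl153:7077,luyl154:7077
        // Rebuild one URL per master, just like createTaskScheduler does.
        val masterUrls = sparkUrl.split(",").map("spark://" + _)
        masterUrls.foreach(println)
      case other =>
        println(s"not a standalone master URL: $other")
    }
  }
}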
3. TaskSchedulerImpl initialization:
/**
 * Tasks are scheduled through a SchedulerBackend, the back-end interface of the scheduling system. Each
 * cluster manager has its own backend (standalone uses CoarseGrainedSchedulerBackend), and each backend can
 * use TaskSchedulerImpl to run the concrete tasks. TaskSchedulerImpl handles the common logic, such as
 * deciding the scheduling order across jobs and waking up to launch speculative tasks.
 * Clients should first call initialize() and start(), then submit task sets through runTasks().
 * Threading: SchedulerBackends and task-submitting clients can call this class from multiple threads, so it
 * needs to lock in its public API methods to maintain its state. In addition, some SchedulerBackends
 * synchronize on themselves when they want to send events here and then acquire our lock, so we must make
 * sure we never try to lock the backend while holding our own lock, otherwise a deadlock is certain.
 */
private[spark] class TaskSchedulerImpl(
val sc: SparkContext,
val maxTaskFailures: Int, // from spark.task.maxFailures, default 4 (i.e. 3 retries)
isLocal: Boolean = false)
extends TaskScheduler with Logging
{
// Number of times a single task can fail before the job is abandoned. Should be >= 1; allowed retries = this value - 1.
def this(sc: SparkContext) = this(sc, sc.conf.getInt("spark.task.maxFailures", 4))
val conf = sc.conf
// How often to check for speculative tasks (every 100ms by default)
val SPECULATION_INTERVAL_MS = conf.getTimeAsMs("spark.speculation.interval", "100ms")
// single-threaded scheduled executor used for the speculation checks
private val speculationScheduler =
ThreadUtils.newDaemonSingleThreadScheduledExecutor("task-scheduler-speculation")
// Threshold above which we warn the user that the initial TaskSet may be starved
val STARVATION_TIMEOUT_MS = conf.getTimeAsMs("spark.starvation.timeout", "15s")
// CPUs to request per task (one CPU per task by default)
val CPUS_PER_TASK = conf.getInt("spark.task.cpus", 1)
// TaskSetManagers are not thread safe, so any access to one should be synchronized on this class.
private val taskSetsByStageIdAndAttempt = new HashMap[Int, HashMap[Int, TaskSetManager]]
private[scheduler] val taskIdToTaskSetManager = new HashMap[Long, TaskSetManager]
val taskIdToExecutorId = new HashMap[Long, String]
@volatile private var hasReceivedTask = false
@volatile private var hasLaunchedTask = false
private val starvationTimer = new Timer(true)
// Incrementing task IDs
val nextTaskId= new AtomicLong(0)
// Number of tasks running on each executor
private val executorIdToTaskCount = new HashMap[String, Int]
// The set of executors we have on each host; this is used to compute hostsAlive, which
// in turn is used to decide when we can attain data locality on a given host
protected val executorsByHost = new HashMap[String, HashSet[String]]
protected val hostsByRack = new HashMap[String, HashSet[String]]
protected val executorIdToHost = new HashMap[String, String]
// Listener object to pass upcalls into
var dagScheduler: DAGScheduler = null
var backend: SchedulerBackend = null
/** mapOutputTracker: new MapOutputTrackerMaster(conf) on the driver | new MapOutputTrackerWorker(conf) on executors
 * MapOutputTrackerMaster belongs to the driver; it uses a TimeStampedHashMap to track map output info so that
 * stale entries can be cleaned up (spark.cleaner.ttl).
 * Both MapOutputTrackerMaster and MapOutputTrackerWorker (which runs inside executors) extend MapOutputTracker.
 * 1. MapOutputTrackerMaster records where the map output needed by ShuffleMapTasks lives:
 *    a. before a shuffle reader reads the shuffle files, it asks MapOutputTrackerMaster where the data it has to process is located;
 *    b. MapOutputTracker replies with a list of MapOutputTrackerWorker locations (address, port, etc.).
 * 2. MapOutputTrackerWorker merely serves as a cache for performing the shuffle computation.
 */
val mapOutputTracker = SparkEnv.get.mapOutputTracker
var schedulableBuilder: SchedulableBuilder = null
var rootPool: Pool = null
// default scheduling mode is FIFO (first in, first out)
private val schedulingModeConf = conf.get("spark.scheduler.mode", "FIFO")
val schedulingMode: SchedulingMode = try {
// Enumeration lookup: resolve the enum value from the string, e.g. "FIFO"
SchedulingMode.withName(schedulingModeConf.toUpperCase)
} catch {
case e: java.util.NoSuchElementException =>
throw new SparkException(s"Unrecognized spark.scheduler.mode: $schedulingModeConf")
}
// This is a var so that we can reset it for testing purposes.
// Thread pool that deserializes and, if necessary, remotely fetches task results.
private[spark] var taskResultGetter = new TaskResultGetter(sc.env, this)
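For reference, a small self-contained sketch of how the Enumeration lookup above behaves. The demo enum only mirrors SchedulingMode's values (FAIR, FIFO, NONE); the real object lives in org.apache.spark.scheduler.

object SchedulingModeDemo {
  // Mirrors org.apache.spark.scheduler.SchedulingMode (FAIR, FIFO, NONE).
  object Mode extends Enumeration {
    val FAIR, FIFO, NONE = Value
  }

  def main(args: Array[String]): Unit = {
    val configured = "fifo" // e.g. the value read for spark.scheduler.mode
    println(Mode.withName(configured.toUpperCase)) // prints FIFO
    // An unknown name throws java.util.NoSuchElementException,
    // which TaskSchedulerImpl wraps in a SparkException.
    try Mode.withName("LIFO") catch {
      case e: java.util.NoSuchElementException => println(s"rejected: ${e.getMessage}")
    }
  }
}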
4. Back in createTaskScheduler(), SparkDeploySchedulerBackend is then constructed, taking the TaskSchedulerImpl, the SparkContext and the normalized master URLs (active master plus standbys), and is wired into the scheduler:
val scheduler = new TaskSchedulerImpl(sc)
// masterUrls: Array("spark://luyl152:7077", "spark://luyl153:7077", "spark://luyl154:7077")
val masterUrls = sparkUrl.split(",").map("spark://" + _)
// In standalone mode the scheduling backend is CoarseGrainedSchedulerBackend; its subclass SparkDeploySchedulerBackend does the actual scheduling
val backend = new SparkDeploySchedulerBackend(scheduler, sc, masterUrls)
scheduler.initialize(backend)
(backend, scheduler)
a. SparkDeploySchedulerBackend extends CoarseGrainedSchedulerBackend, mixes in AppClientListener (implementing its four event callbacks), and initializes a few member fields:
// The constructor shows that each cluster mode's scheduling backend uses TaskSchedulerImpl to execute tasks.
// Reportedly it is SparkDeploySchedulerBackend that allocates compute resources to tasks, though I have not verified this myself yet.
// masters: Array("spark://luyl152:7077", "spark://luyl153:7077", "spark://luyl154:7077")
private[spark] class SparkDeploySchedulerBackend(
scheduler: TaskSchedulerImpl,
sc: SparkContext,
masters: Array[String])
extends CoarseGrainedSchedulerBackend(scheduler, sc.env.rpcEnv) // i.e. a subclass of the coarse-grained backend
with AppClientListener
with Logging {
/**
 * AppClientListener is called back by the deploy client when various events occur. Currently there are four:
 * connecting to the cluster, disconnecting, being granted an executor, and having an executor removed
 * (whether through failure or revocation).
 * Users must not block inside the callback methods.
 */
private var client: AppClient = null // the Application's interface to the Master
private var stopping = false
private val launcherBackend = new LauncherBackend() {
override protected def onStopRequest(): Unit = stop(SparkAppHandle.State.KILLED)
}
@volatile var shutdownCallback: SparkDeploySchedulerBackend => Unit = _
@volatile private var appId: String = _
private val registrationBarrier = new Semaphore(0)
// Maximum number of CPU cores to request for the application; if unset, spark.deploy.defaultCores is used
private val maxCores = conf.getOption("spark.cores.max").map(_.toInt)
private val totalExpectedCores = maxCores.getOrElse(0)
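As a quick illustration, here is a hedged sketch of the configuration keys feeding the fields above (spark.cores.max -> maxCores, spark.task.cpus -> CPUS_PER_TASK, spark.task.maxFailures -> maxTaskFailures); the values and app name are examples only.

import org.apache.spark.SparkConf

val conf = new SparkConf()
  .setMaster("spark://luyl152:7077,luyl153:7077,luyl154:7077")
  .setAppName("scheduler-config-demo")  // illustrative app name
  .set("spark.cores.max", "6")          // cap on cores requested; spark.deploy.defaultCores applies if unset
  .set("spark.task.cpus", "1")          // CPUS_PER_TASK
  .set("spark.task.maxFailures", "4")   // 4 attempts per task, i.e. 3 retries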
b. Then TaskSchedulerImpl.initialize(SparkDeploySchedulerBackend) is called:
def initialize(backend: SchedulerBackend) {
// Called from SparkContext.createTaskScheduler(), which injects the SparkDeploySchedulerBackend and stores it in the TaskSchedulerImpl.backend field
this.backend = backend
// temporarily set rootPool name to empty
// a schedulable entity representing a collection of Pools or TaskSetManagers
rootPool = new Pool("", schedulingMode, 0, 0)
schedulableBuilder = {
schedulingMode match {
case SchedulingMode.FIFO =>
new FIFOSchedulableBuilder(rootPool)
case SchedulingMode.FAIR =>
new FairSchedulableBuilder(rootPool, conf)
}
}
// FIFOSchedulableBuilder.buildPools() does nothing
schedulableBuilder.buildPools()
}
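If FAIR mode were chosen instead, a FairSchedulableBuilder would be created and its buildPools() would read pool definitions. A minimal sketch of switching modes, with an illustrative file path:

import org.apache.spark.SparkConf

val conf = new SparkConf()
  .set("spark.scheduler.mode", "FAIR")  // selects FairSchedulableBuilder in initialize()
  // optional: pool definition file; without it, a fairscheduler.xml on the classpath is used if present
  .set("spark.scheduler.allocation.file", "/path/to/fairscheduler.xml")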
Finally, createTaskScheduler() returns the tuple (SparkDeploySchedulerBackend, TaskSchedulerImpl).