I. Creating the TaskScheduler
1) Create TaskSchedulerImpl
2) Create SparkDeploySchedulerBackend
3) Initialize the TaskScheduler via initialize()
   Create the scheduling pool (FIFO and FAIR scheduling modes)
II. Creating the DAGScheduler
III. Starting the TaskScheduler
Call TaskSchedulerImpl.start()
Call SparkDeploySchedulerBackend.start()
1) Create ApplicationDescription
2) Create AppClient
3) Start the AppClient
   Create the ClientActor
   registerWithMaster()
   tryRegisterAllMasters()
4) Register the application with the Master
   actor ! RegisterApplication(appDescription)
I. Creating the TaskScheduler
// TODO Create the TaskScheduler
private[spark] var (schedulerBackend, taskScheduler) =
SparkContext.createTaskScheduler(this, master)
/**
 * TODO Create the matching TaskScheduler based on the master URL specified at submit time
 */
private def createTaskScheduler(
...
// TODO We focus here on Spark standalone mode
case SPARK_REGEX(sparkUrl) =>
// TODO Create the TaskSchedulerImpl
val scheduler = new TaskSchedulerImpl(sc)
val masterUrls = sparkUrl.split(",").map("spark://" + _)
// TODO Create the SparkDeploySchedulerBackend
val backend = new SparkDeploySchedulerBackend(scheduler, sc, masterUrls)
// TODO Call initialize to wire in the backend and build the scheduling pool
scheduler.initialize(backend)
(backend, scheduler)
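For reference, the SPARK_REGEX branch matched above is defined roughly as follows in the Spark 1.x SparkContext (a sketch; the hostnames in the example are hypothetical, and the comma-separated form exists to support HA master lists):

val SPARK_REGEX = """spark://(.*)""".r

// "spark://node1:7077,node2:7077" fully matches, binding sparkUrl to
// "node1:7077,node2:7077"; split(",") then yields one URL per master
"spark://node1:7077,node2:7077" match {
  case SPARK_REGEX(sparkUrl) =>
    println(sparkUrl.split(",").map("spark://" + _).mkString(", "))
  case _ =>
    println("not a standalone master URL")
}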
Next we look at TaskSchedulerImpl, SparkDeploySchedulerBackend, and initialize in detail.
1) Creating TaskSchedulerImpl
/**
 * 1. Underneath, it schedules tasks through a SchedulerBackend, supporting different kinds of clusters (standalone, YARN).
 * 2. It can also run in local mode by using a LocalBackend and setting the isLocal flag to true.
 * 3. It handles common logic, such as deciding the scheduling order across jobs and launching speculative task execution.
 * 4. Clients should first call initialize() and start(), then submit TaskSets through the submitTasks() method.
 */
private[spark] class TaskSchedulerImpl(
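Point 4 of the comment fixes a calling order. A schematic of that life cycle (TaskSchedulerImpl is private[spark], so this only compiles inside Spark itself; sc and backend are as in the createTaskScheduler code above):

val scheduler = new TaskSchedulerImpl(sc) // 1. construct against the SparkContext
scheduler.initialize(backend)             // 2. wire the backend, build the root pool
scheduler.start()                         // 3. delegates to backend.start()
// 4. later, the DAGScheduler submits one TaskSet per stage:
// scheduler.submitTasks(taskSet)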
2) Creating SparkDeploySchedulerBackend
private[spark] class SparkDeploySchedulerBackend(
scheduler: TaskSchedulerImpl,
sc: SparkContext,
masters: Array[String])
extends CoarseGrainedSchedulerBackend(scheduler, sc.env.actorSystem)
with AppClientListener
with Logging {
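The AppClientListener mix-in is how the AppClient reports cluster events back to this backend. In Spark 1.x the trait looks roughly like this (a sketch; consult org.apache.spark.deploy.client.AppClientListener for the exact signatures):

private[spark] trait AppClientListener {
  def connected(appId: String): Unit     // registration with the Master succeeded
  def disconnected(): Unit               // connection lost; may be temporary
  def dead(reason: String): Unit         // unrecoverable failure; give up
  def executorAdded(fullId: String, workerId: String, hostPort: String,
      cores: Int, memory: Int): Unit
  def executorRemoved(fullId: String, message: String, exitStatus: Option[Int]): Unit
}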
3) Initializing the TaskScheduler
def initialize(backend: SchedulerBackend) {
this.backend = backend
// Create the scheduling pool; temporarily set the rootPool name to empty
rootPool = new Pool("", schedulingMode, 0, 0)
schedulableBuilder = {
schedulingMode match {
case SchedulingMode.FIFO =>
new FIFOSchedulableBuilder(rootPool)
case SchedulingMode.FAIR =>
new FairSchedulableBuilder(rootPool, conf)
}
}
schedulableBuilder.buildPools()
}
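The schedulingMode matched on above is read from the spark.scheduler.mode configuration key, defaulting to FIFO. For example, to make initialize build a FairSchedulableBuilder:

import org.apache.spark.SparkConf

// FAIR scheduling; omit the key (or set "FIFO") for the default behavior
val conf = new SparkConf()
  .setAppName("fair-scheduling-demo")
  .set("spark.scheduler.mode", "FAIR")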
II. Creating the DAGScheduler
try {
/**
 * TODO Create the DAGScheduler, which splits each job into stages
 */
dagScheduler = new DAGScheduler(this)
} catch {
case e: Exception => {
try {
stop()
} finally {
throw new SparkException("Error while constructing DAGScheduler", e)
}
}
}
/**
 * The highest-level layer of the stage-oriented scheduling mechanism. It computes a DAG of stages
 * for each job, tracks which RDDs and stage outputs have been materialized (materialized meaning
 * written to disk, memory, etc.), and finds a minimum-cost schedule to run the job. It then
 * submits stages as TaskSets to the underlying TaskSchedulerImpl, which runs the tasks on the
 * cluster.
 *
 * Besides building the stage DAG, it also determines the preferred locations to run each task on,
 * based on the current cache state, and passes them to the underlying TaskSchedulerImpl. It also
 * handles failures caused by lost shuffle output files, in which case old stages may need to be
 * resubmitted. Failures within a stage that are not caused by shuffle file loss are handled by the
 * TaskScheduler itself, which retries each task a number of times and, only if that keeps failing,
 * cancels the whole stage.
 */
private[spark] class DAGScheduler(
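To make "splitting stages" concrete: the DAGScheduler cuts the RDD graph at every shuffle dependency. A word count, for example, becomes two stages, a ShuffleMapStage up to the reduceByKey shuffle and a ResultStage for the final collect (assuming a live SparkContext sc and a local file input.txt):

val counts = sc.textFile("input.txt")
  .flatMap(_.split(" "))
  .map(word => (word, 1))   // narrow dependencies: same stage as textFile
  .reduceByKey(_ + _)       // shuffle dependency: stage boundary here
counts.collect()            // triggers the job; two TaskSets are submitted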
III. Starting the TaskScheduler
// TODO Start the TaskScheduler; this calls TaskSchedulerImpl.start()
taskScheduler.start()
-> TaskSchedulerImpl.start()
override def start() {
// TODO Delegate to SparkDeploySchedulerBackend.start()
backend.start()
}
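The method shown here is abbreviated; the full Spark 1.x start() also launches the speculative-execution checker when spark.speculation is enabled. Roughly (a sketch; SPECULATION_INTERVAL is a class constant, and scala.concurrent.duration._ is imported at the top of the file):

override def start() {
  backend.start()
  // Periodically look for slow tasks worth launching speculative copies of
  if (!isLocal && conf.getBoolean("spark.speculation", false)) {
    logInfo("Starting speculative execution thread")
    import sc.env.actorSystem.dispatcher
    sc.env.actorSystem.scheduler.schedule(SPECULATION_INTERVAL milliseconds,
        SPECULATION_INTERVAL milliseconds) {
      Utils.tryOrExit { checkSpeculatableTasks() }
    }
  }
}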
-> SparkDeploySchedulerBackend.start()
override def start() {
// TODO Call the superclass start() to create the DriverActor
super.start()
// The endpoint for executors to talk to us
// TODO Prepare the launch parameters; they will be wrapped in an object and sent to the Master
val driverUrl = AkkaUtils.address(
AkkaUtils.protocol(actorSystem),
SparkEnv.driverActorSystemName,
conf.get("spark.driver.host"),
conf.get("spark.driver.port"),
CoarseGrainedSchedulerBackend.ACTOR_NAME)
val args = Seq(
"--driver-url", driverUrl,
"--executor-id", "{{EXECUTOR_ID}}",
"--hostname", "{{HOSTNAME}}",
"--cores", "{{CORES}}",
"--app-id", "{{APP_ID}}",
"--worker-url", "{{WORKER_URL}}")
val extraJavaOpts = sc.conf.getOption("spark.executor.extraJavaOptions")
.map(Utils.splitCommandString).getOrElse(Seq.empty)
val classPathEntries = sc.conf.getOption("spark.executor.extraClassPath")
.map(_.split(java.io.File.pathSeparator).toSeq).getOrElse(Nil)
val libraryPathEntries = sc.conf.getOption("spark.executor.extraLibraryPath")
.map(_.split(java.io.File.pathSeparator).toSeq).getOrElse(Nil)
// When testing, expose the parent class path to the child. This is processed by
// compute-classpath.{cmd,sh} and makes all needed jars available to child processes
// when the assembly is built with the "*-provided" profiles enabled.
val testingClassPath =
if (sys.props.contains("spark.testing")) {
sys.props("java.class.path").split(java.io.File.pathSeparator).toSeq
} else {
Nil
}
// Start executors with a few necessary configs for registering with the scheduler
val sparkJavaOpts = Utils.sparkJavaOpts(conf, SparkConf.isExecutorStartupConf)
val javaOpts = sparkJavaOpts ++ extraJavaOpts
/**
 * TODO Important: this names the class that will implement the executor.
 * org.apache.spark.executor.CoarseGrainedExecutorBackend is what the Worker launches to start an Executor.
 */
val command = Command("org.apache.spark.executor.CoarseGrainedExecutorBackend",
args, sc.executorEnvs, classPathEntries ++ testingClassPath, libraryPathEntries, javaOpts)
val appUIAddress = sc.ui.map(_.appUIAddress).getOrElse("")
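The Command built above is just a serializable description of the JVM the Worker should spawn; in Spark 1.x it is approximately the following case class (a sketch of org.apache.spark.deploy.Command). The {{EXECUTOR_ID}}-style placeholders in args are substituted by the Worker when it builds the real launch command:

private[spark] case class Command(
    mainClass: String,                 // here: ...CoarseGrainedExecutorBackend
    arguments: Seq[String],            // the args above, placeholders included
    environment: Map[String, String],  // sc.executorEnvs
    classPathEntries: Seq[String],
    libraryPathEntries: Seq[String],
    javaOpts: Seq[String])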
1) Creating the ApplicationDescription
// Describes the running application, including the maximum number of CPU cores
// it needs and how much memory each slave should provide
// TODO Wrap the parameters into an ApplicationDescription
val appDesc = new ApplicationDescription(sc.appName, maxCores, sc.executorMemory, command,
appUIAddress, sc.eventLogDir, sc.eventLogCodec)
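maxCores here is an Option[Int] taken from the spark.cores.max setting; None means the application claims as many cores as the cluster offers. In SparkDeploySchedulerBackend it is read roughly as:

// Optional cap on the total cores this application may claim
val maxCores = conf.getOption("spark.cores.max").map(_.toInt)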
2) Creating the AppClient
// Pass the ApplicationDescription in through the primary constructor
client = new AppClient(sc.env.actorSystem, masters, appDesc, this, conf)
3) Starting the AppClient
// TODO Inside this method the ClientActor is created to communicate with the Master
client.start()
}
-> Creating the AppClient
/**
 * Handles the application's communication with the Spark cluster.
 * It takes a Spark master URL, an ApplicationDescription, and a listener for
 * cluster events, and invokes the listener's callbacks when those events occur.
 */
private[spark] class AppClient(
-> Starting the AppClient
def start() {
// Just launch an actor; it will call back into the listener.
// TODO Invokes the ClientActor constructor; Akka then calls its preStart() method
actor = actorSystem.actorOf(Props(new ClientActor))
}
new ClientActor -> class ClientActor extends Actor with ActorLogReceive
....
override def preStart() {
context.system.eventStream.subscribe(self, classOf[RemotingLifecycleEvent])
try {
// TODO The ClientActor registers with the Master
registerWithMaster()
} catch {
case e: Exception =>
logWarning("Failed to connect to master", e)
markDisconnected()
context.stop(self)
}
}
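registerWithMaster() is not a single shot. It fires tryRegisterAllMasters() and then schedules a retry timer that keeps re-registering until the registered flag is set (by a RegisteredApplication reply), giving up after a bounded number of attempts. A sketch of the Spark 1.x AppClient logic (timeout and retry constants abbreviated):

def registerWithMaster() {
  tryRegisterAllMasters()
  import context.dispatcher
  var retries = 0
  registrationRetryTimer = Some {
    context.system.scheduler.schedule(REGISTRATION_TIMEOUT, REGISTRATION_TIMEOUT) {
      Utils.tryOrExit {
        retries += 1
        if (registered) {                        // RegisteredApplication arrived
          registrationRetryTimer.foreach(_.cancel())
        } else if (retries >= REGISTRATION_RETRIES) {
          markDead("All masters are unresponsive! Giving up.")
        } else {
          tryRegisterAllMasters()                // try again
        }
      }
    }
  }
}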
-> Registering the application with the Master
registerWithMaster() -> tryRegisterAllMasters() ->
def tryRegisterAllMasters() {
for (masterAkkaUrl <- masterAkkaUrls) {
logInfo("Connecting to master " + masterAkkaUrl + "...")
// TODO Loop over all master addresses and establish a connection to each Master
val actor = context.actorSelection(masterAkkaUrl)
// TODO With a reference to the Master in hand, send it the application-registration request; all parameters are wrapped in appDescription
actor ! RegisterApplication(appDescription)
}
}
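On the receiving side, the active Master handles RegisterApplication by recording the application, replying with RegisteredApplication, and calling schedule() to start allocating executors on workers. A sketch of that handler from the Spark 1.x Master (abbreviated; a STANDBY master simply ignores the message):

case RegisterApplication(description) => {
  if (state == RecoveryState.STANDBY) {
    // Ignore: the client's retry timer will eventually reach the active master
  } else {
    logInfo("Registering app " + description.name)
    val app = createApplication(description, sender)
    registerApplication(app)                 // track the app in master state
    persistenceEngine.addApplication(app)    // survive master failover
    sender ! RegisteredApplication(app.id, masterUrl)
    schedule()                               // launch executors for the new app
  }
}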