4. Spark源码解析之Master实例化流程解析

当脚本启动Master流程走到最后的时候,是解析成命令后返回给spark-class的 exec ${CMD[@]} 真正执行,也就是在这里真正开始初始化Master。

这里的命令是:

java -cp ../jar/* org.apache.spark.deploy.master.Master  --port 7077 --webui-port 8080

 

org.apache.spark.deploy.master.Master

 先进入Master的伴生对象object Master看看。

private[deploy] object Master extends Logging {
  val SYSTEM_NAME = "sparkMaster"
  val ENDPOINT_NAME = "Master"

  // Entry point launched by spark-class with: --port 7077 --webui-port 8080
  def main(argStrings: Array[String]) {
    Thread.setDefaultUncaughtExceptionHandler(new SparkUncaughtExceptionHandler(
      exitOnUncaughtException = false))
    // Initialize daemon logging output.
    Utils.initDaemon(log)

    // Constructing the conf loads Spark's default configuration,
    // i.e. every system property whose key starts with "spark.".
    val conf = new SparkConf

    // Parse the configuration, then start the RPC environment and endpoint.
    // MasterArguments resolves the parameters the RPC layer needs:
    // host, port and webui-port.
    val args = new MasterArguments(argStrings, conf)
    val (rpcEnv, _, _) = startRpcEnvAndEndpoint(args.host, args.port, args.webUiPort, conf)
    rpcEnv.awaitTermination()
  }

  /**
   * Start the Master and return a three tuple of:
   *   (1) The Master RpcEnv
   *   (2) The web UI bound port
   *   (3) The REST server bound port, if any
   */
  // As the scaladoc above says, startRpcEnvAndEndpoint returns the RpcEnv plus
  // the web UI and REST server bound ports. Its job is to initialize the
  // SecurityManager, the RPC environment, and the Master endpoint itself.
  def startRpcEnvAndEndpoint(
      host: String,
      port: Int,
      webUiPort: Int,
      conf: SparkConf): (RpcEnv, Int, Option[Int]) = {
    // Initialize the SecurityManager (authentication / ACLs).
    val securityMgr = new SecurityManager(conf)
    // Initialize the RPC environment.
    val rpcEnv = RpcEnv.create(SYSTEM_NAME, host, port, conf, securityMgr)
    // Register the Master endpoint with the RPC environment.
    // `new Master(...)` here is where the Master instance is constructed.
    val masterEndpoint = rpcEnv.setupEndpoint(ENDPOINT_NAME,
      new Master(rpcEnv, rpcEnv.address, webUiPort, securityMgr, conf))
    // Ask the endpoint for the ports it actually bound (web UI and REST).
    val portsResponse = masterEndpoint.askSync[BoundPortsResponse](BoundPortsRequest)
    (rpcEnv, portsResponse.webUIPort, portsResponse.restPort)
  }
}

SparkConf 

可以看到,加载默认配置的时候,是以spark.匹配,只要是spark.开头的配置都加载了。其实就是之前通过spark-config.sh导入到环境变量中的所有spark配置,主要是spark-defaults.conf文件中的设置,下面大概列举了该文件的配置。

class SparkConf(loadDefaults: Boolean) extends Cloneable with Logging with Serializable {

  import SparkConf._

  /** Create a SparkConf that loads defaults from system properties and the classpath */
  def this() = this(true)

  // Backing store for all configuration key/value pairs.
  private val settings = new ConcurrentHashMap[String, String]()

  // Config reader over `settings`, with environment variables bound as a
  // fallback source for variable expansion.
  @transient private lazy val reader: ConfigReader = {
    val _reader = new ConfigReader(new SparkConfigProvider(settings))
    _reader.bindEnv(new ConfigProvider {
      override def get(key: String): Option[String] = Option(getenv(key))
    })
    _reader
  }

  if (loadDefaults) {
    loadFromSystemProperties(false)
  }

  // Load the system properties exported earlier (e.g. via spark-config.sh),
  // keeping only keys that start with "spark.".
  private[spark] def loadFromSystemProperties(silent: Boolean): SparkConf = {
    // Load any spark.* system properties
    for ((key, value) <- Utils.getSystemProperties if key.startsWith("spark.")) {
      set(key, value, silent)
    }
    this
  }
}

spark-defaults.conf

HA环境下,spark-env.sh中的相关配置也会导入到conf中。

# Example:
# spark.master                     spark://master:7077
# spark.eventLog.enabled           true
# spark.eventLog.dir               hdfs://namenode:8021/directory
# spark.serializer                 org.apache.spark.serializer.KryoSerializer
# spark.driver.memory              5g
# spark.executor.extraJavaOptions  -XX:+PrintGCDetails -Dkey=value -Dnumbers="one two three"
spark.eventLog.enabled  true
spark.eventLog.dir      hdfs://spark/sparklog

MasterArguments

接着Master中,实例化MasterArguments类时传入了spark-class传入的参数,及默认配置conf,将这些配置进行合并。

private[master] class MasterArguments(args: Array[String], conf: SparkConf) extends Logging {
  // Defaults: the local hostname, RPC port 7077 and web UI port 8080.
  var host = Utils.localHostName()
  var port = 7077
  var webUiPort = 8080
  var propertiesFile: String = null

  // Check for settings in environment variables
  // These variables come from spark-env.sh and were exported into the
  // environment by load-spark-env.sh; they are read here.
  // SPARK_MASTER_IP is still honored but deprecated in favor of SPARK_MASTER_HOST.
  if (System.getenv("SPARK_MASTER_IP") != null) {
    logWarning("SPARK_MASTER_IP is deprecated, please use SPARK_MASTER_HOST")
    host = System.getenv("SPARK_MASTER_IP")
  }

  if (System.getenv("SPARK_MASTER_HOST") != null) {
    host = System.getenv("SPARK_MASTER_HOST")
  }

  if (System.getenv("SPARK_MASTER_PORT") != null) {
    port = System.getenv("SPARK_MASTER_PORT").toInt
  }
  if (System.getenv("SPARK_MASTER_WEBUI_PORT") != null) {
    webUiPort = System.getenv("SPARK_MASTER_WEBUI_PORT").toInt
  }

  // Parse the command-line arguments passed by spark-class
  // (e.g. --port 7077 --webui-port 8080), possibly overriding the above.
  parse(args.toList)

  // This mutates the SparkConf, so all accesses to it must be made after this line
  // Merge in the defaults file (or the file given via --properties-file).
  propertiesFile = Utils.loadDefaultSparkProperties(conf, propertiesFile)

  if (conf.contains("spark.master.ui.port")) {
    webUiPort = conf.get("spark.master.ui.port").toInt
  }

  @tailrec
  // Recursively parse the argument list, e.g. --port 7077 --webui-port 8080,
  // using Scala list pattern matching: each case consumes one option (and its
  // value, if any) and recurses on the tail.
  private def parse(args: List[String]): Unit = args match {
    case ("--ip" | "-i") :: value :: tail =>
      Utils.checkHost(value)
      host = value
      parse(tail)

    case ("--host" | "-h") :: value :: tail =>
      Utils.checkHost(value)
      host = value
      parse(tail)

    case ("--port" | "-p") :: IntParam(value) :: tail =>
      port = value
      parse(tail)

    case "--webui-port" :: IntParam(value) :: tail =>
      webUiPort = value
      parse(tail)

    case ("--properties-file") :: value :: tail =>
      propertiesFile = value
      parse(tail)

    case ("--help") :: tail =>
      printUsageAndExit(0)

    case Nil => // No-op

    case _ =>
      printUsageAndExit(1)
  }

  /**
   * Print usage and exit JVM with the given exit code.
   */
  private def printUsageAndExit(exitCode: Int) {
    // scalastyle:off println
    System.err.println(
      "Usage: Master [options]\n" +
      "\n" +
      "Options:\n" +
      "  -i HOST, --ip HOST     Hostname to listen on (deprecated, please use --host or -h) \n" +
      "  -h HOST, --host HOST   Hostname to listen on\n" +
      "  -p PORT, --port PORT   Port to listen on (default: 7077)\n" +
      "  --webui-port PORT      Port for web UI (default: 8080)\n" +
      "  --properties-file FILE Path to a custom Spark properties file.\n" +
      "                         Default is conf/spark-defaults.conf.")
    // scalastyle:on println
    System.exit(exitCode)
  }
}

loadDefaultSparkProperties

加载默认配置,如果没有通过--properties-file指定配置文件,那么这里filePath是null。

循环遍历配置文件中所有以spark.开头的配置属性,合并到conf和系统属性中;path变量保存的是所使用的配置文件路径。

  /**
   * Load default Spark properties from the given file. If no file is provided,
   * use the common defaults file. This mutates state in the given SparkConf and
   * in this JVM's system properties if the config specified in the file is not
   * already set. Return the path of the properties file used.
   */
  def loadDefaultSparkProperties(conf: SparkConf, filePath: String = null): String = {
    // When no file was given via --properties-file, fall back to the default
    // spark-defaults.conf resolved from SPARK_CONF_DIR / SPARK_HOME.
    val path = Option(filePath).getOrElse(getDefaultPropertiesFile())

    // Iterate over the file's properties, keeping only keys that start
    // with "spark.".
    Option(path).foreach { confFile =>
      getPropertiesFromFile(confFile).filter { case (k, v) =>
        k.startsWith("spark.")

        // Only apply a value when it is not already set, so settings coming
        // from the environment / system properties take precedence.
      }.foreach { case (k, v) =>
        conf.setIfMissing(k, v)
        sys.props.getOrElseUpdate(k, v)
      }
    }
    path
  }

 getDefaultPropertiesFile

通过指定的查找规则(优先SPARK_CONF_DIR,其次SPARK_HOME/conf)来定位conf目录下的spark-defaults.conf配置文件。

  /**
   * Return the path of the default Spark properties file, or null when no
   * usable file exists. SPARK_CONF_DIR takes precedence; otherwise the
   * `conf` directory under SPARK_HOME is tried.
   */
  def getDefaultPropertiesFile(env: Map[String, String] = sys.env): String = {
    // Resolve the configuration directory to search.
    val confDir = env.get("SPARK_CONF_DIR")
      .orElse(env.get("SPARK_HOME").map(home => s"$home${File.separator}conf"))
    // Only report the file when it exists as a regular file.
    confDir
      .map(dir => new File(s"$dir${File.separator}spark-defaults.conf"))
      .filter(_.isFile)
      .map(_.getAbsolutePath)
      .orNull
  }

getPropertiesFromFile

就是对加载的配置进行格式化,解析成k/v。

  /**
   * Load properties present in the given file, returning them as a Scala map
   * with each value trimmed. Fails fast when the path is missing or not a
   * regular file; wraps I/O failures in SparkException.
   */
  def getPropertiesFromFile(filename: String): Map[String, String] = {
    val file = new File(filename)
    require(file.exists(), s"Properties file $file does not exist")
    require(file.isFile(), s"Properties file $file is not a normal file")

    // Read as UTF-8 regardless of platform default encoding.
    val reader = new InputStreamReader(new FileInputStream(file), StandardCharsets.UTF_8)
    try {
      val props = new Properties()
      props.load(reader)
      // Keys come back as a Java set; convert and pair each with its trimmed value.
      props.stringPropertyNames().asScala
        .map(name => name -> props.getProperty(name).trim)
        .toMap
    } catch {
      case e: IOException =>
        throw new SparkException(s"Failed when loading Spark properties from $filename", e)
    } finally {
      reader.close()
    }
  }

MasterArguments加载完成,继续上面Main中,startRpcEnvAndEndpoint()。

初始化SecurityManager,RPC,及rest绑定端口。

SecurityManager安全管理器和RPC机制后面另外再做解析。

  // (Repeated from the Master companion object above, to continue the walk-through.)
  def startRpcEnvAndEndpoint(
      host: String,
      port: Int,
      webUiPort: Int,
      conf: SparkConf): (RpcEnv, Int, Option[Int]) = {
    // Initialize the SecurityManager (account and permission management).
    val securityMgr = new SecurityManager(conf)
    // Initialize the RPC environment.
    val rpcEnv = RpcEnv.create(SYSTEM_NAME, host, port, conf, securityMgr)
    val masterEndpoint = rpcEnv.setupEndpoint(ENDPOINT_NAME,
      new Master(rpcEnv, rpcEnv.address, webUiPort, securityMgr, conf))
    // Ask for the bound ports: with defaults, RPC endpoint 7077,
    // web UI 8080, REST server 6066.
    val portsResponse = masterEndpoint.askSync[BoundPortsResponse](BoundPortsRequest)
    (rpcEnv, portsResponse.webUIPort, portsResponse.restPort)
  }
}

继续往下执行,会启动RPC,并在向RPC注册时new Master实例化Master,返回看看class Master到底实例化了些什么。

class Master 

初始化了Master的各种参数,并注册好RPC和Metrics,以及为Worker连接准备好了环境。

private[deploy] class Master(
    override val rpcEnv: RpcEnv,
    address: RpcAddress,
    webUiPort: Int,
    val securityMgr: SecurityManager,
    val conf: SparkConf)
  extends ThreadSafeRpcEndpoint with Logging with LeaderElectable {

  // Single daemon thread used to forward scheduled messages to this endpoint.
  private val forwardMessageThread =
    ThreadUtils.newDaemonSingleThreadScheduledExecutor("master-forward-message-thread")

  // Hadoop configuration derived from the Spark configuration.
  private val hadoopConf = SparkHadoopUtil.get.newConfiguration(conf)

  // For application IDs
  // Date format used when generating application IDs.
  private def createDateFormat = new SimpleDateFormat("yyyyMMddHHmmss", Locale.US)

  // Worker timeout, default 60 seconds.
  private val WORKER_TIMEOUT_MS = conf.getLong("spark.worker.timeout", 60) * 1000
  // Number of completed applications retained for the web UI, default 200.
  private val RETAINED_APPLICATIONS = conf.getInt("spark.deploy.retainedApplications", 200)
  // Number of completed drivers retained, default 200.
  private val RETAINED_DRIVERS = conf.getInt("spark.deploy.retainedDrivers", 200)
  // Number of timeout checks a dead worker is kept around for, default 15.
  private val REAPER_ITERATIONS = conf.getInt("spark.dead.worker.persistence", 15)
  // Master recovery mode (ZOOKEEPER / FILESYSTEM / CUSTOM), default NONE.
  private val RECOVERY_MODE = conf.get("spark.deploy.recoveryMode", "NONE")
  // Maximum executor retries, default 10.
  private val MAX_EXECUTOR_RETRIES = conf.getInt("spark.deploy.maxExecutorRetries", 10)

  // Registered worker nodes.
  val workers = new HashSet[WorkerInfo]
  // Application info keyed by application ID.
  val idToApp = new HashMap[String, ApplicationInfo]
  // Applications waiting in the scheduling queue.
  private val waitingApps = new ArrayBuffer[ApplicationInfo]
  // All known applications.
  val apps = new HashSet[ApplicationInfo]

  // Worker info keyed by worker ID.
  private val idToWorker = new HashMap[String, WorkerInfo]
  // Worker info keyed by the RPC address it registered from.
  private val addressToWorker = new HashMap[RpcAddress, WorkerInfo]

  // Application info keyed by its registered RPC endpoint.
  private val endpointToApp = new HashMap[RpcEndpointRef, ApplicationInfo]
  // Application info keyed by the RPC address it registered from.
  private val addressToApp = new HashMap[RpcAddress, ApplicationInfo]
  // Completed applications.
  private val completedApps = new ArrayBuffer[ApplicationInfo]
  private var nextAppNumber = 0

  // Driver bookkeeping.
  private val drivers = new HashSet[DriverInfo]
  private val completedDrivers = new ArrayBuffer[DriverInfo]
  // Drivers currently spooled for scheduling
  private val waitingDrivers = new ArrayBuffer[DriverInfo]
  private var nextDriverNumber = 0

  // Validate the host we are binding to.
  Utils.checkHost(address.host)
  // assert(host != null && host.indexOf(':') == -1, s"Expected hostname (not IP) but got $host")

  // Metrics system for the master itself.
  private val masterMetricsSystem = MetricsSystem.createMetricsSystem("master", conf, securityMgr)
  // Metrics system for applications.
  private val applicationMetricsSystem = MetricsSystem.createMetricsSystem("applications", conf,
    securityMgr)
  // Metrics source exposing this master's state defined above.
  private val masterSource = new MasterSource(this)

  // After onStart, webUi will be set
  private var webUi: MasterWebUI = null

  // Publicly reachable address of the master; falls back to the bind host
  // when SPARK_PUBLIC_DNS is not set.
  private val masterPublicAddress = {
    val envVar = conf.getenv("SPARK_PUBLIC_DNS")
    if (envVar != null) envVar else address.host
  }

  // The master URL, e.g. spark://<host>:7077.
  private val masterUrl = address.toSparkURL
  private var masterWebUiUrl: String = _

  // Every master starts out in STANDBY; leader election (see receive) moves
  // it to ALIVE or RECOVERING.
  private var state = RecoveryState.STANDBY

  // Persistence engine for recovery state (assigned in onStart).
  private var persistenceEngine: PersistenceEngine = _

  // Leader election agent (assigned in onStart).
  private var leaderElectionAgent: LeaderElectionAgent = _

  // Scheduled task that completes master recovery.
  private var recoveryCompletionTask: ScheduledFuture[_] = _

  // Periodic task that checks for timed-out workers.
  private var checkForWorkerTimeOutTask: ScheduledFuture[_] = _

  // As a temporary workaround before better ways of configuring memory, we allow users to set
  // a flag that will perform round-robin scheduling across the nodes (spreading out each app
  // among all the nodes) instead of trying to consolidate each app onto a small # of nodes.
  private val spreadOutApps = conf.getBoolean("spark.deploy.spreadOut", true)

  // Default maxCores for applications that don't specify it (i.e. pass Int.MaxValue)
  private val defaultCores = conf.getInt("spark.deploy.defaultCores", Int.MaxValue)
  // Whether the web UI acts as a reverse proxy for workers and apps, default false.
  val reverseProxy = conf.getBoolean("spark.ui.reverseProxy", false)
  if (defaultCores < 1) {
    throw new SparkException("spark.deploy.defaultCores must be positive")
  }

  // Alternative application submission gateway that is stable across Spark versions
  // The REST submission server, enabled by default (bound in onStart).
  private val restServerEnabled = conf.getBoolean("spark.master.rest.enabled", true)
  private var restServer: Option[StandaloneRestServer] = None
  private var restServerBoundPort: Option[Int] = None

当new Master到这里就实例化完成,会返回main中继续执行,执行到rpcEnv.awaitTermination()会处于等待Worker连接状态。

继续看看Master继承了ThreadSafeRpcEndpoint、Logging和LeaderElectable。

private[deploy] class Master(
    override val rpcEnv: RpcEnv,
    address: RpcAddress,
    webUiPort: Int,
    val securityMgr: SecurityManager,
    val conf: SparkConf)
  extends ThreadSafeRpcEndpoint with Logging with LeaderElectable {
  ...
  }
}

ThreadSafeRpcEndpoint

ThreadSafeRpcEndpoint是个抽象接口,继承了RpcEndpoint。

// Marker trait refining RpcEndpoint; no additional members appear in this excerpt.
private[spark] trait ThreadSafeRpcEndpoint extends RpcEndpoint

 而RpcEnv则是通过RpcEnvFactory以工厂模式创建的实例。

// Factory abstraction used to create RpcEnv instances from a config.
private[spark] trait RpcEnvFactory {

  def create(config: RpcEnvConfig): RpcEnv
}

RPC的生命周期有onStart、receive、onStop,Master重写了这三个方法,所以Master初始化时也会执行onStart。

onStart

Master的onStart中,主要是将所有信息准备就绪。

  // Called by the RPC framework when the endpoint starts: binds the web UI,
  // schedules worker-timeout checks, starts the REST server and metrics, and
  // sets up the persistence engine plus the leader election agent.
  override def onStart(): Unit = {
    logInfo("Starting Spark master at " + masterUrl)
    logInfo(s"Running Spark version ${org.apache.spark.SPARK_VERSION}")
    // Bind the master web UI to its address and port.
    webUi = new MasterWebUI(this, webUiPort)
    webUi.bind()
    masterWebUiUrl = "http://" + masterPublicAddress + ":" + webUi.boundPort
    if (reverseProxy) {
      masterWebUiUrl = conf.get("spark.ui.reverseProxyUrl", masterWebUiUrl)
      webUi.addProxy()
      logInfo(s"Spark Master is acting as a reverse proxy. Master, Workers and " +
       s"Applications UIs are available at $masterWebUiUrl")
    }
    // Periodically send ourselves CheckForWorkerTimeOut so dead workers are reaped.
    checkForWorkerTimeOutTask = forwardMessageThread.scheduleAtFixedRate(new Runnable {
      override def run(): Unit = Utils.tryLogNonFatalError {
        self.send(CheckForWorkerTimeOut)
      }
    }, 0, WORKER_TIMEOUT_MS, TimeUnit.MILLISECONDS)

    // Start the REST submission server if enabled (default port 6066).
    if (restServerEnabled) {
      val port = conf.getInt("spark.master.rest.port", 6066)
      restServer = Some(new StandaloneRestServer(address.host, port, conf, self, masterUrl))
    }
    restServerBoundPort = restServer.map(_.start())

    // Register and start the metrics systems.
    masterMetricsSystem.registerSource(masterSource)
    masterMetricsSystem.start()
    applicationMetricsSystem.start()
    // Attach the master and app metrics servlet handler to the web ui after the metrics systems are
    // started.
    masterMetricsSystem.getServletHandlers.foreach(webUi.attachHandler)
    applicationMetricsSystem.getServletHandlers.foreach(webUi.attachHandler)

    // Serializer used to persist recovery state.
    val serializer = new JavaSerializer(conf)
    // Pick the persistence engine and election agent according to the
    // recovery mode: ZOOKEEPER, FILESYSTEM, CUSTOM, or the default.
    val (persistenceEngine_, leaderElectionAgent_) = RECOVERY_MODE match {
      case "ZOOKEEPER" =>
        logInfo("Persisting recovery state to ZooKeeper")
        val zkFactory =
          new ZooKeeperRecoveryModeFactory(conf, serializer)
        (zkFactory.createPersistenceEngine(), zkFactory.createLeaderElectionAgent(this))
      case "FILESYSTEM" =>
        val fsFactory =
          new FileSystemRecoveryModeFactory(conf, serializer)
        (fsFactory.createPersistenceEngine(), fsFactory.createLeaderElectionAgent(this))
      case "CUSTOM" =>
        val clazz = Utils.classForName(conf.get("spark.deploy.recoveryMode.factory"))
        val factory = clazz.getConstructor(classOf[SparkConf], classOf[Serializer])
          .newInstance(conf, serializer)
          .asInstanceOf[StandaloneRecoveryModeFactory]
        (factory.createPersistenceEngine(), factory.createLeaderElectionAgent(this))
      case _ =>
        // Default: no persistence, and MonarchyLeaderAgent immediately elects
        // this master as leader.
        (new BlackHolePersistenceEngine(), new MonarchyLeaderAgent(this))
    }
    persistenceEngine = persistenceEngine_
    leaderElectionAgent = leaderElectionAgent_
  }

Logging日志系统相关就不在这里查看了。

LeaderElectable

选主。Master默认使用的是MonarchyLeaderAgent,它是抽象接口LeaderElectionAgent的单机实现,持有一个LeaderElectable(也就是Master本身)。

@DeveloperApi
trait LeaderElectionAgent {
  // The master (or other electable party) this agent manages.
  val masterInstance: LeaderElectable
  def stop() {} // to avoid noops in implementations.
}

// Callback interface implemented by the Master: the agent invokes these
// when leadership is gained or revoked.
@DeveloperApi
trait LeaderElectable {
  def electedLeader(): Unit
  def revokedLeadership(): Unit
}
/** Single-node implementation of LeaderElectionAgent -- we're initially and always the leader. */
// Used in the non-HA case: elects the given master as leader on construction.
private[spark] class MonarchyLeaderAgent(val masterInstance: LeaderElectable)
  extends LeaderElectionAgent {
  masterInstance.electedLeader()
}

 如果Master注册选主,通过重写electedLeader方法,将信息发送,也就是发送master将进行选主的信息,如果是关闭,这里会是发送撤销。

  // Invoked by the leader election agent: tell our own endpoint that this
  // master has been elected leader (handled in receive).
  override def electedLeader() {
    self.send(ElectedLeader)
  }
  // Invoked when leadership is revoked; the receive handler shuts the master down.
  override def revokedLeadership() {
    self.send(RevokedLeadership)
  }

这里是MasterMessages的ElectedLeader

private[master] object MasterMessages {

  // LeaderElectionAgent to Master

  case object ElectedLeader
    
  ...
}

在下面的receive方法中接收信息进行处理,判断选主情况和接受Worker注册。

  // 接受信息判断主从情况
  override def receive: PartialFunction[Any, Unit] = {
    case ElectedLeader =>
      // 如果选主,通过RPC及持久化信息进行判断是否存活
      val (storedApps, storedDrivers, storedWorkers) = persistenceEngine.readPersistedData(rpcEnv)
      state = if (storedApps.isEmpty && storedDrivers.isEmpty && storedWorkers.isEmpty) {
        RecoveryState.ALIVE
      } else {
        RecoveryState.RECOVERING
      }
      logInfo("I have been elected leader! New state: " + state)
      if (state == RecoveryState.RECOVERING) {
        beginRecovery(storedApps, storedDrivers, storedWorkers)
        recoveryCompletionTask = forwardMessageThread.schedule(new Runnable {
          override def run(): Unit = Utils.tryLogNonFatalError {
            self.send(CompleteRecovery)
          }
        }, WORKER_TIMEOUT_MS, TimeUnit.MILLISECONDS)
      }

    // 重新恢复主
    case CompleteRecovery => completeRecovery()

    // 如果主被撤销,提示并退出
    case RevokedLeadership =>
      logError("Leadership has been revoked -- master shutting down.")
      System.exit(0)

    // 如果是注册worker
    // 这里需要worker、master信息和worker配置信息
    case RegisterWorker(
      id, workerHost, workerPort, workerRef, cores, memory, workerWebUiUrl, masterAddress) =>
      logInfo("Registering worker %s:%d with %d cores, %s RAM".format(
        workerHost, workerPort, cores, Utils.megabytesToString(memory)))
      // 判断master存活,进行worker提示或者注册
      if (state == RecoveryState.STANDBY) {
        workerRef.send(MasterInStandby)

        // master存活,根据id判断worker是否已经存在
      } else if (idToWorker.contains(id)) {
        workerRef.send(RegisterWorkerFailed("Duplicate worker ID"))
      } else {
        // master存活,且workerid不重复,进行注册
        val worker = new WorkerInfo(id, workerHost, workerPort, cores, memory,
          workerRef, workerWebUiUrl)
        if (registerWorker(worker)) {
          persistenceEngine.addWorker(worker)
          workerRef.send(RegisteredWorker(self, masterWebUiUrl, masterAddress))
          schedule()
        } else {
          val workerAddress = worker.endpoint.address
          logWarning("Worker registration failed. Attempted to re-register worker at same " +
            "address: " + workerAddress)
          workerRef.send(RegisterWorkerFailed("Attempted to re-register worker at same address: "
            + workerAddress))
        }
      }

到这里Master实例化就结束了,所有内部机制和地址端口都已经准备就绪,可以通过webUI进行访问。

Master的地址是启动脚本的host,端口8080,可以通过 http://host:8080进行访问。

返回main方法中,继续往下执行,也就是执行到rpcEnv.awaitTermination(),开始等待Worker连接。

  • 1
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 打赏
    打赏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

訾零

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值