Scala Basics 18: Communication Between Master and Worker

MessageProtocol.scala

package cn.sheep.spark

// worker --> master
// A worker registers its own information with the master
case class RegisterWorkerInfo(id: String, core: Int, ram: Int)
// A worker sends a heartbeat message to the master
case class HeartBeat(id: String)

// master --> worker
// The master tells the worker that its registration succeeded
case object RegisteredWorkerInfo

// A message the worker sends to itself, telling itself to send heartbeats to the master periodically
case object SendHeartBeat

// A message the master sends to itself: start a scheduler that periodically checks for timed-out workers
case object CheckTimeOutWorker

// A message the master sends to itself: remove the timed-out workers
case object RemoveTimeOutWorker

// Class the master uses to store a worker's information
case class WorkerInfo(id: String, core: Int, ram: Int) {
  // Timestamp (millis) of the last heartbeat received from this worker
  var lastHeartBeatTime: Long = _
}

SparkMaster.scala

package cn.sheep.spark

import akka.actor.{Actor, ActorSystem, Props}
import com.typesafe.config.ConfigFactory
import scala.concurrent.duration._

class SparkMaster extends Actor {
  // Stores the information of the registered workers
  val id2WorkerInfo = collection.mutable.HashMap[String, WorkerInfo]()

  override def receive: Receive = {
    // A worker sends its registration information
    case RegisterWorkerInfo(wkId, core, ram) => {
      // Store the worker's information in the HashMap
      if (!id2WorkerInfo.contains(wkId)) {
        val workerInfo = WorkerInfo(wkId, core, ram)
        // Treat the registration time as the first heartbeat so a freshly
        // registered worker is not removed before it sends its first heartbeat
        workerInfo.lastHeartBeatTime = System.currentTimeMillis()
        id2WorkerInfo += (wkId -> workerInfo)
        // After storing the registration data, tell the worker that registration succeeded
        sender() ! RegisteredWorkerInfo // the worker receives this message
      }
    }
    case HeartBeat(wkId) => {
      // When the master receives a worker's heartbeat, update that worker's last heartbeat time;
      // ignore heartbeats from workers that have already been removed
      id2WorkerInfo.get(wkId).foreach { workerInfo =>
        workerInfo.lastHeartBeatTime = System.currentTimeMillis()
      }
    }
    case CheckTimeOutWorker => {
      import context.dispatcher // the scheduler needs an implicit ExecutionContext
      context.system.scheduler.schedule(0 millis, 6000 millis, self, RemoveTimeOutWorker)
    }
    case RemoveTimeOutWorker => {
      // Go through all values in the HashMap and check whether
      // (current time - last heartbeat time) exceeds 3000 ms
      val currentTime = System.currentTimeMillis()
      val workerInfos = id2WorkerInfo.values

      // Filter out the timed-out workers and remove them
      workerInfos
        .filter(wkInfo => currentTime - wkInfo.lastHeartBeatTime > 3000)
        .foreach(wk => id2WorkerInfo.remove(wk.id))

      println(s"${id2WorkerInfo.size} workers still alive")
    }
  }
}


object SparkMaster{

  def main(args: Array[String]): Unit = {
    // Validate the arguments
    if (args.length != 3) {
      println(
        """
          |Usage: <host> <port> <masterName>
          |""".stripMargin)
      sys.exit() // exit the program
    }
    val host = args(0)
    val port = args(1)
    val masterName = args(2)



    val config = ConfigFactory.parseString(
      s"""
         |akka.actor.provider = "akka.remote.RemoteActorRefProvider"
         |akka.remote.netty.tcp.hostname = $host
         |akka.remote.netty.tcp.port = $port
         |""".stripMargin)
    val masterActorSystem = ActorSystem("sparkMaster", config)
    val masterActorRef = masterActorSystem.actorOf(Props[SparkMaster], masterName)
    masterActorRef ! CheckTimeOutWorker
  }
}

SparkWorker.scala

package cn.sheep.spark

import java.util.UUID

import akka.actor.{Actor, ActorSelection, ActorSystem, Props}
import com.typesafe.config.ConfigFactory
import scala.concurrent.duration._ // time units (millis, seconds, ...) for the scheduler

class SparkWorker(masterURL: String) extends Actor {

  // Selection (proxy) pointing to the remote master actor
  var masterProxy: ActorSelection = _
  // Unique id for this worker
  val workerId = UUID.randomUUID().toString

  override def preStart(): Unit = {
    // Look up the master actor by its URL before this worker starts handling messages
    masterProxy = context.actorSelection(masterURL)
  }

  override def receive: Receive = {

    case "started" => { // this worker is ready
      // The worker registers itself with the master: id, cores, memory
      masterProxy ! RegisterWorkerInfo(workerId, 4, 32 * 1024) // the master receives this
    }
    case RegisteredWorkerInfo => { // registration succeeded
      // Start a timer that periodically sends a heartbeat to the master.
      // Time units such as millis require importing scala.concurrent.duration._
      // (initial delay, interval, receiver, message)
      import context.dispatcher
      context.system.scheduler.schedule(0 millis, 1500 millis, self, SendHeartBeat)
    }
    case SendHeartBeat => {
      // Send a heartbeat to the master
      println(s"------ $workerId sending heartbeat ---------")
      masterProxy ! HeartBeat(workerId)
    }
  }
}

object SparkWorker{
  def main(args: Array[String]): Unit = {

    // Validate the arguments
    if (args.length != 4) {
      println(
        """
          |Usage: <host> <port> <workerName> <masterURL>
          |""".stripMargin)
      sys.exit() // exit the program
    }
    val host = args(0)
    val port = args(1)
    val workerName = args(2)
    val masterURL = args(3)


    val config = ConfigFactory.parseString(
      s"""
        |akka.actor.provider = "akka.remote.RemoteActorRefProvider"
        |akka.remote.netty.tcp.hostname = $host
        |akka.remote.netty.tcp.port = $port
        |""".stripMargin)
    val workerActorSystem = ActorSystem("sparkWorker", config)

    // Create this worker's own ActorRef
    val workerActorRef = workerActorSystem.actorOf(Props(new SparkWorker(masterURL)), workerName)
    workerActorRef ! "started"
  }
}

First start the master, passing in its arguments.
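For example, the master could be started with program arguments like the following (the host, port, and actor name here are placeholder values, not taken from the original post):

127.0.0.1 8888 master01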

Then restart the master and copy the address it exposes.
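With the placeholder values above, that address (the master URL) would have the standard akka.tcp actor-path form akka.tcp://<actorSystemName>@<host>:<port>/user/<actorName>, i.e. roughly:

akka.tcp://sparkMaster@127.0.0.1:8888/user/master01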

Start the worker; it will prompt for the arguments.

 

Fill in the worker's arguments.
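Continuing with the placeholder values, the worker's four program arguments could look like this (the last one is the master URL copied in the previous step):

127.0.0.1 8889 worker01 akka.tcp://sparkMaster@127.0.0.1:8888/user/master01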

Restart the worker.

The output looks like this:
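Given the println statements in the code above, the worker console should print a heartbeat line every 1.5 seconds and the master console should report the number of live workers every 6 seconds, roughly like this (<worker-uuid> stands for the worker's random UUID):

Worker console:
------ <worker-uuid> sending heartbeat ---------
------ <worker-uuid> sending heartbeat ---------

Master console:
1 workers still alive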

You can start another worker as well.

If you try to run a second worker without changing anything, the IDE shows a dialog saying that parallel runs are not allowed.

To fix this (assuming you run from IntelliJ IDEA), edit the worker's Run/Debug Configuration and enable the "Allow parallel run" option, then start the second worker with its own port and worker name.

The result: both workers now send heartbeats to the master independently, and the master reports the number of live workers.
