Spark Underlying Communication
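The code below simulates, with classic Akka actors and Akka remoting, the master/worker registration and heartbeat protocol that underlies Spark's cluster communication. It consists of three files: SparkWorker, SparkMaster and MessageProtocol. The original post gives no build definition; a minimal build.sbt sketch that these imports would need might look like the following (project name and the Scala/Akka versions are assumptions, not part of the original):

name := "spark-akka-demo"                 // hypothetical project name
scalaVersion := "2.11.12"                 // assumed; any Akka-2.5-compatible Scala version works

libraryDependencies ++= Seq(
  "com.typesafe.akka" %% "akka-actor"  % "2.5.32",  // actors and scheduler
  "com.typesafe.akka" %% "akka-remote" % "2.5.32"   // classic remoting (akka.remote.netty.tcp) used below
)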

SparkWorker

package com.zpark.spark

import java.util.UUID

import akka.actor.{Actor, ActorSelection, ActorSystem, Props}
import com.typesafe.config.ConfigFactory
import scala.concurrent.duration._   // brings in the duration DSL (millis, etc.)


class SparkWorker(masterUrl: String) extends Actor{

  // proxy (ActorSelection) pointing at the master actor
  var masterProxy : ActorSelection = _

  val workerId = UUID.randomUUID().toString

  override def preStart(): Unit = {
    masterProxy = context.actorSelection(masterUrl)
  }

  override def receive: Receive = {
    // worker -> master: register this worker's info with the master
    case "started" => {
      // this worker is up and running,
      // so it can register its info (id, cores, memory) with the master
      masterProxy ! RegisterWorkerInfo(workerId, 4, 32*1024)

    }
    case RegisteredWorkerInfo => {
      // the master confirmed the registration,
      // so start a timer that periodically sends a heartbeat to the master
      import context.dispatcher
      context.system.scheduler.schedule(0 millis, 1500 millis, self, SendHeartBet)
    }

    case SendHeartBet => {
      // send a heartbeat to the master
      println(s"-------------$workerId sending heartbeat-----------------")
      masterProxy ! HeartBeat(workerId) // goes to the master
    }
  }
}

object SparkWorker {
  def main(args: Array[String]): Unit = {
    // validate the command-line arguments
    if(args.length != 4) {
      println(
        """
          |Usage: <host> <port> <workerName> <masterUrl>
        """.stripMargin
      )
      sys.exit()
    }

    val host = args(0)
    val port = args(1)
    val workName = args(2)
    val masterURL = args(3)

    val config = ConfigFactory.parseString(s"""
                                 |akka.actor.provider = "akka.remote.RemoteActorRefProvider"
                                 |akka.remote.netty.tcp.hostname = $host
                                 |akka.remote.netty.tcp.port = $port
    """.stripMargin)
    val actorSystem = ActorSystem("sparkworker",config)
    // create the worker actor
    val workerActorRef = actorSystem.actorOf(Props(new SparkWorker(masterURL)), workName)
    // send the worker a "started" message to signal that it is ready
    workerActorRef ! "started"
  }
}
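The worker must be started after the master, with masterUrl pointing at the master actor's remote path; that path follows from the master's ActorSystem name ("sparkMaster"), its host and port, and the actor name passed on the command line. A hypothetical local invocation (host, ports and names are chosen only for illustration):

// assumes a master already listening at 127.0.0.1:10000 under the actor name "master"
SparkWorker.main(Array("127.0.0.1", "10001", "worker-1",
  "akka.tcp://sparkMaster@127.0.0.1:10000/user/master"))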

SparkMaster

package com.zpark.spark

import akka.actor.{Actor, ActorSystem, Props}
import com.typesafe.config.ConfigFactory
import scala.concurrent.duration._

class SparkMaster extends Actor{
  // registered workers, keyed by workerId
  val id2WorkerInfo = collection.mutable.HashMap[String, WorkerInfo]()
  override def receive: Receive = {
    // a worker is registering its info
    case RegisterWorkerInfo(wkId, core, ram) => {
      // store the worker's info in the map if it is not registered yet
      if(!id2WorkerInfo.contains(wkId)) {
        val workerInfo = new WorkerInfo(wkId, core, ram)
        id2WorkerInfo += ((wkId, workerInfo))

        // after storing the registration data, tell the worker it registered successfully
        sender() ! RegisteredWorkerInfo  // reply to the registering worker
      }

    }

    case HeartBeat(wkId) => {
      // on a heartbeat, refresh the worker's last-heartbeat timestamp;
      // ignore heartbeats from workers that have already been removed as timed out
      if (id2WorkerInfo.contains(wkId)) {
        val workerInfo = id2WorkerInfo(wkId)
        workerInfo.lastHeatBeatTime = System.currentTimeMillis()
      }


    }

    case CheckTimeOutWorker => {
      // the scheduler needs an implicit ExecutionContext, which importing the dispatcher provides
      import context.dispatcher
      context.system.scheduler.schedule(0 millis, 6000 millis, self, RemoveTimeOutWorker)
    }

    case RemoveTimeOutWorker => {
      // walk the registered workers and compare the current time with each worker's last heartbeat
      val workerInfos = id2WorkerInfo.values
      val currentTime = System.currentTimeMillis()
      // remove the workers whose last heartbeat is older than the timeout
      workerInfos.filter(wkInfo => currentTime - wkInfo.lastHeatBeatTime >= 3000).foreach(wk => id2WorkerInfo.remove(wk.id))
      println(s"${id2WorkerInfo.size} workers still alive")
    }
  }
}


// companion object
object SparkMaster {
  def main(args: Array[String]): Unit = {
    // validate the command-line arguments
    if(args.length != 3) {
      println(
        """
          |Usage: <host> <port> <masterName>
        """.stripMargin
      )
      sys.exit()
    }

    val host = args(0)
    val port = args(1)
    val masterName = args(2)

    val config = ConfigFactory.parseString(s"""
                                              |akka.actor.provider = "akka.remote.RemoteActorRefProvider"
                                              |akka.remote.netty.tcp.hostname = $host
                                              |akka.remote.netty.tcp.port = $port
    """.stripMargin)
    val actorSystem = ActorSystem("sparkMaster",config)
    // create the master actor
    val masterActorRef = actorSystem.actorOf(Props[SparkMaster], masterName)
    // tell the master to start the scheduler that periodically checks for timed-out workers
    masterActorRef ! CheckTimeOutWorker
  }
}
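A hypothetical local invocation matching the <host> <port> <masterName> arguments above; with these values the master actor is reachable at akka.tcp://sparkMaster@127.0.0.1:10000/user/master, which is exactly the masterUrl the worker expects (values are illustrative only):

SparkMaster.main(Array("127.0.0.1", "10000", "master"))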

MessageProtocol

package com.zpark.spark

/**
  * worker -> master: a worker registers its own info with the master
  * @param id   worker id
  * @param core number of cores
  * @param ram  amount of memory
  */
case class RegisterWorkerInfo(id: String, core: Int, ram: Int)

// holds a registered worker's info on the master side; a plain class (not a case class)
// because lastHeatBeatTime is mutable state
class WorkerInfo(val id: String, core: Int, ram: Int) {
  var lastHeatBeatTime: Long = _
}

/**
  * worker -> worker: scheduled message that triggers a periodic heartbeat to the master
  */
case object SendHeartBet

/**
  * master -> worker: the master tells the worker that registration succeeded
  */
case object RegisteredWorkerInfo

/**
  * worker -> master: heartbeat message
  * @param id worker id
  */
case class HeartBeat(id: String)

// master -> master: start a scheduler that periodically checks for and removes timed-out workers
case object CheckTimeOutWorker

// master -> master: remove the workers whose heartbeat has timed out
case object RemoveTimeOutWorker
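Putting the protocol together: the worker registers once with RegisterWorkerInfo, the master stores the info and replies with RegisteredWorkerInfo, the worker then sends itself SendHeartBet every 1500 ms and forwards a HeartBeat(workerId) to the master, while the master, triggered by CheckTimeOutWorker, sends itself RemoveTimeOutWorker every 6000 ms and drops any worker whose last heartbeat is more than 3000 ms old.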

 
