Lesson 10: Spark Streaming Source Code Analysis — A Thorough Study of the Full Lifecycle of Continuous Stream Data Reception

When writing a big data application, what matters most is performance, together with architectural design and stability.

 

Source code

ReceiverTracker.scala
start

/** Start the endpoint and receiver execution thread. */
def start(): Unit = synchronized {
  if (isTrackerStarted) {
    throw new SparkException("ReceiverTracker already started")
  }

  if (!receiverInputStreams.isEmpty) {
    endpoint = ssc.env.rpcEnv.setupEndpoint(
      "ReceiverTracker", new ReceiverTrackerEndpoint(ssc.env.rpcEnv))
    if (!skipReceiverLaunch) launchReceivers()
    logInfo("ReceiverTracker started")
    trackerState = Started
  }
}

// This not being null means the tracker has been started and not stopped
private var endpoint: RpcEndpointRef = null

/** RpcEndpoint to receive messages from the receivers. */
private class ReceiverTrackerEndpoint(override val rpcEnv: RpcEnv) extends ThreadSafeRpcEndpoint {
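
For reference, ReceiverTracker.start() is not called by application code directly: StreamingContext.start() starts the JobScheduler, which in turn starts the ReceiverTracker shown above. A minimal driver program that ends up triggering this path could look like the sketch below (app name, host and port are placeholders):

import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}

object ReceiverDemo {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("ReceiverDemo").setMaster("local[2]")
    // Batch interval of 5 seconds
    val ssc = new StreamingContext(conf, Seconds(5))

    // socketTextStream creates a ReceiverInputDStream backed by a socket receiver
    val lines = ssc.socketTextStream("localhost", 9999)
    lines.flatMap(_.split(" ")).map((_, 1)).reduceByKey(_ + _).print()

    // start() starts the JobScheduler, which starts the ReceiverTracker
    // and launches the receivers as shown in the source above
    ssc.start()
    ssc.awaitTermination()
  }
}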

launchReceivers

/**
 * Get the receivers from the ReceiverInputDStreams, distributes them to the
 * worker nodes as a parallel collection, and runs them.
 */
private def launchReceivers(): Unit = {
  val receivers = receiverInputStreams.map(nis => {
    val rcvr = nis.getReceiver()
    rcvr.setReceiverId(nis.id)
    rcvr
  })

  runDummySparkJob()

  logInfo("Starting " + receivers.length + " receivers")
  endpoint.send(StartAllReceivers(receivers))
}
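
launchReceivers obtains one Receiver from every ReceiverInputDStream via getReceiver(), then sends all of them to the endpoint as a StartAllReceivers message. For context, the sketch below shows what such a receiver looks like from the application side; MySocketReceiver is an illustrative custom receiver, not part of the Spark source:

import java.io.{BufferedReader, InputStreamReader}
import java.net.Socket
import java.nio.charset.StandardCharsets

import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.receiver.Receiver

// Illustrative custom receiver: reads lines from a socket and hands them to Spark via store()
class MySocketReceiver(host: String, port: Int)
  extends Receiver[String](StorageLevel.MEMORY_AND_DISK_2) {

  def onStart(): Unit = {
    // Start a thread so that onStart() returns immediately, as the Receiver contract requires
    new Thread("Socket Receiver") {
      override def run(): Unit = receive()
    }.start()
  }

  def onStop(): Unit = { /* the receiving thread stops itself once isStopped() returns true */ }

  private def receive(): Unit = {
    try {
      val socket = new Socket(host, port)
      val reader = new BufferedReader(
        new InputStreamReader(socket.getInputStream, StandardCharsets.UTF_8))
      var line = reader.readLine()
      while (!isStopped() && line != null) {
        store(line)            // store() pushes the data to the ReceiverSupervisor
        line = reader.readLine()
      }
      reader.close()
      socket.close()
      restart("Trying to connect again")
    } catch {
      case e: Throwable => restart("Error receiving data", e)
    }
  }
}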

receive

override def receive: PartialFunction[Any, Unit] = {
  // Local messages
  case StartAllReceivers(receivers) =>
    val scheduledLocations = schedulingPolicy.scheduleReceivers(receivers, getExecutors)
    for (receiver <- receivers) {
      val executors = scheduledLocations(receiver.streamId)
      updateReceiverScheduledExecutors(receiver.streamId, executors)
      receiverPreferredLocations(receiver.streamId) = receiver.preferredLocation
      startReceiver(receiver, executors)
    }
  case RestartReceiver(receiver) =>
    // Old scheduled executors minus the ones that are not active any more
    val oldScheduledExecutors = getStoredScheduledExecutors(receiver.streamId)
    val scheduledLocations = if (oldScheduledExecutors.nonEmpty) {
        // Try global scheduling again
        oldScheduledExecutors
      } else {
        val oldReceiverInfo = receiverTrackingInfos(receiver.streamId)
        // Clear "scheduledLocations" to indicate we are going to do local scheduling
        val newReceiverInfo = oldReceiverInfo.copy(
          state = ReceiverState.INACTIVE, scheduledLocations = None)
        receiverTrackingInfos(receiver.streamId) = newReceiverInfo
        schedulingPolicy.rescheduleReceiver(
          receiver.streamId,
          receiver.preferredLocation,
          receiverTrackingInfos,
          getExecutors)
      }
    // Assume there is one receiver restarting at one time, so we don't need to update
    // receiverTrackingInfos
    startReceiver(receiver, scheduledLocations)
  case c: CleanupOldBlocks =>
    receiverTrackingInfos.values.flatMap(_.endpoint).foreach(_.send(c))
  case UpdateReceiverRateLimit(streamUID, newRate) =>
    for (info <- receiverTrackingInfos.get(streamUID); eP <- info.endpoint) {
      eP.send(UpdateRateLimit(newRate))
    }
  // Remote messages
  case ReportError(streamId, message, error) =>
    reportError(streamId, message, error)
}
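
Note that StartAllReceivers records each receiver's preferredLocation into receiverPreferredLocations, and the scheduling policy takes that hint into account when picking executors. A receiver can declare such a preference by overriding preferredLocation; the sketch below is illustrative only, and the host name is a placeholder:

import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.receiver.Receiver

// Illustrative only: a receiver that asks to be scheduled on a specific host
class PinnedReceiver extends Receiver[String](StorageLevel.MEMORY_AND_DISK_2) {
  // Placeholder host name; in a real cluster it must match an executor's host
  override def preferredLocation: Option[String] = Some("worker-node-1")

  def onStart(): Unit = { /* start a background thread and call store() on received data */ }
  def onStop(): Unit = { }
}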

startReceiver

/**
 * Start a receiver along with its scheduled executors
 */
private def startReceiver(
    receiver: Receiver[_],
    scheduledLocations: Seq[TaskLocation]): Unit = {
  def shouldStartReceiver: Boolean = {
    // It's okay to start when trackerState is Initialized or Started
    !(isTrackerStopping || isTrackerStopped)
  }

  val receiverId = receiver.streamId
  if (!shouldStartReceiver) {
    onReceiverJobFinish(receiverId)
    return
  }

  val checkpointDirOption = Option(ssc.checkpointDir)
  val serializableHadoopConf =
    new SerializableConfiguration(ssc.sparkContext.hadoopConfiguration)

  // Function to start the receiver on the worker node
  val startReceiverFunc: Iterator[Receiver[_]] => Unit =
    (iterator: Iterator[Receiver[_]]) => {
      if (!iterator.hasNext) {
        throw new SparkException(
          "Could not start receiver as object not found.")
      }
      if (TaskContext.get().attemptNumber() == 0) {
        val receiver = iterator.next()
        assert(iterator.hasNext == false)
        val supervisor = new ReceiverSupervisorImpl(
          receiver, SparkEnv.get, serializableHadoopConf.value, checkpointDirOption)
        supervisor.start()
        supervisor.awaitTermination()
      } else {
        // It's restarted by TaskScheduler, but we want to reschedule it again. So exit it.
      }
    }

  // Create the RDD using the scheduledLocations to run the receiver in a Spark job
  val receiverRDD: RDD[Receiver[_]] =
    if (scheduledLocations.isEmpty) {
      ssc.sc.makeRDD(Seq(receiver), 1)
    } else {
      val preferredLocations = scheduledLocations.map(_.toString).distinct
      ssc.sc.makeRDD(Seq(receiver -> preferredLocations))
    }
  receiverRDD.setName(s"Receiver $receiverId")
  ssc.sparkContext.setJobDescription(s"Streaming job running receiver $receiverId")
  ssc.sparkContext.setCallSite(Option(ssc.getStartSite()).getOrElse(Utils.getCallSite()))

  val future = ssc.sparkContext.submitJob[Receiver[_], Unit, Unit](
    receiverRDD, startReceiverFunc, Seq(0), (_, _) => Unit, ())
  // We will keep restarting the receiver job until ReceiverTracker is stopped
  future.onComplete {
    case Success(_) =>
      if (!shouldStartReceiver) {
        onReceiverJobFinish(receiverId)
      } else {
        logInfo(s"Restarting Receiver $receiverId")
        self.send(RestartReceiver(receiver))
      }
    case Failure(e) =>
      if (!shouldStartReceiver) {
        onReceiverJobFinish(receiverId)
      } else {
        logError("Receiver has been stopped. Try to restart it.", e)
        logInfo(s"Restarting Receiver $receiverId")
        self.send(RestartReceiver(receiver))
      }
  }(submitJobThreadPool)
  logInfo(s"Receiver ${receiver.streamId} started")
}
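
The key idea in startReceiver is that the receiver itself is wrapped in a single-element RDD whose preferred locations are the scheduled executors, so launching a receiver is just submitting an ordinary Spark job for that one partition (Seq(0) in submitJob). The hedged sketch below demonstrates the same makeRDD overload with location preferences on plain data, assuming sc is an existing SparkContext; the host names are placeholders:

// makeRDD(Seq(element -> preferredHosts)) creates one partition per element and records
// the given hosts as that partition's preferred locations -- the same overload startReceiver
// uses to pin the receiver task to its scheduled executors.
val rdd = sc.makeRDD(Seq(
  "element-0" -> Seq("worker-node-1"),   // placeholder host names
  "element-1" -> Seq("worker-node-2")))

println(rdd.preferredLocations(rdd.partitions(0)))   // List(worker-node-1)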

ReceiverSupervisorImpl

// Constructing ReceiverSupervisorImpl

/** Remote RpcEndpointRef for the ReceiverTracker */
private val trackerEndpoint = RpcUtils.makeDriverRef("ReceiverTracker", env.conf, env.rpcEnv)

/** RpcEndpointRef for receiving messages from the ReceiverTracker in the driver */
private val endpoint = env.rpcEnv.setupEndpoint(
  "Receiver-" + streamId + "-" + System.currentTimeMillis(), new ThreadSafeRpcEndpoint {

On the driver side, ReceiverTracker is itself the RPC message endpoint (via its ReceiverTrackerEndpoint):

endpoint = ssc.env.rpcEnv.setupEndpoint(
  "ReceiverTracker", new ReceiverTrackerEndpoint(ssc.env.rpcEnv))

On the executor side, ReceiverSupervisorImpl builds its own RPC message endpoint for messages coming from the driver:

/** RpcEndpointRef for receiving messages from the ReceiverTracker in the driver */
private val endpoint = env.rpcEnv.setupEndpoint(
  "Receiver-" + streamId + "-" + System.currentTimeMillis(), new ThreadSafeRpcEndpoint {

receive

override def receive: PartialFunction[Any, Unit] = {
  case StopReceiver =>
    logInfo("Received stop signal")
    ReceiverSupervisorImpl.this.stop("Stopped by driver", None)
  case CleanupOldBlocks(threshTime) =>
    logDebug("Received delete old batch signal")
    cleanupOldBlocks(threshTime)
  case UpdateRateLimit(eps) =>
    logInfo(s"Received a new rate limit: $eps.")
    registeredBlockGenerators.foreach { bg =>
      bg.updateRate(eps)
    }
}
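
StopReceiver is what the driver sends when the streaming application shuts down: the ReceiverTracker asks every ReceiverSupervisorImpl to stop its receiver. From the application side this path is reached through StreamingContext.stop; a minimal sketch, assuming ssc is a running StreamingContext:

// Graceful shutdown: stop the receivers first (each supervisor gets StopReceiver),
// let the data already received be processed, then stop the underlying SparkContext.
ssc.stop(stopSparkContext = true, stopGracefully = true)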

UpdateRateLimit dynamically adjusts the rate at which the receiver ingests data:

private[receiver] def updateRate(newRate: Long): Unit =
  if (newRate > 0) {
    if (maxRateLimit > 0) {
      rateLimiter.setRate(newRate.min(maxRateLimit))
    } else {
      rateLimiter.setRate(newRate)
    }
  }
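
The maxRateLimit checked in updateRate comes from static configuration, while the newRate values arrive dynamically from the driver's backpressure mechanism via UpdateReceiverRateLimit / UpdateRateLimit. A hedged sketch of the relevant settings on the application side:

import org.apache.spark.SparkConf

val conf = new SparkConf()
  // Static per-receiver ceiling (records per second); this is the source of maxRateLimit above
  .set("spark.streaming.receiver.maxRate", "10000")
  // Enable backpressure so the driver recomputes the rate after each batch and the
  // ReceiverTracker pushes the new value down to the receivers
  .set("spark.streaming.backpressure.enabled", "true")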
