10. BlockManager Source Code Analysis

Let's start with an architecture diagram!



The BlockManagerWorker shown above only exists in versions before 1.3; starting with 1.3, BlockManagerWorker was replaced by BlockTransferService.
 
In fact, most of BlockManagerMaster's work is delegated to BlockManagerMasterActor:
   
   
    /**
     * BlockManagerMasterActor is an actor on the master node to track statuses of
     * all slaves' block managers.
     *
     * In other words, BlockManagerMasterActor maintains the metadata of every executor's
     * BlockManager: BlockManagerInfo and BlockStatus.
     */
    private[spark]
    class BlockManagerMasterActor(val isLocal: Boolean, conf: SparkConf, listenerBus: LiveListenerBus)
      extends Actor with ActorLogReceive with Logging {

This class contains an inner class, BlockManagerInfo, which holds the information of each BlockManager and applies block status updates and removals:
   
   
    /**
     * The BlockManagerInfo kept for each BlockManager
     */
    private[spark] class BlockManagerInfo(
        val blockManagerId: BlockManagerId,
        timeMs: Long,
        val maxMem: Long,
        val slaveActor: ActorRef)
      extends Logging {

      private var _lastSeenMs: Long = timeMs
      private var _remainingMem: Long = maxMem

      // Mapping from block id to its status.
      private val _blocks = new JHashMap[BlockId, BlockStatus]

      def getStatus(blockId: BlockId) = Option(_blocks.get(blockId))

      def updateLastSeenMs() {
        _lastSeenMs = System.currentTimeMillis()
      }

      def updateBlockInfo(
          blockId: BlockId,
          storageLevel: StorageLevel,
          memSize: Long,
          diskSize: Long,
          tachyonSize: Long) {

        updateLastSeenMs()

        // Check whether this block is already tracked
        if (_blocks.containsKey(blockId)) {
          // The block exists on the slave already.
          val blockStatus: BlockStatus = _blocks.get(blockId)
          val originalLevel: StorageLevel = blockStatus.storageLevel
          val originalMemSize: Long = blockStatus.memSize

          // If the original storage level used memory, add its memory size back to the remaining memory
          if (originalLevel.useMemory) {
            _remainingMem += originalMemSize
          }
        }

        // Create a BlockStatus for the block and account for memory according to its persistence level
        if (storageLevel.isValid) {
          /* isValid means it is either stored in-memory, on-disk or on-Tachyon.
           * The memSize here indicates the data size in or dropped from memory,
           * tachyonSize here indicates the data size in or dropped from Tachyon,
           * and the diskSize here indicates the data size in or dropped to disk.
           * They can be both larger than 0, when a block is dropped from memory to disk.
           * Therefore, a safe way to set BlockStatus is to set its info in accurate modes. */
          if (storageLevel.useMemory) {
            _blocks.put(blockId, BlockStatus(storageLevel, memSize, 0, 0))
            _remainingMem -= memSize
            logInfo("Added %s in memory on %s (size: %s, free: %s)".format(
              blockId, blockManagerId.hostPort, Utils.bytesToString(memSize),
              Utils.bytesToString(_remainingMem)))
          }
          if (storageLevel.useDisk) {
            _blocks.put(blockId, BlockStatus(storageLevel, 0, diskSize, 0))
            logInfo("Added %s on disk on %s (size: %s)".format(
              blockId, blockManagerId.hostPort, Utils.bytesToString(diskSize)))
          }
          if (storageLevel.useOffHeap) {
            _blocks.put(blockId, BlockStatus(storageLevel, 0, 0, tachyonSize))
            logInfo("Added %s on tachyon on %s (size: %s)".format(
              blockId, blockManagerId.hostPort, Utils.bytesToString(tachyonSize)))
          }
        // If the StorageLevel is not valid and this blockId was tracked before, drop it
        } else if (_blocks.containsKey(blockId)) {
          // If isValid is not true, drop the block.
          val blockStatus: BlockStatus = _blocks.get(blockId)
          _blocks.remove(blockId)
          if (blockStatus.storageLevel.useMemory) {
            logInfo("Removed %s on %s in memory (size: %s, free: %s)".format(
              blockId, blockManagerId.hostPort, Utils.bytesToString(blockStatus.memSize),
              Utils.bytesToString(_remainingMem)))
          }
          if (blockStatus.storageLevel.useDisk) {
            logInfo("Removed %s on %s on disk (size: %s)".format(
              blockId, blockManagerId.hostPort, Utils.bytesToString(blockStatus.diskSize)))
          }
          if (blockStatus.storageLevel.useOffHeap) {
            logInfo("Removed %s on %s on tachyon (size: %s)".format(
              blockId, blockManagerId.hostPort, Utils.bytesToString(blockStatus.tachyonSize)))
          }
        }
      }
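The branches above key off the StorageLevel flags (useMemory, useDisk, useOffHeap, deserialized, replication). As a quick reference, here is a small sketch (assuming a Spark 1.x dependency on the classpath) that prints how a few common levels map onto those flags:

    import org.apache.spark.storage.StorageLevel

    // Prints the flags that updateBlockInfo above branches on, for a few common levels.
    object StorageLevelFlags {
      def main(args: Array[String]): Unit = {
        val levels = Seq(
          "MEMORY_ONLY"         -> StorageLevel.MEMORY_ONLY,
          "MEMORY_AND_DISK_SER" -> StorageLevel.MEMORY_AND_DISK_SER,
          "DISK_ONLY"           -> StorageLevel.DISK_ONLY,
          "OFF_HEAP"            -> StorageLevel.OFF_HEAP)
        levels.foreach { case (name, level) =>
          println(s"$name: useMemory=${level.useMemory}, useDisk=${level.useDisk}, " +
            s"useOffHeap=${level.useOffHeap}, deserialized=${level.deserialized}, " +
            s"replication=${level.replication}")
        }
      }
    }

MEMORY_AND_DISK_SER, for example, sets both useMemory and useDisk with deserialized = false, which is why a single block update can carry both a memSize and a diskSize.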

When each BlockManager starts up, it sends a "RegisterBlockManager" message to BlockManagerMasterActor to register itself. The code BlockManagerMasterActor runs when it receives that message is as follows:
   
   
    /**
     * Handles the messages received by this actor
     */
    override def receiveWithLogging = {
      /**
       * First, BlockManagerMaster needs to receive the registration messages
       * sent by the BlockManagers on other nodes
       */
      case RegisterBlockManager(blockManagerId, maxMemSize, slaveActor) =>
        // Call the registration method
        register(blockManagerId, maxMemSize, slaveActor)
        // Reply that registration succeeded
        sender ! true

      /**
       * A block's information has changed
       */
      case UpdateBlockInfo(
        blockManagerId, blockId, storageLevel, deserializedSize, size, tachyonSize) =>
        sender ! updateBlockInfo(
          blockManagerId, blockId, storageLevel, deserializedSize, size, tachyonSize)

This calls the register method (the block-status-update message will be analyzed further below). Its source is as follows:
    
    
    /**
     * Registers a BlockManager
     */
    private def register(id: BlockManagerId, maxMemSize: Long, slaveActor: ActorRef) {
      val time = System.currentTimeMillis()
      // Only proceed with the registration if this BlockManagerId has never been registered before
      if (!blockManagerInfo.contains(id)) {
        // Look up the BlockManagerId previously recorded for this BlockManager's executor.
        // This is a safety check: if there is no entry yet, the executorId -> BlockManagerId
        // mapping is simply added below; if blockManagerIdByExecutor already has one, clean it up first.
        blockManagerIdByExecutor.get(id.executorId) match {
          case Some(oldId) =>
            // A block manager of the same executor already exists, so remove it (assumed dead)
            logError("Got two different block manager registrations on same executor - "
              + s" will replace old one $oldId with new one $id")
            // Remove the blockManagerInfo related to this executorId from memory
            removeExecutor(id.executorId)
          case None =>
        }
        logInfo("Registering block manager %s with %s RAM, %s".format(
          id.hostPort, Utils.bytesToString(maxMemSize), id))
        // Save a mapping from executorId to blockManagerId in the blockManagerIdByExecutor map
        blockManagerIdByExecutor(id.executorId) = id
        // Create a BlockManagerInfo for this BlockManagerId and save a
        // blockManagerId -> blockManagerInfo mapping in the blockManagerInfo map.
        // At this point the BlockManager registration is complete.
        blockManagerInfo(id) = new BlockManagerInfo(
          id, System.currentTimeMillis(), maxMemSize, slaveActor)
      }
      listenerBus.post(SparkListenerBlockManagerAdded(time, id, maxMemSize))
    }
So registering a BlockManager simply means adding the BlockManagerId, and the ExecutorId associated with that BlockManager, to blockManagerInfo and blockManagerIdByExecutor. The following three member variables of BlockManagerMasterActor hold this information:
    
    
    // This map records the mapping from BlockManagerId to BlockManagerInfo.
    // BlockManagerMaster is responsible for maintaining a BlockManagerInfo for every BlockManager,
    // and in Spark the smallest unit of managed data is the block, just like in Hadoop.
    private val blockManagerInfo = new mutable.HashMap[BlockManagerId, BlockManagerInfo]

    // Mapping from executor ID to block manager ID.
    private val blockManagerIdByExecutor = new mutable.HashMap[String, BlockManagerId]

    // Mapping from block id to the set of block managers that have the block.
    private val blockLocations = new JHashMap[BlockId, mutable.HashSet[BlockManagerId]]
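The register method above calls removeExecutor when it finds a stale registration for the same executor. That method is not reproduced in this article; the following is only a rough sketch, written as if it lived inside BlockManagerMasterActor, of the cleanup one would expect against these three maps (the `blocks` accessor and the exact steps are assumptions, not the real source):

    // Sketch only: drop the stale BlockManager of an executor from the master-side bookkeeping.
    private def removeExecutorSketch(execId: String): Unit = {
      blockManagerIdByExecutor.get(execId).foreach { bmId =>
        blockManagerIdByExecutor -= execId
        // Forget the BlockManagerInfo itself
        blockManagerInfo.remove(bmId).foreach { info =>
          // Remove this BlockManager from the location set of every block it held
          // (assumes a `blocks` accessor for the _blocks map shown earlier)
          import scala.collection.JavaConversions._
          info.blocks.keySet.foreach { blockId =>
            val locations = blockLocations.get(blockId)
            if (locations != null) {
              locations -= bmId
              if (locations.isEmpty) blockLocations.remove(blockId)
            }
          }
        }
      }
    }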


Next let's analyze the BlockManager source itself. A BlockManager runs on every node, driver and executors alike, and provides the ability to store and fetch data locally or remotely, supporting memory, disk and off-heap storage. The class has a member variable that caches the info of every block:
    
    
    // Each BlockManager also maintains an in-memory map holding its blocks,
    // a mapping from blockId to BlockInfo.
    // A BlockInfo does not wrap the block's data itself; its most important role
    // is to act as the synchronization monitor when multiple threads access the same block.
    private val blockInfo = new TimeStampedHashMap[BlockId, BlockInfo]
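BlockInfo's role as a monitor shows up later in doGetLocal (info.waitForReady()) and doPut (putBlockInfo.markReady(size)). The real class is not reproduced in this article; the following is a simplified sketch, under the assumption that it follows the usual wait/notify pattern, of how a reader can block until the writer finishes:

    import org.apache.spark.storage.StorageLevel

    // Simplified sketch of the BlockInfo synchronization pattern (method names match the
    // calls seen later in doGetLocal/doPut; the fields and details are assumptions).
    class BlockInfoSketch(val level: StorageLevel, val tellMaster: Boolean) {
      @volatile private var ready = false   // becomes true once the block is fully written
      @volatile private var failed = false  // becomes true if the write failed
      @volatile var size: Long = -1L

      // Writer side: publish the final size and wake up any waiting readers.
      def markReady(sizeInBytes: Long): Unit = synchronized {
        size = sizeInBytes
        ready = true
        notifyAll()
      }

      def markFailure(): Unit = synchronized {
        failed = true
        notifyAll()
      }

      // Reader side: wait until the writer calls markReady or markFailure.
      // Returns true if the block was written successfully.
      def waitForReady(): Boolean = synchronized {
        while (!ready && !failed) { wait() }
        !failed
      }
    }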

BlockManager's initialize method is called first to initialize a few components:
    
    
    def initialize(appId: String): Unit = {
      // Before 1.3 BlockManager communicated through BlockManagerWorker; since 1.3 it uses
      // BlockTransferService instead, so first initialize the BlockTransferService used for
      // block data transfer
      blockTransferService.init(this)
      shuffleClient.init(appId)

      // Create a unique BlockManagerId for this BlockManager, built from the executorId
      // (every BlockManager is associated with an executor) plus the BlockTransferService's
      // hostname and port.
      // From the way this BlockManagerId is built you can see that a BlockManager is uniquely
      // identified by the executor it belongs to on a node.
      blockManagerId = BlockManagerId(
        executorId, blockTransferService.hostName, blockTransferService.port)

      shuffleServerId = if (externalShuffleServiceEnabled) {
        BlockManagerId(executorId, blockTransferService.hostName, externalShuffleServicePort)
      } else {
        blockManagerId
      }

      // Register this BlockManager through the BlockManagerMasterActor reference,
      // i.e. send a message to BlockManagerMasterActor
      master.registerBlockManager(blockManagerId, maxMemory, slaveActor)

      // Register Executors' configuration with the local shuffle service, if one should exist.
      if (externalShuffleServiceEnabled && !blockManagerId.isDriver) {
        registerWithExternalShuffleServer()
      }
    }

The registerBlockManager call above uses the BlockManagerMasterActor reference to register this BlockManager by sending a message to BlockManagerMasterActor, which in turn calls its register method as described earlier.
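The BlockManagerMaster wrapper itself is not shown in this article; a minimal sketch of what that call boils down to, assuming the Akka ask pattern and the RegisterBlockManager message seen in receiveWithLogging above (`driverActor` stands for the ActorRef of BlockManagerMasterActor, and the timeout value is an assumption):

    import scala.concurrent.Await
    import scala.concurrent.duration._
    import akka.pattern.ask
    import akka.util.Timeout

    // Sketch: send RegisterBlockManager to the master actor and wait for the Boolean reply
    // that `sender ! true` in receiveWithLogging sends back.
    def registerBlockManagerSketch(blockManagerId: BlockManagerId,
                                   maxMemSize: Long,
                                   slaveActor: akka.actor.ActorRef): Unit = {
      implicit val timeout = Timeout(30.seconds) // assumed timeout
      val future = driverActor ? RegisterBlockManager(blockManagerId, maxMemSize, slaveActor)
      val ok = Await.result(future, timeout.duration).asInstanceOf[Boolean]
      require(ok, "BlockManagerMasterActor refused the registration")
    }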


Next comes data retrieval, which has two paths: reading a block from the local node and fetching it from a remote node. Here is the BlockManager source for reading data locally:
    
    
    /**
     * Reads a block from the local node
     */
    private def doGetLocal(blockId: BlockId, asBlockResult: Boolean): Option[Any] = {
      // First look up the block's metadata
      val info = blockInfo.get(blockId).orNull
      if (info != null) {
        // Every access to a BlockInfo is synchronized;
        // the BlockInfo effectively acts as the monitor for concurrent access to one block
        info.synchronized {
          // Double check to make sure the block is still there. There is a small chance that the
          // block has been removed by removeBlock (which also synchronizes on the blockInfo object).
          // Note that this only checks metadata tracking. If user intentionally deleted the block
          // on disk or from off heap storage without using removeBlock, this conditional check will
          // still pass but eventually we will get an exception because we can't find the block.
          if (blockInfo.get(blockId).isEmpty) {
            logWarning(s"Block $blockId had been removed")
            return None
          }

          // If another thread is writing the block, wait for it to become ready:
          // we block here until the BlockInfo becomes ready; if the write ultimately
          // failed, waitForReady returns false.
          if (!info.waitForReady()) {
            // If we get here, the block write failed.
            logWarning(s"Block $blockId was marked as failure.")
            return None
          }

          val level = info.level
          logDebug(s"Level for block $blockId is $level")

          // Look for the block in memory.
          // If the persistence level uses memory (e.g. MEMORY_ONLY, MEMORY_AND_DISK_SER),
          // try to read the data from the MemoryStore.
          if (level.useMemory) {
            logDebug(s"Getting block $blockId from memory")
            val result = if (asBlockResult) {
              memoryStore.getValues(blockId).map(new BlockResult(_, DataReadMethod.Memory, info.size))
            } else {
              memoryStore.getBytes(blockId)
            }
            result match {
              case Some(values) =>
                return result
              case None =>
                logDebug(s"Block $blockId not found in memory")
            }
          }

          // Look for the block in Tachyon
          if (level.useOffHeap) {
            logDebug(s"Getting block $blockId from tachyon")
            if (tachyonStore.contains(blockId)) {
              tachyonStore.getBytes(blockId) match {
                case Some(bytes) =>
                  if (!asBlockResult) {
                    return Some(bytes)
                  } else {
                    return Some(new BlockResult(
                      dataDeserialize(blockId, bytes), DataReadMethod.Memory, info.size))
                  }
                case None =>
                  logDebug(s"Block $blockId not found in tachyon")
              }
            }
          }

          // Look for block on disk, potentially storing it back in memory if required.
          // If the persistence level uses the disk, try to read the data from the DiskStore.
          if (level.useDisk) {
            logDebug(s"Getting block $blockId from disk")
            // Read the bytes through DiskStore's getBytes
            val bytes: ByteBuffer = diskStore.getBytes(blockId) match {
              case Some(b) => b
              case None =>
                throw new BlockException(
                  blockId, s"Block $blockId not found on disk, though it should be")
            }
            assert(0 == bytes.position())

            // If the level only uses disk persistence and not memory persistence,
            // simply wrap the data in a BlockResult (or return the raw bytes)
            if (!level.useMemory) {
              // If the block shouldn't be stored in memory, we can just return it
              if (asBlockResult) {
                return Some(new BlockResult(dataDeserialize(blockId, bytes), DataReadMethod.Disk,
                  info.size))
              } else {
                return Some(bytes)
              }
            } else {
              // Otherwise, we also have to store something in the memory store:
              // the level also uses memory persistence, so put the data back into memory
              // through the memoryStore
              if (!level.deserialized || !asBlockResult) {
                /* We'll store the bytes in memory if the block's storage level includes
                 * "memory serialized", or if it should be cached as objects in memory
                 * but we only requested its serialized bytes. */
                val copyForMemory = ByteBuffer.allocate(bytes.limit)
                copyForMemory.put(bytes)
                memoryStore.putBytes(blockId, copyForMemory, level)
                bytes.rewind()
              }
              if (!asBlockResult) {
                return Some(bytes)
              } else {
                val values = dataDeserialize(blockId, bytes)
                if (level.deserialized) {
                  // Cache the values before returning them
                  val putResult = memoryStore.putIterator(
                    blockId, values, level, returnValues = true, allowPersistToDisk = false)
                  // The put may or may not have succeeded, depending on whether there was enough
                  // space to unroll the block. Either way, the put here should return an iterator.
                  putResult.data match {
                    case Left(it) =>
                      return Some(new BlockResult(it, DataReadMethod.Disk, info.size))
                    case _ =>
                      // This only happens if we dropped the values back to disk (which is never)
                      throw new SparkException("Memory store did not return an iterator!")
                  }
                } else {
                  return Some(new BlockResult(values, DataReadMethod.Disk, info.size))
                }
              }
            }
          }
        }
      } else {
        logDebug(s"Block $blockId not registered locally")
      }
      None
    }

That is a lot of code, but it boils down to two cases: reading from memory and reading from disk. Reads have to be thread-safe, because multiple threads may access the same block concurrently. Memory is tried first: if the storage level includes memory persistence, the data is fetched through MemoryStore's getValues or getBytes (which one depends on whether the caller wants a BlockResult). If the block is persisted on disk instead, DiskStore's getBytes is used, and after reading, the code checks whether the level also requires memory persistence; if so, the data is put back into memory through the MemoryStore. Below are the getValues and getBytes methods of MemoryStore and DiskStore.

MemoryStore's:
     
     
    // The entries map maintained inside MemoryStore is where each block's data actually lives;
    // a block's in-memory data is represented by a MemoryEntry
    private val entries = new LinkedHashMap[BlockId, MemoryEntry](32, 0.75f, true)
So a block's data is ultimately stored in the entries member variable, a LinkedHashMap keyed by BlockId.
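One detail worth noting: the third constructor argument (true) makes this LinkedHashMap access-ordered, so iterating over entries visits the least-recently-used blocks first, which is what ensureFreeSpace (shown later) relies on when choosing blocks to evict. A tiny standalone sketch of that behavior:

    import java.util.LinkedHashMap
    import scala.collection.JavaConversions._

    // accessOrder = true: iteration visits least-recently-accessed entries first
    val m = new LinkedHashMap[String, Int](32, 0.75f, true)
    m.put("a", 1); m.put("b", 2); m.put("c", 3)
    m.get("a") // touching "a" moves it to the tail of the iteration order
    m.keySet().foreach(println) // prints b, c, a -- so "b" would be the first eviction candidate

The getBytes and getValues implementations below then simply look up this map: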
     
     
    override def getBytes(blockId: BlockId): Option[ByteBuffer] = {
      // Access to entries is also synchronized for concurrent access
      val entry = entries.synchronized {
        // Try to fetch the block's data from memory
        entries.get(blockId)
      }
      // If nothing was found, return None
      if (entry == null) {
        None
      // If the entry holds deserialized data
      } else if (entry.deserialized) {
        // Serialize it through BlockManager's dataSerialize before returning
        Some(blockManager.dataSerialize(blockId, entry.value.asInstanceOf[Array[Any]].iterator))
      } else {
        // Otherwise return the bytes directly
        Some(entry.value.asInstanceOf[ByteBuffer].duplicate()) // Doesn't actually copy the data
      }
    }

    override def getValues(blockId: BlockId): Option[Iterator[Any]] = {
      val entry = entries.synchronized {
        entries.get(blockId)
      }
      if (entry == null) {
        None
      // If the data is already deserialized, return it directly
      } else if (entry.deserialized) {
        Some(entry.value.asInstanceOf[Array[Any]].iterator)
      // If it is serialized, deserialize it through the BlockManager before returning
      } else {
        val buffer = entry.value.asInstanceOf[ByteBuffer].duplicate() // Doesn't actually copy data
        Some(blockManager.dataDeserialize(blockId, buffer))
      }
    }
DiskStore's:
     
     
    private def getBytes(file: File, offset: Long, length: Long): Option[ByteBuffer] = {
      // DiskStore uses Java NIO underneath for file reads and writes
      val channel = new RandomAccessFile(file, "r").getChannel
      try {
        // For small files, directly read rather than memory map
        if (length < minMemoryMapBytes) {
          val buf = ByteBuffer.allocate(length.toInt)
          channel.position(offset)
          while (buf.remaining() != 0) {
            if (channel.read(buf) == -1) {
              throw new IOException("Reached EOF before filling buffer\n" +
                s"offset=$offset\nfile=${file.getAbsolutePath}\nbuf.remaining=${buf.remaining}")
            }
          }
          buf.flip()
          Some(buf)
        } else {
          Some(channel.map(MapMode.READ_ONLY, offset, length))
        }
      } finally {
        channel.close()
      }
    }
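minMemoryMapBytes is the threshold below which a plain read is cheaper than setting up a memory mapping; as far as I recall it comes from the spark.storage.memoryMapThreshold configuration, with a 2 MB default in the 1.x line (treat both the name and the value as an assumption to verify against your version). A self-contained sketch of the same decision outside of Spark:

    import java.io.{File, IOException, RandomAccessFile}
    import java.nio.ByteBuffer
    import java.nio.channels.FileChannel.MapMode

    // Standalone version of the same read pattern: small segments are read directly,
    // large ones are memory-mapped. The 2 MB default threshold is an assumption.
    def readSegment(file: File, offset: Long, length: Long,
                    mapThreshold: Long = 2L * 1024 * 1024): ByteBuffer = {
      val channel = new RandomAccessFile(file, "r").getChannel
      try {
        if (length < mapThreshold) {
          val buf = ByteBuffer.allocate(length.toInt)
          channel.position(offset)
          while (buf.remaining() != 0) {
            if (channel.read(buf) == -1) {
              throw new IOException(s"Reached EOF before reading $length bytes from ${file.getName}")
            }
          }
          buf.flip()
          buf
        } else {
          channel.map(MapMode.READ_ONLY, offset, length)
        }
      } finally {
        channel.close()
      }
    }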

Next, fetching a block from another node:
    
    
    /**
     * Fetches a block from other nodes
     */
    private def doGetRemote(blockId: BlockId, asBlockResult: Boolean): Option[Any] = {
      require(blockId != null, "BlockId is null")
      // First ask the BlockManagerMaster which BlockManagers hold this blockId,
      // then shuffle the locations
      val locations = Random.shuffle(master.getLocations(blockId))
      // Try each BlockManager in turn
      for (loc <- locations) {
        logDebug(s"Getting remote block $blockId from $loc")
        // Fetch the block data over the network through BlockTransferService
        // (fetchBlockSync blocks until the data arrives); the remote BlockManager is
        // identified by its host, port and executorId
        val data = blockTransferService.fetchBlockSync(
          loc.host, loc.port, loc.executorId, blockId.toString).nioByteBuffer()
        if (data != null) {
          if (asBlockResult) {
            return Some(new BlockResult(
              dataDeserialize(blockId, data),
              DataReadMethod.Network,
              data.limit()))
          } else {
            return Some(data)
          }
        }
        logDebug(s"The value of block $blockId is null")
      }
      logDebug(s"Block $blockId not found")
      None
    }
The master.getLocations(blockId) call above reads a member variable of BlockManagerMasterActor; once the BlockManagerIds holding the data are known, they are traversed to pull the data:
     
     
    // This map records, for each blockId, which BlockManagers on other nodes hold that block.
    // If the data is replicated, a single blockId maps to multiple BlockManagerIds on different nodes.
    private val blockLocations = new JHashMap[BlockId, mutable.HashSet[BlockManagerId]]
The data is then pulled over through blockTransferService.fetchBlockSync (a blocking call, even though a network transfer happens underneath), deserialized if needed, and that is all doGetRemote does for remote reads.
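The master-side handler that answers getLocations is not reproduced in this article; a minimal sketch of what it presumably looks like, given the blockLocations map above (names assumed), would be:

    // Sketch: resolve a blockId to the BlockManagers known to hold it.
    private def getLocationsSketch(blockId: BlockId): Seq[BlockManagerId] = {
      if (blockLocations.containsKey(blockId)) blockLocations.get(blockId).toSeq
      else Seq.empty[BlockManagerId]
    }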


So much for reading; writing is the other half, and it breaks down into three steps:
1. What happens when there is not enough memory for the write? -> Old blocks are persisted to disk first and the new data is put into memory; if that still does not fit, the new data itself is persisted to disk.
2. After the write, the new status is reported to BlockManagerMasterActor.
3. If replication is required, another BlockManager is picked at random and the data is transferred to it through BlockTransferService.


1. What happens when there is not enough memory for the write?
Let's start with part of BlockManager's doPut() method (the full method is rather long):
     
     
    // Create a BlockInfo for the block to be stored and put it into the blockInfo map
    val putBlockInfo = {
      val tinfo = new BlockInfo(level, tellMaster)
      // Do atomically !
      val oldBlockOpt = blockInfo.putIfAbsent(blockId, tinfo)
      if (oldBlockOpt.isDefined) {
        if (oldBlockOpt.get.waitForReady()) {
          logWarning(s"Block $blockId already exists on this machine; not re-adding it")
          return updatedBlocks
        }
        // TODO: So the block info exists - but previous attempt to load it (?) failed.
        // What do we do now ? Retry on it ?
        oldBlockOpt.get
      } else {
        tinfo
      }
    }
First a BlockInfo object is created (or the existing one reused) and bound to putBlockInfo.

Next, the code synchronizes on putBlockInfo:
      
      
    // Synchronize on the BlockInfo to coordinate concurrent access
    putBlockInfo.synchronized {
      logTrace("Put for block %s took %s to get into synchronized block"
        .format(blockId, Utils.getUsedTimeMs(startTimeMs)))

      var marked = false
      try {
        // returnValues - Whether to return the values put
        // blockStore - The type of storage to put these values into
        // First choose a BlockStore based on the persistence level: memoryStore, diskStore, etc.
        val (returnValues, blockStore: BlockStore) = {
          if (putLevel.useMemory) {
            // Put it in memory first, even if it also has useDisk set to true;
            // We will drop it to disk later if the memory store can't hold it.
            (true, memoryStore)
          } else if (putLevel.useOffHeap) {
            // Use tachyon for off-heap storage
            (false, tachyonStore)
          } else if (putLevel.useDisk) {
            // Don't get back the bytes from put unless we replicate them
            (putLevel.replication > 1, diskStore)
          } else {
            assert(putLevel == StorageLevel.NONE)
            throw new BlockException(
              blockId, s"Attempted to put block $blockId without specifying storage level!")
          }
        }

        // Actually put the values.
        // Depending on the type of the data, call the chosen store's putIterator,
        // putArray or putBytes method
        val result = data match {
          case IteratorValues(iterator) =>
            blockStore.putIterator(blockId, iterator, putLevel, returnValues)
          case ArrayValues(array) =>
            blockStore.putArray(blockId, array, putLevel, returnValues)
          case ByteBufferValues(bytes) =>
            bytes.rewind()
            blockStore.putBytes(blockId, bytes, putLevel)
        }
        size = result.size
        result.data match {
          case Left (newIterator) if putLevel.useMemory => valuesAfterPut = newIterator
          case Right (newBytes) => bytesAfterPut = newBytes
          case _ =>
        }

        // Keep track of which blocks are dropped from memory
        if (putLevel.useMemory) {
          result.droppedBlocks.foreach { updatedBlocks += _ }
        }

        // Compute the block's current BlockStatus from putBlockInfo
        val putBlockStatus = getCurrentBlockStatus(blockId, putBlockInfo)
        if (putBlockStatus.storageLevel != StorageLevel.NONE) {
          // Now that the block is in either the memory, tachyon, or disk store,
          // let other threads read it, and tell the master about it.
          marked = true
          putBlockInfo.markReady(size)
          if (tellMaster) {
            // Call reportBlockStatus() to send the newly written block's status to
            // BlockManagerMasterActor so the block metadata stays in sync
            reportBlockStatus(blockId, putBlockInfo, putBlockStatus)
          }
          updatedBlocks += ((blockId, putBlockStatus))
        }
      }
Inside the synchronized block a store is chosen according to the persistence level, and the appropriate put method of that store is called. Let's look at the storage code in detail: in MemoryStore, both putArray and putBytes eventually call tryToPut, which tries to put the data into memory first and, if that fails, evicts some old blocks before storing the new one:
      
      
    /**
     * Try to put the block into memory first; if there is not enough room,
     * drop some existing blocks and then store the new block.
     */
    private def tryToPut(
        blockId: BlockId,
        value: Any,
        size: Long,
        deserialized: Boolean): ResultWithDroppedBlocks = {

      /* TODO: Its possible to optimize the locking by locking entries only when selecting blocks
       * to be dropped. Once the to-be-dropped blocks have been selected, and lock on entries has
       * been released, it must be ensured that those to-be-dropped blocks are not double counted
       * for freeing up more space for another block that needs to be put. Only then the actually
       * dropping of blocks (and writing to disk if necessary) can proceed in parallel. */
      var putSuccess = false
      val droppedBlocks = new ArrayBuffer[(BlockId, BlockStatus)]

      // This has to be synchronized: otherwise, right after one thread decides there is
      // enough free memory, another thread could also be putting data and we would run into an OOM
      accountingLock.synchronized {
        // Call ensureFreeSpace to check whether there is enough memory; if not, it tries to drop
        // some blocks to disk via dropFromMemory(). If a dropped block's persistence level does
        // not allow disk, that data is lost.
        val freeSpaceResult = ensureFreeSpace(blockId, size)
        val enoughFreeSpace = freeSpaceResult.success
        droppedBlocks ++= freeSpaceResult.droppedBlocks

        // If there is now enough free space, write the block into memory
        if (enoughFreeSpace) {
          val entry = new MemoryEntry(value, size, deserialized)
          entries.synchronized {
            entries.put(blockId, entry)
            currentMemory += size
          }
          val valuesOrBytes = if (deserialized) "values" else "bytes"
          logInfo("Block %s stored as %s in memory (estimated size %s, free %s)".format(
            blockId, valuesOrBytes, Utils.bytesToString(size), Utils.bytesToString(freeMemory)))
          putSuccess = true
        } else {
          // Tell the block manager that we couldn't put it in memory so that it can drop it to
          // disk if the block allows disk storage.
          val data = if (deserialized) {
            Left(value.asInstanceOf[Array[Any]])
          } else {
            Right(value.asInstanceOf[ByteBuffer].duplicate())
          }
          // Call dropFromMemory to try writing the new block to disk instead; if its persistence
          // level does not allow disk storage, the data is simply dropped
          val droppedBlockStatus = blockManager.dropFromMemory(blockId, data)
          droppedBlockStatus.foreach { status => droppedBlocks += ((blockId, status)) }
        }
      }
      ResultWithDroppedBlocks(putSuccess, droppedBlocks)
    }
When storing data we obviously need to check whether there is enough memory; that is exactly what ensureFreeSpace does:
      
      
    private def ensureFreeSpace(
        blockIdToAdd: BlockId,
        space: Long): ResultWithDroppedBlocks = {
      logInfo(s"ensureFreeSpace($space) called with curMem=$currentMemory, maxMem=$maxMemory")

      val droppedBlocks = new ArrayBuffer[(BlockId, BlockStatus)]

      if (space > maxMemory) {
        logInfo(s"Will not store $blockIdToAdd as it is larger than our memory limit")
        return ResultWithDroppedBlocks(success = false, droppedBlocks)
      }

      // Take into account the amount of memory currently occupied by unrolling blocks
      val actualFreeMemory = freeMemory - currentUnrollMemory

      // If the current free memory is not enough to hold this block
      if (actualFreeMemory < space) {
        val rddToAdd = getRddId(blockIdToAdd)
        val selectedBlocks = new ArrayBuffer[BlockId]
        var selectedMemory = 0L

        // This is synchronized to ensure that the set of entries is not changed
        // (because of getValue or getBytes) while traversing the iterator, as that
        // can lead to exceptions.
        entries.synchronized {
          val iterator = entries.entrySet().iterator()
          // Select some existing blocks to be removed from entries
          while (actualFreeMemory + selectedMemory < space && iterator.hasNext) {
            val pair = iterator.next()
            val blockId = pair.getKey
            if (rddToAdd.isEmpty || rddToAdd != getRddId(blockId)) {
              selectedBlocks += blockId
              selectedMemory += pair.getValue.size
            }
          }
        }

        // If removing the selected blocks would free enough space for the new block
        if (actualFreeMemory + selectedMemory >= space) {
          logInfo(s"${selectedBlocks.size} blocks selected for dropping")
          // Iterate over the blocks selected for removal
          for (blockId <- selectedBlocks) {
            val entry = entries.synchronized { entries.get(blockId) }
            // This should never be null as only one thread should be dropping
            // blocks and removing entries. However the check is still here for
            // future safety.
            if (entry != null) {
              val data = if (entry.deserialized) {
                Left(entry.value.asInstanceOf[Array[Any]])
              } else {
                Right(entry.value.asInstanceOf[ByteBuffer].duplicate())
              }
              // Call dropFromMemory to try writing the block to disk; if its persistence
              // level does not allow disk storage, that block's data is lost
              val droppedBlockStatus = blockManager.dropFromMemory(blockId, data)
              droppedBlockStatus.foreach { status => droppedBlocks += ((blockId, status)) }
            }
          }
          return ResultWithDroppedBlocks(success = true, droppedBlocks)
        } else {
          logInfo(s"Will not store $blockIdToAdd as it would require dropping another block " +
            "from the same RDD")
          return ResultWithDroppedBlocks(success = false, droppedBlocks)
        }
      }
      ResultWithDroppedBlocks(success = true, droppedBlocks)
    }
Note that "removing" old blocks from memory above does not necessarily mean deleting them: they are dropped to disk if their persistence level allows it; a block whose level does not include disk persistence is simply lost.
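The dropFromMemory method itself is not reproduced in this article; the following is only a rough sketch, written as if it lived inside BlockManager, of the logic one would expect from the description above (the store and helper names match earlier snippets, everything else is an assumption, not the real source):

    // Sketch: drop a block from memory, spilling it to disk only if its level allows it.
    def dropFromMemorySketch(
        blockId: BlockId,
        data: Either[Array[Any], java.nio.ByteBuffer]): Option[BlockStatus] = {
      val info = blockInfo.get(blockId).orNull
      if (info == null) return None
      info.synchronized {
        val level = info.level
        // Spill to disk only if the level includes disk and the block is not already there
        if (level.useDisk && !diskStore.contains(blockId)) {
          data match {
            case Left(elements) => diskStore.putArray(blockId, elements, level, returnValues = false)
            case Right(bytes)   => diskStore.putBytes(blockId, bytes, level)
          }
        }
        // Either way, the in-memory copy goes away; if useDisk was false the data is gone
        memoryStore.remove(blockId)
        val status = getCurrentBlockStatus(blockId, info)
        if (info.tellMaster) reportBlockStatus(blockId, info, status, droppedMemorySize = info.size)
        Some(status)
      }
    }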
Storing data on disk, by contrast, is quite simple:
      
      
    override def putBytes(blockId: BlockId, _bytes: ByteBuffer, level: StorageLevel): PutResult = {
      // So that we do not modify the input offsets !
      // duplicate does not copy buffer, so inexpensive
      // Write the data to the file on disk using Java NIO
      val bytes = _bytes.duplicate()
      logDebug(s"Attempting to put block $blockId")
      val startTime = System.currentTimeMillis
      val file = diskManager.getFile(blockId)
      val channel = new FileOutputStream(file).getChannel
      while (bytes.remaining > 0) {
        channel.write(bytes)
      }
      channel.close()
      val finishTime = System.currentTimeMillis
      logDebug("Block %s stored as %s file on disk in %d ms".format(
        file.getName, Utils.bytesToString(bytes.limit), finishTime - startTime))
      PutResult(bytes.limit(), Right(bytes.duplicate()))
    }

After the data is written, the next step is to report to BlockManagerMasterActor, which is really just one line of code:
   
   
    // Call reportBlockStatus() to send the newly written block's status to
    // BlockManagerMasterActor so the block metadata stays in sync
    reportBlockStatus(blockId, putBlockInfo, putBlockStatus)
Digging a bit deeper into reportBlockStatus:
   
   
    private def reportBlockStatus(
        blockId: BlockId,
        info: BlockInfo,
        status: BlockStatus,
        droppedMemorySize: Long = 0L): Unit = {
      val needReregister = !tryToReportBlockStatus(blockId, info, status, droppedMemorySize)
      if (needReregister) {
        logInfo(s"Got told to re-register updating block $blockId")
        // Re-registering will report our new block for free.
        asyncReregister()
      }
      logDebug(s"Told master about block $blockId")
    }

    /**
     * Actually send a UpdateBlockInfo message. Returns the master's response,
     * which will be true if the block was successfully recorded and false if
     * the slave needs to re-register.
     */
    private def tryToReportBlockStatus(
        blockId: BlockId,
        info: BlockInfo,
        status: BlockStatus,
        droppedMemorySize: Long = 0L): Boolean = {
      if (info.tellMaster) {
        val storageLevel = status.storageLevel
        val inMemSize = Math.max(status.memSize, droppedMemorySize)
        val inTachyonSize = status.tachyonSize
        val onDiskSize = status.diskSize
        master.updateBlockInfo(
          blockManagerId, blockId, storageLevel, inMemSize, onDiskSize, inTachyonSize)
      } else {
        true
      }
    }
As you can see, this simply sends an UpdateBlockInfo message to the master. When BlockManagerMasterActor receives that message it calls the following code:
   
   
    /**
     * Updates block info: whenever the blocks on a BlockManager change, an UpdateBlockInfo
     * request is sent to the BlockManagerMaster to update the BlockInfo kept there
     */
    private def updateBlockInfo(
        blockManagerId: BlockManagerId,
        blockId: BlockId,
        storageLevel: StorageLevel,
        memSize: Long,
        diskSize: Long,
        tachyonSize: Long): Boolean = {

      if (!blockManagerInfo.contains(blockManagerId)) {
        if (blockManagerId.isDriver && !isLocal) {
          // We intentionally do not register the master (except in local mode),
          // so we should not indicate failure.
          return true
        } else {
          return false
        }
      }

      if (blockId == null) {
        blockManagerInfo(blockManagerId).updateLastSeenMs()
        return true
      }

      // Call updateBlockInfo() on this BlockManager's BlockManagerInfo to update the block info
      blockManagerInfo(blockManagerId).updateBlockInfo(
        blockId, storageLevel, memSize, diskSize, tachyonSize)

      // A block may live on more than one BlockManager:
      // if the StorageLevel is one of the "_2" variants, the block is replicated to another
      // BlockManager. The blockLocations map stores the set of BlockManagerIds for each blockId,
      // so that information is updated here; since a Set is used to store the BlockManagerIds,
      // duplicates are removed automatically.
      var locations: mutable.HashSet[BlockManagerId] = null
      if (blockLocations.containsKey(blockId)) {
        locations = blockLocations.get(blockId)
      } else {
        locations = new mutable.HashSet[BlockManagerId]
        blockLocations.put(blockId, locations)
      }

      if (storageLevel.isValid) {
        locations.add(blockManagerId)
      } else {
        locations.remove(blockManagerId)
      }

      // Remove the block from master tracking if it has been removed on all slaves.
      if (locations.size == 0) {
        blockLocations.remove(blockId)
      }
      true
    }

Finally, the third step: a block whose storage level requests replication has to be transferred to another BlockManager for storage. The last part of the doPut method does the following:
   
   
    // Important: if the persistence level is one of the _2 variants, the block needs to be
    // replicated and transferred to another node
    if (putLevel.replication > 1) {
      data match {
        case ByteBufferValues(bytes) =>
          if (replicationFuture != null) {
            Await.ready(replicationFuture, Duration.Inf)
          }
        case _ =>
          val remoteStartTime = System.currentTimeMillis
          // Serialize the block if not already done
          if (bytesAfterPut == null) {
            if (valuesAfterPut == null) {
              throw new SparkException(
                "Underlying put returned neither an Iterator nor bytes! This shouldn't happen.")
            }
            bytesAfterPut = dataSerialize(blockId, valuesAfterPut)
          }
          // Call replicate to copy the block to another node
          replicate(blockId, bytesAfterPut, putLevel)
          logDebug("Put block %s remotely took %s"
            .format(blockId, Utils.getUsedTimeMs(remoteStartTime)))
      }
    }

    BlockManager.dispose(bytesAfterPut)

    if (putLevel.replication > 1) {
      logDebug("Putting block %s with replication took %s"
        .format(blockId, Utils.getUsedTimeMs(startTimeMs)))
    } else {
      logDebug("Putting block %s without replication took %s"
        .format(blockId, Utils.getUsedTimeMs(startTimeMs)))
    }
    updatedBlocks
The replicate method performs the actual replication:
   
   
    private def replicate(blockId: BlockId, data: ByteBuffer, level: StorageLevel): Unit = {
      // Get cached list of peers
      peersForReplication ++= getPeers(forceFetch = false)

      def getRandomPeer(): Option[BlockManagerId] = {
        // If replication had failed, then force update the cached list of peers and remove the peers
        // that have been already used
        if (replicationFailed) {
          peersForReplication.clear()
          peersForReplication ++= getPeers(forceFetch = true)
          peersForReplication --= peersReplicatedTo
          peersForReplication --= peersFailedToReplicateTo
        }
        if (!peersForReplication.isEmpty) {
          Some(peersForReplication(random.nextInt(peersForReplication.size)))
        } else {
          None
        }
      }

      while (!done) {
        // Randomly pick another BlockManager
        getRandomPeer() match {
          case Some(peer) =>
            try {
              val onePeerStartTime = System.currentTimeMillis
              data.rewind()
              logTrace(s"Trying to replicate $blockId of ${data.limit()} bytes to $peer")
              // Upload the data to the other BlockManager through BlockTransferService
              // (uploadBlockSync blocks until the upload completes)
              blockTransferService.uploadBlockSync(
                peer.host, peer.port, peer.executorId, blockId, new NioManagedBuffer(data), tLevel)
              logTrace(s"Replicated $blockId of ${data.limit()} bytes to $peer in %s ms"
                .format(System.currentTimeMillis - onePeerStartTime))
              peersReplicatedTo += peer
              peersForReplication -= peer
              replicationFailed = false
              if (peersReplicatedTo.size == numPeersToReplicateTo) {
                done = true // specified number of peers have been replicated to
              }
            } catch {
              case e: Exception =>
                logWarning(s"Failed to replicate $blockId to $peer, failure #$failures", e)
                failures += 1
                replicationFailed = true
                peersFailedToReplicateTo += peer
                if (failures > maxReplicationFailures) { // too many failures in replicating to peers
                  done = true
                }
            }
          case None => // no peer left to replicate to
            done = true
        }
      }
    }
A peer BlockManagerId is picked at random and the data is written to that BlockManager through BlockTransferService, which completes the replication.
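From the user's point of view, all of this is triggered simply by choosing a replicated storage level; a minimal sketch (the SparkContext setup and data are illustrative only):

    import org.apache.spark.{SparkConf, SparkContext}
    import org.apache.spark.storage.StorageLevel

    // MEMORY_ONLY_2 has replication = 2, so when a partition of this RDD is cached,
    // BlockManager.doPut will call replicate() and ship the block to one peer.
    object ReplicationExample {
      def main(args: Array[String]): Unit = {
        val sc = new SparkContext(new SparkConf().setAppName("replication-example"))
        val rdd = sc.parallelize(1 to 1000000).persist(StorageLevel.MEMORY_ONLY_2)
        println(rdd.count()) // the first action materializes and caches the replicated blocks
        sc.stop()
      }
    }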
