OAP ParquetDataFile and Cache

ParquetDataFile.scala

val iterator = reader.iteratorWithRowIds(requiredIds, rowIds)
      .asInstanceOf[OapCompletionIterator[InternalRow]]
    val result = ArrayBuffer[Int]()
    while (iterator.hasNext) {
      val row: InternalRow = iterator.next()
      assert(row.numFields == 2)
      result += row.getInt(0)
    }

DataFile.scala

private[oap] class OapCompletionIterator[T](inner: Iterator[T], completionFunction: => Unit)
    extends Iterator[T] with Closeable {

  private[this] var completed = false
  override def hasNext: Boolean = {
    val r = inner.hasNext
    if (!r && !completed) {
      completed = true
      completionFunction
    }
    r
  }
  override def next(): T = inner.next()
  override def close(): Unit = {}
}
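
Two details are worth calling out: completionFunction is a by-name parameter, so it is only evaluated the first time hasNext returns false, and the completed flag guarantees it runs at most once. A small usage sketch (hypothetical caller code; the class is private[oap], so real callers sit inside the OAP package):

object CompletionIteratorUsage extends App {
  var completions = 0

  val rows = new OapCompletionIterator[Int](Iterator(1, 2, 3), { completions += 1 })

  assert(completions == 0)   // the by-name block has not been evaluated yet
  assert(rows.sum == 6)      // draining the iterator fires the completion block exactly once
  assert(completions == 1)
  rows.hasNext               // already completed: returns false, the block does not run again
  assert(completions == 1)
}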

ParquetDataFile.scala
FileRecordReaderIterator.hasNext()

private class FileRecordReaderIterator[V](private[this] var rowReader: RecordReader[V])
    extends Iterator[V] with Closeable {
    private[this] var havePair = false
    private[this] var finished = false

    override def hasNext: Boolean = {
      if (!finished && !havePair) {
        finished = !rowReader.nextKeyValue
        if (finished) {
          close()
        }
        havePair = !finished
      }
      !finished
    }

    override def next(): V = {
      if (!hasNext) {
        throw new java.util.NoSuchElementException("End of stream")
      }
      havePair = false
      rowReader.getCurrentValue
    }

    override def close(): Unit = {
      if (rowReader != null) {
        try {
          rowReader.close()
        } finally {
          rowReader = null
        }
      }
    }
  }
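
The havePair/finished pair implements one-element lookahead over a pull-style reader: hasNext advances the reader at most once and remembers that a value is pending, so repeated hasNext calls are idempotent, next() never skips a record, and close() runs as soon as the reader is drained. Below is a self-contained sketch of the same handshake; the SimpleReader trait is a toy stand-in for a RecordReader-style API, not the real Hadoop/Parquet interface:

import java.io.Closeable

trait SimpleReader[V] extends Closeable {
  def nextKeyValue(): Boolean          // advance the cursor; false at end of stream
  def getCurrentValue: V               // value at the current cursor position
}

class ReaderIterator[V](private[this] var reader: SimpleReader[V])
    extends Iterator[V] with Closeable {

  private[this] var havePair = false   // a value was fetched by hasNext but not returned yet
  private[this] var finished = false   // the reader reported end of stream

  override def hasNext: Boolean = {
    if (!finished && !havePair) {      // advance only when nothing is buffered
      finished = !reader.nextKeyValue()
      if (finished) close()            // release the reader as soon as it is drained
      havePair = !finished
    }
    !finished
  }

  override def next(): V = {
    if (!hasNext) throw new java.util.NoSuchElementException("End of stream")
    havePair = false                   // hand the buffered value to the caller
    reader.getCurrentValue
  }

  override def close(): Unit = {
    if (reader != null) {
      try reader.close() finally reader = null
    }
  }
}

object ReaderIteratorDemo extends App {
  val reader = new SimpleReader[Int] {          // yields 0, 1, 2
    private var i = -1
    def nextKeyValue(): Boolean = { i += 1; i < 3 }
    def getCurrentValue: Int = i
    def close(): Unit = ()
  }
  val it = new ReaderIterator(reader)
  it.hasNext; it.hasNext                        // repeated hasNext never skips a record
  assert(it.toList == List(0, 1, 2))
}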

VectorizedCacheReader.scala

override def nextKeyValue(): Boolean = {
    resultBatch

    if (returnColumnarBatch) {
      return nextBatch
    }

    if (batchIdx >= numBatched) {
      if (!nextBatch) {
        return false
      }
    }
    batchIdx += 1
    true
  }
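
nextKeyValue serves two consumption modes: with returnColumnarBatch the caller receives a whole ColumnarBatch per call, otherwise rows are handed out one by one and a new batch is decoded only after batchIdx has walked past numBatched. A stripped-down simulation of that cursor logic, in plain Scala with dummy state instead of the real reader fields:

object BatchCursorDemo extends App {
  val batches = Iterator(Seq(10, 11, 12), Seq(20, 21))  // stand-ins for decoded column batches
  var current: Seq[Int] = Seq.empty
  var batchIdx = 0
  var numBatched = 0

  // Mirrors nextBatch: decode the next batch and reset the row cursor.
  def nextBatch(): Boolean = {
    if (!batches.hasNext) return false
    current = batches.next()
    batchIdx = 0
    numBatched = current.size
    true
  }

  // Mirrors nextKeyValue in row-by-row mode (returnColumnarBatch == false):
  // a new batch is fetched only once the previous one is fully consumed.
  def nextKeyValue(): Boolean = {
    if (batchIdx >= numBatched && !nextBatch()) return false
    batchIdx += 1
    true
  }

  def getCurrentValue: Int = current(batchIdx - 1)

  val out = scala.collection.mutable.ArrayBuffer[Int]()
  while (nextKeyValue()) out += getCurrentValue
  assert(out.toList == List(10, 11, 12, 20, 21))
}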

IndexedVectorizedCacheReader.scala

override def nextBatch: Boolean = {
    // if idsMap is Empty, needn't read remaining data in this row group
    // rowsReturned = totalCountLoadedSoFar to skip remaining data
    if (idsMap.isEmpty) {
      rowsReturned = totalCountLoadedSoFar
    }

    if (rowsReturned >= totalRowCount) {
      return false
    }

    checkEndOfRowGroup()

    var ids = idsMap.remove(currentPageNumber)
    currentPageNumber += 1

    while (ids == null || ids.isEmpty) {
      skipBatchInternal()
      ids = idsMap.remove(currentPageNumber)
      currentPageNumber += 1
    }

    nextBatchInternal()
    if (!returnColumnarBatch) {
      batchIds = ids
      numBatched = ids.size
    }
    true
  }
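
idsMap maps a page (batch) number within the current row group to the row ids the index selected from that page; pages with no hits are skipped via skipBatchInternal instead of being decoded. The real method guards against an exhausted map up front (the idsMap.isEmpty branch above); the sketch below simulates only the skip-until-hit loop, with counters standing in for the real page readers:

import scala.collection.mutable

object PageSkipDemo extends App {
  // Index hits per page number: pages 0 and 3 contain matching rows, 1 and 2 do not.
  val idsMap = mutable.Map(0 -> Seq(5, 7), 3 -> Seq(2))
  var currentPageNumber = 0
  var skipped = 0
  var read = 0

  def skipBatchInternal(): Unit = skipped += 1   // stands in for skipping a page
  def nextBatchInternal(): Unit = read += 1      // stands in for decoding a page

  // Mirrors the core of nextBatch: pop the ids for the current page and
  // keep skipping pages until one with index hits is found.
  def readNextHitPage(): Seq[Int] = {
    var ids = idsMap.remove(currentPageNumber).orNull
    currentPageNumber += 1
    while (ids == null || ids.isEmpty) {
      skipBatchInternal()
      ids = idsMap.remove(currentPageNumber).orNull
      currentPageNumber += 1
    }
    nextBatchInternal()
    ids
  }

  assert(readNextHitPage() == Seq(5, 7) && skipped == 0)  // page 0 has hits
  assert(readNextHitPage() == Seq(2) && skipped == 2)     // pages 1 and 2 were skipped
  assert(read == 2)
}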

Cache section

//ParquetDataFile.scala
def cache(groupId: Int, fiberId: Int): FiberCache = {
    if (fiberDataReader == null) {
      fiberDataReader =
        ParquetFiberDataReader.open(configuration, file, meta.footer.toParquetMetadata)
    }

    val conf = new Configuration(configuration)
    // setting required column to conf enables us to
    // Vectorized read & cache certain(not all) columns
    addRequestSchemaToConf(conf, Array(fiberId))
    ParquetFiberDataLoader(conf, fiberDataReader, groupId).loadSingleColumn
  }

The key step here is the conf.set call: the SPARK_ROW_REQUESTED_SCHEMA property it sets is used in several places downstream.

//ParquetDataFile.scala
private def addRequestSchemaToConf(conf: Configuration, requiredIds: Array[Int]): Unit = {
    val requestSchemaString = {
      var requestSchema = new StructType
      for (index <- requiredIds) {
        requestSchema = requestSchema.add(schema(index))
      }
      requestSchema.json
    }
    conf.set(ParquetReadSupportWrapper.SPARK_ROW_REQUESTED_SCHEMA, requestSchemaString)
  }
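
The requested column therefore travels as a JSON-serialized single-field StructType under the SPARK_ROW_REQUESTED_SCHEMA key. A sketch of that round trip with public Spark APIs; the literal key string below is spelled out only for illustration (the real code references ParquetReadSupportWrapper.SPARK_ROW_REQUESTED_SCHEMA):

import org.apache.hadoop.conf.Configuration
import org.apache.spark.sql.types.{DataType, IntegerType, StringType, StructType}

object RequestedSchemaDemo extends App {
  val fullSchema = new StructType().add("id", IntegerType).add("name", StringType)

  // Request a single column (index 1, i.e. "name"), just like addRequestSchemaToConf.
  val requested = new StructType().add(fullSchema(1))
  val conf = new Configuration()
  val requestedSchemaKey = "org.apache.spark.sql.parquet.row.requested_schema"  // assumed key
  conf.set(requestedSchemaKey, requested.json)

  // The read side parses the string back into a StructType.
  val parsed = DataType.fromJson(conf.get(requestedSchemaKey)).asInstanceOf[StructType]
  assert(parsed.length == 1 && parsed.head.name == "name")
}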

Role one of SPARK_ROW_REQUESTED_SCHEMA: creating the OnHeapColumnVector

// ParquetFiberDataLoader.scala
def loadSingleColumn: FiberCache = {
...
// role one
val sparkRequestedSchemaString =
      configuration.get(ParquetReadSupportWrapper.SPARK_ROW_REQUESTED_SCHEMA)
    val sparkSchema = StructType.fromString(sparkRequestedSchemaString)
    assert(sparkSchema.length == 1, s"Only can get single column every time " +
      s"by loadSingleColumn, the columns = ${sparkSchema.mkString}")
    val dataType = sparkSchema.fields(0).dataType
    // Notes: rowIds is IntegerType in oap index.
    val rowCount = reader.getFooter.getBlocks.get(blockId).getRowCount.toInt
    val column = new OnHeapColumnVector(rowCount, dataType)
...
}
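
The decoded single-column schema determines the element type of the OnHeapColumnVector, which is sized to the row group's row count and then filled by the Parquet decoder before being dumped into a FiberCache. A minimal sketch of allocating and filling such a vector, assuming Spark 2.3+ on the classpath and with made-up values in place of the decoder:

import org.apache.spark.sql.execution.vectorized.OnHeapColumnVector
import org.apache.spark.sql.types.IntegerType

object ColumnVectorDemo extends App {
  val rowCount = 4                                  // real code: row count of the row group
  val column = new OnHeapColumnVector(rowCount, IntegerType)

  // The real loader decodes Parquet pages into the vector; here we just put values.
  (0 until rowCount).foreach(i => column.putInt(i, i * 10))

  assert(column.getInt(2) == 20)
  column.close()                                    // frees the on-heap buffers
}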

Role two of SPARK_ROW_REQUESTED_SCHEMA: obtaining the ColumnDescriptor (which, per the Parquet library, "describes a column's type as well as its position in its containing schema")

// ParquetFiberDataLoader.scala
def loadSingleColumn: FiberCache = {
    val footer = reader.getFooter
    val fileSchema = footer.getFileMetaData.getSchema
    val fileMetadata = footer.getFileMetaData.getKeyValueMetaData
    val readContext = new ParquetReadSupportWrapper()
      .init(new InitContext(configuration, Collections3.toSetMultiMap(fileMetadata), fileSchema))
    val requestedSchema = readContext.getRequestedSchema
    ...
    val columnDescriptor = requestedSchema.getColumns.get(0)
    val originalType = requestedSchema.asGroupType.getFields.get(0).getOriginalType
    ...
}

// ParquetReadSupportWrapper.scala
/**
   * Proxy ParquetReadSupport#init method.
   */
  override def init(context: InitContext): ReadContext = {
    readSupport.init(context)
  }

Here SPARK_ROW_REQUESTED_SCHEMA is read once more, and from it parquetRequestedSchema is derived.

// ParquetReadSupport.scala  (Spark engine side)
/**
   * Called on executor side before [[prepareForRead()]] and instantiating actual Parquet record
   * readers.  Responsible for figuring out Parquet requested schema used for column pruning.
   */
  override def init(context: InitContext): ReadContext = {
    catalystRequestedSchema = {
      val conf = context.getConfiguration
      val schemaString = conf.get(ParquetReadSupport.SPARK_ROW_REQUESTED_SCHEMA)
      assert(schemaString != null, "Parquet requested schema not set.")
      StructType.fromString(schemaString)
    }

    val parquetRequestedSchema =
      ParquetReadSupport.clipParquetSchema(context.getFileSchema, catalystRequestedSchema)

    new ReadContext(parquetRequestedSchema, Map.empty[String, String].asJava)
  }
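
clipParquetSchema prunes the Parquet file schema down to the columns named in the Catalyst requested schema, which is what limits I/O to the requested column chunks. Spark's real implementation also handles nested fields, case sensitivity and missing columns; the sketch below shows only the top-level idea, built with parquet-mr's public schema classes:

import org.apache.parquet.schema.{MessageType, MessageTypeParser}
import org.apache.spark.sql.types.{IntegerType, StructType}
import scala.collection.JavaConverters._

object ClipSchemaDemo extends App {
  val fileSchema = MessageTypeParser.parseMessageType(
    """message spark_schema {
      |  required int32 id;
      |  optional binary name (UTF8);
      |  optional int64 ts;
      |}""".stripMargin)

  val catalystRequestedSchema = new StructType().add("id", IntegerType)

  // Simplified clipping: keep only the top-level Parquet fields that the
  // Catalyst schema asks for (no nesting or case-insensitivity handling).
  val clippedFields = catalystRequestedSchema.fieldNames
    .filter(fileSchema.containsField)
    .map(name => fileSchema.getType(name))

  val parquetRequestedSchema = new MessageType("spark_schema", clippedFields.toList.asJava)
  assert(parquetRequestedSchema.getFieldCount == 1)
  println(parquetRequestedSchema)
}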

Index cache section

// BTreeIndexRecordReader.scala
protected def getBTreeFiberCache(
      offset: Long, length: Int, sectionId: Int, idx: Int): FiberCache = {

    // readFunc loads the fiber data when the cache pool does not already hold it
    val readFunc =
      () => OapRuntime.getOrCreate.memoryManager.toIndexFiberCache(readData(offset, length))
    val fiber = BTreeFiberId(readFunc, fileReader.getName, sectionId, idx)
    OapRuntime.getOrCreate.fiberCacheManager.get(fiber)
  }
// OapCache.scala
protected def cache(fiber: FiberId): FiberCache = {
    val cache = fiber match {
      case DataFiberId(file, columnIndex, rowGroupId) => file.cache(rowGroupId, columnIndex)
      case BTreeFiberId(getFiberData, _, _, _) => getFiberData.apply()
      case BitmapFiberId(getFiberData, _, _, _) => getFiberData.apply()
      case TestFiberId(getFiberData, _) => getFiberData.apply()
      case _ => throw new OapException("Unexpected FiberId type!")
    }
    cache.fiberId = fiber
    cache
  }
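
Note that the loader is carried by the key itself: a data fiber knows which (rowGroupId, columnIndex) to decode from its data file, while index fibers (BTree/Bitmap) carry a ready-made () => FiberCache thunk built in getBTreeFiberCache above. A compact sketch of that dispatch with stand-in types (not the real FiberId/FiberCache classes):

object FiberDispatchSketch {
  sealed trait Fiber
  case class DataFiber(cacheColumn: (Int, Int) => Array[Byte],
                       rowGroupId: Int, columnIndex: Int) extends Fiber
  case class BTreeFiber(getFiberData: () => Array[Byte]) extends Fiber

  // Mirrors OapCache.cache: a data fiber decodes one (rowGroup, column) pair from
  // its data file, an index fiber simply runs the loader thunk captured in the key.
  def loadOnMiss(fiber: Fiber): Array[Byte] = fiber match {
    case DataFiber(cacheColumn, rowGroupId, columnIndex) => cacheColumn(rowGroupId, columnIndex)
    case BTreeFiber(getFiberData)                        => getFiberData()
  }
}
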
// OapCache.scala
// GuavaOapCache
private val cacheInstance = CacheBuilder.newBuilder()
    .recordStats()
    .removalListener(removalListener)
    .maximumWeight(MAX_WEIGHT)
    .weigher(weigher)
    .concurrencyLevel(CONCURRENCY_LEVEL)
    .build[FiberId, FiberCache](new CacheLoader[FiberId, FiberCache] {
      override def load(key: FiberId): FiberCache = {
        val startLoadingTime = System.currentTimeMillis()
        val fiberCache = cache(key)
        // the mapping between FiberId and FiberCache is established here
        incFiberCountAndSize(key, 1, fiberCache.size())
        logDebug(
          "Load missed fiber took %s. Fiber: %s".format(Utils.getUsedTimeMs(startLoadingTime), key))
        _cacheSize.addAndGet(fiberCache.size())
        fiberCache
      }
    })
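
GuavaOapCache is essentially a Guava LoadingCache: the weigher makes eviction proportional to fiber size rather than entry count, and the CacheLoader calls back into cache(fiber) on a miss. A self-contained sketch of the same wiring, with String keys and byte arrays standing in for FiberId and FiberCache:

import com.google.common.cache.{CacheBuilder, CacheLoader, RemovalListener, RemovalNotification, Weigher}

object GuavaCacheDemo extends App {
  val maxWeightBytes = 1024L * 1024L               // stand-in for MAX_WEIGHT

  val removalListener = new RemovalListener[String, Array[Byte]] {
    override def onRemoval(n: RemovalNotification[String, Array[Byte]]): Unit =
      println(s"evicted ${n.getKey} (${n.getValue.length} bytes, cause=${n.getCause})")
  }

  val weigher = new Weigher[String, Array[Byte]] {
    override def weigh(key: String, value: Array[Byte]): Int = value.length
  }

  val cacheInstance = CacheBuilder.newBuilder()
    .recordStats()
    .removalListener(removalListener)
    .maximumWeight(maxWeightBytes)                 // evict by total weight, not entry count
    .weigher(weigher)
    .concurrencyLevel(4)
    .build[String, Array[Byte]](new CacheLoader[String, Array[Byte]] {
      // Runs on a cache miss, like GuavaOapCache calling cache(key).
      override def load(key: String): Array[Byte] = {
        println(s"loading fiber $key")
        new Array[Byte](256)
      }
    })

  cacheInstance.get("file#group0#col1")            // miss: the loader runs
  cacheInstance.get("file#group0#col1")            // hit: served from the cache
  println(cacheInstance.stats())
}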
