Spark SQL下Parquet的数据切分和压缩内幕详解
1、Spark SQL下的Parquet数据切分
2、Spark SQL下的Parquet数据压缩
parquet.block.size（行组 Row Group 的大小）指的是压缩后写入文件的大小，而不是内存中未压缩数据的大小。
private static final Log LOG = Log.getLog(ParquetOutputFormat.class);
public static final String BLOCK_SIZE = “parquet.block.size”;
public static final String PAGE_SIZE = “parquet.page.size”;
public static final String COMPRESSION = “parquet.compression”;
public static final String WRITE_SUPPORT_CLASS = “parquet.write.support.class”;
public static final String DICTIONARY_PAGE_SIZE = “parquet.dictionary.page.size”;
public static final String ENABLE_DICTIONARY = “parquet.enable.dictionary”;
public static final String VALIDATION = “parquet.validation”;
public static final String WRITER_VERSION = “parquet.writer.version”;
public static final String ENABLE_JOB_SUMMARY = “parquet.enable.summary-metadata”;
public static final String MEMORY_POOL_RATIO = “parquet.memory.pool.ratio”;
public static final String MIN_MEMORY_ALLOCATION = “parquet.memory.min.chunk.size”;