创建ResultPartition、ResultSubpartition的相关源码
/**
 * Creates a {@link ResultPartition} (and its {@link ResultSubpartition}s) of the requested
 * {@code type}.
 *
 * <p>Dispatch by partition type:
 * <ul>
 *   <li>PIPELINED / PIPELINED_BOUNDED / PIPELINED_APPROXIMATE — a
 *       {@link PipelinedResultPartition} whose subpartitions are created eagerly here.</li>
 *   <li>BLOCKING / BLOCKING_PERSISTENT — a {@link SortMergeResultPartition} when the
 *       subpartition count reaches {@code sortShuffleMinParallelism}, otherwise a
 *       {@link BoundedBlockingResultPartition} whose subpartitions are initialized by
 *       {@code initializeBoundedBlockingPartitions}.</li>
 *   <li>HYBRID_FULL / HYBRID_SELECTIVE — an {@link HsResultPartition} configured with the
 *       matching spilling strategy.</li>
 * </ul>
 *
 * @param taskNameWithSubtaskAndId human-readable owner name, used for logging/debugging
 * @param partitionIndex index of this partition within the producing task
 * @param id unique ID of the result partition
 * @param type the partition type that selects the concrete implementation
 * @param numberOfSubpartitions number of consumable subpartitions to create
 * @param maxParallelism maximum parallelism of the consuming job vertex
 * @param bufferPoolFactory lazy factory for the partition's local buffer pool; may throw
 *     {@link IOException} when invoked
 * @return the newly created, not-yet-set-up result partition
 * @throws IllegalArgumentException if {@code type} is not one of the handled partition types
 */
public ResultPartition create(
        String taskNameWithSubtaskAndId,
        int partitionIndex,
        ResultPartitionID id,
        ResultPartitionType type,
        int numberOfSubpartitions,
        int maxParallelism,
        SupplierWithException<BufferPool, IOException> bufferPoolFactory) {

    // Compression is only applied when both the partition type supports it and it is
    // enabled in the configuration; otherwise buffers are passed through uncompressed.
    BufferCompressor bufferCompressor = null;
    if (type.supportCompression() && batchShuffleCompressionEnabled) {
        bufferCompressor = new BufferCompressor(networkBufferSize, compressionCodec);
    }

    ResultSubpartition[] subpartitions = new ResultSubpartition[numberOfSubpartitions];

    final ResultPartition partition;
    if (type == ResultPartitionType.PIPELINED
            || type == ResultPartitionType.PIPELINED_BOUNDED
            || type == ResultPartitionType.PIPELINED_APPROXIMATE) {
        final PipelinedResultPartition pipelinedPartition =
                new PipelinedResultPartition(
                        taskNameWithSubtaskAndId,
                        partitionIndex,
                        id,
                        type,
                        subpartitions,
                        maxParallelism,
                        partitionManager,
                        bufferCompressor,
                        bufferPoolFactory);

        // The subpartition array passed to the partition above is filled in afterwards;
        // each subpartition needs a back-reference to its parent partition.
        for (int i = 0; i < subpartitions.length; i++) {
            if (type == ResultPartitionType.PIPELINED_APPROXIMATE) {
                subpartitions[i] =
                        new PipelinedApproximateSubpartition(
                                i, configuredNetworkBuffersPerChannel, pipelinedPartition);
            } else {
                subpartitions[i] =
                        new PipelinedSubpartition(
                                i, configuredNetworkBuffersPerChannel, pipelinedPartition);
            }
        }

        partition = pipelinedPartition;
    } else if (type == ResultPartitionType.BLOCKING
            || type == ResultPartitionType.BLOCKING_PERSISTENT) {
        if (numberOfSubpartitions >= sortShuffleMinParallelism) {
            // High parallelism: sort-merge shuffle writes sorted data to a single file
            // instead of one file per subpartition.
            partition =
                    new SortMergeResultPartition(
                            taskNameWithSubtaskAndId,
                            partitionIndex,
                            id,
                            type,
                            subpartitions.length,
                            maxParallelism,
                            batchShuffleReadBufferPool,
                            batchShuffleReadIOExecutor,
                            partitionManager,
                            channelManager.createChannel().getPath(),
                            bufferCompressor,
                            bufferPoolFactory);
        } else {
            final BoundedBlockingResultPartition blockingPartition =
                    new BoundedBlockingResultPartition(
                            taskNameWithSubtaskAndId,
                            partitionIndex,
                            id,
                            type,
                            subpartitions,
                            maxParallelism,
                            partitionManager,
                            bufferCompressor,
                            bufferPoolFactory);

            // Fills the subpartition array with bounded-blocking subpartitions backed by
            // files/memory-mapped regions, depending on blockingSubpartitionType.
            initializeBoundedBlockingPartitions(
                    subpartitions,
                    blockingPartition,
                    blockingSubpartitionType,
                    networkBufferSize,
                    channelManager,
                    sslEnabled);

            partition = blockingPartition;
        }
    } else if (type == ResultPartitionType.HYBRID_FULL
            || type == ResultPartitionType.HYBRID_SELECTIVE) {
        partition =
                new HsResultPartition(
                        taskNameWithSubtaskAndId,
                        partitionIndex,
                        id,
                        type,
                        subpartitions.length,
                        maxParallelism,
                        batchShuffleReadBufferPool,
                        batchShuffleReadIOExecutor,
                        partitionManager,
                        channelManager.createChannel().getPath(),
                        networkBufferSize,
                        HybridShuffleConfiguration.builder(
                                        numberOfSubpartitions,
                                        batchShuffleReadBufferPool.getNumBuffersPerRequest())
                                .setSpillingStrategyType(
                                        type == ResultPartitionType.HYBRID_FULL
                                                ? HybridShuffleConfiguration
                                                        .SpillingStrategyType.FULL
                                                : HybridShuffleConfiguration
                                                        .SpillingStrategyType.SELECTIVE)
                                .build(),
                        bufferCompressor,
                        bufferPoolFactory);
    } else {
        // Without this branch the final local 'partition' is not definitely assigned on
        // the fall-through path, which is a compile error (JLS §16); failing fast here
        // also surfaces unsupported partition types immediately.
        throw new IllegalArgumentException("Unrecognized ResultPartitionType: " + type);
    }

    return partition;
}
自我介绍一下,小编13年上海交大毕业,曾经在小公司待过,也去过华为、OPPO等大厂,18年进入阿里一直到现在。
深知大多数大数据工程师,想要提升技能,往往是自己摸索成长或者是报班学习,但对于培训机构动则几千的学费,着实压力不小。自己不成体系的自学效果低效又漫长,而且极易碰到天花板技术停滞不前!
因此收集整理了一份《2024年大数据全套学习资料》,初衷也很简单,就是希望能够帮助到想自学提升又不知道该从何学起的朋友。
既有适合小白学习的零基础资料,也有适合3年以上经验的小伙伴深入学习提升的进阶课程,基本涵盖了95%以上大数据开发知识点,真正体系化!
由于文件比较大,这里只是将部分目录大纲截图出来,每个节点里面都包含大厂面经、学习笔记、源码讲义、实战项目、讲解视频,并且后续会持续更新
如果你觉得这些内容对你有帮助,可以添加VX:vip204888 (备注大数据获取)
每个节点里面都包含大厂面经、学习笔记、源码讲义、实战项目、讲解视频,并且后续会持续更新。
[外链图片转存中…(img-WRRUfLHH-1712517157957)]