MapTask切片逻辑
切片逻辑在大数据面试过程中经常会被问及,因此应该明白底层逻辑以及能够用简洁的语言口述出来。所以源码理解主要从四个方面分析:
1.输入路径中存在目录如何处理?
如果输入目录下存在子目录,默认情况下不忽略,但是尽量不要让输入目录下存在子目录。如果不可控制,可以通过设置 INPUT_DIR_NONRECURSIVE_IGNORE_SUBDIRS 配置参数对子目录进行忽略处理。
2.如何判断当前文件是否可被切分?
当前文件是否可被切分主要是针对压缩类型文件进行判断,如果遇到不可切分的压缩文件的话只能规划成一个切片。
3.如何确定切片大小?
计算切片大小,默认切片大小等于块大小,如果想往大调整切片修改minSize,如果想往小调整切片修改maxSize。
4.如何判断剩余大小是否要继续切分?
循环进行切片,但是要满足 bytesRemaining / splitSize > 1.1
目的:为了尽量避免小文件出现,让计算资源充分利用。
5.源码分析如下:
/**
 * Plans the logical input splits for the job: one pass over every input
 * file, carving each splittable file into roughly block-sized chunks.
 *
 * @param job the job context whose configuration and input paths are read
 * @return the list of {@code InputSplit}s that determines the number of MapTasks
 * @throws IOException if listing the input files or their block locations fails
 */
public List<InputSplit> getSplits(JobContext job) throws IOException {
StopWatch sw = new StopWatch().start();
// minSize defaults to 1; change it via the
// mapreduce.input.fileinputformat.split.minsize property
long minSize = Math.max(getFormatMinSplitSize(), getMinSplitSize(job));
// maxSize defaults to Long.MAX_VALUE; change it via the
// mapreduce.input.fileinputformat.split.maxsize property
long maxSize = getMaxSplitSize(job);
// generate splits
List<InputSplit> splits = new ArrayList<InputSplit>();
// Fetch the status (metadata) of every file under the input paths
List<FileStatus> files = listStatus(job);
// ignoreDirs defaults to false; it can be enabled via the
// INPUT_DIR_NONRECURSIVE_IGNORE_SUBDIRS configuration key
boolean ignoreDirs = !getInputDirRecursive(job)
&& job.getConfiguration().getBoolean(INPUT_DIR_NONRECURSIVE_IGNORE_SUBDIRS, false);
// Iterate over the input files: splits are planned per individual file
for (FileStatus file: files) {
// 1. Subdirectories under an input path are NOT skipped by default; avoid
// nesting them where possible, or enable INPUT_DIR_NONRECURSIVE_IGNORE_SUBDIRS
// to have them ignored here
if (ignoreDirs && file.isDirectory()) {
continue;
}
Path path = file.getPath();
// Size in bytes of the file currently being processed
long length = file.getLen();
// Empty files skip the split loop entirely (handled in the else below)
if (length != 0) {
BlockLocation[] blkLocations;
// Reuse block locations already cached on the status object; otherwise
// ask the backing file system (local or HDFS) for them
if (file instanceof LocatedFileStatus) {
blkLocations = ((LocatedFileStatus) file).getBlockLocations();
} else {
FileSystem fs = path.getFileSystem(job.getConfiguration());
blkLocations = fs.getFileBlockLocations(file, 0, length);
}
// 2. Splittability check, mainly relevant for compressed files: a
// non-splittable compressed file can only become a single split
if (isSplitable(job, path)) {
// Block size of the current file
long blockSize = file.getBlockSize();
// 3. Compute the split size; by default splitSize == blockSize.
// Raise minSize to make splits larger,
// lower maxSize to make splits smaller
long splitSize = computeSplitSize(blockSize, minSize, maxSize);
// Bytes of this file not yet assigned to a split
long bytesRemaining = length;
// 4. Keep carving off full-size splits while
// bytesRemaining / splitSize > 1.1 (SPLIT_SLOP).
// Purpose: avoid producing a tiny trailing split, so that
// compute resources are used efficiently
while (((double) bytesRemaining)/splitSize > SPLIT_SLOP) {
int blkIndex = getBlockIndex(blkLocations, length-bytesRemaining);
splits.add(makeSplit(path, length-bytesRemaining, splitSize,
blkLocations[blkIndex].getHosts(),
blkLocations[blkIndex].getCachedHosts()));
bytesRemaining -= splitSize;
}
// The leftover (at most 1.1 * splitSize bytes) becomes the final split
if (bytesRemaining != 0) {
int blkIndex = getBlockIndex(blkLocations, length-bytesRemaining);
splits.add(makeSplit(path, length-bytesRemaining, bytesRemaining,
blkLocations[blkIndex].getHosts(),
blkLocations[blkIndex].getCachedHosts()));
}
} else { // not splitable
if (LOG.isDebugEnabled()) {
// Log only if the file is big enough to be splitted
if (length > Math.min(file.getBlockSize(), minSize)) {
LOG.debug("File is not splittable so no parallelization "
+ "is possible: " + file.getPath());
}
}
// Non-splittable file: exactly one split covering the whole file
splits.add(makeSplit(path, 0, length, blkLocations[0].getHosts(),
blkLocations[0].getCachedHosts()));
}
} else {
//Create empty hosts array for zero length files
splits.add(makeSplit(path, 0, length, new String[0]));
}
}
// Save the number of input files for metrics/loadgen
job.getConfiguration().setLong(NUM_INPUT_FILES, files.size());
sw.stop();
if (LOG.isDebugEnabled()) {
LOG.debug("Total # of splits generated by getSplits: " + splits.size()
+ ", TimeTaken: " + sw.now(TimeUnit.MILLISECONDS));
}
return splits;
}
【未优化代码】
public List<InputSplit> getSplits(JobContext job) throws IOException {
StopWatch sw = (new StopWatch()).start();
long minSize = Math.max(this.getFormatMinSplitSize(), getMinSplitSize(job));
long maxSize = getMaxSplitSize(job);
List<InputSplit> splits = new ArrayList();
List<FileStatus> files = this.listStatus(job);
boolean ignoreDirs = !getInputDirRecursive(job) && job.getConfiguration().getBoolean("mapreduce.input.fileinputformat.input.dir.nonrecursive.ignore.subdirs", false);
Iterator var10 = files.iterator();
while(true) {
while(true) {
while(true) {
FileStatus file;
do {
if (!var10.hasNext()) {
job.getConfiguration().setLong("mapreduce.input.fileinputformat.numinputfiles", (long)files.size());
sw.stop();
if (LOG.isDebugEnabled()) {
LOG.debug("Total # of splits generated by getSplits: " + splits.size() + ", TimeTaken: " + sw.now(TimeUnit.MILLISECONDS));
}
return splits;
}
file = (FileStatus)var10.next();
} while(ignoreDirs && file.isDirectory());
Path path = file.getPath();
long length = file.getLen();
if (length != 0L) {
BlockLocation[] blkLocations;
if (file instanceof LocatedFileStatus) {
blkLocations = ((LocatedFileStatus)file).getBlockLocations();
} else {
FileSystem fs = path.getFileSystem(job.getConfiguration());
blkLocations = fs.getFileBlockLocations(file, 0L, length);
}
if (this.isSplitable(job, path)) {
long blockSize = file.getBlockSize();
long splitSize = this.computeSplitSize(blockSize, minSize, maxSize);
long bytesRemaining;
int blkIndex;
for(bytesRemaining = length; (double)bytesRemaining / (double)splitSize > 1.1; bytesRemaining -= splitSize) {
blkIndex = this.getBlockIndex(blkLocations, length - bytesRemaining);
splits.add(this.makeSplit(path, length - bytesRemaining, splitSize, blkLocations[blkIndex].getHosts(), blkLocations[blkIndex].getCachedHosts()));
}
if (bytesRemaining != 0L) {
blkIndex = this.getBlockIndex(blkLocations, length - bytesRemaining);
splits.add(this.makeSplit(path, length - bytesRemaining, bytesRemaining, blkLocations[blkIndex].getHosts(), blkLocations[blkIndex].getCachedHosts()));
}
} else {
if (LOG.isDebugEnabled() && length > Math.min(file.getBlockSize(), minSize)) {
LOG.debug("File is not splittable so no parallelization is possible: " + file.getPath());
}
splits.add(this.makeSplit(path, 0L, length, blkLocations[0].getHosts(), blkLocations[0].getCachedHosts()));
}
} else {
splits.add(this.makeSplit(path, 0L, length, new String[0]));
}
}
}
}
}