1. First, a quick orientation: in what form does InputFormat load a file into memory?
Hadoop ships with many built-in InputFormat implementations, for example the abstract base class FileInputFormat, DBInputFormat, and so on.
In practice, the formats you use most often are subclasses of FileInputFormat:
TextInputFormat: key = byte offset of the current line, value = the line contents; reader: LineRecordReader; Mapper input types: <LongWritable, Text>
KeyValueTextInputFormat: key = the text before the first tab, value = the text after it; reader: KeyValueLineRecordReader; Mapper input types: <Text, Text>
SequenceFileInputFormat: key = user-defined, value = user-defined; reader: SequenceFileRecordReader; Mapper input types: whatever was defined when the sequence file was written
A configuration sketch for choosing a format follows.
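Selecting one of these formats in the old (org.apache.hadoop.mapred) API is a single JobConf call. A minimal sketch, with an illustrative class name; the separator property shown for KeyValueTextInputFormat is the Hadoop 1.x configuration key, and only needs to be set if you want something other than the tab default:

import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.KeyValueTextInputFormat;
import org.apache.hadoop.mapred.TextInputFormat;

public class InputFormatSetup {
  public static void main(String[] args) {
    JobConf conf = new JobConf(InputFormatSetup.class);
    // Default case: one record per line, key = byte offset, value = the line
    conf.setInputFormat(TextInputFormat.class);
    // Alternative: split each line at the first tab into a <Text, Text> pair
    // conf.setInputFormat(KeyValueTextInputFormat.class);
    // conf.set("key.value.separator.in.input.line", "\t"); // tab is the default
  }
}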
public interface InputFormat<K, V> {
  InputSplit[] getSplits(JobConf job, int numSplits) throws IOException;

  RecordReader<K, V> getRecordReader(InputSplit split,
                                     JobConf job,
                                     Reporter reporter) throws IOException;
}
InputFormat is an interface with two jobs: produce the input splits, and return a RecordReader through which the Mapper reads records.
The JobTracker uses the length of the InputSplit[] array to decide how many Mappers to launch, and assigns each InputSplit to one of them.
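Worth stressing: the numSplits argument that getSplits receives is only a hint. In the old API it comes from JobConf.setNumMapTasks; the actual map-task count equals the length of the InputSplit[] that getSplits returns. A minimal sketch (the class name is illustrative):

import org.apache.hadoop.mapred.JobConf;

public class MapCountHint {
  public static void main(String[] args) {
    JobConf conf = new JobConf(MapCountHint.class);
    conf.setNumMapTasks(10); // forwarded to getSplits as the numSplits hint
    // The JobTracker still launches one map task per returned InputSplit,
    // so the real count can differ from 10.
  }
}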
Next, let's walk through the built-in subclasses.
FileInputFormat does not implement getRecordReader (it leaves that method abstract); it only implements getSplits, which carves the input files into FileSplits.
public abstract class FileInputFormat<K, V> implements InputFormat<K, V> {

  public abstract RecordReader<K, V> getRecordReader(InputSplit split,
                                                     JobConf job,
                                                     Reporter reporter)
    throws IOException;

  public InputSplit[] getSplits(JobConf job, int numSplits)
    throws IOException {
    FileStatus[] files = listStatus(job);

    // Save the number of input files in the job-conf
    job.setLong(NUM_INPUT_FILES, files.length);
    long totalSize = 0;                   // compute total size
    for (FileStatus file: files) {        // check we have valid files
      if (file.isDir()) {
        throw new IOException("Not a file: " + file.getPath());
      }
      totalSize += file.getLen();
    }

    long goalSize = totalSize / (numSplits == 0 ? 1 : numSplits);
    long minSize = Math.max(job.getLong("mapred.min.split.size", 1),
                            minSplitSize);

    // generate splits
    ArrayList<FileSplit> splits = new ArrayList<FileSplit>(numSplits);
    NetworkTopology clusterMap = new NetworkTopology();
    for (FileStatus file: files) {
      Path path = file.getPath();
      FileSystem fs = path.getFileSystem(job);
      long length = file.getLen();
      BlockLocation[] blkLocations = fs.getFileBlockLocations(file, 0, length);
      if ((length != 0) && isSplitable(fs, path)) {
        long blockSize = file.getBlockSize();
        long splitSize = computeSplitSize(goalSize, minSize, blockSize);

        long bytesRemaining = length;
        while (((double) bytesRemaining) / splitSize > SPLIT_SLOP) {
          String[] splitHosts = getSplitHosts(blkLocations,
              length - bytesRemaining, splitSize, clusterMap);
          splits.add(new FileSplit(path, length - bytesRemaining, splitSize,
                                   splitHosts));
          bytesRemaining -= splitSize;
        }

        if (bytesRemaining != 0) {
          splits.add(new FileSplit(path, length - bytesRemaining, bytesRemaining,
                     blkLocations[blkLocations.length - 1].getHosts()));
        }
      } else if (length != 0) {
        String[] splitHosts = getSplitHosts(blkLocations, 0, length, clusterMap);
        splits.add(new FileSplit(path, 0, length, splitHosts));
      } else {
        // Create empty hosts array for zero length files
        splits.add(new FileSplit(path, 0, length, new String[0]));
      }
    }
    LOG.debug("Total # of splits: " + splits.size());
    return splits.toArray(new FileSplit[splits.size()]);
  }
}
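Two details of getSplits deserve a closer look. computeSplitSize simply returns max(minSize, min(goalSize, blockSize)), and SPLIT_SLOP is 1.1, so a final remainder up to 10% larger than splitSize is kept as a single split rather than split again. The following stand-alone sketch walks the arithmetic with assumed numbers (one 1 GB input file, a hint of 4 splits, 64 MB HDFS blocks):

public class SplitSizeDemo {
  // Same formula as FileInputFormat.computeSplitSize
  static long computeSplitSize(long goalSize, long minSize, long blockSize) {
    return Math.max(minSize, Math.min(goalSize, blockSize));
  }

  public static void main(String[] args) {
    long totalSize = 1024L * 1024 * 1024;   // assumed: one 1 GB input file
    int numSplits = 4;                      // the hint from the job
    long goalSize = totalSize / numSplits;  // 256 MB
    long minSize = 1;                       // mapred.min.split.size default
    long blockSize = 64L * 1024 * 1024;     // assumed HDFS block size
    long splitSize = computeSplitSize(goalSize, minSize, blockSize);
    // splitSize = max(1, min(256 MB, 64 MB)) = 64 MB -> 16 splits, not 4
    System.out.println("splitSize = " + splitSize);
  }
}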
The concrete subclass TextInputFormat returns a LineRecordReader, the framework's default RecordReader:
public class TextInputFormat extends FileInputFormat<LongWritable, Text>
  implements JobConfigurable {

  public RecordReader<LongWritable, Text> getRecordReader(
                                          InputSplit genericSplit, JobConf job,
                                          Reporter reporter)
    throws IOException {
    reporter.setStatus(genericSplit.toString());
    return new LineRecordReader(job, (FileSplit) genericSplit);
  }
}
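Since TextInputFormat fixes the Mapper's input types to <LongWritable, Text>, any Mapper fed by it must declare exactly those types. A minimal old-API mapper sketch; the class name and the output types are illustrative:

import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

// Input types must match what TextInputFormat produces: <LongWritable, Text>
public class LineLengthMapper extends MapReduceBase
    implements Mapper<LongWritable, Text, Text, IntWritable> {
  public void map(LongWritable offset, Text line,
                  OutputCollector<Text, IntWritable> out, Reporter reporter)
      throws IOException {
    // offset = byte position of the line in the file, line = its contents
    out.collect(line, new IntWritable(line.getLength()));
  }
}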
The data split, InputSplit, is itself an interface:
public interface InputSplit extends Writable {
  long getLength() throws IOException;
  String[] getLocations() throws IOException;
}
public class FileSplit extends org.apache.hadoop.mapreduce.InputSplit
                       implements InputSplit {
  // Excerpt: the length and hosts fields are set by the constructor (omitted here)
  public long getLength() { return length; }

  public String[] getLocations() throws IOException {
    // Host names are locality hints only; an empty array means "no preference"
    if (this.hosts == null) {
      return new String[]{};
    } else {
      return this.hosts;
    }
  }
}
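To see the InputSplit contract in action, you can call getSplits directly and inspect each split's length and locality hints. A sketch under the assumption that args[0] names an existing input path; calling the format outside a running job like this is purely for illustration:

import java.util.Arrays;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.TextInputFormat;

public class SplitInspector {
  public static void main(String[] args) throws Exception {
    JobConf conf = new JobConf(SplitInspector.class);
    FileInputFormat.setInputPaths(conf, new Path(args[0])); // assumed input path
    TextInputFormat format = new TextInputFormat();
    format.configure(conf);                          // it is JobConfigurable
    InputSplit[] splits = format.getSplits(conf, 4); // 4 is only the hint
    for (InputSplit split : splits) {
      System.out.println(split.getLength() + " bytes on "
          + Arrays.toString(split.getLocations()));
    }
  }
}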