1. Map task processing
1.1 Read the input files and parse them into key/value pairs: each line of an input file becomes one pair, and the map function is called once per pair. With TextInputFormat the key is the line's starting byte offset (a LongWritable) and the value is the line's content (a Text), so a first line "hello world" reaches the mapper as <0, "hello world">. The driver (wcjob here) selects this input format explicitly:
wcjob.setInputFormatClass(TextInputFormat.class);
The InputFormat abstract class defines two methods that together implement the input side of a MapReduce job:
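For orientation, here are the two abstract methods side by side, paraphrased from the org.apache.hadoop.mapreduce.InputFormat class of the new API; the next two subsections walk through each one in turn.

public abstract class InputFormat<K, V> {
  // 1.1.1: carve the input into InputSplits, one per map task
  public abstract List<InputSplit> getSplits(JobContext context)
      throws IOException, InterruptedException;
  // 1.1.2: build the RecordReader that turns one split into <k, v> records
  public abstract RecordReader<K, V> createRecordReader(InputSplit split,
      TaskAttemptContext context) throws IOException, InterruptedException;
}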
1.1.1 Split the input files into InputSplits and hand each InputSplit to a separate mapper task. An InputSplit does not hold the file data itself; it records which slice of which file to process (path, start offset, length) plus the hosts that store that slice.
public abstract List<InputSplit> getSplits(JobContext context) throws IOException, InterruptedException;
FileInputFormat implements it as follows:
public List<InputSplit> getSplits(JobContext job) throws IOException {
  Stopwatch sw = new Stopwatch().start();
  // Minimum split size: the larger of the format's floor and mapreduce.input.fileinputformat.split.minsize
  long minSize = Math.max(getFormatMinSplitSize(), getMinSplitSize(job));
  // Maximum split size: mapreduce.input.fileinputformat.split.maxsize, defaults to Long.MAX_VALUE
  long maxSize = getMaxSplitSize(job);
  List<InputSplit> splits = new ArrayList<InputSplit>();
  // List the status (path, length, block locations, ...) of every input file
  List<FileStatus> files = listStatus(job);
  for (FileStatus file: files) {
    Path path = file.getPath();
    long length = file.getLen();
    if (length != 0) {
      BlockLocation[] blkLocations;
      if (file instanceof LocatedFileStatus) {
        blkLocations = ((LocatedFileStatus) file).getBlockLocations();
      } else {
        FileSystem fs = path.getFileSystem(job.getConfiguration());
        blkLocations = fs.getFileBlockLocations(file, 0, length);
      }
      if (isSplitable(job, path)) {
        long blockSize = file.getBlockSize();
        long splitSize = computeSplitSize(blockSize, minSize, maxSize);
        long bytesRemaining = length;
        /**
         * Slice the file into splits: makeSplit(path, start offset, length, hosts...).
         * For a 500MB file with splitSize = 128MB the loop produces
         *   1. makeSplit(0,     128MB)
         *   2. makeSplit(128MB, 128MB)
         *   3. makeSplit(256MB, 128MB)
         * then exits, because the remaining 116MB is below SPLIT_SLOP (1.1)
         * times splitSize; that tail becomes a fourth split below:
         *   4. makeSplit(384MB, 116MB)
         */
        while (((double) bytesRemaining)/splitSize > SPLIT_SLOP) {
          int blkIndex = getBlockIndex(blkLocations, length-bytesRemaining);
          splits.add(makeSplit(path, length-bytesRemaining, splitSize,
                      blkLocations[blkIndex].getHosts(),
                      blkLocations[blkIndex].getCachedHosts()));
          bytesRemaining -= splitSize;
        }
        if (bytesRemaining != 0) {
          int blkIndex = getBlockIndex(blkLocations, length-bytesRemaining);
          splits.add(makeSplit(path, length-bytesRemaining, bytesRemaining,
                     blkLocations[blkIndex].getHosts(),
                     blkLocations[blkIndex].getCachedHosts()));
        }
      } else { // not splitable: the whole file becomes a single split
        splits.add(makeSplit(path, 0, length, blkLocations[0].getHosts(),
                    blkLocations[0].getCachedHosts()));
      }
    } else {
      // Create empty hosts array for zero length files
      splits.add(makeSplit(path, 0, length, new String[0]));
    }
  }
  // Save the number of input files for metrics/loadgen
  job.getConfiguration().setLong(NUM_INPUT_FILES, files.size());
  sw.stop();
  if (LOG.isDebugEnabled()) {
    LOG.debug("Total # of splits generated by getSplits: " + splits.size()
        + ", TimeTaken: " + sw.elapsedMillis());
  }
  return splits;
}
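To make the arithmetic concrete, here is a minimal standalone sketch of the computeSplitSize formula (which clamps the block size into [minSize, maxSize]) and the SPLIT_SLOP loop, replaying the 500MB example from the comment above; the class name SplitMath is just for illustration.

public class SplitMath {
  // Same formula as FileInputFormat.computeSplitSize:
  // clamp the block size into the [minSize, maxSize] range
  static long computeSplitSize(long blockSize, long minSize, long maxSize) {
    return Math.max(minSize, Math.min(maxSize, blockSize));
  }

  public static void main(String[] args) {
    final long MB = 1024L * 1024L;
    final double SPLIT_SLOP = 1.1;  // the tail may be up to 10% over splitSize
    long splitSize = computeSplitSize(128 * MB, 1L, Long.MAX_VALUE); // 128MB with defaults

    long length = 500 * MB;         // the file size from the comment above
    long bytesRemaining = length;
    while (((double) bytesRemaining) / splitSize > SPLIT_SLOP) {
      System.out.printf("split: offset=%dMB size=%dMB%n",
          (length - bytesRemaining) / MB, splitSize / MB);
      bytesRemaining -= splitSize;
    }
    if (bytesRemaining != 0) {      // the 116MB tail
      System.out.printf("split: offset=%dMB size=%dMB%n",
          (length - bytesRemaining) / MB, bytesRemaining / MB);
    }
    // Prints four splits: (0,128), (128,128), (256,128), (384,116)
  }
}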
1.1.2 Provide a RecordReader implementation that breaks the content of an InputSplit into <k, v> pairs, one record (by default, one line) at a time.
public abstract RecordReader<K,V> createRecordReader(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException;
TextInputFormat implements it by handing back a LineRecordReader:
public RecordReader<LongWritable, Text>
    createRecordReader(InputSplit split,
                       TaskAttemptContext context) {
  String delimiter = context.getConfiguration().get(
      "textinputformat.record.delimiter");
  byte[] recordDelimiterBytes = null;
  if (null != delimiter)
    recordDelimiterBytes = delimiter.getBytes(Charsets.UTF_8);
  return new LineRecordReader(recordDelimiterBytes);
}
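The record delimiter defaults to newline; a driver can override it through the textinputformat.record.delimiter property before submitting the job. A small sketch of that configuration inside a driver (the job name is arbitrary):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;

Configuration conf = new Configuration();
// Treat blank-line-separated paragraphs as records instead of single lines
conf.set("textinputformat.record.delimiter", "\n\n");
Job job = Job.getInstance(conf, "paragraph-count");
job.setInputFormatClass(TextInputFormat.class);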
public class LineRecordReader extends RecordReader<LongWritable, Text> {
  // Advance to the next line of the InputSplit, if any, and read it:
  // the key becomes the line's byte offset, the value the line's bytes.
  public boolean nextKeyValue() throws IOException {
    if (key == null) {
      key = new LongWritable();
    }
    key.set(pos);
    if (value == null) {
      value = new Text();
    }
    int newSize = 0;
    // We always read one extra line, which lies outside the upper
    // split limit i.e. (end - 1)
    while (getFilePosition() <= end || in.needAdditionalRecordAfterSplit()) {
      if (pos == 0) {
        newSize = skipUtfByteOrderMark();
      } else {
        newSize = in.readLine(value, maxLineLength, maxBytesToConsume(pos));
        pos += newSize;
      }
      if ((newSize == 0) || (newSize < maxLineLength)) {
        break;
      }
      // line too long. try again
      LOG.info("Skipped line of size " + newSize + " at pos " +
               (pos - newSize));
    }
    if (newSize == 0) {
      key = null;
      value = null;
      return false;
    } else {
      return true;
    }
  }
  // The caller must fetch the current key and value after each call to
  // nextKeyValue(); the next call reuses and overwrites the same
  // LongWritable/Text objects.
  public LongWritable getCurrentKey() {
    return key;
  }
  public Text getCurrentValue() {
    return value;
  }
}
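As a sanity check, a LineRecordReader can also be driven by hand outside a running job. A minimal sketch, assuming a local test file at file:///tmp/input.txt (placeholder path):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.input.LineRecordReader;
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;

public class ReadSplitByHand {
  public static void main(String[] args) throws IOException, InterruptedException {
    Configuration conf = new Configuration();
    Path path = new Path("file:///tmp/input.txt");  // placeholder input file
    FileSystem fs = path.getFileSystem(conf);
    long len = fs.getFileStatus(path).getLen();
    // One split covering the whole file, with no preferred hosts
    FileSplit split = new FileSplit(path, 0, len, new String[0]);
    LineRecordReader reader = new LineRecordReader();
    reader.initialize(split, new TaskAttemptContextImpl(conf, new TaskAttemptID()));
    // Same pattern as Mapper.run(): advance first, then fetch key and value
    while (reader.nextKeyValue()) {
      System.out.println(reader.getCurrentKey() + "\t" + reader.getCurrentValue());
    }
    reader.close();
  }
}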
How does the mapper side invoke these RecordReader methods?
public class Mapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT> {
  ...
  public abstract class Context
      implements MapContext<KEYIN,VALUEIN,KEYOUT,VALUEOUT> {
  }
  public void run(Context context) throws IOException, InterruptedException {
    setup(context);  // runs once, before the first record
    try {
      // Read the InputSplit record by record; each record triggers one map() call
      while (context.nextKeyValue()) {
        map(context.getCurrentKey(), context.getCurrentValue(), context);
      }
    } finally {
      // Once the whole InputSplit has been consumed, cleanup() runs exactly once
      cleanup(context);
    }
  }
  ...
}
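For reference, a concrete Mapper normally overrides only map(); the run() loop above supplies the per-record plumbing. A standard WordCount-style mapper as a sketch:

import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class WordCountMapper
    extends Mapper<LongWritable, Text, Text, IntWritable> {
  private static final IntWritable ONE = new IntWritable(1);
  private final Text word = new Text();

  @Override
  protected void map(LongWritable key, Text value, Context context)
      throws IOException, InterruptedException {
    // key = byte offset of the line, value = the line itself
    for (String token : value.toString().split("\\s+")) {
      if (!token.isEmpty()) {
        word.set(token);
        context.write(word, ONE);  // emit <word, 1>
      }
    }
  }
}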
public class MapContextImpl<KEYIN,VALUEIN,KEYOUT,VALUEOUT>
    extends TaskInputOutputContextImpl<KEYIN,VALUEIN,KEYOUT,VALUEOUT>
    implements MapContext<KEYIN, VALUEIN, KEYOUT, VALUEOUT> {
  // The RecordReader built by the InputFormat is injected via the constructor;
  // the context methods simply delegate to it.
  private RecordReader<KEYIN,VALUEIN> reader;
  public KEYIN getCurrentKey() throws IOException, InterruptedException {
    return reader.getCurrentKey();
  }
  public VALUEIN getCurrentValue() throws IOException, InterruptedException {
    return reader.getCurrentValue();
  }
  public boolean nextKeyValue() throws IOException, InterruptedException {
    return reader.nextKeyValue();
  }
}
So context.nextKeyValue() is just reader.nextKeyValue(): the Mapper's read loop is ultimately driven by the RecordReader that the InputFormat created.
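Putting the pieces together, the framework (MapTask) wires them up roughly as follows. This is a paraphrase of the flow, not the verbatim source; identifiers such as inputFormat, writer, committer, and reporter stand in for the fields MapTask actually holds:

// 1.1.2: build and initialize the reader for this task's split
RecordReader<KEYIN, VALUEIN> reader =
    inputFormat.createRecordReader(split, taskContext);
reader.initialize(split, taskContext);

// Wrap the reader in a MapContext, then in a Mapper.Context
MapContextImpl<KEYIN, VALUEIN, KEYOUT, VALUEOUT> mapContext =
    new MapContextImpl<KEYIN, VALUEIN, KEYOUT, VALUEOUT>(
        conf, taskId, reader, writer, committer, reporter, split);
Mapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT>.Context context =
    new WrappedMapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT>()
        .getMapContext(mapContext);

// run() drives context.nextKeyValue(), which delegates to reader.nextKeyValue()
mapper.run(context);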