查看Mapper源码可以发现,在每次调用map方法之前会依次执行nextKeyValue、getCurrentKey、getCurrentValue方法,它们分别代表:判断是否还有下一对Key Value pair,如果还有则继续执行map方法;获取本次执行map的Key;获取本次执行map的Value。所以如果要自定义FileInputFormat就要重写这几个方法。
以下是这次实验的项目结构目录
以下分别是TestFileInputFormat、TestRecordReader、InputFormatDriver类
package com.atguigu.mapreduce.FileInputFormatTest;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import java.io.IOException;
/**
 * Custom InputFormat that pairs each input line with the fixed value 1.
 * The actual parsing is delegated to {@link TestRecordReader}.
 */
public class TestFileInputFormat extends FileInputFormat<Text, IntWritable> {

    /**
     * Creates the RecordReader that turns each line of the split into a
     * (Text, IntWritable) pair for the Mapper.
     *
     * @param split   the input split assigned to this task
     * @param context task attempt context (provides the Configuration)
     * @return a fresh, uninitialized TestRecordReader
     */
    @Override
    public RecordReader<Text, IntWritable> createRecordReader(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException {
        return new TestRecordReader();
    }

    /**
     * TestRecordReader opens the file itself and always reads from the
     * beginning, ignoring split offsets. If the framework were allowed to
     * split a large file, every split would re-read the whole file and emit
     * duplicated records. Marking files non-splittable prevents that.
     */
    @Override
    protected boolean isSplitable(JobContext context, Path filename) {
        return false;
    }
}
经过Debug后,发现initialize方法只会执行一次,起到类似构造函数的作用;close方法也只会在Mapper阶段结束后执行一次,用于关闭所有打开的流。
package com.atguigu.mapreduce.FileInputFormatTest;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.util.LineReader;
import java.io.IOException;
/**
 * RecordReader that reads the input file line by line and emits
 * (first whitespace-separated token of the line, 1) as the key/value pair.
 *
 * NOTE(review): this reader opens the file via {@code fileSplit.getPath()}
 * and reads from the start of the file, ignoring the split's offset — it
 * assumes each file is processed as a single, non-splittable split
 * (confirm the InputFormat overrides isSplitable to return false).
 */
public class TestRecordReader extends RecordReader<Text, IntWritable> {
    private FileSplit fileSplit;
    private JobContext jobContext;
    private Text inputKey;
    private IntWritable inputValue;
    private LineReader in;
    private Text line;
    // Bytes consumed so far; lets getProgress() report real progress
    // instead of a constant 0.
    private long pos;

    /**
     * Called once before any nextKeyValue(): opens the split's file and
     * prepares the reusable key/value/line holders.
     */
    @Override
    public void initialize(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException {
        this.fileSplit = (FileSplit) split;
        this.jobContext = context;
        Configuration conf = context.getConfiguration();
        Path file = this.fileSplit.getPath();
        FileSystem fs = file.getFileSystem(conf);
        FSDataInputStream fileIn = fs.open(file);
        in = new LineReader(fileIn, conf);
        line = new Text();
        inputKey = new Text();
        // The value is always the constant 1 (word-count style).
        inputValue = new IntWritable(1);
        pos = 0L;
    }

    /**
     * Reads the next line; returns false at end of file.
     * The key is set to the first space-separated token of the line
     * (an empty line yields an empty key; the value stays 1).
     */
    @Override
    public boolean nextKeyValue() throws IOException, InterruptedException {
        int lineSize = in.readLine(line);
        // readLine returns 0 only at EOF (a blank line still counts its newline).
        if (lineSize == 0) return false;
        pos += lineSize;
        String[] pieces = line.toString().split(" ");
        inputKey.set(pieces[0]);
        return true;
    }

    /** @return the key produced by the most recent nextKeyValue() call */
    @Override
    public Text getCurrentKey() throws IOException, InterruptedException {
        return this.inputKey;
    }

    /** @return the value for the current record (always the constant 1) */
    @Override
    public IntWritable getCurrentValue() throws IOException, InterruptedException {
        return this.inputValue;
    }

    /**
     * Fraction of the split consumed so far, in [0, 1].
     * Previously hard-coded to 0, which made the framework report no
     * progress for the whole map phase.
     */
    @Override
    public float getProgress() throws IOException, InterruptedException {
        long total = fileSplit.getLength();
        if (total <= 0) return 1.0f;
        return Math.min(1.0f, pos / (float) total);
    }

    /** Called once after the Mapper finishes; releases the underlying stream. */
    @Override
    public void close() throws IOException {
        IOUtils.closeStream(in);
    }
}
和预想的一样,每次在执行map方法之前都会调用上述nextKeyValue、getCurrentKey、getCurrentValue方法获取 key value pair 的信息,在这三个重写方法中就可以实现特定的业务逻辑。
package com.atguigu.mapreduce.FileInputFormatTest;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;
/**
 * Driver that wires the custom TestFileInputFormat into a standard
 * MapReduce word-count-style job.
 */
public class InputFormatDriver {
    // Original hard-coded paths, kept as defaults so running with no
    // arguments behaves exactly as before.
    private static final String DEFAULT_INPUT = "D:\\BaiduNetdiskDownload\\11_input\\inputfileinputformat";
    private static final String DEFAULT_OUTPUT = "D:\\hadoop\\output666";

    /**
     * Submits the job and exits with 0 on success, 1 on failure.
     *
     * @param args optional: args[0] = input path, args[1] = output path;
     *             defaults to the original hard-coded Windows paths.
     */
    public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        job.setJarByClass(InputFormatDriver.class);
        job.setMapperClass(InputFormatMapper.class);
        job.setReducerClass(InputFormatReducer.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // 自定义FileInputFormat
        job.setInputFormatClass(TestFileInputFormat.class);
        // Allow paths on the command line; fall back to the legacy defaults.
        String inputPath = args.length > 0 ? args[0] : DEFAULT_INPUT;
        String outputPath = args.length > 1 ? args[1] : DEFAULT_OUTPUT;
        FileInputFormat.setInputPaths(job, new Path(inputPath));
        FileOutputFormat.setOutputPath(job, new Path(outputPath));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}