This post is not a detailed walkthrough; I have only added comments to the code based on my own understanding, to make later review easier. I may keep refining it over time.
For the full explanation and usage instructions, see the official documentation: http://hadoop.apache.org/docs/r3.0.0-alpha1/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html#Job_Input
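A quick way to exercise the code (my own example invocation, assuming the class is packaged into wc.jar and the HDFS paths already exist; adjust to your cluster):

bin/hadoop jar wc.jar WordCount3 -Dwordcount.case.sensitive=false /user/joe/wordcount/input /user/joe/wordcount/output -skip /user/joe/wordcount/patterns.txt

Here -D is a generic option consumed by GenericOptionsParser, while -skip is parsed by main() itself.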
import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.StringTokenizer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.StringUtils;
public class WordCount3 {
public static class TokenizerMapper extends Mapper<Object, Text, Text, IntWritable> {
static enum CountersEnum {
INPUT_WORDS
}
private final static IntWritable one = new IntWritable(1);
private Text word = new Text();
private boolean caseSensitive;
private Set<String> patternsToSkip = new HashSet<String>();
private Configuration conf;
@Override
public void setup(Context context) throws IOException, InterruptedException {
conf = context.getConfiguration();
caseSensitive = conf.getBoolean("wordcount.case.sensitive", true);
/*
 * The official source reads conf.getBoolean("wordcount.skip.patterns", true);
 * I changed the default to false here, so the skip logic only runs when the
 * -skip option in main() has explicitly set the property.
 */
if (conf.getBoolean("wordcount.skip.patterns", false)) {
URI[] patternsURIs = Job.getInstance(conf).getCacheFiles();
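/*
 * getCacheFiles() returns the URIs registered with job.addCacheFile() in
 * main(). Each cached file is localized into the task's working directory,
 * so its base name alone is enough to open it there.
 */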
for (URI patternsURI : patternsURIs) {
Path patternsPath = new Path(patternsURI.getPath());
String patternsFileName = patternsPath.getName();
parseSkipFile(patternsFileName);
}
}
}
/**
 * Reads the given local file and stores each line (a pattern to skip) in
 * the patternsToSkip field.
 *
 * @param fileName the file that holds the patterns to ignore
 */
private void parseSkipFile(String fileName) {
/*
 * FileReader resolves files on the local file system only; it cannot read
 * from HDFS directly. Files shipped with the -files option (or added to the
 * distributed cache) are localized into each task's working directory, so
 * opening them by name works without a manual "hdfs dfs -put".
 */
try (BufferedReader fis = new BufferedReader(new FileReader(fileName))) {
String pattern;
while ((pattern = fis.readLine()) != null) {
patternsToSkip.add(pattern);
}
} catch (IOException ioe) {
System.err.println(
"Caught exception while parsing the cached file: " + StringUtils.stringifyException(ioe));
}
}
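/*
 * A hypothetical alternative (my sketch, not part of the official example):
 * read the patterns file straight from HDFS with the FileSystem API instead
 * of relying on localized cache files. It would need two extra imports,
 * org.apache.hadoop.fs.FileSystem and java.io.InputStreamReader.
 *
 * private void parseSkipFile(URI uri) throws IOException {
 *     FileSystem fs = FileSystem.get(uri, conf);
 *     try (BufferedReader reader = new BufferedReader(
 *             new InputStreamReader(fs.open(new Path(uri.getPath()))))) {
 *         String pattern;
 *         while ((pattern = reader.readLine()) != null) {
 *             patternsToSkip.add(pattern);
 *         }
 *     }
 * }
 */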
@Override
public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
String line = (caseSensitive) ? value.toString() : value.toString().toLowerCase();
// So case-insensitivity is implemented in code, by lower-casing the whole line!
for (String pattern : patternsToSkip) {
line = line.replaceAll(pattern, "");
// Strip every occurrence of each skip pattern from the line.
}
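// Note: replaceAll() treats each pattern as a regular expression. If the
// skip file is meant to hold literal strings rather than regexes, wrapping
// each entry with java.util.regex.Pattern.quote() would avoid surprises
// from metacharacters.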
StringTokenizer itr = new StringTokenizer(line);
while (itr.hasMoreTokens()) {
word.set(itr.nextToken());
context.write(word, one);
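// Custom counters: the group name is the enum's class name and the counter
// name the enum constant. Totals are aggregated across all map tasks and
// printed in the job's final counter summary (also visible in the web UI).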
Counter counter = context.getCounter(CountersEnum.class.getName(), CountersEnum.INPUT_WORDS.toString());
counter.increment(1);
}
}
}
public static class IntSumReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
private IntWritable result = new IntWritable();
public void reduce(Text key, Iterable<IntWritable> values, Context context)
throws IOException, InterruptedException {
int sum = 0;
for (IntWritable val : values) {
sum += val.get();
}
result.set(sum);
context.write(key, result);
}
}
public static void main(String[] args) throws Exception {
Configuration conf = new Configuration();
GenericOptionsParser optionParser = new GenericOptionsParser(conf, args);
// Parses the generic options in args (e.g. -D, -files) into the Configuration.
String[] remainingArgs = optionParser.getRemainingArgs();
// Returns the arguments that are not generic options.
if (remainingArgs.length != 2 && remainingArgs.length != 4) {
// The original condition, !(remainingArgs.length != 2 || remainingArgs.length != 4),
// was always false (a length cannot be both 2 and 4 at once), so the usage
// message could never fire; && is what was intended.
System.err.println("Usage: wordcount <in> <out> [-skip skipPatternFile]");
System.exit(2);
}
Job job = Job.getInstance(conf, "word count");
job.setJarByClass(WordCount3.class);
job.setMapperClass(TokenizerMapper.class);
job.setCombinerClass(IntSumReducer.class);
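// The reducer doubles as the combiner: integer summation is associative and
// commutative, so computing partial sums on the map side stays correct.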
job.setReducerClass(IntSumReducer.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(IntWritable.class);
List<String> otherArgs = new ArrayList<String>();
for (int i = 0; i < remainingArgs.length; ++i) {
// There may be several -skip options, i.e. several files of patterns to skip
// (though the argument-count check above would need loosening to accept more
// than one pair).
if ("-skip".equals(remainingArgs[i])) {
job.addCacheFile(new Path(remainingArgs[++i]).toUri());
// Register the file in the distributed cache; it is not a MapReduce input file.
job.getConfiguration().setBoolean("wordcount.skip.patterns", true);
// Set a custom property on the Configuration; the mapper reads it back in
// setup(). The property name is our own choice.
} else {
otherArgs.add(remainingArgs[i]);
}
}
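// After the -skip pairs are filtered out, otherArgs holds exactly the input
// path and the output path, in that order.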
FileInputFormat.addInputPath(job, new Path(otherArgs.get(0)));
FileOutputFormat.setOutputPath(job, new Path(otherArgs.get(1)));
System.exit(job.waitForCompletion(true) ? 0 : 1);
}
}
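For reference, a skip-pattern file holds one regular expression per line. A minimal example, along the lines of the official tutorial's patterns.txt (punctuation must be escaped because the entries are applied with replaceAll()):

\.
\,
\!
to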