每一个map任务都可能产生大量的本地输出。Combiner的作用是先在map端对输出做一次合并,以减少map和reduce节点之间的数据传输量、提高网络IO性能,是MapReduce的优化手段之一。
下面是一个应用,数据与结果都在代码中做了示范
package first.first_maven;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
/*
* 数据文件
* 1.txt
* hadoop
* hadoop
* hello world
*
* 2.txt
* hi hello wh
*
* 结果:
* hadoop 1.txt:2
* hello 1.txt:1;2.txt:1
* world 1.txt:1
* hi 2.txt:1
* wh 2.txt:1
*
*
*
*/
public class IndexDemo {
public static class MyMapper1 extends Mapper<LongWritable, Text, Text, Text>{
/*
* hadoop_1.txt 1
* hadoop_1.txt 1
* hello_1.txt 1
* world_1.txt 1
*
*/
@Override
protected void map(LongWritable key, Text value, Context context)
throws IOException, InterruptedException {
// super.map(key, value, context);
//获取数据文件名
InputSplit is=context.getInputSplit();
String filename=((FileSplit)is).getPath().getName();
String words[]=value.toString().split(" ");
for(String word:words){
context.write(new Text(word+"_"+filename), new Text(1+""));
}
}
}
public static class MyCombiner1 extends Reducer<Text, Text, Text, Text>{
/*
* hadoop 1.txt:2
* hello 1.txt:1
* world 1.txt:1
*
*/
@Override
protected void reduce(Text key, Iterable<Text> value,Context context)
throws IOException, InterruptedException {
// super.reduce(key, value, context);
String[] strs=key.toString().split("_");
int count=0;
for(Text v:value){
count+=Integer.parseInt(v.toString());
}
context.write(new Text(strs[0]), new Text(strs[1]+":"+count));
}
}
public static class MyReducer1 extends Reducer<Text, Text, Text, Text>{
@Override
protected void reduce(Text key, Iterable<Text> value,Context context)
throws IOException, InterruptedException {
// super.reduce(key, value, context);
String str="";
for(Text v:value){
str+=v.toString()+";";
}
context.write(key, new Text(str.substring(0,str.length()-1)));
}
}
public static void main(String[] args) throws Exception {
Configuration conf=new Configuration();
Job job = Job.getInstance(conf, "myjob");
job.setJarByClass(WordCount.class);
job.setMapperClass(MyMapper1.class);
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(Text.class);
FileInputFormat.addInputPath(job,new Path(args[0]));
job.setCombinerClass(MyCombiner1.class);
job.setReducerClass(MyReducer1.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(Text.class);
FileOutputFormat.setOutputPath(job,new Path(args[1]));
int isok=job.waitForCompletion(true)?0:1;
System.exit(isok);
}
}