▶ Combiner 组合(合并)
-
意义:对每一个MapTask输出进行局部汇总,减少网络传输量
-
使用的前提是不影响最终的业务逻辑,而且Combiner输出的kv要和Reducer输入的kv对应
-
编写一个 Combiner 类,需要继承 Reducer(Combiner 本质上就是一个局部的 Reducer)
import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.Reducer; import java.io.IOException; public class WordCountCombiner extends Reducer<Text, IntWritable,Text,IntWritable> { @Override protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException { int sum = 0; for(IntWritable v : values){ sum+=v.get(); } context.write(key,new IntWritable(sum)); } }
-
编写一个 Driver 驱动类(包含 main 方法,负责配置并提交 Job)
import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; import java.io.IOException; public class WordCountOrderDriver { public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException { Job job = Job.getInstance(); job.setMapOutputKeyClass(Text.class); job.setMapOutputValueClass(IntWritable.class); job.setOutputKeyClass(Text.class); job.setOutputValueClass(IntWritable.class); job.setMapperClass(WordCountOrderMapper.class); job.setReducerClass(WordCountOrderReduce.class); FileInputFormat.setInputPaths(job,new Path("F:\\test\\hello.txt")); FileOutputFormat.setOutputPath(job,new Path("F:\\test\\helloOutput1")); //当和reduce类相同时可以替代 job.setCombinerClass(WordCountCombiner.class); job.setPartitionerClass(WordCountOrderPartition.class); job.setNumReduceTasks(2); job.waitForCompletion(true); } }