wordcount是hadoop最简单也是最经典的案例之一。假如我们要计算《You Have Only One Life》中每个单词出现的次数,其思路如下:
数据准备:
编码工作可分为三个部分:map阶段、reduce阶段以及主程序driver阶段。
map阶段:
package com.yangmin.mapreduce.wordcount;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

/**
 * Map stage of the word-count job: emits (word, 1) for every word in each input line.
 *
 * <p>Type parameters:
 * <ul>
 *   <li>KEYIN   {@link LongWritable} — byte offset of the line within the input split</li>
 *   <li>VALUEIN {@link Text}         — the line of text itself</li>
 *   <li>KEYOUT  {@link Text}         — a single word</li>
 *   <li>VALUEOUT {@link IntWritable} — the constant count 1</li>
 * </ul>
 */
public class WordCountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

    // Reused across map() calls to avoid allocating a new object per word
    // (standard Hadoop practice; the framework serializes the value on write).
    private final Text k = new Text();
    private final IntWritable v = new IntWritable(1);

    /**
     * Splits one input line into words and writes a (word, 1) pair for each.
     *
     * @param key     byte offset of the line (unused)
     * @param value   the line of text
     * @param context Hadoop context used to emit output pairs
     * @throws IOException          if the framework fails to write output
     * @throws InterruptedException if the task is interrupted
     */
    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // 1. Get the line as a Java string.
        String line = value.toString();

        // 2. Split on runs of whitespace. The original split(" ") produced empty
        //    tokens for consecutive or leading spaces, which were then counted
        //    as occurrences of the empty word "".
        String[] words = line.split("\\s+");

        // 3. Emit (word, 1) for every non-empty token. A leading delimiter still
        //    yields one empty first token from split("\\s+"), so guard explicitly.
        for (String word : words) {
            if (word.isEmpty()) {
                continue;
            }
            k.set(word);
            context.write(k, v);
        }
    }
}
reduce 阶段:
package com.yangmin.mapreduce.wordcount;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

/**
 * Reduce stage of the word-count job: sums the per-word counts produced by the mapper.
 *
 * <p>Type parameters:
 * <ul>
 *   <li>KEYIN   {@link Text}        — a word</li>
 *   <li>VALUEIN {@link IntWritable} — a partial count for that word</li>
 *   <li>KEYOUT  {@link Text}        — the word</li>
 *   <li>VALUEOUT {@link IntWritable} — the total count</li>
 * </ul>
 */
public class WordCountReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

    // Reused across reduce() calls to avoid per-key allocation; the framework
    // serializes the value when context.write() is invoked.
    private final IntWritable v = new IntWritable();

    /**
     * Sums all counts for a single word and emits (word, total).
     *
     * @param key     the word
     * @param values  all partial counts for this word
     * @param context Hadoop context used to emit the total
     * @throws IOException          if the framework fails to write output
     * @throws InterruptedException if the task is interrupted
     */
    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        // 1. Accumulate the total. Use a local variable rather than the original
        //    mutable instance field: the field required a manual reset at the top
        //    of every call and invited stale-state bugs.
        int sum = 0;
        for (IntWritable count : values) {
            sum += count.get();
        }

        // 2. Emit the final (word, total) pair.
        v.set(sum);
        context.write(key, v);
    }
}
driver阶段:
package com.yangmin.mapreduce.wordcount;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

/**
 * Driver for the word-count job: wires the mapper and reducer together,
 * configures input/output types and paths, and submits the job.
 *
 * <p>Usage: {@code WordCountDriver [inputPath outputPath]}. When no arguments
 * are given, the original hard-coded local paths are used, so existing
 * invocations keep working.
 */
public class WordCountDriver {

    public static void main(String[] args)
            throws IOException, ClassNotFoundException, InterruptedException {
        // Generalized: take paths from the command line when provided,
        // falling back to the original hard-coded defaults.
        String inputPath = args.length > 0 ? args[0] : "C:\\ZProject\\bigdata\\input\\inputword";
        String outputPath = args.length > 1 ? args[1] : "C:\\ZProject\\bigdata\\output\\output1";

        // 1. Build the job from a fresh configuration.
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);

        // 2. Locate the jar containing this driver class.
        job.setJarByClass(WordCountDriver.class);

        // 3. Wire up the mapper and reducer. The reducer is commutative and
        //    associative, so it can also serve as a combiner to cut the amount
        //    of data shuffled between map and reduce.
        job.setMapperClass(WordCountMapper.class);
        job.setCombinerClass(WordCountReducer.class);
        job.setReducerClass(WordCountReducer.class);

        // 4. Map-output key/value types.
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);

        // 5. Final (reduce) output key/value types.
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        // 6. Input and output paths. The output directory must not already exist,
        //    or Hadoop aborts the job.
        FileInputFormat.setInputPaths(job, new Path(inputPath));
        FileOutputFormat.setOutputPath(job, new Path(outputPath));

        // 7. Submit and wait; exit code 0 on success, 1 on failure.
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
结果: