Implementing WordCount with MapReduce
Contents of a.txt:
i wish to wish the wish you wish to wish,but
if you wish the wish the wish wishes,i won't
wish the wish you wish to wish
The Mapper class
package cn.kgc.kb09.mr;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

// Map phase of WordCount: turns each input line into (word, 1) pairs
public class WCMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // Convert the current line to a String
        String line = value.toString();
        // Split the line into words on spaces
        String[] words = line.split(" ");
        // Emit (word, 1) for every word in the line
        for (String word : words) {
            context.write(new Text(word), new IntWritable(1));
        }
    }
}
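To make the map step concrete, here is a minimal standalone sketch (plain Java, not part of the MapReduce job) that prints the (word, 1) pairs the Mapper would emit for the first input line. It also shows a quirk of splitting on spaces only: punctuation stays attached, so "wish,but" is counted as its own token.

public class MapSketch {
    public static void main(String[] args) {
        // First line of a.txt, used here purely for illustration
        String line = "i wish to wish the wish you wish to wish,but";
        for (String word : line.split(" ")) {
            // Each token is emitted with a count of 1, just like WCMapper does
            System.out.println("(" + word + ", 1)");
        }
    }
}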
The Reducer class
package cn.kgc.kb09.mr;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

// Reduce phase of WordCount: sums the counts for each word
public class WCReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
        // Running total for this word
        int total = 0;
        // Iterate over all counts emitted for this key
        for (IntWritable value : values) {
            total += value.get();
        }
        // Write out the word and its total count
        context.write(key, new IntWritable(total));
    }
}
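A minimal standalone sketch (plain Java, not part of the job) of what one reduce() call does: after the shuffle, all counts for a single key arrive together and are summed. The list of 1s below is a hypothetical stand-in for the shuffled values of the key "wish".

import java.util.Arrays;
import java.util.List;

public class ReduceSketch {
    public static void main(String[] args) {
        // Hypothetical shuffled counts for the key "wish"
        List<Integer> valuesForWish = Arrays.asList(1, 1, 1, 1, 1);
        int total = 0;
        for (int v : valuesForWish) {
            total += v;
        }
        // Same output shape as WCReducer: (word, total)
        System.out.println("(wish, " + total + ")");
    }
}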
The Partitioner class
package cn.kgc.kb09.mr;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Partitioner;

// Custom partitioner: distributes keys across the reduce tasks
public class WCPartitioner extends Partitioner<Text, IntWritable> {
    @Override
    public int getPartition(Text text, IntWritable intWritable, int numPartitions) {
        // Hash the key and take it modulo the number of partitions,
        // so the output is split into numPartitions files
        return Math.abs(text.hashCode()) % numPartitions;
    }
}
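A small standalone sketch (not part of the job, assuming hadoop-common is on the classpath) showing how this hash-modulo rule routes a few keys when there are four reduce tasks, as configured in the driver below.

import org.apache.hadoop.io.Text;

public class PartitionSketch {
    public static void main(String[] args) {
        int numPartitions = 4;  // matches job.setNumReduceTasks(4) in the driver
        for (String word : new String[]{"i", "wish", "to", "the"}) {
            // Same computation as WCPartitioner.getPartition
            int partition = Math.abs(new Text(word).hashCode()) % numPartitions;
            System.out.println(word + " -> partition " + partition);
        }
    }
}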
The Driver class
package cn.kgc.kb09.mr;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class WCDriver {
    public static void main(String[] args) throws Exception {
        // 1. Create the job
        Configuration cfg = new Configuration();
        Job job = Job.getInstance(cfg, "job_wc");
        job.setJarByClass(WCDriver.class);
        // 2. Set the Mapper and Reducer classes
        job.setMapperClass(WCMapper.class);
        job.setReducerClass(WCReducer.class);
        // 3. Set the Mapper output key/value types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        // 4. Set the Partitioner and the number of reduce tasks
        job.setNumReduceTasks(4);
        job.setPartitionerClass(WCPartitioner.class);
        // 5. Set the final (Reducer) output key/value types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // 6. Set the input and output paths (the output directory must not already exist)
        FileInputFormat.setInputPaths(job, new Path("file:///D:/test/a.txt"));
        FileOutputFormat.setOutputPath(job, new Path("file:///D:/test/testwc/"));
        // 7. Run the job and wait for completion
        boolean result = job.waitForCompletion(true);
        System.out.println(result ? "success" : "failure");
        System.exit(result ? 0 : 1);
    }
}
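One optional optimization, not in the original driver: because WCReducer's input and output types are both (Text, IntWritable) and summation is associative and commutative, the same class can also be registered as a combiner. This pre-aggregates (word, 1) pairs on the map side and reduces the amount of data shuffled to the reducers. A sketch of the extra line, which would go right after job.setReducerClass(...) in step 2:

// Optional: reuse the reducer as a combiner to pre-aggregate counts before the shuffle
job.setCombinerClass(WCReducer.class);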
Output: with setNumReduceTasks(4), the job writes four result files, part-r-00000 through part-r-00003 (plus a _SUCCESS marker), under D:/test/testwc/.