package com.testMr;
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.partition.HashPartitioner;
public class WordCount {
    public static class WordCountMapper extends
            Mapper<Object, Text, Text, IntWritable> {
        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();

        @Override
        public void map(Object key, Text value, Context context)
                throws IOException, InterruptedException {
            // Split on runs of whitespace so tabs and repeated spaces
            // do not produce empty tokens.
            String[] words = value.toString().split("\\s+");
            for (String str : words) {
                if (str.isEmpty()) {
                    continue;
                }
                word.set(str);
                context.write(word, one); // emit (word, 1) for every token
            }
        }
    }
    public static class WordCountReducer extends
            Reducer<Text, IntWritable, Text, IntWritable> {
        @Override
        public void reduce(Text key, Iterable<IntWritable> values,
                Context context) throws IOException, InterruptedException {
            int total = 0;
            for (IntWritable val : values) {
                // Sum the incoming counts instead of counting the values;
                // this stays correct if the class is reused as a combiner.
                total += val.get();
            }
            context.write(key, new IntWritable(total));
        }
    }
    public static void main(String[] args) throws Exception {
        String inputPath = args[0];
        String outputPath = args[1];
        int reduceNum = 1;

        Job job = Job.getInstance();
        job.setJobName("WordCount");
        job.setJarByClass(WordCount.class);

        FileInputFormat.addInputPath(job, new Path(inputPath));
        FileOutputFormat.setOutputPath(job, new Path(outputPath));

        job.setMapperClass(WordCountMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);

        job.setReducerClass(WordCountReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        // These two settings work together: setPartitionerClass picks the
        // rule that routes each key to a reducer, and setNumReduceTasks
        // controls how many reducers run. HashPartitioner is already the
        // default; it is set explicitly here for clarity.
        job.setPartitionerClass(HashPartitioner.class);
        job.setNumReduceTasks(reduceNum);

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
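To try the job, package the class into a jar and submit it with the hadoop command. A typical invocation looks like the following (the jar name and HDFS paths are placeholders, not part of the original post):

hadoop jar wordcount.jar com.testMr.WordCount /input/words.txt /output/wordcount

Because the reducer sums the incoming counts rather than counting values, it can also be registered as a combiner with job.setCombinerClass(WordCountReducer.class); this runs the same aggregation on the map side and cuts down the data shuffled to the reducers.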