For an overview of how MapReduce works, see http://www.cnblogs.com/xia520pi/archive/2012/05/16/2504205.html ; it is well illustrated and explains the process in detail.
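Before the complete Hadoop program, here is a minimal, Hadoop-free sketch of the same three steps (map, shuffle, reduce) applied to two hard-coded lines of text. The class name WordCountSketch and the sample input are made up purely for illustration; in the real job below, the framework performs the shuffle between the mapper and the reducer.

import java.util.*;

public class WordCountSketch {
    public static void main(String[] args) {
        String[] lines = { "hello hadoop", "hello world" };

        // "map": emit a (word, 1) pair for every whitespace-separated token
        List<Map.Entry<String, Integer>> mapped = new ArrayList<>();
        for (String line : lines) {
            StringTokenizer st = new StringTokenizer(line);
            while (st.hasMoreTokens()) {
                mapped.add(new AbstractMap.SimpleEntry<>(st.nextToken(), 1));
            }
        }

        // "shuffle": group the values by key, as the framework does before calling reduce
        Map<String, List<Integer>> grouped = new TreeMap<>();
        for (Map.Entry<String, Integer> kv : mapped) {
            grouped.computeIfAbsent(kv.getKey(), k -> new ArrayList<>()).add(kv.getValue());
        }

        // "reduce": sum the grouped values for each word and print word<TAB>count
        for (Map.Entry<String, List<Integer>> e : grouped.entrySet()) {
            int sum = 0;
            for (int v : e.getValue()) {
                sum += v;
            }
            System.out.println(e.getKey() + "\t" + sum);
        }
    }
}

Running the sketch prints hadoop 1, hello 2, world 1, which is what the Hadoop job writes into its output directory for the same input. The full Hadoop implementation follows.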
package hadoop_test;
import java.io.IOException;
import java.util.StringTokenizer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
public class WordCountTest {

    public static class TokenizerMapper extends Mapper<Object, Text, Text, IntWritable> {

        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();

        @Override
        public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
            // split the line into whitespace-separated tokens and emit (word, 1) for each token
            StringTokenizer st = new StringTokenizer(value.toString());
            while (st.hasMoreTokens()) {
                word.set(st.nextToken());
                context.write(word, one);
            }
        }
    }
    public static class IntSumReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

        private IntWritable result = new IntWritable();

        @Override
        public void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
            int sum = 0;
            // the framework has already grouped all values for this key; sum them with an enhanced for loop
            for (IntWritable val : values) {
                sum += val.get();        // get() returns the wrapped int
            }
            result.set(sum);             // set() stores the total back into the Writable
            context.write(key, result);
        }
    }
    public static void main(String[] args) throws Exception {
        // hard-coded HDFS input/output paths for this test; they override any command-line arguments
        args = new String[2];
        args[0] = "hdfs://hadoop1:9000/input";
        args[1] = "hdfs://hadoop1:9000/output";

        // load the configuration (core-default.xml, core-site.xml)
        Configuration conf = new Configuration();

        // parse generic options and keep the remaining arguments: [hdfs://hadoop1:9000/input, hdfs://hadoop1:9000/output]
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        if (otherArgs.length != 2) {
            // exit if the input and output paths are not both supplied
            System.err.println("usage: wordcount <in> <out>");
            System.exit(2);
        }

        // set up the job
        // new Job(conf, "WordCountTest") is deprecated (shown struck through in the IDE); use Job.getInstance instead
        // Job job = new Job(conf, "WordCountTest");
        Job job = Job.getInstance(conf, "WordCountTest");
        job.setJarByClass(WordCountTest.class);
        job.setMapperClass(TokenizerMapper.class);
        // combine the intermediate map output locally on each node before the shuffle (see the note after this listing)
        job.setCombinerClass(IntSumReducer.class);
        job.setReducerClass(IntSumReducer.class);
        // declare the job's output key/value types: <Text, IntWritable>
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // set the input and output paths
        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
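A note on the combiner choice above: setCombinerClass(IntSumReducer.class) is only safe because summing counts is associative and commutative, so partial sums computed on each node can themselves be summed again by the reducer. The sketch below (the class name CombinerSketch and the hard-coded counts are hypothetical, for illustration only) shows that both orders of summation give the same answer.

import java.util.Arrays;

public class CombinerSketch {
    public static void main(String[] args) {
        int[] counts = {1, 1, 1, 1, 1};                        // five (word, 1) values for one word

        int withoutCombiner = Arrays.stream(counts).sum();     // the reducer sums every value directly

        int nodeA = counts[0] + counts[1];                     // combiner output on one node
        int nodeB = counts[2] + counts[3] + counts[4];         // combiner output on another node
        int withCombiner = nodeA + nodeB;                      // the reducer then sums the partial sums

        System.out.println(withoutCombiner + " == " + withCombiner); // prints "5 == 5"
    }
}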
The same program again, in a more compact form without the comments and without the GenericOptionsParser argument check:

package hadoop_test;
import java.io.IOException;
import java.util.StringTokenizer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
public class WordCountTest {

    public static class TokenizerMapper extends Mapper<Object, Text, Text, IntWritable> {

        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();

        @Override
        public void map(Object key, Text values, Context context) throws IOException, InterruptedException {
            StringTokenizer st = new StringTokenizer(values.toString());
            while (st.hasMoreTokens()) {
                word.set(st.nextToken());
                context.write(word, one);
            }
        }
    }
    public static class IntSumReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

        private IntWritable result = new IntWritable();

        @Override
        public void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable i : values) {
                sum += i.get();
            }
            result.set(sum);
            context.write(key, result);
        }
    }
    public static void main(String[] args) throws Exception {
        args = new String[2];
        args[0] = "hdfs://hadoop1:9000/input";
        args[1] = "hdfs://hadoop1:9000/output";

        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "WordCountTest");
        job.setJarByClass(WordCountTest.class);
        job.setMapperClass(TokenizerMapper.class);
        job.setCombinerClass(IntSumReducer.class);
        job.setReducerClass(IntSumReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}