Environment: Hadoop 1.2.1 + JDK 7 + Ubuntu 10.04
Input:
2
7
5
1
7
6
8
7
Output:
1 1
2 2
3 5
4 6
5 7
6 7
7 7
8 8
1. Design approach
MapReduce already sorts the intermediate keys between the map and reduce phases, so we can piggyback on this default sort: the mapper parses each number and emits it as the map output key. The reducer then receives <key, value-list> pairs in ascending key order and writes each key out as the value, with the size of value-list determining how many times it is emitted (so duplicates are preserved). In addition, a counter num is emitted as the output key to record each number's rank.
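To make the data flow concrete with the sample input above: each map call emits a <number, 1> pair, and after the shuffle the reducer receives <1, {1}>, <2, {1}>, <5, {1}>, <6, {1}>, <7, {1, 1, 1}> and <8, {1}> in ascending key order. The duplicate key 7 carries a three-element value-list, so it is written out three times, at ranks 5, 6, and 7.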
The code is as follows:
import java.io.IOException;
import java.util.Iterator;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class Sort extends Configured implements Tool {

    public static class MapClass extends MapReduceBase
            implements Mapper<LongWritable, Text, IntWritable, IntWritable> {

        private final IntWritable count = new IntWritable();

        // Parse each line as an integer and emit it as the key; the
        // shuffle phase then sorts these keys for us automatically.
        public void map(LongWritable key, Text value,
                        OutputCollector<IntWritable, IntWritable> output,
                        Reporter reporter) throws IOException {
            count.set(Integer.parseInt(value.toString()));
            output.collect(count, new IntWritable(1));
        }
    }

    public static class Reduce extends MapReduceBase
            implements Reducer<IntWritable, IntWritable, IntWritable, IntWritable> {

        private final IntWritable num = new IntWritable();
        // Running rank across all reduce calls; this relies on the job
        // using a single reducer (the default of one reduce task).
        private int linenum = 0;

        // Keys arrive in ascending order. Each value is a 1 emitted by
        // the mapper, so linenum advances by one per occurrence and we
        // emit one <rank, number> line per duplicate.
        public void reduce(IntWritable key, Iterator<IntWritable> values,
                           OutputCollector<IntWritable, IntWritable> output,
                           Reporter reporter) throws IOException {
            while (values.hasNext()) {
                linenum += values.next().get();
                num.set(linenum);
                output.collect(num, key);
            }
        }
    }

    public int run(String[] args) throws Exception {
        Configuration conf = getConf();
        JobConf job = new JobConf(conf, Sort.class);
        Path in = new Path(args[0]);
        Path out = new Path(args[1]);
        FileInputFormat.setInputPaths(job, in);
        FileOutputFormat.setOutputPath(job, out);
        job.setJobName("Sort");
        job.setMapperClass(MapClass.class);
        job.setReducerClass(Reduce.class);
        job.setInputFormat(TextInputFormat.class);
        job.setOutputFormat(TextOutputFormat.class);
        job.setOutputKeyClass(IntWritable.class);
        job.setOutputValueClass(IntWritable.class);
        JobClient.runJob(job);
        return 0;
    }

    public static void main(String[] args) throws Exception {
        int res = ToolRunner.run(new Configuration(), new Sort(), args);
        System.exit(res);
    }
}
Code analysis:
Because the input format used here is TextInputFormat, its output key is of type LongWritable (the byte offset of each line) and its output value is of type Text (the line's contents), so the map input types are <LongWritable, Text>. The output format is TextOutputFormat, whose key and value may be of any type and are written as key<TAB>value per line. The mapper is therefore free to emit the <IntWritable, IntWritable> pairs this job needs; the only constraint is that the map output types match the reduce input types.
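One detail worth pointing out: the numbers are emitted as IntWritable keys rather than Text because the shuffle sorts IntWritable keys numerically, while Text keys would sort lexicographically ("10" would come before "2"). A minimal standalone sketch of the difference (the class name CompareDemo is just for illustration and is not part of the job above):
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
public class CompareDemo {
    public static void main(String[] args) {
        // IntWritable compares numerically: 2 < 10, so this prints a negative number
        System.out.println(new IntWritable(2).compareTo(new IntWritable(10)));
        // Text compares bytes lexicographically: "2" > "10", so this prints a positive number
        System.out.println(new Text("2").compareTo(new Text("10")));
    }
}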
Run:
From the Hadoop installation directory:
Compile:
javac -classpath hadoop-core-1.2.1.jar:lib/commons-cli-1.2.jar -d Sort/classes Sort/src/Sort.java
Package:
jar -cvf Sort/Sort.jar -C Sort/classes/ .
Run the jar:
bin/hadoop jar Sort/Sort.jar Sort /sort.txt output
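Note that this assumes the input file has already been uploaded to HDFS as /sort.txt; if it is still on the local disk, it can be copied up first with something like:
bin/hadoop fs -put sort.txt /sort.txt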
Just make sure there is no existing output directory under /user/root/ on HDFS.
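If a previous run left an output directory behind, it can be removed with:
bin/hadoop fs -rmr output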
View the results:
bin/hadoop fs -cat output/part-00000
1 1
2 2
3 5
4 6
5 7
6 7
7 7
8 8
Reference: 《Hadoop实战》 (Hadoop in Action), by Lu Jiaheng