MapReduce二次排序
➜ student git:(master) ✗ hadoop dfs -cat /sortTwo/data
DEPRECATED: Use of this script to execute hdfs command is deprecated.
Instead use the hdfs command for it.
18/08/05 16:18:55 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
4 3
4 2
4 1
3 4
2 7
2 3
3 1
3 2
3 3
package cn.chen.hd.mr;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.mapreduce.lib.partition.HashPartitioner;
import java.io.IOException;
/**
 * Driver for the secondary-sort job: sorts tab-separated "first\tsecond"
 * integer pairs by the first field, then by the second field.
 *
 * NOTE(review): the class name keeps the original (misspelled) identifier
 * "SeconadrySort" so external callers/scripts are unaffected.
 */
public class SeconadrySort {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Job.getInstance(...) replaces the deprecated new Job(conf, name) constructor.
        Job job = Job.getInstance(conf, "SeconadrySort");
        job.setJarByClass(SeconadrySort.class);
        job.setMapperClass(SeconadryMapper.class);
        job.setReducerClass(SeconadryReducer.class);
        job.setPartitionerClass(KeyPartitioner.class);
        job.setSortComparatorClass(SortComparator.class);
        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);
        // The whole input line is the map output key; the value carries no data.
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(NullWritable.class);
        job.setOutputKeyClass(NullWritable.class);
        job.setOutputValueClass(Text.class);
        // Optional: group all rows sharing the first field into one reduce() call.
        // job.setGroupingComparatorClass(GroupingComparator.class);
        FileInputFormat.addInputPath(job, new Path("hdfs://localhost:9000/sortTwo/data"));
        FileOutputFormat.setOutputPath(job, new Path("hdfs://localhost:9000/sortTwo/result"));
        // A single reducer is required for a globally sorted output file.
        job.setNumReduceTasks(1);
        // Prints 0 on success, 1 on failure (original behavior kept; a production
        // driver would typically call System.exit with this value instead).
        System.out.println(job.waitForCompletion(true) ? 0 : 1);
    }
}
/**
 * Identity mapper: emits each input line unchanged as the composite key
 * ("first\tsecond") with a NullWritable placeholder value, so the shuffle
 * phase performs the actual sorting.
 */
class SeconadryMapper extends Mapper<LongWritable, Text, Text, NullWritable> {
    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // The line itself already holds both sort fields; no parsing needed here.
        Text compositeKey = value;
        context.write(compositeKey, NullWritable.get());
    }
}
/**
 * Reducer that writes each sorted composite key back out, once per duplicate
 * input line, so multiplicities from the input are preserved.
 */
class SeconadryReducer extends Reducer<Text, NullWritable, NullWritable, Text> {
    @Override
    protected void reduce(Text key, Iterable<NullWritable> values, Context context)
            throws IOException, InterruptedException {
        // One output line per occurrence of this key in the input.
        for (NullWritable ignored : values) {
            context.write(NullWritable.get(), key);
        }
    }
}
/**
 * Partitions records by the first sort field only, so all rows that share a
 * first field are routed to the same reducer.
 */
class KeyPartitioner extends HashPartitioner<Text, NullWritable> {
    @Override
    public int getPartition(Text key, NullWritable value, int numReduceTasks) {
        String firstField = key.toString().split("\t")[0];
        // Mask the sign bit so a negative hashCode cannot yield a negative partition.
        int nonNegativeHash = firstField.hashCode() & Integer.MAX_VALUE;
        return nonNegativeHash % numReduceTasks;
    }
}
/**
 * Sort comparator for composite keys of the form "first\tsecond": orders
 * numerically by the first field, breaking ties with the second field.
 *
 * Improvement over the original: each key is split and parsed once instead of
 * up to six times per comparison, and the manual if/else ladder (with an
 * unreachable fall-through) is replaced by {@link Integer#compare}.
 */
class SortComparator extends WritableComparator {
    protected SortComparator() {
        // 'true' tells WritableComparator to instantiate Text keys for compare().
        super(Text.class, true);
    }

    @Override
    public int compare(WritableComparable key1, WritableComparable key2) {
        // Parse each composite key exactly once.
        String[] left = key1.toString().split("\t");
        String[] right = key2.toString().split("\t");
        int byFirst = Integer.compare(Integer.parseInt(left[0]), Integer.parseInt(right[0]));
        if (byFirst != 0) {
            return byFirst;
        }
        // First fields equal: fall back to the second sort field.
        return Integer.compare(Integer.parseInt(left[1]), Integer.parseInt(right[1]));
    }
}
/**
* 分组器
*/
class GroupingComparator extends WritableComparator{
protected GroupingComparator(){
super(Text.class, true);
}
@Override
public int compare(WritableComparable w1, WritableComparable w2) {
if(Integer.parseInt(w1.toString().split("\t")[0]) == Integer.parseInt(w2.toString().split("\t")[0]){
return 0;
} else if (Integer.parseInt(w1.toString().split("\t")[0]) > Integer.parseInt(w2.toString().split("\t")[0]){
return 1;
} else if (Integer.parseInt(w1.toString().split("\t")[0]) < Integer.parseInt(w2.toString().split("\t")[0]){
return -1;
}
return 0;
}
}
➜ student git:(master) ✗ hadoop dfs -cat /sortTwo/result/part-r-00000
DEPRECATED: Use of this script to execute hdfs command is deprecated.
Instead use the hdfs command for it.
18/08/05 16:18:28 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2 3
2 7
3 1
3 2
3 3
3 4
4 1
4 2
4 3
如果希望在调用 reduce 函数之前，按照 key 的前半部分（即第一个排序字段）对记录进行分组，需要在 main 函数中加上：
job.setGroupingComparatorClass(GroupingComparator.class);