一、基本概念
二次排序就是首先按照第一字段排序,然后再对第一字段相同的行按照第二字段排序,注意不能破坏第一次排序的结果。
二、测试数据(部分)
1 5
1 3
1 4
1 99
2 400
2 240
2 237
2 238
2 239
2 245
三、输出数据(部分)
------------------------------------------------
1 1
1 2
1 3
1 4
1 5
1 6
1 7
1 8
1 9
------------------------------------------------
2 235
2 236
2 237
2 238
2 239
2 240
2 241
2 242
2 243
2 244
四、源代码
package secondarySort;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.net.URI;
import java.util.StringTokenizer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Partitioner;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
public class SecondarySort {
//自己定义的key类应该实现WritableComparable接口
// Composite key: "first" is the natural (grouping) key, "second" is the secondary sort key.
// Custom keys must implement WritableComparable.
public static class IntPair implements WritableComparable<IntPair> {
    int first;
    int second;

    /**
     * Set the left (first) and right (second) values.
     */
    public void set(int left, int right) {
        first = left;
        second = right;
    }

    public int getFirst() {
        return first;
    }

    public int getSecond() {
        return second;
    }

    /** Deserialize: rebuild this IntPair from the binary stream. */
    @Override
    public void readFields(DataInput in) throws IOException {
        first = in.readInt();
        second = in.readInt();
    }

    /** Serialize: write this IntPair to the binary stream (same field order as readFields). */
    @Override
    public void write(DataOutput out) throws IOException {
        out.writeInt(first);
        out.writeInt(second);
    }

    /**
     * Key comparison: order by first, then by second. The shuffle-phase sort
     * calls this, which is what produces the secondary sort within each group.
     */
    @Override
    public int compareTo(IntPair o) {
        int cmp = Integer.compare(first, o.first);
        return cmp != 0 ? cmp : Integer.compare(second, o.second);
    }

    // hashCode() is used by the HashPartitioner (the default partitioner in
    // MapReduce); kept consistent with equals().
    @Override
    public int hashCode() {
        return first * 157 + second;
    }

    @Override
    public boolean equals(Object right) {
        if (this == right)
            return true;
        // instanceof is false for null, covering the original null check.
        if (!(right instanceof IntPair))
            return false;
        IntPair r = (IntPair) right;
        return r.first == first && r.second == second;
    }
}
/**
* 分区函数类。根据first确定Partition。
*/
public static class FirstPartitioner extends Partitioner<IntPair,IntWritable>{
    /** Route records by the natural key (first) only, so all records sharing it reach one reducer. */
    @Override
    public int getPartition(IntPair key, IntWritable value,
            int numPartitions) {
        // Mask the sign bit instead of Math.abs(): first*127 can overflow to
        // Integer.MIN_VALUE, and Math.abs(Integer.MIN_VALUE) is still negative,
        // which would yield an invalid (negative) partition index.
        return (key.getFirst() * 127 & Integer.MAX_VALUE) % numPartitions;
    }
}
/**
* 分组函数类。只要first相同就属于同一个组。
*/
/*//第一种方法,实现接口RawComparator
public static class GroupingComparator implements RawComparator<IntPair> {
@Override
public int compare(IntPair o1, IntPair o2) {
int l = o1.getFirst();
int r = o2.getFirst();
return l == r ? 0 : (l < r ? -1 : 1);
}
@Override
//一个字节一个字节的比,直到找到一个不相同的字节,然后比这个字节的大小作为两个字节流的大小比较结果。
public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2){
// TODO Auto-generated method stub
return WritableComparator.compareBytes(b1, s1, Integer.SIZE/8,
b2, s2, Integer.SIZE/8);
}
}*/
//第二种方法,继承WritableComparator
public static class GroupingComparator extends WritableComparator {
    protected GroupingComparator() {
        // true => instantiate keys so the object-based compare() below is used.
        super(IntPair.class, true);
    }

    /** Group solely on the first field: pairs with equal "first" share one reduce call. */
    @Override
    public int compare(WritableComparable w1, WritableComparable w2) {
        int left = ((IntPair) w1).getFirst();
        int right = ((IntPair) w2).getFirst();
        return left == right ? 0 : (left < right ? -1 : 1);
    }
}
// 自定义map
// Mapper: parse "<first> <second>" into a composite IntPair key plus the
// second value as an IntWritable.
public static class Map extends
        Mapper<LongWritable, Text, IntPair, IntWritable> {
    private final IntPair intkey = new IntPair();
    private final IntWritable intvalue = new IntWritable();

    public void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        StringTokenizer tokens = new StringTokenizer(value.toString());
        if (!tokens.hasMoreTokens()) {
            return; // blank line: emit nothing (same as original)
        }
        int left = Integer.parseInt(tokens.nextToken());
        // Missing second field defaults to 0, as in the original.
        int right = tokens.hasMoreTokens() ? Integer.parseInt(tokens.nextToken()) : 0;
        intkey.set(left, right);
        intvalue.set(right);
        context.write(intkey, intvalue);
    }
}
// 自定义reduce
//
// Reducer: emit a separator line per group, then each (first, second) pair.
public static class Reduce extends
        Reducer<IntPair, IntWritable, Text, IntWritable> {
    private final Text left = new Text();
    private static final Text SEPARATOR =
            new Text("------------------------------------------------");

    public void reduce(IntPair key, Iterable<IntWritable> values,
            Context context) throws IOException, InterruptedException {
        // One call per distinct "first" (thanks to GroupingComparator);
        // the values arrive already ordered by "second".
        context.write(SEPARATOR, null);
        left.set(Integer.toString(key.getFirst()));
        for (IntWritable second : values) {
            context.write(left, second);
        }
    }
}
/**
* @param args
*/
/**
 * Job driver.
 *
 * @param args args[0] = HDFS input path, args[1] = HDFS output path
 */
public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
    // Fail fast with a usage message instead of an ArrayIndexOutOfBoundsException.
    if (args.length < 2) {
        System.err.println("Usage: SecondarySort <input path> <output path>");
        System.exit(2);
    }
    // Load the Hadoop configuration.
    Configuration conf = new Configuration();
    // Job.getInstance replaces the deprecated Job(Configuration, String) constructor.
    Job job = Job.getInstance(conf, "secondarysort");
    job.setJarByClass(SecondarySort.class);
    // Mapper class.
    job.setMapperClass(Map.class);
    // No Combiner: a combiner's output type <Text, IntWritable> would not match
    // the reducer's input type <IntPair, IntWritable>.
    //job.setCombinerClass(Reduce.class);
    // Reducer class.
    job.setReducerClass(Reduce.class);
    // Partition by the natural key (first).
    job.setPartitionerClass(FirstPartitioner.class);
    // Group by the natural key (first).
    job.setGroupingComparatorClass(GroupingComparator.class);
    // Map output key type.
    job.setMapOutputKeyClass(IntPair.class);
    // Map output value type.
    job.setMapOutputValueClass(IntWritable.class);
    // Reduce output key type: Text, because TextOutputFormat is used.
    job.setOutputKeyClass(Text.class);
    // Reduce output value type.
    job.setOutputValueClass(IntWritable.class);
    // Input format: splits the input into blocks and supplies a RecordReader.
    job.setInputFormatClass(TextInputFormat.class);
    // Output format: supplies the RecordWriter.
    job.setOutputFormatClass(TextOutputFormat.class);
    // Delete the output path first if it already exists (best effort).
    FileSystem fs = FileSystem.get(URI.create(args[1]), conf);
    Path path = new Path(args[1]);
    if (fs.exists(path)) {
        try {
            fs.delete(path, true);
            System.out.println(path.getName() + "删除成功 ");
        } catch (Exception e) {
            // Best-effort cleanup: log and continue; the job itself will fail
            // later if the path is genuinely unusable.
            System.err.println(e.getMessage());
        }
    }
    // HDFS input path.
    FileInputFormat.setInputPaths(job, new Path(args[0]));
    // HDFS output path.
    FileOutputFormat.setOutputPath(job, new Path(args[1]));
    // Submit the job and wait for completion.
    System.exit(job.waitForCompletion(true) ? 0 : 1);
}
}
五、提升
将代码优化,实现字符串排序。
import java.io.DataInput;
import java.io.DataOutput;
import java.io.File;
import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.Date;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Partitioner;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import service.plugin.EJob;
public class SecondarySortString {
// 自己定义的key类应该实现WritableComparable接口
public static class IntPair implements WritableComparable<IntPair> {
String first;
String second;
/**
* Set the left and right values.
*/
public void set(String left, String right) {
first = left;
second = right;
}
public String getFirst() {
return first;
}
public String getSecond() {
return second;
}
// 反序列化,从流中的二进制转换成IntPair
public void readFields(DataInput in) throws IOException {
first = in.readUTF();
second = in.readUTF();
}
// 序列化,将IntPair转化成使用流传送的二进制
public void write(DataOutput out) throws IOException {
out.writeUTF(first);
out.writeUTF(second);
}
// 重载 compareTo 方法,进行组合键 key 的比较,该过程是默认行为。
// 分组后的二次排序会隐式调用该方法。
public int compareTo(IntPair o) {
if (!first.equals(o.first)) {
return first.compareTo(o.first);
} else if (!second.equals(o.second)) {
return second.compareTo(o.second);
} else {
return 0;
}
}
// 新定义类应该重写的两个方法
// The hashCode() method is used by the HashPartitioner (the default
// partitioner in MapReduce)
public int hashCode() {
return first.hashCode() * 157 + second.hashCode();
}
public boolean equals(Object right) {
if (right == null)
return false;
if (this == right)
return true;
if (right instanceof IntPair) {
IntPair r = (IntPair) right;
return r.first.equals(first) && r.second.equals(second);
} else {
return false;
}
}
}
/**
* 分区函数类。根据first确定Partition。
*/
public static class FirstPartitioner extends Partitioner<IntPair, Text> {
public int getPartition(IntPair key, Text value, int numPartitions) {
return Math.abs(key.getFirst().hashCode() * 127) % numPartitions;
}
}
/**
* 分组函数类。只要first相同就属于同一个组。
*/
/*
* //第一种方法,实现接口RawComparator public static class GroupingComparator
* implements RawComparator<IntPair> { public int compare(IntPair o1,
* IntPair o2) { int l = o1.getFirst(); int r = o2.getFirst(); return l == r
* ? 0 : (l < r ? -1 : 1); }
* //一个字节一个字节的比,直到找到一个不相同的字节,然后比这个字节的大小作为两个字节流的大小比较结果。 public int
* compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2){ return
* WritableComparator.compareBytes(b1, s1, Integer.SIZE/8, b2, s2,
* Integer.SIZE/8); } }
*/
// 第二种方法,继承WritableComparator
public static class GroupingComparator extends WritableComparator {
protected GroupingComparator() {
super(IntPair.class, true);
}
// Compare two WritableComparables.
// 重载 compare:对组合键按第一个自然键排序分组
public int compare(WritableComparable w1, WritableComparable w2) {
IntPair ip1 = (IntPair) w1;
IntPair ip2 = (IntPair) w2;
String l = ip1.getFirst();
String r = ip2.getFirst();
return l.compareTo(r);
}
}
// 自定义map
// Mapper: split "<first> <second>" on a single space; key = (first, second),
// value = the whole original line.
public static class Map extends Mapper<LongWritable, Text, IntPair, Text> {
    private final IntPair keyPair = new IntPair();
    String[] lineArr = null;

    public void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        String line = value.toString();
        if(line.isEmpty()){
            return;
        }
        lineArr = line.split(" ", -1);
        // Skip malformed lines with no second field instead of crashing the
        // task with ArrayIndexOutOfBoundsException (the original bug).
        if (lineArr.length < 2) {
            return;
        }
        keyPair.set(lineArr[0], lineArr[1]);
        context.write(keyPair, value);
    }
}
// 自定义reduce
// Reducer: write a separator line per group, then every original line in sorted order.
public static class Reduce extends Reducer<IntPair, Text, Text, Text> {
    private static final Text SEPARATOR = new Text("------------------------------------------------");

    public void reduce(IntPair key, Iterable<Text> values, Context context)
            throws IOException, InterruptedException {
        // One call per distinct "first"; values arrive already ordered by "second".
        context.write(SEPARATOR, null);
        for (Text line : values) {
            context.write(null, line); // null key: only the value line is emitted
        }
    }
}
/** Job driver: runs the string secondary sort against hard-coded HDFS paths. */
public static void main(String[] args) throws IOException,
        InterruptedException, ClassNotFoundException {
    // Package a jar on the fly so the job can be submitted from the IDE.
    File jarFile = EJob.createTempJar("bin");
    ClassLoader classLoader = EJob.getClassLoader();
    Thread.currentThread().setContextClassLoader(classLoader);
    Configuration conf = new Configuration(true);
    String[] otherArgs = new String[2];
    otherArgs[0] = "hdfs://192.168.1.100:9000/data/test_in/secondary_sort_data_string.txt";
    String time = new SimpleDateFormat("yyyyMMddHHmmss").format(new Date());
    otherArgs[1] = "hdfs://192.168.1.100:9000/data/test_out/mr-" + time;
    // Job.getInstance replaces the deprecated Job(Configuration, String) constructor.
    Job job = Job.getInstance(conf, "secondarysort");
    // BUG FIX: was SecondarySort.class (the other example's driver); the jar
    // must be located from THIS class.
    job.setJarByClass(SecondarySortString.class);
    ((JobConf) job.getConfiguration()).setJar(jarFile.toString());
    // Mapper class.
    job.setMapperClass(Map.class);
    // No Combiner: a combiner's output type would not match the reducer's
    // input type <IntPair, Text>.
    // job.setCombinerClass(Reduce.class);
    // Reducer class.
    job.setReducerClass(Reduce.class);
    // Partition by the natural key (first).
    job.setPartitionerClass(FirstPartitioner.class);
    // Group by the natural key (first).
    job.setGroupingComparatorClass(GroupingComparator.class);
    // Map output key type.
    job.setMapOutputKeyClass(IntPair.class);
    // Map output value type.
    job.setMapOutputValueClass(Text.class);
    // Reduce output key type: Text, because TextOutputFormat is used.
    job.setOutputKeyClass(Text.class);
    // Reduce output value type.
    job.setOutputValueClass(Text.class);
    // Input format: splits the input into blocks and supplies a RecordReader.
    job.setInputFormatClass(TextInputFormat.class);
    // Output format: supplies the RecordWriter.
    job.setOutputFormatClass(TextOutputFormat.class);
    // HDFS input path.
    FileInputFormat.setInputPaths(job, new Path(otherArgs[0]));
    // HDFS output path (timestamped, so no pre-delete is needed).
    // FileSystem.get(conf).delete(new Path(args[1]), true);
    FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
    // Submit the job and wait for completion.
    System.exit(job.waitForCompletion(true) ? 0 : 1);
}
}