// Sorts rows by the first column; rows with equal first columns are ordered by the second column.
package Test;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.WritableComparable;
/**
 * Composite MapReduce key holding two long columns.
 * Sort order is DESCENDING on {@code first}, ties broken by DESCENDING {@code second}.
 */
public class NumSort implements WritableComparable<NumSort> {
    /** First column (primary sort key). */
    public long first;
    /** Second column (secondary sort key). */
    public long second;

    /** No-arg constructor required by Hadoop's Writable deserialization. */
    public NumSort() {}

    public NumSort(long first, long second) {
        this.first = first;
        this.second = second;
    }

    /** Tab-separated rendering, matching the original input format. */
    @Override
    public String toString() {
        return first + "\t" + second;
    }

    /** Serializes both columns; field order must match {@link #readFields}. */
    @Override
    public void write(DataOutput out) throws IOException {
        out.writeLong(this.first);
        out.writeLong(this.second);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        this.first = in.readLong();
        this.second = in.readLong();
    }

    /**
     * Descending comparison: the arguments to {@code Long.compare} are deliberately
     * reversed so larger values sort first.
     */
    @Override
    public int compareTo(NumSort o) {
        int cmp = Long.compare(o.first, this.first);
        return cmp != 0 ? cmp : Long.compare(o.second, this.second);
    }

    /**
     * equals/hashCode are required for a MapReduce key: the default HashPartitioner
     * routes keys by hashCode, so equal keys must hash identically or identical
     * keys emitted by different map tasks may reach different reducers.
     */
    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (!(obj instanceof NumSort)) {
            return false;
        }
        NumSort other = (NumSort) obj;
        return this.first == other.first && this.second == other.second;
    }

    @Override
    public int hashCode() {
        return 31 * Long.hashCode(first) + Long.hashCode(second);
    }
}
package Test;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
public class NumSortDemo1 {
public static void main(String[] args) throws Exception {
if (args.length!=2) {
System.exit(0);
}
Job job = new Job(new Configuration(),"NumSortDemo1" );
job.setJarByClass(NumSortDemo1.class);
FileInputFormat.setInputPaths(job,new Path(args[0]));
FileOutputFormat.setOutputPath(job, new Path(args[1]));
job.setMapperClass(NumSortMap.class);
job.setReducerClass(NumSortReduce.class);
job.setMapOutputKeyClass(NumSort.class);
job.setMapOutputValueClass(NullWritable.class);
job.setOutputKeyClass(NumSort.class);
job.setOutputValueClass(NullWritable.class);
job.waitForCompletion(true);
}
public static class NumSortMap extends Mapper<LongWritable, Text, NumSort, NullWritable>{
protected void map(LongWritable key, Text value, org.apache.hadoop.mapreduce.Mapper<LongWritable,Text,NumSort,NullWritable>.Context context) throws java.io.IOException ,InterruptedException {
String[] lines = value.toString().split("\t");
Long first = Long.parseLong(lines[0].trim());
Long second = Long.parseLong(lines[1].trim());
NumSort numSort = new NumSort(first,second);
context.write(numSort, NullWritable.get());
};
}
public static class NumSortReduce extends Reducer<NumSort, NullWritable, NumSort, NullWritable>{
protected void reduce(NumSort key, java.lang.Iterable<NullWritable> values, org.apache.hadoop.mapreduce.Reducer<NumSort,NullWritable,NumSort,NullWritable>.Context context) throws java.io.IOException ,InterruptedException {
context.write(key, NullWritable.get());
};
}
}