WritableComparable Sorting
Sorting Overview
Both MapTask and ReduceTask sort their data by key; sorting by key is the default behavior of the Hadoop framework (lexicographic order for Text keys), and the in-memory sort on the map side is a quicksort.
MapTask: quicksorts the data in its ring buffer before spilling it to disk, then merge-sorts the spill files.
ReduceTask: merge-sorts the already-sorted map outputs it fetches from the MapTasks.
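To see what lexicographic key order means in practice, here is a standalone sketch (the class name TextOrderDemo is made up; it is not part of the case study below):

import org.apache.hadoop.io.Text;

// Text compares its bytes, i.e. lexicographically, not numerically
public class TextOrderDemo {
    public static void main(String[] args) {
        System.out.println(new Text("100").compareTo(new Text("99"))); // negative: "100" sorts before "99"
        System.out.println(new Text("9").compareTo(new Text("10")));   // positive: "9" sorts after "10"
    }
}

This is exactly why the case study below puts a bean with a numeric compareTo in the key position instead of relying on the default ordering.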
Sorting Categories
This note covers two of them: total sort (a single, globally ordered output file) and within-partition sort (each partition's output file ordered internally).
Custom Sorting
How it works
When a bean object is used as the key, it must implement the WritableComparable interface and override compareTo; the shuffle then sorts records according to that comparison.
@Override
public int compareTo(FlowBean bean) {
    int result;
    // Descending order by total flow
    if (sumFlow > bean.getSumFlow()) {
        result = -1;
    } else if (sumFlow < bean.getSumFlow()) {
        result = 1;
    } else {
        result = 0;
    }
    return result;
}
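A more compact equivalent of the same descending comparison (a sketch, not from the original notes) is:

@Override
public int compareTo(FlowBean bean) {
    // Long.compare with the arguments swapped yields descending order by sumFlow
    return Long.compare(bean.getSumFlow(), this.sumFlow);
}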
Sort Case Study (Total Sort)
1. Requirement
Sort the records by total traffic per phone number, in descending order.
2. Input data
13470253144 180 180 360
13509468723 7335 110349 117684
13560439638 918 4938 5856
13568436656 3597 25635 29232
13590439668 1116 954 2070
13630577991 6960 690 7650
13682846555 1938 2910 4848
13729199489 240 0 240
13736230513 2481 24681 27162
13768778790 120 120 240
13846544121 264 0 264
13956435636 132 1512 1644
13966251146 240 0 240
13975057813 11058 48243 59301
13992314666 3008 3720 6728
15043685818 3659 3538 7197
15910133277 3156 2936 6092
15959002129 1938 180 2118
18271575951 1527 2106 3633
18390173782 9531 2412 11943
84188413 4116 1432 5548
3. Expected output data
13509468723 7335 110349 117684
13975057813 11058 48243 59301
13568436656 3597 25635 29232
13736230513 2481 24681 27162
........
4. Requirement analysis
MapReduce sorts only by key, so the flow bean (which carries the total) is emitted as the map output key and the phone number as the value; compareTo on the bean orders the keys by sumFlow in descending order, and the Reducer swaps key and value back so each output line starts with the phone number again.
5. Code implementation
Bean class
package com.saddam.bigdata.ShangGuiGu.Shuffle.sort;

import org.apache.hadoop.io.WritableComparable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

// The bean is used as the map output key, so it implements WritableComparable:
// Writable for serialization across the shuffle, Comparable to define the sort order.
public class SortFlowBean implements WritableComparable<SortFlowBean> {

    private long upFlow;   // upstream traffic
    private long downFlow; // downstream traffic
    private long sumFlow;  // total traffic = upFlow + downFlow

    // The no-argument constructor is required so the framework can create the bean by reflection
    public SortFlowBean() {
        super();
    }

    public SortFlowBean(long upFlow, long downFlow) {
        this.upFlow = upFlow;
        this.downFlow = downFlow;
        this.sumFlow = this.upFlow + this.downFlow;
    }

    public long getUpFlow() {
        return upFlow;
    }

    public void setUpFlow(long upFlow) {
        this.upFlow = upFlow;
    }

    public long getDownFlow() {
        return downFlow;
    }

    public void setDownFlow(long downFlow) {
        this.downFlow = downFlow;
    }

    public long getSumFlow() {
        return sumFlow;
    }

    public void setSumFlow(long sumFlow) {
        this.sumFlow = sumFlow;
    }

    public void setSumFlow() {
        sumFlow = upFlow + downFlow;
    }

    @Override
    public String toString() {
        return upFlow + "\t" + downFlow + "\t" + sumFlow;
    }

    // Serialization: the field order in write must match the order in readFields
    @Override
    public void write(DataOutput out) throws IOException {
        out.writeLong(upFlow);
        out.writeLong(downFlow);
        out.writeLong(sumFlow);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        this.upFlow = in.readLong();
        this.downFlow = in.readLong();
        this.sumFlow = in.readLong();
    }

    // Sort keys in descending order of total flow
    @Override
    public int compareTo(SortFlowBean bean) {
        int result;
        if (sumFlow > bean.getSumFlow()) {
            result = -1;
        } else if (sumFlow < bean.getSumFlow()) {
            result = 1;
        } else {
            result = 0;
        }
        return result;
    }
}
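A minimal local sanity check of the ordering, outside MapReduce (the class name SortFlowBeanTest is made up), might look like this:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class SortFlowBeanTest {
    public static void main(String[] args) {
        List<SortFlowBean> beans = new ArrayList<>();
        beans.add(new SortFlowBean(100, 200)); // sumFlow = 300
        beans.add(new SortFlowBean(500, 500)); // sumFlow = 1000
        beans.add(new SortFlowBean(10, 20));   // sumFlow = 30
        Collections.sort(beans);               // uses compareTo -> descending by sumFlow
        for (SortFlowBean bean : beans) {
            System.out.println(bean);          // prints the 1000 total first, the 30 total last
        }
    }
}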
Mapper class
package com.saddam.bigdata.ShangGuiGu.Shuffle.sort;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

// The bean goes out as the key so the shuffle sorts records by total flow;
// the phone number travels along as the value.
public class SortFlowMapper extends Mapper<LongWritable, Text, SortFlowBean, Text> {

    Text outV = new Text();
    SortFlowBean outK = new SortFlowBean();

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // Input line (output of the earlier flow-sum job): phone \t upFlow \t downFlow \t sumFlow
        String line = value.toString();
        String[] fields = line.split("\t");

        String phone = fields[0];
        String up = fields[1];
        String down = fields[2];
        String sum = fields[3];

        outV.set(phone);
        outK.setUpFlow(Long.parseLong(up));
        outK.setDownFlow(Long.parseLong(down));
        outK.setSumFlow(Long.parseLong(sum));

        // Key = flow bean (to be sorted), value = phone number
        context.write(outK, outV);
    }
}
Reducer class
package com.saddam.bigdata.ShangGuiGu.Shuffle.sort;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class SortFlowReducer extends Reducer<SortFlowBean, Text, Text, SortFlowBean> {

    @Override
    protected void reduce(SortFlowBean key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
        // Several phone numbers can share the same total flow (the same key);
        // write each of them out, swapping key and value back to phone \t flows.
        for (Text value : values) {
            context.write(value, key);
        }
    }
}
Driver class
package com.saddam.bigdata.ShangGuiGu.Shuffle.sort;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.log4j.BasicConfigurator;

public class SortFlowDriver {

    public static void main(String[] args) throws Exception {
        BasicConfigurator.configure();

        Configuration configuration = new Configuration();
        Job job = Job.getInstance(configuration);

        job.setMapperClass(SortFlowMapper.class);
        job.setReducerClass(SortFlowReducer.class);

        // Map output: bean as key (sorted by the shuffle), phone number as value
        job.setMapOutputKeyClass(SortFlowBean.class);
        job.setMapOutputValueClass(Text.class);

        // Final output: phone number as key, bean as value
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(SortFlowBean.class);

        // Input is the result file of the previous flow-sum job
        FileInputFormat.setInputPaths(job, new Path("D:\\MR\\MapReduce\\OutputDatas\\output_FlowTotal\\part-r-00000"));
        FileOutputFormat.setOutputPath(job, new Path("D:\\MR\\MapReduce\\OutputDatas\\output_FlowSort"));

        job.waitForCompletion(true);
    }
}
Sort Case Study (Within-Partition Sort)
1. Requirement
Each province's phone numbers should go to their own output file, and within each file the records should be sorted by total flow.
2. Requirement analysis
Building on the previous case, add a custom partitioner class that assigns partitions based on the phone-number prefix (the prefix stands in for the province).
3. Code implementation
Partitioner class
package com.saddam.bigdata.ShangGuiGu.Shuffle.sort;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Partitioner;

// The generic types must match the map output types: key = SortFlowBean, value = Text (phone number)
public class PartitionSort extends Partitioner<SortFlowBean, Text> {

    @Override
    public int getPartition(SortFlowBean key, Text value, int numPartitions) {
        // The phone number is the value in this job, so the prefix is taken from the value
        String prePhoneNum = value.toString().substring(0, 3);

        int partition = 4; // all other prefixes go to the last partition
        if ("136".equals(prePhoneNum)) {
            partition = 0;
        } else if ("137".equals(prePhoneNum)) {
            partition = 1;
        } else if ("138".equals(prePhoneNum)) {
            partition = 2;
        } else if ("139".equals(prePhoneNum)) {
            partition = 3;
        }
        return partition;
    }
}
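A quick local check of the routing logic, run as a plain Java program (the class name PartitionSortCheck is made up):

import org.apache.hadoop.io.Text;

public class PartitionSortCheck {
    public static void main(String[] args) {
        PartitionSort p = new PartitionSort();
        SortFlowBean bean = new SortFlowBean(1, 1); // the key is not used by getPartition here
        System.out.println(p.getPartition(bean, new Text("13630577991"), 5)); // 0 (prefix 136)
        System.out.println(p.getPartition(bean, new Text("13846544121"), 5)); // 2 (prefix 138)
        System.out.println(p.getPartition(bean, new Text("15043685818"), 5)); // 4 (any other prefix)
    }
}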
Driver class (additions to the total-sort driver)
// Register the custom partitioner and run one ReduceTask per partition
job.setPartitionerClass(PartitionSort.class);
job.setNumReduceTasks(5);
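For context, a sketch of where these calls go in SortFlowDriver.main (the output directory name output_FlowPartitionSort is an assumption):

// ...same Mapper/Reducer and key/value type configuration as SortFlowDriver above...
job.setPartitionerClass(PartitionSort.class); // route records by phone-number prefix
job.setNumReduceTasks(5);                     // one ReduceTask per partition -> part-r-00000 .. part-r-00004
// Point the output at a fresh directory (hypothetical name)
FileOutputFormat.setOutputPath(job, new Path("D:\\MR\\MapReduce\\OutputDatas\\output_FlowPartitionSort"));
job.waitForCompletion(true);

The number of reduce tasks must be at least 5 here, because getPartition can return indexes 0 through 4; with fewer ReduceTasks, records routed to a missing partition would make the job fail.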