Both MapTask and ReduceTask sort records by key; this is Hadoop's default behavior.
The data in every application gets sorted, and the sorting is mandatory, regardless of whether the application logically needs it.
The default sort order is lexicographic (dictionary order) on the key, and the algorithm used to implement it is quick sort.
MapTask temporarily places its intermediate results in a circular (ring) memory buffer. When the buffer's usage reaches a certain threshold, the buffered data is quick-sorted and the sorted run is spilled to disk. After all input has been processed, MapTask performs a merge sort over all the spill files on disk.
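Both the buffer size and the spill threshold are configurable. A minimal sketch, assuming the Hadoop 2.x+ property names mapreduce.task.io.sort.mb (buffer size in MB, default around 100) and mapreduce.map.sort.spill.percent (usage ratio that triggers a spill, default around 0.80); the values below are purely illustrative and would be set on the Configuration before Job.getInstance(conf) in the driver:

// illustrative tuning of the map-side sort buffer (set before Job.getInstance(conf))
Configuration conf = new Configuration();
// ring buffer size in MB (illustrative value)
conf.set("mapreduce.task.io.sort.mb", "200");
// buffer usage ratio at which sorted runs start spilling to disk (illustrative value)
conf.set("mapreduce.map.sort.spill.percent", "0.80");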
ReduceTask copies the relevant data files from each MapTask over the network. If a copied file exceeds a certain size threshold it is spilled to disk; otherwise it is kept in memory.
If the number of files on disk reaches a certain threshold, they are merge-sorted into one larger file; if the size or number of in-memory files exceeds a threshold, they are merged once and the result is spilled to disk. When all the data has been copied, ReduceTask performs a final merge sort over everything held in memory and on disk.
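How many files are combined in a single merge pass is also configurable. A minimal sketch, assuming the Hadoop 2.x+ property name mapreduce.task.io.sort.factor (merge factor, default 10); the value is illustrative and would be set on the same Configuration object as above:

// illustrative merge-factor tuning for the spill/merge phases described above
conf.set("mapreduce.task.io.sort.factor", "20");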
When a custom bean object is transmitted as the key, it must implement the WritableComparable interface and override the compareTo method; that is enough to get the custom sort order.
1. Create FlowBean, implement WritableComparable, and override the compareTo method
import org.apache.hadoop.io.WritableComparable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

/**
 * 1. Define a class that implements the WritableComparable interface
 * 2. Override the serialization/deserialization methods and the compareTo method
 * 3. Provide a no-arg constructor
 * 4. Override toString
 *
 * @author sun
 */
public class FlowBean implements WritableComparable<FlowBean> {

    /** upstream traffic */
    private long upFlow;
    /** downstream traffic */
    private long downFlow;
    /** total traffic */
    private long sumFlow;

    // no-arg constructor
    public FlowBean() {
    }

    public long getUpFlow() {
        return upFlow;
    }

    public void setUpFlow(long upFlow) {
        this.upFlow = upFlow;
    }

    public long getDownFlow() {
        return downFlow;
    }

    public void setDownFlow(long downFlow) {
        this.downFlow = downFlow;
    }

    public long getSumFlow() {
        return sumFlow;
    }

    public void setSumFlow(long sumFlow) {
        this.sumFlow = sumFlow;
    }

    public void setSumFlow() {
        this.sumFlow = this.upFlow + this.downFlow;
    }

    @Override
    public void write(DataOutput out) throws IOException {
        out.writeLong(upFlow);
        out.writeLong(downFlow);
        out.writeLong(sumFlow);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        this.upFlow = in.readLong();
        this.downFlow = in.readLong();
        this.sumFlow = in.readLong();
    }

    @Override
    public String toString() {
        return upFlow + "\t" + downFlow + "\t" + sumFlow;
    }

    @Override
    public int compareTo(FlowBean o) {
        // sort by total traffic in descending order
        if (this.sumFlow > o.sumFlow) {
            return -1;
        } else if (this.sumFlow < o.sumFlow) {
            return 1;
        } else {
            // on ties, sort by upstream traffic in ascending order
            if (this.upFlow > o.upFlow) {
                return 1;
            } else if (this.upFlow < o.upFlow) {
                return -1;
            } else {
                return 0;
            }
        }
    }
}
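For reference, the same ordering (descending total traffic, ascending upstream traffic on ties) can be written more compactly with Long.compare. This is a drop-in alternative for the compareTo above, not part of the original class:

@Override
public int compareTo(FlowBean o) {
    // descending by total traffic; on ties, ascending by upstream traffic
    int bySum = Long.compare(o.getSumFlow(), this.getSumFlow());
    return bySum != 0 ? bySum : Long.compare(this.getUpFlow(), o.getUpFlow());
}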
2. Create FlowMapper extending Mapper and override the map method
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

/**
 * @author sun
 */
public class FlowMapper extends Mapper<LongWritable, Text, FlowBean, Text> {

    private FlowBean outK = new FlowBean();
    private Text outV = new Text();

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // read one line
        String line = value.toString();

        // split on tabs
        String[] split = line.split("\t");

        // populate the output key and value
        outV.set(split[0]);
        outK.setUpFlow(Long.parseLong(split[1]));
        outK.setDownFlow(Long.parseLong(split[2]));
        outK.setSumFlow();

        // write out: FlowBean is the key, so the shuffle sorts on it
        context.write(outK, outV);
    }
}
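For reference, the map method assumes each input line is the tab-separated output of the earlier flow-statistics job: phone number, upstream flow, downstream flow, total flow. A hypothetical line (values made up purely for illustration) and how it is mapped:

// hypothetical input line: 13800000000 \t 1116 \t 954 \t 2070
// split[0] -> outV = Text("13800000000")
// split[1] -> upFlow = 1116, split[2] -> downFlow = 954, sumFlow = 1116 + 954 = 2070
// map output: (FlowBean{upFlow=1116, downFlow=954, sumFlow=2070}, Text("13800000000"))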
3. Create FlowReducer
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

/**
 * @author sun
 */
public class FlowReducer extends Reducer<FlowBean, Text, Text, FlowBean> {

    @Override
    protected void reduce(FlowBean key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
        // sorting already happened on the FlowBean key during the shuffle;
        // several phone numbers may share identical flow values, so iterate
        // over the values and swap key and value when writing out
        for (Text value : values) {
            context.write(value, key);
        }
    }
}
4. Create FlowDriver
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

/**
 * @author sun
 */
public class FlowDriver {

    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        // 1 get the job
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);

        // 2 set the jar
        job.setJarByClass(FlowDriver.class);

        // 3 associate the Mapper and Reducer
        job.setMapperClass(FlowMapper.class);
        job.setReducerClass(FlowReducer.class);

        // 4 set the mapper output key and value types
        job.setMapOutputKeyClass(FlowBean.class);
        job.setMapOutputValueClass(Text.class);

        // 5 set the final output key and value types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(FlowBean.class);

        // 6 set the input and output paths
        FileInputFormat.setInputPaths(job, new Path("D:\\hadoop\\output4"));
        FileOutputFormat.setOutputPath(job, new Path("D:\\hadoop\\output6"));

        // 7 submit the job
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
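The paths above are hard-coded for a local test run; note that the output directory (D:\hadoop\output6) must not already exist, or the job submission fails. For a cluster run the paths would usually come from the command line instead; a minimal sketch of step 6 under that assumption (args[0] and args[1] as input and output paths):

// hypothetical variant of step 6, assuming paths are passed as program arguments
FileInputFormat.setInputPaths(job, new Path(args[0]));
FileOutputFormat.setOutputPath(job, new Path(args[1]));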