MapReduce Programming Example: Secondary Sort

Design idea:

Secondary sort means the data is first sorted on one column and then, within that ordering, sorted again on the value of another column. The sample input below has two space-separated integer fields per line (the expected result is listed right after the sample):
4 3
4 2
4 1
3 4
2 7
2 3
3 1
3 2
3 3
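
For the sample above, sorting in ascending order on both fields (which is what the code below implements) should give:

2 3
2 7
3 1
3 2
3 3
3 4
4 1
4 2
4 3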

SecondaryMapper:

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

//In map(), the whole input line arrives as the value; it is emitted unchanged as the output key.
public class SecondaryMapper extends Mapper<LongWritable, Text, Text, NullWritable> {
    @Override
    protected void map(LongWritable key, Text value, Context context) throws java.io.IOException, InterruptedException {
        //Emit the line as the key with an empty value. Do not use new NullWritable();
        //the empty value can only be obtained through NullWritable.get().
        context.write(value, NullWritable.get());
    }

}

SecondaryReducer:

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;


//The map output value is NullWritable, so the reducer's input types must be <Text, NullWritable>.
//GroupingComparator puts every line with the same first field into one group; while the values
//are iterated, the framework also advances the key, so writing the key once per value emits
//every full line in its sorted position.
public class SecondaryReducer extends Reducer<Text, NullWritable, NullWritable, Text> {
    @Override
    protected void reduce(Text key, Iterable<NullWritable> values, Context context) throws java.io.IOException, InterruptedException {
        for (NullWritable value : values) {
            context.write(NullWritable.get(), key);
        }
    }

}

KeyPartitioner:

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.lib.partition.HashPartitioner;


//Partition on the first field only, so that all lines sharing the same first field
//are sent to the same reducer.
public class KeyPartitioner extends HashPartitioner<Text, NullWritable> {
    @Override
    public int getPartition(Text key, NullWritable value, int numReduceTasks) {
        return (key.toString().split(" ")[0].hashCode() & Integer.MAX_VALUE) % numReduceTasks;
    }

}
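
As a quick sanity check, a small demo class (not part of the original post, assumed to sit in the same package as KeyPartitioner) shows that keys sharing the first field always map to the same partition:

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;

public class KeyPartitionerDemo {
    public static void main(String[] args) {
        KeyPartitioner partitioner = new KeyPartitioner();
        int numReduceTasks = 3; // arbitrary reducer count for the check
        // "3 1" and "3 4" share the first field, so both calls print the same partition number.
        System.out.println(partitioner.getPartition(new Text("3 1"), NullWritable.get(), numReduceTasks));
        System.out.println(partitioner.getPartition(new Text("3 4"), NullWritable.get(), numReduceTasks));
    }
}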

SortComparator:

import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;


//Sort comparator used during the shuffle: order by the first field and,
//when the first fields are equal, break the tie with the second field (ascending on both).
public class SortComparator extends WritableComparator {
    public SortComparator() {
        super(Text.class, true);
    }

    @Override
    public int compare(WritableComparable key1, WritableComparable key2) {
        String[] fields1 = key1.toString().split(" ");
        String[] fields2 = key2.toString().split(" ");
        //If the first fields differ, they alone decide the order
        int byFirstField = Integer.compare(Integer.parseInt(fields1[0]), Integer.parseInt(fields2[0]));
        if (byFirstField != 0) {
            return byFirstField;
        }
        //Otherwise fall back to the second field
        return Integer.compare(Integer.parseInt(fields1[1]), Integer.parseInt(fields2[1]));
    }

}
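
A hypothetical quick check (not in the original post, assumed to live in the same package as SortComparator) illustrates the ordering, including the tie-break on the second field:

import org.apache.hadoop.io.Text;

public class SortComparatorDemo {
    public static void main(String[] args) {
        SortComparator comparator = new SortComparator();
        // Different first fields: "2 7" comes before "3 1", so the result is negative.
        System.out.println(comparator.compare(new Text("2 7"), new Text("3 1")));
        // Same first field: "3 1" comes before "3 4", so the result is negative.
        System.out.println(comparator.compare(new Text("3 1"), new Text("3 4")));
    }
}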

GroupingComparator:

import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;

//Group on the first field only: every record whose first field is equal is handed to a
//single reduce() call and iterated as one group.

public class GroupingComparator extends WritableComparator {
    protected GroupingComparator() {
        super(Text.class, true);
    }

    @Override
    public int compare(WritableComparable a, WritableComparable b) {
        return Integer.compare(Integer.parseInt(a.toString().split(" ")[0]),
                Integer.parseInt(b.toString().split(" ")[0]));
    }

}
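
Another hypothetical check (again assumed to be in the same package, since the constructor is protected): keys that only differ in the second field compare as equal, so they land in the same reduce group:

import org.apache.hadoop.io.Text;

public class GroupingComparatorDemo {
    public static void main(String[] args) {
        GroupingComparator comparator = new GroupingComparator();
        // Same first field -> 0, i.e. the same group.
        System.out.println(comparator.compare(new Text("3 1"), new Text("3 4")));
        // Different first fields -> non-zero, i.e. different groups.
        System.out.println(comparator.compare(new Text("2 7"), new Text("3 1")));
    }
}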

JobRun:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;


public class JobRun {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        Configuration conf=new Configuration();
        Job job = Job.getInstance(conf, "Sort");
        job.setJarByClass(JobRun.class);
        job.setMapperClass(SecondaryMapper.class);
        job.setReducerClass(SecondaryReducer.class);
        job.setPartitionerClass(KeyPartitioner.class);
        job.setSortComparatorClass(SortComparator.class);

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(NullWritable.class);

        job.setOutputKeyClass(NullWritable.class);
        job.setOutputValueClass(Text.class);


        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);

        job.setGroupingComparatorClass(GroupingComparator.class);

        FileInputFormat.setInputPaths(job, new Path("/input/sort"));
        FileOutputFormat.setOutputPath(job, new Path("/output/sort"));

        //Force the number of reducers to 1 so the result is globally ordered;
        //with more reducers each output file would only be ordered internally.
        job.setNumReduceTasks(1);
        System.out.println(job.waitForCompletion(true) ? 0 : 1);

    }

}
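
To try the job, the classes are typically packaged into a jar and submitted with the hadoop launcher. The jar name below is just a placeholder; the input files must already exist under /input/sort on HDFS, and /output/sort must not exist yet:

hadoop jar secondary-sort.jar JobRun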

Result after the secondary sort:
[screenshot of the output file; it matches the expected ordering listed after the sample input above]
Data flow (I drew a diagram based on my own understanding, so it may not be entirely accurate):
[hand-drawn diagram of the data flow]
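
As a rough textual outline of that flow, matching the code above:

1. SecondaryMapper reads each line and emits (line, NullWritable).
2. KeyPartitioner assigns every line with the same first field to the same reducer (with a single reducer this is moot, but it keeps the job correct if more reducers are used).
3. During the shuffle, SortComparator orders the keys by the first field and then by the second.
4. GroupingComparator merges all keys that share a first field into one reduce group.
5. SecondaryReducer iterates over each group and writes out the key for every value, producing the fully sorted output.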
