Hand-Written MapReduce Series: Full Sort via a RawComparator&lt;T&gt; Implementation

Preface

This series organizes my hand-written MapReduce programs, written to deepen my understanding of MR!

Data source

  Phone number    Upstream  Downstream  Total
13470253144	180	180	360
13509468723	7335	110349	117684
13560439638	918	4938	5856
13568436656	3597	25635	29232
13590439668	1116	954	2070
13630577991	6960	690	7650
13682846555	1938	2910	4848
13729199489	240	0	240
13736230513	2481	24681	27162
13768778790	120	120	240
13846544121	264	0	264
13956435636	132	1512	1644
13966251146	240	0	240
13975057813	11058	48243	59301
13992314666	3008	3720	6728
15043685818	3659	3538	7197
15910133277	3156	2936	6092
15959002129	1938	180	2118
18271575951	1527	2106	3633
18390173782	9531	2412	11943
84188413	4116	1432	5548
13560439639	918	4938	5856
13560439631	918	4938	5856
13560439632	918	4938	5856
13560439633	918	4938	5856

Requirement

Perform a full (global) sort of all records in descending order of total flow.

Implementation

Note that FlowBean does not implement the WritableComparable interface here; it only implements Writable! (A contrast sketch using WritableComparable appears at the end of the Summary.)

package com.zhengkw.rawcomparabletest;

import org.apache.hadoop.io.Writable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

/**
 * @ClassName:FlowBean
 * @author: zhengkw
 * @description:
 * @date: 20/03/02 8:09 PM
 * @version:1.0
 * @since: jdk 1.8
 */
public class FlowBean implements Writable {

    // upstream traffic
    private Long upFlow;
    // downstream traffic
    private Long downFlow;
    // total traffic
    private Long totalFlow;


    public FlowBean() {
    }

    public void setUpFlow(Long upFlow) {
        this.upFlow = upFlow;
    }

    public void setDownFlow(Long downFlow) {
        this.downFlow = downFlow;
    }

    public void setTotalFlow(Long totalFlow) {
        this.totalFlow = totalFlow;
    }

    public Long getUpFlow() {
        return upFlow;
    }

    public Long getDownFlow() {
        return downFlow;
    }

    public Long getTotalFlow() {
        return totalFlow;
    }

    public void set(Long upFlow, Long downFlow) {
        this.upFlow = upFlow;
        this.downFlow = downFlow;
        this.totalFlow = upFlow + downFlow;
    }

    @Override
    public void write(DataOutput out) throws IOException {
        out.writeLong(upFlow);
        out.writeLong(downFlow);
        out.writeLong(totalFlow);
    }

    @Override
    public void readFields(DataInput in) throws IOException {

        this.upFlow = in.readLong();
        this.downFlow = in.readLong();
        this.totalFlow = in.readLong();
    }

    @Override
    public String toString() {
        return upFlow +
                "\t" + downFlow +
                "\t" + totalFlow;
    }
}
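Since the raw comparator below works directly on FlowBean's serialized bytes, it helps to see the binary layout: write(...) emits exactly three longs, i.e. 24 bytes per key. A minimal round-trip sketch (illustrative only; the class FlowBeanRoundTrip is made up here and is not part of the job):

package com.zhengkw.rawcomparabletest;

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;

public class FlowBeanRoundTrip {
    public static void main(String[] args) throws Exception {
        FlowBean in = new FlowBean();
        in.set(180L, 180L);  // totalFlow is computed as 360

        // serialize: three writeLong calls -> 24 bytes
        DataOutputBuffer out = new DataOutputBuffer();
        in.write(out);
        System.out.println("serialized length: " + out.getLength());  // 24

        // deserialize from the raw bytes, exactly as the comparator will
        DataInputBuffer buf = new DataInputBuffer();
        buf.reset(out.getData(), 0, out.getLength());
        FlowBean back = new FlowBean();
        back.readFields(buf);
        System.out.println(back);  // 180	180	360
    }
}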

RawComparator

package com.zhengkw.rawcomparabletest;

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.RawComparator;

import java.io.IOException;

/**
 * @ClassName:RawComparatorTest
 * @author: zhengkw
 * @description:
 * @date: 20/03/02 8:17 PM
 * @version:1.0
 * @since: jdk 1.8
 */
public class RawComparatorTest implements RawComparator<FlowBean> {

    FlowBean flowBean1 = new FlowBean();
    FlowBean flowBean2 = new FlowBean();
    DataInputBuffer inputBuffer = new DataInputBuffer();

    @Override
    public int compare(FlowBean o1, FlowBean o2) {
        // Descending order by total flow: compare o2 against o1.
        // (Never compare boxed Longs with ==/>/<; use compareTo instead.)
        return o2.getTotalFlow().compareTo(o1.getTotalFlow());
    }

    /**
     * Compare two objects in binary.
     * b1[s1:l1] is the first object, and b2[s2:l2] is the second object.
     *
     * @param b1 The first byte array.
     * @param s1 The position index in b1. The object under comparison's starting index.
     * @param l1 The length of the object in b1.
     * @param b2 The second byte array.
     * @param s2 The position index in b2. The object under comparison's starting index.
     * @param l2 The length of the object under comparison in b2.
     * @return An integer result of the comparison.
     */
    @Override
    public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {

        try {
            // wrap the raw bytes of the first key in the DataInputBuffer
            inputBuffer.reset(b1, s1, l1);
            // deserialize the byte array back into a FlowBean
            flowBean1.readFields(inputBuffer);
            inputBuffer.reset(b2, s2, l2);
            flowBean2.readFields(inputBuffer);

        } catch (IOException e) {
            e.printStackTrace();
        }
        return compare(flowBean1, flowBean2);
    }


}
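Deserializing both beans on every call works, but it gives up most of the benefit of a raw comparator. Because FlowBean has a fixed layout (upFlow, downFlow, totalFlow as three consecutive longs), totalFlow can be read straight out of the serialized key at byte offset 16. A hedged alternative sketch (TotalFlowRawComparator is a hypothetical name, not code from this post):

package com.zhengkw.rawcomparabletest;

import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.io.WritableComparator;

public class TotalFlowRawComparator implements RawComparator<FlowBean> {
    // totalFlow is the third long: 2 * 8 bytes past the start of the record
    private static final int TOTAL_FLOW_OFFSET = 16;

    @Override
    public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
        // read the totals directly from the serialized keys, no deserialization
        long t1 = WritableComparator.readLong(b1, s1 + TOTAL_FLOW_OFFSET);
        long t2 = WritableComparator.readLong(b2, s2 + TOTAL_FLOW_OFFSET);
        return Long.compare(t2, t1);  // descending
    }

    @Override
    public int compare(FlowBean o1, FlowBean o2) {
        return o2.getTotalFlow().compareTo(o1.getTotalFlow());
    }
}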

Mapper

package com.zhengkw.rawcomparabletest;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

/**
 * @ClassName:FlowMapper
 * @author: zhengkw
 * @description:
 * @date: 20/03/02 8:08 PM
 * @version:1.0
 * @since: jdk 1.8
 */
public class FlowMapper extends Mapper<LongWritable, Text, FlowBean, NullWritable> {


    FlowBean k = new FlowBean();

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {

        String line = value.toString();
        String[] info = line.split("\t");
        // e.g. 13470253144	180	180	360 -- the fields are tab-separated
        Long upFlow = Long.parseLong(info[info.length - 3]);
        Long downFlow = Long.parseLong(info[info.length - 2]);

        k.set(upFlow, downFlow);
        context.write(k, NullWritable.get());

    }
}
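Indexing the split array from the end (info.length - 3 and info.length - 2) pins the parse to the two flow columns no matter what precedes them; the pre-computed total in the last column is ignored and recomputed by set(). A quick standalone check (illustrative only; SplitDemo is a made-up name):

public class SplitDemo {
    public static void main(String[] args) {
        String line = "13470253144\t180\t180\t360";
        String[] info = line.split("\t");
        long up = Long.parseLong(info[info.length - 3]);    // 180
        long down = Long.parseLong(info[info.length - 2]);  // 180
        System.out.println(up + "\t" + down + "\t" + (up + down));  // 180	180	360
    }
}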

Reducer

package com.zhengkw.rawcomparabletest;

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

/**
 * @ClassName:FlowReducer
 * @author: zhengkw
 * @description:
 * @date: 20/03/02 8:09 PM
 * @version:1.0
 * @since: jdk 1.8
 */
public class FlowReducer extends Reducer<FlowBean, NullWritable, FlowBean, NullWritable> {



    @Override
    protected void reduce(FlowBean key, Iterable<NullWritable> values, Context context) throws IOException, InterruptedException {
        // Records with equal totalFlow arrive as one group (the sort comparator
        // doubles as the grouping comparator when none is set), so write the key
        // once per value to keep every record in the output.
        for (NullWritable value : values) {
            context.write(key, value);
        }
    }


}

Driver

package com.zhengkw.rawcomparabletest;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

/**
 * @ClassName:FlowDriver
 * @author: zhengkw
 * @description:
 * @date: 20/03/02 8:09 PM
 * @version:1.0
 * @since: jdk 1.8
 */
public class FlowDriver {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        Path input = new Path("F:\\mrinput\\sort");
        Path output = new Path("F:/output5");


        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        // delete the output directory if it already exists
        if (fs.exists(output)) {
            fs.delete(output, true);
        }

        // create the Job object via reflection
        Job job = Job.getInstance(conf);
        // register the custom RawComparator as the sort comparator
        job.setSortComparatorClass(RawComparatorTest.class);

        // a full (global) sort needs a single reduce task, which is the default
        //job.setNumReduceTasks(1);

        // set the jar, Mapper and Reducer classes
        job.setJarByClass(FlowDriver.class);
        job.setMapperClass(FlowMapper.class);
        job.setReducerClass(FlowReducer.class);

        // declare the final output kv types (they double as the map output types here)
        job.setOutputKeyClass(FlowBean.class);
        job.setOutputValueClass(NullWritable.class);

        // set the input and output paths
        FileInputFormat.setInputPaths(job, input);
        FileOutputFormat.setOutputPath(job, output);

        // submit the job's configuration and the jar containing these classes to YARN, then wait
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
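Because the output key is the FlowBean itself and the value is NullWritable, the phone numbers do not appear in the result; each output line is upFlow, downFlow and totalFlow from FlowBean.toString(). Assuming the job runs over the sample data above with the default single reducer, the first few output lines should look like:

7335	110349	117684
11058	48243	59301
3597	25635	29232
2481	24681	27162
...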

Summary

Analysis

  • The comparison and sorting happen during the map-side sort (i.e. in the map-shuffle phase)!

  • Once RawComparator is implemented, the raw byte arrays can be wrapped in a DataInputBuffer, deserialized into bean objects via the readFields(DataInput in) method provided by the Writable interface, and the resulting beans are then compared with the compare method!

  • The comparator must be registered in the Driver:
    job.setSortComparatorClass(RawComparatorTest.class);
    When the Mapper's KEYOUT is a custom bean, it must be sortable in the sort phase. By default Hadoop checks whether KEYOUT.class directly or indirectly implements WritableComparable; if it does, the default comparator for that type is used. If it does not, and no sort comparator is set, the job fails! (See the contrast sketch right after this list.)
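For contrast, the conventional approach (deliberately not used in this post) is to let the bean itself implement WritableComparable, so no separate comparator needs to be registered in the Driver. A minimal sketch with a hypothetical ComparableFlowBean:

package com.zhengkw.rawcomparabletest;

import org.apache.hadoop.io.WritableComparable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

public class ComparableFlowBean implements WritableComparable<ComparableFlowBean> {

    private long upFlow;
    private long downFlow;
    private long totalFlow;

    // descending order by total flow
    @Override
    public int compareTo(ComparableFlowBean o) {
        return Long.compare(o.totalFlow, this.totalFlow);
    }

    @Override
    public void write(DataOutput out) throws IOException {
        out.writeLong(upFlow);
        out.writeLong(downFlow);
        out.writeLong(totalFlow);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        upFlow = in.readLong();
        downFlow = in.readLong();
        totalFlow = in.readLong();
    }
}

With this bean as KEYOUT, the job.setSortComparatorClass(...) call in the Driver could simply be dropped.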


POM dependencies

<dependencies>
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>RELEASE</version>
        </dependency>
        <dependency>
            <groupId>org.apache.logging.log4j</groupId>
            <artifactId>log4j-core</artifactId>
            <version>2.8.2</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>2.7.2</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>2.7.2</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-hdfs</artifactId>
            <version>2.7.2</version>
        </dependency>
        <!--     <dependency>
                   <groupId>jdk.tools</groupId>
                   <artifactId>jdk.tools</artifactId>
                   <version>1.8</version>
                   <scope>system</scope>
                   <systemPath>${JAVA_HOME}/lib/tools.jar</systemPath>
               </dependency>-->
    </dependencies>