MapReduce 02: Partitioning and Custom Data Types

This article walks through introductory MapReduce code examples, covering a traffic-summary requirement, custom partitioning, and sorting of the summarized results.

I. Traffic Summary Requirement and MapReduce Code Example

The key-value pairs emitted in the map phase are partitioned and buffered during the shuffle, so that all records sharing the same key end up in the same reduce task.

Concepts: upstream flow is the number of bytes the host sends to the Internet; downstream flow is the number of bytes it downloads from the network. The task is to compute, for each phone number, the total upstream flow, the total downstream flow, and their sum.
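For reference, the input is assumed to be tab-separated log records roughly like the following (the exact columns are hypothetical; the mapper below only relies on the phone number being the second field and the up/down flow being the third- and second-to-last fields):

1363157985066	13726230503	120.196.100.82	www.example.com	2481	24681	200
1363157995052	13826544101	182.91.240.22	www.example.com	264	0	200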

1. Custom data type: FlowBean

First, create a FlowBean class to hold the upstream and downstream flow data:

import org.apache.hadoop.io.Writable;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

public class FlowBean implements Writable {
    private Long upFlow;
    private Long dFlow;
    private Long sumFlow;

//    a no-arg constructor is required so the framework can instantiate the bean via reflection
    public FlowBean() {
    }

    public FlowBean(Long upFlow, Long dFlow) {
        this.upFlow = upFlow;
        this.dFlow = dFlow;
        // compute the total once in the constructor
        this.sumFlow = upFlow + dFlow;
    }

    public Long getUpFlow() {
        return upFlow;
    }

    public void setUpFlow(Long upFlow) {
        this.upFlow = upFlow;
    }

    public Long getdFlow() {
        return dFlow;
    }

    public void setdFlow(Long dFlow) {
        this.dFlow = dFlow;
    }

    public Long getSumFlow() {
        return sumFlow;
    }

    public void setSumFlow(Long sumFlow) {
        this.sumFlow = sumFlow;
    }

    /**
     * Serialization: write the fields in a fixed order.
     * sumFlow is derived from the other two fields, so it is not written.
     * @param dataOutput
     * @throws IOException
     */
    public void write(DataOutput dataOutput) throws IOException {
        dataOutput.writeLong(upFlow);
        dataOutput.writeLong(dFlow);
    }

    /**
     * Deserialization: read the fields in the same order they were written.
     * The framework creates instances via reflection, which is why the no-arg constructor is required.
     * @param dataInput
     * @throws IOException
     */
    public void readFields(DataInput dataInput) throws IOException {
        upFlow = dataInput.readLong();
        dFlow = dataInput.readLong();
        // restore the derived field so it is never null after deserialization
        sumFlow = upFlow + dFlow;
    }

    @Override
    public String toString() {
        // tab-separated so that downstream jobs (e.g. the sort step in section III) can re-parse the output
        return this.upFlow + "\t" + this.dFlow + "\t" + this.sumFlow;
    }
}

2. flowCountMapper

The mapper's output value type is FlowBean:

import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class flowCountMapper extends Mapper<LongWritable, Text, Text, FlowBean> {
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        String s = value.toString();
        String[] fields = StringUtils.split(s, "\t");

        // fields[0] is a record id and is not needed here; fields[1] is the phone number
        Text phone = new Text(fields[1]);

        // the up/down flow columns are addressed from the end of the record
        long upFlow = Long.parseLong(fields[fields.length - 3]);
        long dFlow = Long.parseLong(fields[fields.length - 2]);

        context.write(phone,new FlowBean(upFlow,dFlow));
    }
}

3. flowCountReducer

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class flowCountReducer extends Reducer<Text,FlowBean,Text,FlowBean> {
    @Override
    protected void reduce(Text key, Iterable<FlowBean> values, Context context) throws IOException, InterruptedException {
        long sumUpFlow = 0;
        long sumDFlow = 0;
        // accumulate the up/down flow of all records belonging to this phone number
        for (FlowBean value : values) {
            sumUpFlow += value.getUpFlow();
            sumDFlow += value.getdFlow();
        }
        context.write(key, new FlowBean(sumUpFlow, sumDFlow));
    }
}
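With the toString defined above and the default TextOutputFormat (which separates key and value with a tab), each line written by the reducer has the form phone \t upFlow \t dFlow \t sumFlow, for example (numbers made up):

13726230503	2481	24681	27162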

4. Running the job and monitoring it via the web UI

Reference: a detailed guide to running a jar from the Hadoop command line (building the jar, uploading files to HDFS, running the job, downloading HDFS files to local disk).

To run the jar, change into the root directory where the jar was generated and use a command of the form:

hadoop jar <path-to-jar> <fully-qualified-main-class> <input-dir> <output-dir>
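For instance, assuming the classes above are packaged into flowcount.jar with a driver class analogous to the byProvinceFlowCountJobSubmitter shown in section II (the jar name, class name, and HDFS paths below are placeholders):

hadoop jar flowcount.jar flowCountJobSubmitter /flow/input /flow/output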

II. Partitioned Flow Summary Code Example

The previous example ran with only a single reduce task. In practice, multiple reduce tasks are usually run in parallel to speed up the final aggregation and reduce latency. In customized scenarios, developers want map output matching a particular rule to be handled by a designated reduce task; this is what MapReduce partitioning is for, and the partitioning rule can be customized.

To set the number of reduce tasks, call the following on the Job instance in the driver code:

job.setNumReduceTasks(n);
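By default the shuffle uses Hadoop's HashPartitioner, which spreads keys across the reduce tasks by hash code. Its logic is roughly the following (re-stated here for illustration, not copied from the library source):

import org.apache.hadoop.mapreduce.Partitioner;

// roughly what the default org.apache.hadoop.mapreduce.lib.partition.HashPartitioner does
public class DefaultStyleHashPartitioner<K, V> extends Partitioner<K, V> {
    @Override
    public int getPartition(K key, V value, int numReduceTasks) {
        // mask off the sign bit, then take the remainder so every key lands in [0, numReduceTasks)
        return (key.hashCode() & Integer.MAX_VALUE) % numReduceTasks;
    }
}

When the hash-based spread does not match the desired grouping, such as grouping by province, a custom Partitioner like the one below is used instead.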

The partitioned flow-summary code is shown below.

The requirement: compute the total up/down flow per province, which means mapping each phone number to a province by its prefix.

1. byProvinceFlowCountMapper

import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.IOException;

public class byProvinceFlowCountMapper extends Mapper<LongWritable, Text, Text, FlowBean> {
    private FlowBean flowBean = new FlowBean();
    private Text phone = new Text();

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {

        String[] s = StringUtils.split(value.toString(), "\t");
        // reuse the Text and FlowBean fields instead of creating new objects on every call,
        // which would put heavy pressure on garbage collection
        phone.set(s[1]);
        flowBean.setFlowBean(Long.parseLong(s[s.length - 3]), Long.parseLong(s[s.length - 2]));
        // writing the same reused object is safe: the framework serializes the current field
        // values at write time, so each emitted record carries values, not a reference
        context.write(phone, flowBean);

    }
}

Note: if the mapper, reducer, or partitioner creates new objects or reads files on every call, the pressure on garbage collection and the cost of concurrent reads become significant. Instead, hold reusable instances as private fields of the class, or load the data into memory once when the class is loaded.

Add a setter to the FlowBean class:

    public void setFlowBean(Long upFlow, Long dFlow) {
        this.upFlow = upFlow;
        this.dFlow = dFlow;
        this.sumFlow = upFlow + dFlow;
    }

2. byProvinceFlowCountReducer

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class byProvinceFlowCountReducer extends Reducer<Text,FlowBean,Text,FlowBean> {
    private FlowBean sumFlowBean = new FlowBean();

    @Override
    protected void reduce(Text key, Iterable<FlowBean> values, Context context) throws IOException, InterruptedException {
        long upFlowSum = 0;
        long downFlowSum = 0;

        for (FlowBean value : values) {
            upFlowSum+=value.getUpFlow();
            downFlowSum += value.getdFlow();
        }
        sumFlowBean.setFlowBean(upFlowSum,downFlowSum);
        context.write(key,sumFlowBean);
    }
}

3. Partitioning rule: byProvinceFlowCountPartitioner

The partitioning rule: records are partitioned by the province the phone number belongs to.
The dictionary mapping phone-number prefixes to provinces would normally live in a database. To avoid concurrent reads on every call, the whole table is loaded into memory when the class is loaded instead of being looked up inside the method.
The custom partitioner:

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Partitioner;

import java.util.HashMap;

public class byProvinceFlowCountPartitioner extends Partitioner<Text, FlowBean> {
    private static HashMap<String, Integer> provinceMap = new HashMap<String, Integer>();
    static {
        //example mapping only; data skew and full prefix coverage are ignored for now
        provinceMap.put("155",0);
        provinceMap.put("158",1);
        provinceMap.put("156",2);
        provinceMap.put("157",3);
    }
    /**
     * Map the first three digits of the phone number to a partition number.
     * Prefixes not found in the dictionary fall into partition 4,
     * so the job must be run with at least 5 reduce tasks.
     * @param text
     * @param flowBean
     * @param numPartitions
     * @return
     */
    @Override
    public int getPartition(Text text, FlowBean flowBean, int numPartitions) {
        String phonePrefix = text.toString().substring(0, 3);
        Integer partition = provinceMap.get(phonePrefix);
        if (partition == null) partition = 4;
        return partition;
    }
}

4. The driver (job submission) code

Set the partitioning rule: explicitly specify the Partitioner class so that the shuffle uses the custom partitioner instead of the default HashPartitioner.

Specify the number of reduce tasks. Since the partitioner above can return partition numbers 0 through 4, the job needs at least 5 reduce tasks.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;


public class byProvinceFlowCountJobSubmitter {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        if (args.length < 3) {
            System.err.println("Usage: <input path> <output path> <numReduceTasks>");
            System.exit(2);
        }
        Job job = Job.getInstance(new Configuration());

        job.setJarByClass(byProvinceFlowCountJobSubmitter.class);

        job.setMapperClass(byProvinceFlowCountMapper.class);
        job.setReducerClass(byProvinceFlowCountReducer.class);

        /**
         * If the mapper's output key/value types are identical to the reducer's output types,
         * the setMapOutputKeyClass / setMapOutputValueClass calls below can be omitted.
         */
//        job.setMapOutputKeyClass(Text.class);
//        job.setMapOutputValueClass(FlowBean.class);

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(FlowBean.class);

        //register the custom partitioning rule, replacing the default HashPartitioner
        job.setPartitionerClass(byProvinceFlowCountPartitioner.class);
        job.setNumReduceTasks(Integer.parseInt(args[2]));

        FileInputFormat.setInputPaths(job,new Path(args[0]));
        FileOutputFormat.setOutputPath(job,new Path(args[1]));
//        print the job progress to the client while it runs
        boolean state = job.waitForCompletion(true);
        System.exit(state?0:1);
    }
}
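A run of this driver might then look like the following (the jar name and HDFS paths are placeholders; 5 reduce tasks cover partitions 0-3 plus the catch-all partition 4):

hadoop jar flowcount.jar byProvinceFlowCountJobSubmitter /flow/input /flow/output-province 5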

III. Sorting the Partitioned Results

To sort by a custom data type, the type must implement the WritableComparable interface; FlowBean therefore changes from implements Writable to implements WritableComparable<FlowBean> and gains a compareTo method.

1. Descending sort within a partition

    @Override
    public int compareTo(FlowBean o) {
        // descending order by total flow; equal totals are treated as "greater",
        // so no two keys ever compare as equal and each key forms its own reduce group
        return this.getSumFlow() > o.getSumFlow() ? -1 : 1;
    }

2. Sorting the summarized flow

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

public class flowCountSortStep {
    public static class flowCountSortStepMapper extends Mapper<LongWritable, Text, FlowBean, Text> {
        private FlowBean flowBean  = new FlowBean();
        private Text phone_num = new Text();
        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            // each input line is one output line of the summary job: phone \t upFlow \t dFlow \t sumFlow
            String line = value.toString();
            String[] result = line.split("\t");
            String phone = result[0];
            long upFlow = Long.parseLong(result[1]);
            long downFlow = Long.parseLong(result[2]);

            flowBean.setFlowBean(upFlow,downFlow);
            phone_num.set(phone);
            context.write(flowBean,phone_num);
        }
    }


    public static  class flowCountSortStepReducer extends Reducer<FlowBean, Text, Text, FlowBean>{
        /**
         * No aggregation is needed in the sort step; because compareTo never returns 0,
         * each reduce call receives exactly one key-value pair.
         * @param key
         * @param values
         * @param context
         * @throws IOException
         * @throws InterruptedException
         */
        @Override
        protected void reduce(FlowBean key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
            context.write(values.iterator().next(),key);
        }
    }

    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        job.setJarByClass(flowCountSortStep.class);
        job.setMapperClass(flowCountSortStepMapper.class);
        job.setReducerClass(flowCountSortStepReducer.class);

        // the mapper emits FlowBean keys and Text values, so the map output types must be set accordingly
        job.setMapOutputKeyClass(FlowBean.class);
        job.setMapOutputValueClass(Text.class);

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(FlowBean.class);

        FileInputFormat.setInputPaths(job,new Path(args[0]));
        FileOutputFormat.setOutputPath(job,new Path(args[1]));
//        print the job progress to the client while it runs
        job.waitForCompletion(true);
    }
}
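The sort step takes the output directory of a previous summary job as its input; a hypothetical run (jar name and paths are placeholders):

hadoop jar flowcount.jar flowCountSortStep /flow/output /flow/output-sorted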

Thanks for reading. If you spot any problems, corrections and discussion are welcome!
