MapReduce Case Study: Mobile Traffic Statistics

Requirement 1: Sums per phone number

 

For each phone number, compute the total number of upstream packets, the total number of downstream packets, the total upstream traffic, and the total downstream traffic.
Analysis: use the phone number as the key and the four fields (upstream packets, downstream packets, upstream traffic, downstream traffic) as the value; this key/value pair becomes the output of the map phase and the input of the reduce phase.
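The input is a tab-separated access log. The exact layout is not shown in this post, so the line below is a hypothetical record laid out to match the field indices the mapper uses (phone number at index 1, the four counters at indices 6-9):

1363157985066	13726230503	00-FD-07-A4-72-B8:CMCC	120.196.100.82	i02.c.example.com	video	24	27	2481	24681	200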
 

Step 1: Define FlowBean, the custom map output value

package com.mapreduce.flow_count_demo1;

import org.apache.hadoop.io.Writable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;


public class FlowBean implements Writable {

    private Integer upFlow;

    private Integer downFlow;

    private Integer upCountFlow;

    private Integer downCountFlow;

    public FlowBean() {
    }

    public FlowBean(Integer upFlow, Integer downFlow, Integer upCountFlow, Integer downCountFlow) {
        this.upFlow = upFlow;
        this.downFlow = downFlow;
        this.upCountFlow = upCountFlow;
        this.downCountFlow = downCountFlow;
    }

    public Integer getUpFlow() {
        return upFlow;
    }

    public void setUpFlow(Integer upFlow) {
        this.upFlow = upFlow;
    }

    public Integer getDownFlow() {
        return downFlow;
    }

    public void setDownFlow(Integer downFlow) {
        this.downFlow = downFlow;
    }

    public Integer getUpCountFlow() {
        return upCountFlow;
    }

    public void setUpCountFlow(Integer upCountFlow) {
        this.upCountFlow = upCountFlow;
    }

    public Integer getDownCountFlow() {
        return downCountFlow;
    }

    public void setDownCountFlow(Integer downCountFlow) {
        this.downCountFlow = downCountFlow;
    }

    @Override
    public void write(DataOutput out) throws IOException {
        // Serialize the four counters; the order here must match readFields()
        out.writeInt(upFlow);
        out.writeInt(downFlow);
        out.writeInt(upCountFlow);
        out.writeInt(downCountFlow);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        // Deserialize in exactly the order written by write()
        this.upFlow = in.readInt();
        this.downFlow = in.readInt();
        this.upCountFlow = in.readInt();
        this.downCountFlow = in.readInt();
    }

    @Override
    public String toString() {
        // Tab-separated so the sort job in Requirement 2 can re-parse these fields
        return upFlow + "\t" + downFlow + "\t" + upCountFlow + "\t" + downCountFlow;
    }
}
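As a quick sanity check (a minimal sketch, not part of the original jobs), Hadoop's in-memory DataOutputBuffer/DataInputBuffer can round-trip the bean to confirm that write and readFields stay in the same field order:

package com.mapreduce.flow_count_demo1;

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;

public class FlowBeanRoundTrip {

    public static void main(String[] args) throws Exception {
        FlowBean original = new FlowBean(24, 27, 2481, 24681);

        // Serialize the four ints into an in-memory buffer
        DataOutputBuffer out = new DataOutputBuffer();
        original.write(out);

        // Deserialize them back in the same order
        DataInputBuffer in = new DataInputBuffer();
        in.reset(out.getData(), out.getLength());
        FlowBean copy = new FlowBean();
        copy.readFields(in);

        System.out.println(copy); // 24	27	2481	24681
    }
}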

Step 2: Define the FlowCountMapper class:

package com.mapreduce.flow_count_demo1;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;


public class FlowCountMapper extends Mapper<LongWritable, Text,Text,FlowBean> {


    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // 1. Split the line and extract the phone number (field index 1)
        String[] split = value.toString().split("\t");
        String phoneNum = split[1];
        // 2. Extract the four traffic fields (indices 6-9)
        FlowBean flowBean = new FlowBean();
        flowBean.setUpFlow(Integer.parseInt(split[6]));
        flowBean.setDownFlow(Integer.parseInt(split[7]));
        flowBean.setUpCountFlow(Integer.parseInt(split[8]));
        flowBean.setDownCountFlow(Integer.parseInt(split[9]));
        // 3. Write K2 (phone number) and V2 (FlowBean) to the context
        context.write(new Text(phoneNum), flowBean);
    }
}
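Given the hypothetical input line above, this mapper would emit the pair (13726230503, a FlowBean carrying 24, 27, 2481, 24681): the phone number as K2 and the four counters as V2, so all records for the same phone number meet in one reduce call.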

Step 3: Define the FlowCountReducer class:

package com.mapreduce.flow_count_demo1;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class FlowCountReducer extends Reducer<Text,FlowBean,Text,FlowBean> {


    @Override
    protected void reduce(Text key, Iterable<FlowBean> values, Context context) throws IOException, InterruptedException {
        // Accumulate the four counters across all records for this phone number
        FlowBean flowBean = new FlowBean();
        int upFlow = 0;
        int downFlow = 0;
        int upCountFlow = 0;
        int downCountFlow = 0;
        for (FlowBean value : values) {
            upFlow += value.getUpFlow();
            downFlow += value.getDownFlow();
            upCountFlow += value.getUpCountFlow();
            downCountFlow += value.getDownCountFlow();
        }
        flowBean.setUpFlow(upFlow);
        flowBean.setDownFlow(downFlow);
        flowBean.setUpCountFlow(upCountFlow);
        flowBean.setDownCountFlow(downCountFlow);
        // Write K3 (phone number) and V3 (aggregated totals) to the context
        context.write(key, flowBean);

    }
}
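For example, if 13726230503 appears in two input records, both FlowBeans arrive in the same reduce call for that key, and a single line with the four sums is emitted.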

Step 4: The job driver (main entry point):
 

package com.mapreduce.flow_count_demo1;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class JobMain extends Configured implements Tool {


    // This method configures and submits the job
    @Override
    public int run(String[] strings) throws Exception {

        // 1. Create a job instance
        Job job = Job.getInstance(super.getConf(), "mapReduce_flow_count");

        // 2. Configure the job: the eight steps

        // Step 1: specify how and where the input is read
        job.setInputFormatClass(TextInputFormat.class);
        TextInputFormat.addInputPath(job, new Path("file:///E:\\input\\flow_input"));

        // Step 2: specify the map class and its output types
        job.setMapperClass(FlowCountMapper.class);
        job.setMapOutputKeyClass(Text.class);       // K2 type
        job.setMapOutputValueClass(FlowBean.class); // V2 type

        // Steps 3-6: partitioning, sorting, combining, grouping (defaults used here)

        // Step 7: specify the reduce class and its output types
        job.setReducerClass(FlowCountReducer.class);
        // K3 type
        job.setOutputKeyClass(Text.class);
        // V3 type
        job.setOutputValueClass(FlowBean.class);

        // Step 8: specify the output format and path
        job.setOutputFormatClass(TextOutputFormat.class);
        TextOutputFormat.setOutputPath(job, new Path("file:///E:\\out\\flow_output"));

        // Submit the job and wait for completion
        boolean b = job.waitForCompletion(true);

        return b ? 0 : 1;
    }


    public static void main(String[] args) throws Exception {

        Configuration configuration = new Configuration();
        int run = ToolRunner.run(configuration, new JobMain(), args);
        System.exit(run);
    }




}
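With TextOutputFormat, each output line is the key, a tab, and the value's toString(), e.g. 13726230503	48	54	4962	49362 (hypothetical totals for a phone number that appeared twice). The file:/// paths above run against the local file system; to run on a cluster instead, switch them to HDFS paths and submit the packaged jar, e.g. (jar name is hypothetical):

hadoop jar flow_count_demo1.jar com.mapreduce.flow_count_demo1.JobMain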

 

 

Requirement 2: Sort by upstream traffic in descending order
Analysis: use the output of Requirement 1 as the input for sorting. Define a custom FlowBean as the map output key and the phone number as the map output value, because the MapReduce framework sorts records by the map output key.
 

Step 1: Define FlowBean implementing WritableComparable for comparison and sorting
A note on Java's compareTo method:
compareTo compares the current object with the method's argument.
It returns 0 if the two are equal,
a negative number if the current object is less than the argument,
and a positive number if the current object is greater.
For example, if o1.compareTo(o2) returns a positive number, the current object (o1, the caller) sorts after the argument (o2); if it returns a negative number, o1 sorts before o2.
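A minimal standalone illustration of this (not from the original jobs, values are hypothetical): reversing the operands of compareTo in a comparator yields a descending sort.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class CompareToDemo {
    public static void main(String[] args) {
        List<Integer> flows = new ArrayList<>(Arrays.asList(180, 27, 3000));
        // b.compareTo(a) is negative when a > b, so larger values sort first
        flows.sort((a, b) -> b.compareTo(a));
        System.out.println(flows); // [3000, 180, 27]
    }
}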
 

package com.mapreduce.flow_count_demo2;

import org.apache.hadoop.io.WritableComparable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

public class FlowBean implements WritableComparable<FlowBean> {

    private Integer upFlow;

    private Integer downFlow;

    private Integer upCountFlow;

    private Integer downCountFlow;

    public FlowBean() {
    }

    public FlowBean(Integer upFlow, Integer downFlow, Integer upCountFlow, Integer downCountFlow) {
        this.upFlow = upFlow;
        this.downFlow = downFlow;
        this.upCountFlow = upCountFlow;
        this.downCountFlow = downCountFlow;
    }

    public Integer getUpFlow() {
        return upFlow;
    }

    public void setUpFlow(Integer upFlow) {
        this.upFlow = upFlow;
    }

    public Integer getDownFlow() {
        return downFlow;
    }

    public void setDownFlow(Integer downFlow) {
        this.downFlow = downFlow;
    }

    public Integer getUpCountFlow() {
        return upCountFlow;
    }

    public void setUpCountFlow(Integer upCountFlow) {
        this.upCountFlow = upCountFlow;
    }

    public Integer getDownCountFlow() {
        return downCountFlow;
    }

    public void setDownCountFlow(Integer downCountFlow) {
        this.downCountFlow = downCountFlow;
    }

    @Override
    public String toString() {
        return upFlow + "\t" + downFlow + "\t" + upCountFlow + "\t" + downCountFlow;
    }

    @Override
    public int compareTo(FlowBean o) {
        // Reverse comparison on upCountFlow so larger values sort first (descending);
        // unlike "? -1 : 1", this returns 0 for equal values, honoring the compareTo contract
        return o.upCountFlow.compareTo(this.upCountFlow);
    }

    @Override
    public void write(DataOutput out) throws IOException {
        out.writeInt(upFlow);
        out.writeInt(downFlow);
        out.writeInt(upCountFlow);
        out.writeInt(downCountFlow);

    }

    @Override
    public void readFields(DataInput dataInput) throws IOException {

        this.upFlow = dataInput.readInt();
        this.downFlow = dataInput.readInt();
        this.upCountFlow = dataInput.readInt();
        this.downCountFlow = dataInput.readInt();
    }
}

 

Step 2: Define the FlowMapper class:

package com.mapreduce.flow_count_demo2;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;


public class FlowMapper extends Mapper<LongWritable, Text,FlowBean,Text> {


    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {

        // Parse a line of Requirement 1's output: phone \t up \t down \t upCount \t downCount
        String[] split = value.toString().split("\t");
        String phoneNum = split[0];
        FlowBean flowBean = new FlowBean();

        flowBean.setUpFlow(Integer.parseInt(split[1]));
        flowBean.setDownFlow(Integer.parseInt(split[2]));
        flowBean.setUpCountFlow(Integer.parseInt(split[3]));
        flowBean.setDownCountFlow(Integer.parseInt(split[4]));
        // Write K2 (FlowBean, the sort key) and V2 (phone number) to the context
        context.write(flowBean, new Text(phoneNum));
    }
}

Step 3: Define the FlowCountSortReducer class:

package com.mapreduce.flow_count_demo2;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;


public class FlowCountSortReducer extends Reducer<FlowBean, Text,Text,FlowBean> {

    @Override
    protected void reduce(FlowBean key, Iterable<Text> values, Context context) throws IOException, InterruptedException {

        // Keys arrive sorted by upCountFlow (descending); swap key and value back
        // so each output line starts with the phone number
        for (Text value : values) {
            context.write(value, key);
        }
    }
}
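Because FlowBean is the key here, phone numbers whose upCountFlow compares equal arrive in the same reduce call; looping over the values and writing each (value, key) pair keeps one output line per phone number while restoring the phone-number-first layout.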

 

Step 4: The job driver (main entry point):

package com.mapreduce.flow_count_demo2;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class JobMain extends Configured implements Tool {
    @Override
    public int run(String[] strings) throws Exception {
        // Create a job instance
        Job job = Job.getInstance(super.getConf(), "mapreduce_sort_flow_count");


        // Step 1: set how K1 and V1 are read; the input is Requirement 1's output
        job.setInputFormatClass(TextInputFormat.class);
        TextInputFormat.addInputPath(job, new Path("file:///E:\\out\\flow_output"));

        // Step 2: set the Mapper class
        job.setMapperClass(FlowMapper.class);
        // Set the map output types, K2 and V2
        job.setMapOutputKeyClass(FlowBean.class);
        job.setMapOutputValueClass(Text.class);

        // Steps 3-6: partitioning, sorting, combining, grouping (defaults used here)

        // Step 7: set the Reducer class
        job.setReducerClass(FlowCountSortReducer.class);

        // Set the reduce output types, K3 and V3
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(FlowBean.class);

        // Step 8: set the output format and path
        job.setOutputFormatClass(TextOutputFormat.class);

        TextOutputFormat.setOutputPath(job, new Path("file:///E:\\out\\flow_sort_output"));

        boolean b = job.waitForCompletion(true);

        return b ? 0 : 1;
    }


    public static void main(String[] args) throws Exception {

        Configuration configuration = new Configuration();

        int run = ToolRunner.run(configuration, new JobMain(), args);
        System.exit(run);

    }

}
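This job uses the default single reduce task, so the sorted output lands in one file in fully descending order; with multiple reducers the order would only hold within each partition's output file.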

Requirement 3: Partition by phone number
Building on Requirement 1, route different phone numbers into different output files. This calls for a custom partitioner; here we partition on the leading digits of the phone number:
numbers starting with 135 go to one partition file,
numbers starting with 136 go to one partition file,
numbers starting with 137 go to one partition file,
and everything else goes to a fourth partition.
Three pieces are needed: the custom partitioner, the job settings that register it, and updated input/output paths so the job can run on the cluster (a sketch of the cluster settings follows the driver snippet below).
 

The custom partitioner:

package com.mapreduce.flow_count_demo3;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Partitioner;

public class FlowPartition extends Partitioner<Text,FlowBean> {
    @Override
    public int getPartition(Text text, FlowBean flowBean, int numReduceTasks) {

        // Route each record to a partition based on the phone number prefix
        String value = text.toString();

        if (value.startsWith("135")) {
            return 0;
        } else if (value.startsWith("136")) {
            return 1;
        } else if (value.startsWith("137")) {
            return 2;
        } else {
            return 3;
        }
    }
}
Add these settings in the driver's run method:

job.setPartitionerClass(FlowPartition.class);
job.setNumReduceTasks(4);
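job.setNumReduceTasks(4) must match the number of partitions the partitioner can return: with fewer reduce tasks (other than exactly one, where the partitioner is skipped entirely), a record routed to partition 3 triggers an illegal-partition error at runtime. To run on the cluster as the requirement asks, a sketch with hypothetical HDFS paths and jar name, to be adjusted for your environment:

// In run(), replace the local file:/// paths with HDFS paths, for example:
TextInputFormat.addInputPath(job, new Path("hdfs://node01:8020/input/flow_input"));
TextOutputFormat.setOutputPath(job, new Path("hdfs://node01:8020/out/partition_flow_output"));

Then package the project and submit it:

hadoop jar flow_count_demo3.jar com.mapreduce.flow_count_demo3.JobMain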

 

 

 

 
