Hadoop_09-10 MapReduce Internet Traffic Comprehensive Exercise

Comprehensive statistics on mobile internet traffic

Log format

[Figure: sample log lines. Fields are tab-separated; index 1 holds the phone number, and indexes 6-9 hold the upstream traffic, downstream traffic, total upstream traffic, and total downstream traffic.]

Traffic summation

Requirement: for each phone number, compute the sum of its upstream traffic, downstream traffic, total upstream traffic, and total downstream traffic.
Analysis: use the phone number as the key and the four fields (upstream traffic, downstream traffic, total upstream traffic, total downstream traffic) as the value. This key/value pair is the map-stage output and the reduce-stage input, so the shuffle groups every record of one phone number into a single reduce call.

Implementation

FlowNum: a JavaBean class implementing Writable

package cn.nina.mr.demo3;

import org.apache.hadoop.io.Writable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

public class FlowNum implements Writable {

    private Integer upFlow;        // upstream traffic
    private Integer downFlow;      // downstream traffic
    private Integer upCountFlow;   // total upstream traffic
    private Integer downCountFlow; // total downstream traffic

    // serialization: write the fields in a fixed order
    @Override
    public void write(DataOutput out) throws IOException {
        out.writeInt(upFlow);
        out.writeInt(downFlow);
        out.writeInt(upCountFlow);
        out.writeInt(downCountFlow);
    }
    // deserialization: read the fields back in the same order they were written
    @Override
    public void readFields(DataInput in) throws IOException {
        this.upFlow = in.readInt();
        this.downFlow = in.readInt();
        this.upCountFlow = in.readInt();
        this.downCountFlow = in.readInt();
    }

    public Integer getUpFlow() {
        return upFlow;
    }

    public void setUpFlow(Integer upFlow) {
        this.upFlow = upFlow;
    }

    public Integer getDownFlow() {
        return downFlow;
    }

    public void setDownFlow(Integer downFlow) {
        this.downFlow = downFlow;
    }

    public Integer getUpCountFlow() {
        return upCountFlow;
    }

    public void setUpCountFlow(Integer upCountFlow) {
        this.upCountFlow = upCountFlow;
    }

    public Integer getDownCountFlow() {
        return downCountFlow;
    }

    public void setDownCountFlow(Integer downCountFlow) {
        this.downCountFlow = downCountFlow;
    }

    @Override
    public String toString() {
        return upFlow + "\t" + downFlow + "\t" + upCountFlow + "\t" + downCountFlow;
    }
}
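
The Writable contract requires readFields to consume the fields in exactly the order write produced them. As a standalone sanity check (a hypothetical test class, not part of the job), a FlowNum can be round-tripped through an in-memory byte stream:

package cn.nina.mr.demo3;

import java.io.*;

public class FlowNumRoundTrip {
    public static void main(String[] args) throws IOException {
        FlowNum original = new FlowNum();
        original.setUpFlow(3);
        original.setDownFlow(3);
        original.setUpCountFlow(180);
        original.setDownCountFlow(180);

        // serialize into an in-memory buffer
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        original.write(new DataOutputStream(buffer));

        // deserialize into a fresh instance; field order matches write()
        FlowNum copy = new FlowNum();
        copy.readFields(new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())));

        System.out.println(copy); // prints: 3	3	180	180
    }
}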

FlowNumMapper class

package cn.nina.mr.demo3;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class FlowNumMapper extends Mapper<LongWritable,Text,Text,FlowNum> {
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // split the tab-separated log line
        String[] split = value.toString().split("\t");
        String phoneNum = split[1];                          // field 1: phone number
        Integer upFlow = Integer.parseInt(split[6]);         // field 6: upstream traffic
        Integer downFlow = Integer.parseInt(split[7]);       // field 7: downstream traffic
        Integer upCountFlow = Integer.parseInt(split[8]);    // field 8: total upstream traffic
        Integer downCountFlow = Integer.parseInt(split[9]);  // field 9: total downstream traffic
        FlowNum flowNum = new FlowNum();
        flowNum.setUpFlow(upFlow);
        flowNum.setDownFlow(downFlow);
        flowNum.setUpCountFlow(upCountFlow);
        flowNum.setDownCountFlow(downCountFlow);
        context.write(new Text(phoneNum), flowNum);
    }
}

FlowNumReducer class

package cn.nina.mr.demo3;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class FlowNumReducer extends Reducer<Text,FlowNum,Text,FlowNum> {
    @Override
    protected void reduce(Text key, Iterable<FlowNum> values, Context context) throws IOException, InterruptedException {
        // accumulate the four traffic fields over all records of this phone
        // number; Hadoop reuses the value instance between iterations, so the
        // numbers are read out immediately rather than keeping the objects
        int upFlow = 0;
        int downFlow = 0;
        int upCountFlow = 0;
        int downCountFlow = 0;
        for (FlowNum value:values) {
            upFlow += value.getUpFlow();
            downFlow += value.getDownFlow();
            upCountFlow += value.getUpCountFlow();
            downCountFlow += value.getDownCountFlow();
        }
        FlowNum flowNum = new FlowNum();
        flowNum.setUpFlow(upFlow);
        flowNum.setDownFlow(downFlow);
        flowNum.setUpCountFlow(upCountFlow);
        flowNum.setDownCountFlow(downCountFlow);
        context.write(key,flowNum);
    }
}

Main class

package cn.nina.mr.demo3;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class FlowNumMain extends Configured implements Tool {

    @Override
    public int run(String[] strings) throws Exception {
        // create the job
        Job job = Job.getInstance(super.getConf(), "flowNum");
        // add this line when packaging the job into a jar
        //job.setJarByClass(FlowNumMain.class);

        // Step 1: read the input file and parse it into k1, v1
        job.setInputFormatClass(TextInputFormat.class);
        TextInputFormat.addInputPath(job,new Path("file:///C:\\Users\\Yichun\\Desktop\\hadoop\\hadoop_04\\流量统计\\input"));
        //TextInputFormat.addInputPath(job,new Path(args[0]));

        // Step 2: custom Mapper, taking k1,v1 and emitting k2,v2
        job.setMapperClass(FlowNumMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(FlowNum.class);

        // Step 4: partitioning (enabled in the partitioning example below)
        //job.setPartitionerClass(PhonePartition.class);

        // Step 7: custom Reducer
        job.setReducerClass(FlowNumReducer.class);

        // set the number of reduce tasks (enabled in the partitioning example below)
        //job.setNumReduceTasks(6);

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(FlowNum.class);

        // Step 8: write the output
        job.setOutputFormatClass(TextOutputFormat.class);
        TextOutputFormat.setOutputPath(job,new Path("file:///C:\\Users\\Yichun\\Desktop\\hadoop\\hadoop_04\\流量统计\\output"));
        //TextOutputFormat.setOutputPath(job,new Path(args[1]));
        // submit the job
        boolean b = job.waitForCompletion(true);

        return b?0:1;
    }

    public static void main(String[] args) throws Exception {
        int run = ToolRunner.run(new Configuration(), new FlowNumMain(), args);
        System.exit(run);
    }

}
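
Because the reducer here performs a pure associative sum and FlowNumReducer's input and output types are identical (Text, FlowNum), the same class can optionally be registered as a combiner to pre-aggregate on the map side and cut shuffle traffic. This line is an optional addition, not part of the original job:

        // optional: map-side pre-aggregation; safe because summation is
        // associative and the Reducer's input/output types match
        job.setCombinerClass(FlowNumReducer.class);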

Sorting by upstream traffic

Whatever you want sorted must become k2, because MapReduce sorts records by key during the shuffle.
Since sorting is required here, the JavaBean class must implement WritableComparable; in the first example, where no sorting of the bean was needed, implementing Writable alone was enough.


FlowNumSort class: the sortable k2

package cn.nina.mr.demo4;

import org.apache.hadoop.io.WritableComparable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

public class FlowNumSort implements WritableComparable<FlowNumSort> {

    private Integer upFlow;        // upstream traffic
    private Integer downFlow;      // downstream traffic
    private Integer upCountFlow;   // total upstream traffic
    private Integer downCountFlow; // total downstream traffic

    // serialization: write the fields in a fixed order
    @Override
    public void write(DataOutput out) throws IOException {
        out.writeInt(upFlow);
        out.writeInt(downFlow);
        out.writeInt(upCountFlow);
        out.writeInt(downCountFlow);
    }
    // deserialization: read the fields back in the same order they were written
    @Override
    public void readFields(DataInput in) throws IOException {
        this.upFlow = in.readInt();
        this.downFlow = in.readInt();
        this.upCountFlow = in.readInt();
        this.downCountFlow = in.readInt();
    }

    public Integer getUpFlow() {
        return upFlow;
    }

    public void setUpFlow(Integer upFlow) {
        this.upFlow = upFlow;
    }

    public Integer getDownFlow() {
        return downFlow;
    }

    public void setDownFlow(Integer downFlow) {
        this.downFlow = downFlow;
    }

    public Integer getUpCountFlow() {
        return upCountFlow;
    }

    public void setUpCountFlow(Integer upCountFlow) {
        this.upCountFlow = upCountFlow;
    }

    public Integer getDownCountFlow() {
        return downCountFlow;
    }

    public void setDownCountFlow(Integer downCountFlow) {
        this.downCountFlow = downCountFlow;
    }

    @Override
    public String toString() {
        return upFlow + "\t" + downFlow + "\t" + upCountFlow + "\t" + downCountFlow;
    }

    @Override
    public int compareTo(FlowNumSort o) {
        // ascending by upstream traffic; records whose upFlow values are
        // equal compare as equal and are grouped into one reduce call
        return this.upFlow.compareTo(o.upFlow);
    }

}
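
If descending order is wanted instead, reversing the operands of the comparison is enough (a sketch of the alternative, not in the original):

    @Override
    public int compareTo(FlowNumSort o) {
        // descending by upstream traffic: compare the other object's value first
        return o.getUpFlow().compareTo(this.getUpFlow());
    }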

Mapper class

package cn.nina.mr.demo4;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class FlowNumSortMapper extends Mapper<LongWritable,Text,FlowNumSort,Text> {
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // one line of the previous job's output (tab-separated), e.g. 13367671234 3 3 180 180
        String[] split = value.toString().split("\t");
        FlowNumSort flowNumSort = new FlowNumSort();
        flowNumSort.setUpFlow(Integer.parseInt(split[1]));
        flowNumSort.setDownFlow(Integer.parseInt(split[2]));
        flowNumSort.setUpCountFlow(Integer.parseInt(split[3]));
        flowNumSort.setDownCountFlow(Integer.parseInt(split[4]));

        // the bean becomes k2 so the shuffle sorts on it; the phone number rides along as v2
        context.write(flowNumSort,new Text(split[0]));
    }
}

Reducer class

package cn.nina.mr.demo4;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class FlowNumSortReducer extends Reducer<FlowNumSort,Text,FlowNumSort,Text> {
    @Override
    protected void reduce(FlowNumSort key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
        // records whose keys compare equal (same upFlow) arrive in one call;
        // Hadoop refills the reused key instance as the iterator advances,
        // so each phone number is written with its own traffic figures
        for (Text value : values) {
            context.write(key,value);
        }
}

Main

package cn.nina.mr.demo4;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class FlowNumSortMain extends Configured implements Tool {

    @Override
    public int run(String[] strings) throws Exception {
        // create the job
        Job job = Job.getInstance(super.getConf(), "flowNumSort");
        // Step 1: read the previous job's output and parse it into k1, v1
        job.setInputFormatClass(TextInputFormat.class);
        TextInputFormat.addInputPath(job,new Path("file:///C:\\Users\\Yichun\\Desktop\\hadoop\\hadoop_04\\流量统计\\output"));

        // Step 2: custom Mapper, taking k1,v1 and emitting k2,v2
        job.setMapperClass(FlowNumSortMapper.class);
        job.setMapOutputKeyClass(FlowNumSort.class);
        job.setMapOutputValueClass(Text.class);

        // Step 7: custom Reducer
        job.setReducerClass(FlowNumSortReducer.class);
        job.setOutputKeyClass(FlowNumSort.class);
        job.setOutputValueClass(Text.class);

        // Step 8: write the output
        job.setOutputFormatClass(TextOutputFormat.class);
        TextOutputFormat.setOutputPath(job,new Path("file:///C:\\Users\\Yichun\\Desktop\\hadoop\\hadoop_04\\流量统计\\outputUpFlow"));

        // submit the job
        boolean b = job.waitForCompletion(true);

        return b?0:1;
    }

    public static void main(String[] args) throws Exception {
        int run = ToolRunner.run(new Configuration(), new FlowNumSortMain(), args);
        System.exit(run);
    }

}

Partitioning by phone number

To send different phone numbers to different output files, we need a custom Partitioner. Here we write one that separates phone numbers by their leading digits:
numbers starting with 137 go to one partition file
numbers starting with 138 go to one partition file
numbers starting with 139 go to one partition file
numbers starting with 135 go to one partition file
numbers starting with 136 go to one partition file
all other numbers go to a final partition

Because this uses a custom partitioner, it cannot be run locally; the job must be packaged into a jar and uploaded to the server cluster.
The Main class of the traffic-summation example above already carries the required changes as comments. To deploy, make the following edits (assembled into a full sketch after this list):

  1. Uncomment the packaging line:
    job.setJarByClass(FlowNumMain.class);
  2. Step 4: register the custom partitioner:
    job.setPartitionerClass(PhonePartition.class);
  3. Step 7: set the number of reduce tasks to match the partition count:
    job.setNumReduceTasks(6);
  4. Rename the run method's parameter from strings to args:
    public int run(String[] args) throws Exception
  5. Change the input and output paths to args[0] and args[1]
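
Putting the five changes together, the cluster-ready run method looks roughly like this (a sketch assembled from the commented-out lines above):

    @Override
    public int run(String[] args) throws Exception {
        Job job = Job.getInstance(super.getConf(), "flowNum");
        job.setJarByClass(FlowNumMain.class);           // required when running from a jar

        job.setInputFormatClass(TextInputFormat.class);
        TextInputFormat.addInputPath(job, new Path(args[0]));

        job.setMapperClass(FlowNumMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(FlowNum.class);

        job.setPartitionerClass(PhonePartition.class);  // route records by phone prefix
        job.setNumReduceTasks(6);                       // one reducer per partition

        job.setReducerClass(FlowNumReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(FlowNum.class);

        job.setOutputFormatClass(TextOutputFormat.class);
        TextOutputFormat.setOutputPath(job, new Path(args[1]));

        return job.waitForCompletion(true) ? 0 : 1;
    }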

PhonePartition class

package cn.nina.mr.demo3;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Partitioner;

public class PhonePartition extends Partitioner<Text,FlowNum> {
    @Override
    public int getPartition(Text text, FlowNum flowNum, int numReduceTasks) {
        // route each phone number by its three-digit prefix; the returned
        // index must stay below the reduce-task count set in the driver (6)
        String phoneNum = text.toString();
        if (phoneNum.startsWith("135")){
            return 0;
        }else if (phoneNum.startsWith("136")){
            return 1;
        }else if (phoneNum.startsWith("137")){
            return 2;
        }else if (phoneNum.startsWith("138")){
            return 3;
        }else if (phoneNum.startsWith("139")){
            return 4;
        }else {
            return 5;
        }

    }
}
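
Once the jar is built and uploaded, the job can be submitted with the standard hadoop jar command; the jar name and HDFS paths below are hypothetical:

hadoop jar flownum.jar cn.nina.mr.demo3.FlowNumMain /input/flow /output/flow

ToolRunner passes the two trailing arguments into run as args[0] and args[1].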