
Dataset

Fields are separated by TAB characters.

  • Column 1: record number
  • Column 2: phone number
  • Column 3: network IP
  • Column 4: visited domain
  • Column 5: upstream traffic
  • Column 6: downstream traffic
  • Column 7: HTTP status code

Sample data (aligned for readability here; the actual file is TAB-delimited):
1   13736230513 192.196.100.1   www.atguigu.com 2481    24681   200
2   13846544121 192.196.100.2           264 0   200
3   13956435636 192.196.100.3           132 1512    200
4   13966251146 192.168.100.1           240 0   404
5   18271575951 192.168.100.2   www.atguigu.com 1527    2106    200
6   84188413    192.168.100.3   www.atguigu.com 4116    1432    200
7   13590439668 192.168.100.4           1116    954 200
8   15910133277 192.168.100.5   www.hao123.com  3156    2936    200
9   13729199489 192.168.100.6           240 0   200
10  13630577991 192.168.100.7   www.shouhu.com  6960    690 200
11  15043685818 192.168.100.8   www.baidu.com   3659    3538    200
12  15959002129 192.168.100.9   www.atguigu.com 1938    180 500
13  13560439638 192.168.100.10          918 4938    200
14  13470253144 192.168.100.11          180 180 200
15  13682846555 192.168.100.12  www.qq.com  1938    2910    200
16  13992314666 192.168.100.13  www.gaga.com    3008    3720    200
17  13509468723 192.168.100.14  www.qinghua.com 7335    110349  404
18  18390173782 192.168.100.15  www.sogou.com   9531    2412    200
19  13975057813 192.168.100.16  www.baidu.com   11058   48243   200
20  13768778790 192.168.100.17          120 120 200
21  13568436656 192.168.100.18  www.alibaba.com 2481    24681   200
22  13568436656 192.168.100.19          1116    954 200

Requirement

For each phone number, compute the total upstream traffic, total downstream traffic, and total traffic (upstream + downstream).
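For example, 13568436656 appears twice in the sample data (rows 21 and 22), so its totals are 2481 + 1116 = 3597 upstream and 24681 + 954 = 25635 downstream, 29232 combined. Its line in the expected output:

13568436656	3597	25635	29232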

Implementation

Entity class

package club.kwcoder.flowsum;

import org.apache.hadoop.io.Writable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

public class FlowWritable implements Writable {

    // Primitive fields default to 0 after the no-arg constructor, which
    // avoids auto-unboxing NPEs in write().
    private int upFlow;
    private int downFlow;
    private int sumFlow;

    // No-arg constructor, all-args constructor, getters/setters, equals, and
    // hashCode omitted. The no-arg constructor is required: Hadoop creates
    // Writable instances reflectively during deserialization.

    // readFields() must consume the fields in exactly the order that
    // write() emits them.
    @Override
    public void write(DataOutput out) throws IOException {
        out.writeInt(upFlow);
        out.writeInt(downFlow);
        out.writeInt(sumFlow);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        this.upFlow = in.readInt();
        this.downFlow = in.readInt();
        this.sumFlow = in.readInt();
    }

    @Override
    public String toString() {
        return upFlow + "\t" + downFlow + "\t" + sumFlow;
    }

}
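
Before wiring FlowWritable into a job, the write/readFields contract can be sanity-checked with a local round trip. A minimal sketch (the class name is illustrative; it relies on the setters and toString shown or noted above):

package club.kwcoder.flowsum;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class FlowWritableRoundTrip {
    public static void main(String[] args) throws IOException {
        FlowWritable original = new FlowWritable();
        original.setUpFlow(2481);
        original.setDownFlow(24681);
        original.setSumFlow(27162);

        // Serialize with write() ...
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        original.write(new DataOutputStream(buffer));

        // ... and rebuild a fresh instance with readFields().
        FlowWritable restored = new FlowWritable();
        restored.readFields(new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())));

        System.out.println(restored); // expected: 2481	24681	27162
    }
}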

Mapper class

package club.kwcoder.flowsum;

import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class FlowMapper extends Mapper<LongWritable, Text, Text, FlowWritable> {

    private final Text outKey = new Text();
    private final FlowWritable outVal = new FlowWritable();

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        String valueStr = value.toString();
        if (StringUtils.isBlank(valueStr)) {
            return;
        }

        String[] records = valueStr.split("\t");
        if (records.length < 7) {
            return; // skip malformed records instead of crashing the task
        }
        String phone = records[1];
        int upFlow = Integer.parseInt(StringUtils.isBlank(records[4]) ? "0" : records[4]);
        int downFlow = Integer.parseInt(StringUtils.isBlank(records[5]) ? "0" : records[5]);
        int sumFlow = upFlow + downFlow;

        outKey.set(phone);

        outVal.setUpFlow(upFlow);
        outVal.setDownFlow(downFlow);
        outVal.setSumFlow(sumFlow);

        context.write(outKey, outVal);
    }
}
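
One detail worth knowing: String.split keeps interior empty fields, so a record with a missing domain (two consecutive TABs, e.g. row 2) still splits into seven elements and the column indices above stay valid. A tiny standalone check (class name is illustrative):

public class SplitCheck {
    public static void main(String[] args) {
        // Row 2 of the dataset: the domain column is empty.
        String line = "2\t13846544121\t192.196.100.2\t\t264\t0\t200";
        String[] records = line.split("\t");
        System.out.println(records.length); // 7 (interior empty fields are kept)
        System.out.println(records[3]);     // "" (empty domain)
        System.out.println(records[4]);     // 264 (upstream traffic)
    }
}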

Reducer class

package club.kwcoder.flowsum;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class FlowReducer extends Reducer<Text, FlowWritable, Text, FlowWritable> {

    private final FlowWritable outVal = new FlowWritable();

    @Override
    protected void reduce(Text key, Iterable<FlowWritable> values, Context context) throws IOException, InterruptedException {
        int upFlow = 0, downFlow = 0, sumFlow = 0;
        for (FlowWritable value : values) {
            upFlow += value.getUpFlow();
            downFlow += value.getDownFlow();
            sumFlow += value.getSumFlow();
        }

        outVal.setUpFlow(upFlow);
        outVal.setDownFlow(downFlow);
        outVal.setSumFlow(sumFlow);

        context.write(key, outVal);
    }
}
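
Because the aggregation is a plain sum (associative and commutative) and the reducer's input and output types are identical, FlowReducer can optionally double as a combiner, pre-aggregating on the map side to cut shuffle traffic. That would be a single extra line in the driver below:

        // Optional: reuse the reducer as a map-side combiner.
        job.setCombinerClass(FlowReducer.class);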

Runner class

package club.kwcoder.flowsum;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

import java.io.IOException;

public class FlowRunner {

    public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {

        Configuration conf = new Configuration();

        FileSystem hdfs = FileSystem.get(conf);

        Path input = new Path("/flow/flow.log");
        Path output = new Path("/flow_result");

        // FileOutputFormat refuses to write into an existing directory,
        // so delete any previous output before submitting the job.
        if (hdfs.exists(output)) {
            hdfs.delete(output, true);
        }

        Job job = Job.getInstance(conf, "flowSum");
        // Set the driver class (used to locate the job jar)
        job.setJarByClass(FlowRunner.class);
        // Configure the input
        job.setInputFormatClass(TextInputFormat.class);
        FileInputFormat.setInputPaths(job, input);
        // Configure the Mapper
        job.setMapperClass(FlowMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(FlowWritable.class);
        // Configure the Reducer
        job.setReducerClass(FlowReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(FlowWritable.class);
        // Configure the output
        job.setOutputFormatClass(TextOutputFormat.class);
        FileOutputFormat.setOutputPath(job, output);
        // Submit the job and wait for completion
        boolean flag = job.waitForCompletion(true);
        if (flag) {
            System.out.println("flow sum success");
        }

        hdfs.close();

    }

}
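
To run end to end (the jar name is illustrative), package the four classes, upload the log file, submit the job, and inspect the single reducer's output:

hdfs dfs -mkdir -p /flow
hdfs dfs -put flow.log /flow
hadoop jar flowsum.jar club.kwcoder.flowsum.FlowRunner
hdfs dfs -cat /flow_result/part-r-00000

Each output line has the form phone, upFlow, downFlow, sumFlow, since TextOutputFormat joins the key and FlowWritable.toString() with a TAB.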
