2021-06-05

Distributed Computing Framework

Today we will write a small distributed computing example.
First, create a Maven project in IDEA; that setup is a prerequisite and is omitted here (a sketch of the Hadoop dependency to add to the pom follows right after this paragraph).
Then, under the project's java source package, we need to write five Java classes. This example sums the data below by phone number (total upstream and downstream traffic per number), and its Partitioner splits the output into groups by the first three digits of each phone number.
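
A minimal sketch of the Hadoop dependency to add to the project's pom.xml, assuming Hadoop 2.7.2 (the version the driver's hadoop.home.dir points at later); adjust the version to your own environment:

<dependencies>
    <!-- hadoop-client bundles the MapReduce and HDFS APIs used in the classes below -->
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-client</artifactId>
        <version>2.7.2</version>
    </dependency>
</dependencies>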

Sample input data (id, phone number, IP, optional URL, upstream traffic, downstream traffic, status code):
1 13736230513 192.196.100.1 www.oracle.com 2481 24681 200
2 13846544121 192.196.100.2 264 0 200
3 13956435636 192.196.100.3 132 1512 200
4 13966251146 192.168.100.1 240 0 404
5 18271575951 192.168.100.2 www.oracle.com 1527 2106 200
6 84188413 192.168.100.3 www.oracle.com 4116 1432 200
7 13590439668 192.168.100.4 1116 954 200
8 15910133277 192.168.100.5 www.hao123.com 3156 2936 200
9 13729199489 192.168.100.6 240 0 200
10 13630577991 192.168.100.7 www.shouhu.com 6960 690 200
11 15043685818 192.168.100.8 www.baidu.com 3659 3538 200
12 15959002129 192.168.100.9 www.oracle.com 1938 180 500
13 13560439638 192.168.100.10 918 4938 200
14 13470253144 192.168.100.11 180 180 200
15 13682846555 192.168.100.12 www.qq.com 1938 2910 200
16 13992314666 192.168.100.13 www.gaga.com 3008 3720 200
17 13509468723 192.168.100.14 www.qinghua.com 7335 110349 404
18 18390173782 192.168.100.15 www.sogou.com 9531 2412 200
19 13975057813 192.168.100.16 www.baidu.com 11058 48243 200
20 13768778790 192.168.100.17 120 120 200
21 13568436656 192.168.100.18 www.alibaba.com 2481 24681 200
22 13568436656 192.168.100.19 1116 954 200
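
To make the goal concrete: the phone number 13568436656 appears twice above, so its upstream traffic sums to 2481 + 1116 = 3597 and its downstream traffic to 24681 + 954 = 25635, and the final reduce output line for that key should look like:

13568436656	3597	25635	29232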

Class 1: FlowBean

package com.oracle.mapreduce.flowbean;

import org.apache.hadoop.io.Writable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
//1. Implement the Writable interface
//2. Work out from the business logic which fields are needed
//3. Provide setter and getter methods
//4. Provide a no-arg constructor
//5. Provide a toString method
//6. Implement the serialization and deserialization methods (their field order must match)
public class FlowBean implements Writable {
//    upstream traffic
    private long upFlow;
//    downstream traffic
    private long downFlow;
//    total traffic
    private long sumFlow;

//    no-arg constructor (required so Hadoop can instantiate the bean via reflection)
    public FlowBean() {
    }

//    determines the format of the final output values
    @Override
    public String toString() {
        return upFlow + "\t" + downFlow + "\t" + sumFlow;
    }

    public void set(long up, long down) {
        upFlow = up;
        downFlow = down;
        sumFlow = up + down;
    }

//    serialization method
    @Override
    public void write(DataOutput out) throws IOException {
        out.writeLong(upFlow);
        out.writeLong(downFlow);
        out.writeLong(sumFlow);
    }

//    deserialization method (reads the fields in the same order write() wrote them)
    @Override
    public void readFields(DataInput in) throws IOException {
        upFlow = in.readLong();
        downFlow = in.readLong();
        sumFlow = in.readLong();
    }

    public long getUpFlow() {
        return upFlow;
    }

    public void setUpFlow(long upFlow) {
        this.upFlow = upFlow;
    }

    public long getDownFlow() {
        return downFlow;
    }

    public void setDownFlow(long downFlow) {
        this.downFlow = downFlow;
    }

    public long getSumFlow() {
        return sumFlow;
    }

    public void setSumFlow(long sumFlow) {
        this.sumFlow = sumFlow;
    }

}
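
As a quick sanity check of the write/readFields pairing (and of the rule that both must use the same field order), a minimal local sketch using plain Java streams, not part of the MapReduce job, could look like this:

package com.oracle.mapreduce.flowbean;

import java.io.*;

public class FlowBeanRoundTripTest {
    public static void main(String[] args) throws IOException {
        FlowBean original = new FlowBean();
        original.set(2481, 24681);

        // serialize the bean to a byte array
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        original.write(new DataOutputStream(bytes));

        // deserialize it back into a fresh bean
        FlowBean copy = new FlowBean();
        copy.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));

        // should print: 2481	24681	27162
        System.out.println(copy);
    }
}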

Class 2: FlowBeanMapper

package com.oracle.mapreduce.flowbean;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;
//Output per record: phone number -> (upstream traffic, downstream traffic, total traffic)
//Type parameter 1: byte offset of the line within the input file
//Type parameter 2: one line of input data
//Type parameter 3: the phone number (Text)
//Type parameter 4: {upFlow, downFlow, sumFlow} wrapped in the serializable FlowBean

public class FlowBeanMapper extends Mapper<LongWritable, Text, Text, FlowBean> {
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
//      e.g. 1	13736230513	192.196.100.1	www.oracle.com	2481	24681	200
//      1. get one line of data
        String line = value.toString();
//      2. split the line on tabs
//      [1, 13736230513, 192.196.100.1, www.oracle.com, 2481, 24681, 200]
        String[] fields = line.split("\t");
//      3. build the output key (the phone number)
        Text k = new Text();
        String phoneNumber = fields[1];
        k.set(phoneNumber);
//      4. build the output value (the FlowBean)
//      some records have no URL column, so count the traffic fields from the end of the array
//      upstream traffic
        long upFlow = Long.parseLong(fields[fields.length - 3]);
//      downstream traffic
        long downFlow = Long.parseLong(fields[fields.length - 2]);
        FlowBean v = new FlowBean();
        v.set(upFlow, downFlow);
//      5. emit the pair, e.g. (13736230513, 2481	24681	27162)
        context.write(k, v);
    }
}
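
A small standalone sketch (not part of the job) of why the mapper indexes from the end of the array: in the tab-separated rows above, the URL column is sometimes missing, so the traffic columns do not sit at a fixed position from the front.

public class SplitIndexDemo {
    public static void main(String[] args) {
        String withUrl = "1\t13736230513\t192.196.100.1\twww.oracle.com\t2481\t24681\t200";   // 7 fields
        String withoutUrl = "2\t13846544121\t192.196.100.2\t264\t0\t200";                     // 6 fields
        for (String line : new String[]{withUrl, withoutUrl}) {
            String[] fields = line.split("\t");
            // counting from the end always lands on the traffic columns,
            // whether or not the URL column is present
            System.out.println(fields[1]
                    + " up=" + fields[fields.length - 3]
                    + " down=" + fields[fields.length - 2]);
        }
    }
}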

Class 3: FlowBeanReducer

package com.oracle.mapreduce.flowbean;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;


import java.io.IOException;

public class FlowBeanReducer extends Reducer<Text, FlowBean, Text, FlowBean> {
//    the output value object is reused across keys instead of creating a new bean per call
    FlowBean v = new FlowBean();
    @Override
    protected void reduce(Text key, Iterable<FlowBean> values, Context context) throws IOException, InterruptedException {
//        iterate over all values for this phone number and accumulate the sums
        long sum_upFlow = 0;
        long sum_downFlow = 0;
//        e.g. 13568436656 -> (2481, 24681) and (1116, 954)
        for (FlowBean flowBean : values) {
            sum_upFlow += flowBean.getUpFlow();
            sum_downFlow += flowBean.getDownFlow();
        }
        v.set(sum_upFlow, sum_downFlow);
//        emit phone number -> (sum of upstream, sum of downstream, total)
        context.write(key, v);
    }

}

Class 4: PhoneNumPartitioner

package com.oracle.mapreduce.flowbean;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Partitioner;
//The two type parameters must match the Mapper's output key/value types (which are also the Reducer's input types)
public class PhoneNumPartitioner extends Partitioner<Text, FlowBean> {
//    returns the partition number, i.e. which reduce task / output file the record goes to
//    (the number of distinct phone prefixes handled here determines how many partitions are needed)
    @Override
    public int getPartition(Text text, FlowBean flowBean, int numPartitions) {
//        take the first three digits of the phone number
        String phoneNum = text.toString().substring(0, 3);
//        put the string literal in front of equals() to avoid a NullPointerException
        int partition = 4;
        if ("136".equals(phoneNum)) {
            partition = 0;
        } else if ("137".equals(phoneNum)) {
            partition = 1;
        } else if ("138".equals(phoneNum)) {
            partition = 2;
        } else if ("139".equals(phoneNum)) {
            partition = 3;
        }
        return partition;
    }
}
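
A minimal local check of the partitioner (outside the job), using phone numbers from the sample data: 13736230513 starts with 137 and should land in partition 1, while 15910133277 falls through to the default partition 4.

package com.oracle.mapreduce.flowbean;

import org.apache.hadoop.io.Text;

public class PhoneNumPartitionerCheck {
    public static void main(String[] args) {
        PhoneNumPartitioner partitioner = new PhoneNumPartitioner();
        FlowBean dummy = new FlowBean();
        // prints 1 (prefix "137")
        System.out.println(partitioner.getPartition(new Text("13736230513"), dummy, 5));
        // prints 4 (prefix "159" hits the default branch)
        System.out.println(partitioner.getPartition(new Text("15910133277"), dummy, 5));
    }
}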

Class 5: FlowBeanDrivers

package com.oracle.mapreduce.flowbean;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

public class FlowBeanDrivers {
    public FlowBeanDrivers() {
    }
    public static void main(String[] args) throws IOException,ClassNotFoundException,InterruptedException{
        System.setProperty("hadoop.home.dir","D:\\Ksoftware\\hadoop-2.7.2");
//        0. set the input and output paths (these are the author's local Windows paths; change them to your own)
        args = new String[]{"C:\\Users\\86132\\Desktop\\flowdata","C:\\Users\\86132\\Desktop\\flowoutdata"};
        Configuration conf = new Configuration();
//        1. get the Job object
        Job job = Job.getInstance(conf);
//        2. set the jar load path
        job.setJarByClass(FlowBeanDrivers.class);
//        3. wire up the Mapper and Reducer
        job.setMapperClass(FlowBeanMapper.class);
        job.setReducerClass(FlowBeanReducer.class);
//        4. set the map-stage output key and value types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(FlowBean.class);

//        set the partitioner class
        job.setPartitionerClass(PhoneNumPartitioner.class);
//        setNumReduceTasks must be either 1 (a single output file, the partitioner is effectively ignored)
//        or at least 5 (the number of partitions used above); values in between cause an illegal-partition error
        job.setNumReduceTasks(5);

//        5. set the final (reduce) output key and value types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(FlowBean.class);
//        6. set the input and output paths (the output directory must not already exist)
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
//        7. submit the job and wait for it to finish
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);

    }
}
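
The driver above runs the job locally from the IDE with hard-coded desktop paths. If you instead packaged the project and submitted it to a real cluster, you would drop the hard-coded args line and launch it with something like the command below, where the jar name and HDFS paths are only illustrative placeholders:

hadoop jar flowcount.jar com.oracle.mapreduce.flowbean.FlowBeanDrivers /flowdata /flowoutdata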



Finally, if the flowoutdata folder appears on your desktop and the part files inside have content, the job succeeded.
