MapReduce Overview and Serialization


MapReduce is a programming framework for distributed computation programs: the user's business logic is combined with the framework's built-in components into a complete distributed program that runs concurrently on a Hadoop cluster.

  1. A distributed computation program usually needs at least two stages.
  2. The concurrent MapTask instances of the first stage run fully in parallel and are independent of one another.
  3. The concurrent ReduceTask instances of the second stage are also independent of one another, but their input depends on the output of all MapTask instances from the previous stage.
  4. The MapReduce programming model can contain only one Map stage and one Reduce stage; if the user's business logic is more complex than that, the only option is to chain several MapReduce programs and run them serially.


| Java type | Hadoop Writable type |
| --------- | -------------------- |
| Boolean   | BooleanWritable      |
| Byte      | ByteWritable         |
| Int       | IntWritable          |
| Float     | FloatWritable        |
| Long      | LongWritable         |
| Double    | DoubleWritable       |
| String    | Text                 |
| Map       | MapWritable          |
| Array     | ArrayWritable        |
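
As a quick illustration of how these wrapper types are used outside of a job, the short sketch below (class and variable names are just for this example) boxes plain Java values into IntWritable and Text and reads them back:

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;

public class WritableDemo {
    public static void main(String[] args) {
        // Box Java values into Hadoop Writable wrappers
        IntWritable count = new IntWritable(1);
        Text word = new Text("hadoop");

        // Writables are mutable and reusable: set() replaces the value in place
        count.set(count.get() + 1);
        word.set("mapreduce");

        // Unbox back to plain Java types
        int n = count.get();        // 2
        String s = word.toString(); // "mapreduce"
        System.out.println(s + "\t" + n);
    }
}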

MapReduce Programming Conventions

A user-written program consists of three parts: a Mapper, a Reducer, and a Driver.

  • Mapper: extends Hadoop's Mapper class; its map() method is called once per input KV pair (by default, once per line of input) and emits intermediate KV pairs.
  • Reducer: extends Hadoop's Reducer class; its input KV types match the Mapper's output KV types, and its reduce() method is called once per group of KV pairs that share the same key.
  • Driver: the client that describes the job (jar, Mapper/Reducer classes, KV types, input and output paths) as a Job object and submits it to run locally or on a YARN cluster.


WordCount Hands-On Example

Environment setup
(1) Create a Maven project.
(2) Add the following dependencies to pom.xml:

<dependencies>
    <dependency>
        <groupId>junit</groupId>
        <artifactId>junit</artifactId>
        <version>4.12</version>
    </dependency>
    <dependency>
        <groupId>org.apache.logging.log4j</groupId>
        <artifactId>log4j-slf4j-impl</artifactId>
        <version>2.12.0</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-client</artifactId>
        <version>3.1.3</version>
    </dependency>
</dependencies>

(3) In the project's src/main/resources directory, create a new file named "log4j2.xml" and fill it with the following content:

<?xml version="1.0" encoding="UTF-8"?>
<Configuration status="error" strict="true" name="XMLConfig">
    <Appenders>
        <!-- Appender of type Console; the name attribute is required -->
        <Appender type="Console" name="STDOUT">
            <!-- PatternLayout; output looks like
            [INFO] [2018-01-22 17:34:01][org.test.Console]I'm here -->
            <Layout type="PatternLayout"
                    pattern="[%p] [%d{yyyy-MM-dd HH:mm:ss}][%c{10}]%m%n" />
        </Appender>

    </Appenders>

    <Loggers>
        <!-- additivity = false: do not propagate events to the root logger -->
        <Logger name="test" level="info" additivity="false">
            <AppenderRef ref="STDOUT" />
        </Logger>

        <!-- root logger configuration -->
        <Root level="info">
            <AppenderRef ref="STDOUT" />
        </Root>
    </Loggers>

</Configuration>

Writing the program

(1) Write the Mapper class

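A minimal sketch of WordCountMapper, consistent with the Driver shown in step (3) (job.setMapperClass(WordCountMapper.class), Text keys and IntWritable values for the map output); splitting each line on a single space is an assumption about the input file format:

package com.atguigu.mr.wordcount;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class WordCountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

    // output key: a word
    private Text outK = new Text();
    // output value: the constant 1, one occurrence of the word
    private IntWritable outV = new IntWritable(1);

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // 1. Get one line of input
        String line = value.toString();

        // 2. Split the line into words (assumes words are separated by single spaces)
        String[] words = line.split(" ");

        // 3. Emit (word, 1) for each word
        for (String word : words) {
            outK.set(word);
            context.write(outK, outV);
        }
    }
}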

(2) Write the Reducer class

package com.atguigu.mr.wordcount;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

/**
 * 1. A custom Reducer class must extend the Reducer class provided by Hadoop.
 *
 * 2. Specify 4 generic types (2 KV pairs):
 *    input KV: matches the KV output by the Mapper
 *       key: a word
 *       value: one occurrence of the word (1)
 *
 *    output KV:
 *       key: the word                      (Text)
 *       value: the word's total count      (IntWritable)
 *
 * 3. Override the reduce method.
 */
public class WordCountReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

    // output value
    private IntWritable outV = new IntWritable();

    /**
     * @param key     a word
     * @param values  all the values collected for this key
     * @param context the Reducer's context object, which drives the reducer's execution
     *
     * The reduce method is called once per group of KV pairs that share the same key.
     */
    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
        int sum = 0;
        for (IntWritable value : values) {
            sum += value.get();
        }
        // build the output value
        outV.set(sum);

        // write out
        context.write(key, outV);
    }
}

(3) Write the Driver class

package com.atguigu.mr.wordcount;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

public class WordCountDriver {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        // 1. Create a Job object
        Configuration conf = new Configuration();
        // To submit the Job to the cluster instead of running locally:
        // set the HDFS NameNode address
        //conf.set("fs.defaultFS", "hdfs://hadoop102:8020");
        // run MapReduce on YARN
        //conf.set("mapreduce.framework.name","yarn");
        // allow the job to be submitted to a remote cluster from another platform
        //conf.set("mapreduce.app-submission.cross-platform","true");

        // set the YARN ResourceManager host
        //conf.set("yarn.resourcemanager.hostname","hadoop103");

        Job job = Job.getInstance(conf);

        // 2. Associate the jar
        job.setJarByClass(WordCountDriver.class);
        //job.setJar("D:\\IdeaProjects\\Bigdata200621\\bigdata0821\\MapReduce0821\\target\\MapReduce0821-1.0-SNAPSHOT.jar");

        // 3. Associate the Mapper and Reducer
        job.setMapperClass(WordCountMapper.class);
        job.setReducerClass(WordCountReducer.class);
        // 4. Set the Mapper output KV types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        // 5. Set the final output KV types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        // 6. Set the input and output paths (local run)
        FileInputFormat.setInputPaths(job, new Path("D:\\input\\inputWord.txt"));
        FileOutputFormat.setOutputPath(job, new Path("D:\\output2"));

        // cluster run: take the paths from the command-line arguments instead
        //FileInputFormat.setInputPaths(job, new Path(args[0]));
        //FileOutputFormat.setOutputPath(job, new Path(args[1]));

        // 7. Submit the job
        job.waitForCompletion(true);
    }
}

Testing on the cluster

(1) Package the program into a jar and copy it to the Hadoop cluster.
Details: in the Maven tool window, run Lifecycle -> install. When the build finishes, the jar appears in the project's target folder; if you cannot see it, right-click the project -> Refresh. Rename the jar built without dependencies to wc.jar and copy it to the Hadoop cluster.
(2) Start the Hadoop cluster.
(3) Run the WordCount program:

[atguigu@hadoop102 software]$ hadoop jar /home/atguigu/test/WordCount-1.0-SNAPSHOT.jar com.atguigu.mr.wordcount.WordCountDriver /THIRDPARTYLICENSEREADME.txt /output1

Note: the input and output paths here are HDFS paths, not local Linux paths. Before packaging for the cluster, the Driver should take its paths from args[0] and args[1] (the commented-out cluster lines) instead of the hard-coded local Windows paths.

Hadoop Serialization

Serialization converts in-memory objects into byte sequences so they can be stored to disk or sent over the network; deserialization is the reverse. In enterprise development the built-in basic serialization types are often not enough: to pass a custom bean object between stages inside the Hadoop framework, that object must implement Hadoop's serialization interface.
The concrete steps for making a bean serializable are the following seven:
(1) The class must implement the Writable interface.
(2) Deserialization creates the object by reflection through the no-argument constructor, so the class must provide one:

public FlowBean() {
	super();
}

(3) Override the serialization method:

@Override
public void write(DataOutput out) throws IOException {
	out.writeLong(upFlow);
	out.writeLong(downFlow);
	out.writeLong(sumFlow);
}

(4) Override the deserialization method:

@Override
public void readFields(DataInput in) throws IOException {
	upFlow = in.readLong();
	downFlow = in.readLong();
	sumFlow = in.readLong();
}

(5) Note that the deserialization order must be exactly the same as the serialization order.
(6) To make the result readable in the output file, override toString(); separating the fields with "\t" makes downstream processing easier.
(7) If the custom bean is to be transmitted as a key, it must also implement the Comparable interface, because the Shuffle phase of the MapReduce framework requires that keys be sortable:

@Override
public int compareTo(FlowBean o) {
	// descending order, from largest to smallest
	return this.sumFlow > o.getSumFlow() ? -1 : (this.sumFlow < o.getSumFlow() ? 1 : 0);
}
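
When a custom bean really is used as a key, the Writable and Comparable requirements are normally combined by implementing Hadoop's WritableComparable interface. The sketch below puts steps (1) through (7) together for a key-side bean with long fields; it is only an illustration, and the FlowBean in the case study further down implements plain Writable because there it travels as a value, not a key.

import org.apache.hadoop.io.WritableComparable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

public class FlowBean implements WritableComparable<FlowBean> {

    private long upFlow;   // upstream traffic
    private long downFlow; // downstream traffic
    private long sumFlow;  // total traffic

    // (2) no-arg constructor required so the framework can create the object by reflection
    public FlowBean() {
    }

    public long getSumFlow() {
        return sumFlow;
    }

    // (3) serialization
    @Override
    public void write(DataOutput out) throws IOException {
        out.writeLong(upFlow);
        out.writeLong(downFlow);
        out.writeLong(sumFlow);
    }

    // (4) deserialization; (5) same field order as write()
    @Override
    public void readFields(DataInput in) throws IOException {
        upFlow = in.readLong();
        downFlow = in.readLong();
        sumFlow = in.readLong();
    }

    // (6) tab-separated output
    @Override
    public String toString() {
        return upFlow + "\t" + downFlow + "\t" + sumFlow;
    }

    // (7) sort keys by total flow, descending
    @Override
    public int compareTo(FlowBean o) {
        return Long.compare(o.sumFlow, this.sumFlow);
    }
}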

Serialization Hands-On Example

Input data (one record per line: id, phone number, network IP, visited site, upstream traffic, downstream traffic, status code; some records have no visited site):

1	13736230513	192.196.100.1	www.atguigu.com	2481	24681	200
2	13846544121	192.196.100.2			264	0	200
3 	13956435636	192.196.100.3			132	1512	200
4 	13966251146	192.168.100.1			240	0	404
5 	18271575951	192.168.100.2	www.atguigu.com	1527	2106	200
6 	84188413	192.168.100.3	www.atguigu.com	4116	1432	200
7 	13590439668	192.168.100.4			1116	954	200
8 	15910133277	192.168.100.5	www.hao123.com	3156	2936	200
9 	13729199489	192.168.100.6			240	0	200
10 	13630577991	192.168.100.7	www.shouhu.com	6960	690	200
11 	15043685818	192.168.100.8	www.baidu.com	3659	3538	200
12 	15959002129	192.168.100.9	www.atguigu.com	1938	180	500
13 	13560439638	192.168.100.10			918	4938	200
14 	13470253144	192.168.100.11			180	180	200
15 	13682846555	192.168.100.12	www.qq.com	1938	2910	200
16 	13992314666	192.168.100.13	www.gaga.com	3008	3720	200
17 	13509468723	192.168.100.14	www.qinghua.com	7335	110349	404
18 	18390173782	192.168.100.15	www.sogou.com	9531	2412	200
19 	13975057813	192.168.100.16	www.baidu.com	11058	48243	200
20 	13768778790	192.168.100.17			120	120	200
21 	13568436656	192.168.100.18	www.alibaba.com	2481	24681	200
22 	13568436656	192.168.100.19			1116	954	200

Requirement: for each phone number, compute the total upstream traffic, total downstream traffic, and total traffic (upstream + downstream). For example, 13568436656 appears twice in the data above, so its output should be 13568436656, 3597, 25635, 29232.

Writing the MapReduce program
(1) Write the bean object for the traffic statistics

package com.atguigu.writable;
import org.apache.hadoop.io.Writable;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
/**
 * Encapsulates the upstream traffic, downstream traffic, and total traffic for one phone number.
 */
public class FlowBean implements Writable {

    private Integer upFlow;   // upstream traffic

    private Integer downFlow; // downstream traffic

    private Integer sumFlow;  // total traffic

    public Integer getUpFlow() {
        return upFlow;
    }

    public void setUpFlow(Integer upFlow) {
        this.upFlow = upFlow;
    }

    public Integer getDownFlow() {
        return downFlow;
    }

    public void setDownFlow(Integer downFlow) {
        this.downFlow = downFlow;
    }

    public Integer getSumFlow() {
        return sumFlow;
    }

    public void setSumFlow(Integer sumFlow) {
        this.sumFlow = sumFlow;
    }

    public void setSumFlow(){
        setSumFlow(getUpFlow() + getDownFlow());
    }


    public FlowBean() {} // no-arg constructor required for deserialization by reflection

    /**
     * Serialization method.
     * @param out
     * @throws IOException
     */
    @Override
    public void write(DataOutput out) throws IOException {
        out.writeInt(upFlow);
        out.writeInt(downFlow);
        out.writeInt(sumFlow);
    }

    /**
     * Deserialization method.
     *
     * Note: fields must be read in the same order they were written.
     */
    @Override
    public void readFields(DataInput in) throws IOException {
        this.upFlow = in.readInt();
        this.downFlow = in.readInt();
        this.sumFlow = in.readInt();
    }

    @Override
    public String toString() {
        return  this.upFlow + "\t" + this.downFlow +"\t" + this.sumFlow ;
    }
}

(2) Write the Mapper class

package com.atguigu.writable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.IOException;
public class FlowMapper extends Mapper<LongWritable, Text, Text , FlowBean> {

    // output key: the phone number
    private Text outK = new Text();
    // output value
    private FlowBean outV = new FlowBean();

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // 1. Get one line of input, e.g.
        //    1	13736230513	192.196.100.1	www.atguigu.com	2481	24681	200
        String line = value.toString();

        // 2. Split the line on tabs
        String[] splits = line.split("\t");

        // 3. Build the output key: the phone number
        outK.set(splits[1]);

        // 4. Build the output value; count fields from the end because some records have no visited site
        outV.setUpFlow(Integer.parseInt(splits[splits.length - 3]));   // upstream
        outV.setDownFlow(Integer.parseInt(splits[splits.length - 2])); // downstream
        outV.setSumFlow();

        // 5. Write out
        context.write(outK, outV);

    }
    
}

(3) Write the Reducer class

package com.atguigu.writable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import java.io.IOException;
public class FlowReducer  extends Reducer<Text,FlowBean,Text,FlowBean> {
    // output value
    private FlowBean outV = new FlowBean();

    /**
     * All KV pairs sharing the same key go into a single call of the reduce method.
     */
    @Override
    protected void reduce(Text key, Iterable<FlowBean> values, Context context) throws IOException, InterruptedException {
        int totalUpFlow = 0;
        int totalDownFlow = 0;
        // 1. Iterate over the values and accumulate this key's total upstream and downstream traffic
        for (FlowBean value : values) {
            totalUpFlow += value.getUpFlow();
            totalDownFlow += value.getDownFlow();
        }
        // 2. Build the output value
        outV.setUpFlow(totalUpFlow);
        outV.setDownFlow(totalDownFlow);
        outV.setSumFlow();

        // 3. Write out
        context.write(key, outV);
    }
}

(4) Write the Driver class

package com.atguigu.writable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class FlowDriver {
    public static void main(String[] args) throws Exception {
        // 1. Get the Job object
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        // 2. Associate the jar
        job.setJarByClass(FlowDriver.class);
        // 3. Associate the Mapper and Reducer
        job.setMapperClass(FlowMapper.class);
        job.setReducerClass(FlowReducer.class);
        // 4. Set the Map output key and value types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(FlowBean.class);
        // 5. Set the final output key and value types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(FlowBean.class);
        // 6. Set the input and output paths
        FileInputFormat.setInputPaths(job, new Path("D:\\input\\inputflow"));
        FileOutputFormat.setOutputPath(job, new Path("D:\\output"));
        // 7. Submit the Job
        job.waitForCompletion(true);
    }
}