【Hadoop】10. MapReduce Framework Internals - Join

Overview

Joins are used frequently in MapReduce, and they come in two forms: the Reduce-side join (ReduceJoin) and the Map-side join (MapJoin).

ReduceJoin
How ReduceJoin works

Map side: for key/value pairs that come from different tables or files, tag each record to mark its source. Then use the join field as the key and the remaining fields plus the tag as the value, and emit that pair.

Reduce side: by the time reduce runs, the records sharing the same join field (the key) have already been grouped together. Inside each group we only have to separate the records by source, using the tag added in the map phase, and then merge them.

Example

Input files: order.txt (order records: id, pid, address, num) and pd.txt (product records: pid, name). Both files are tab-separated; hypothetical sample rows follow.
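For reference, here are hypothetical sample rows in the layout the mapper expects (columns are tab-separated in the real files):

order.txt (id, pid, address, num):
1001  01  beijing   1
1002  02  shanghai  2
1003  03  shenzhen  3

pd.txt (pid, name):
01  xiaomi
02  huawei
03  apple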
OrderBean: the order bean that carries the joined fields

package com.xing.MapReduce.ReduceJoin;

import org.apache.hadoop.io.Writable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

public class OrderBean implements Writable {
    private String id;
    private String pid;
    private String num;
    private String adress;
    private String name;
    private String type;


    public String getType() {
        return type;
    }

    public void setType(String type) {
        this.type = type;
    }

    public String getId() {
        return id;
    }

    public void setId(String id) {
        this.id = id;
    }

    public String getPid() {
        return pid;
    }

    public void setPid(String pid) {
        this.pid = pid;
    }

    public String getNum() {
        return num;
    }

    public void setNum(String num) {
        this.num = num;
    }

    public String getAdress() {
        return adress;
    }

    public void setAdress(String adress) {
        this.adress = adress;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public void write(DataOutput dataOutput) throws IOException {
        // Serialization order must match readFields() exactly
        dataOutput.writeUTF(id);
        dataOutput.writeUTF(pid);
        dataOutput.writeUTF(num);
        dataOutput.writeUTF(adress);
        dataOutput.writeUTF(name);
        dataOutput.writeUTF(type);
    }

    public void readFields(DataInput dataInput) throws IOException {
        // Deserialize the fields back in the same order they were written
        this.id = dataInput.readUTF();
        this.pid = dataInput.readUTF();
        this.num = dataInput.readUTF();
        this.adress = dataInput.readUTF();
        this.name = dataInput.readUTF();
        this.type = dataInput.readUTF();
    }

    @Override
    public String toString() {
        return id+"\t"+pid+"\t"+num+"\t"+adress+"\t"+name+"\t"+type;
    }
}
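OrderBean only needs Writable (not WritableComparable) because it is used as the map output value, never as a key. As a minimal sketch of what the framework does with those two methods, the throwaway class below (placed in the same package; the class name is made up for illustration) round-trips a bean through write()/readFields(). Note that writeUTF() throws a NullPointerException on null fields, which is why the mapper later fills unused fields with empty strings.

package com.xing.MapReduce.ReduceJoin;

import java.io.*;

public class OrderBeanRoundTrip {
    public static void main(String[] args) throws IOException {
        OrderBean in = new OrderBean();
        in.setId("1001"); in.setPid("01"); in.setNum("1");
        in.setAdress("beijing"); in.setName(""); in.setType("1");

        // Serialize the bean the same way Hadoop does when it spills map output
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        in.write(new DataOutputStream(bytes));

        // Deserialize into a fresh bean and print it
        OrderBean out = new OrderBean();
        out.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println(out);   // same six tab-separated fields as the input bean
    }
}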

ReduceJoinMapper: the mapper class

package com.xing.MapReduce.ReduceJoin;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;

import java.io.IOException;

public class ReduceJoinMapper extends Mapper<LongWritable,Text,Text,OrderBean> {


    private OrderBean orderBean = new OrderBean();
    private Text k = new Text();
    private String fileName;
    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
        FileSplit inputSplit = (FileSplit) context.getInputSplit();
        fileName = inputSplit.getPath().getName();
    }

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        String s = value.toString();
        String[] split = s.split("\t", -1);
        // Branch on the source file; either way the output is a (pid, OrderBean) key/value pair
        if (fileName.startsWith("order")){
            // order.txt columns: id, pid, adress, num
            orderBean.setId(split[0]);
            orderBean.setPid(split[1]);
            orderBean.setAdress(split[2]);
            orderBean.setNum(split[3]);
            orderBean.setName("");
            orderBean.setType("1");
            k.set(split[1]);
        }else {
            // pd.txt columns: pid, name
            orderBean.setId("");
            orderBean.setPid(split[0]);
            orderBean.setAdress("");
            orderBean.setNum("");
            orderBean.setName(split[1]);
            orderBean.setType("2");
            k.set(split[0]);
        }
        context.write(k,orderBean);
    }
}

ReduceJoinReducer: the reducer class

package com.xing.MapReduce.ReduceJoin;

import org.apache.commons.beanutils.BeanUtils;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.util.ArrayList;
import java.util.List;

public class ReduceJoinReducer extends Reducer<Text,OrderBean,OrderBean,NullWritable> {

    @Override
    protected void reduce(Text key, Iterable<OrderBean> values, Context context) throws IOException, InterruptedException {
        List<OrderBean> list = new ArrayList<OrderBean>();
        OrderBean orderBeansTmp = new OrderBean();
        // Check the tag: order records (type "1") are collected into the list, the product record (type "2") is copied into orderBeansTmp
        for (OrderBean orderBean : values) {
            if (orderBean.getType().equals("1")){
                /**
                 * Do not add the iterated orderBean to the list directly; copy it first.
                 * Hadoop reuses the same value object across iterations, so adding it
                 * directly would leave every list entry pointing at the last record of the loop.
                 */

                OrderBean orderBean1 = new OrderBean();
                try {
                    BeanUtils.copyProperties(orderBean1,orderBean );
                    list.add(orderBean1);
                } catch (IllegalAccessException e) {
                    e.printStackTrace();
                } catch (InvocationTargetException e) {
                    e.printStackTrace();
                }
            }else {
                try {
                    BeanUtils.copyProperties(orderBeansTmp,orderBean );
                } catch (IllegalAccessException e) {
                    e.printStackTrace();
                } catch (InvocationTargetException e) {
                    e.printStackTrace();
                }
            }
        }
        // Fill in the product name on every order record and emit it
        for (OrderBean orderBean : list) {
            System.out.println("打印:"+orderBean);
            orderBean.setName(orderBeansTmp.getName());
            context.write(orderBean,NullWritable.get());
        }
    }
}
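BeanUtils.copyProperties does the defensive copy here, but it uses reflection on every record. If you would rather drop the commons-beanutils dependency, a plain copy method added to OrderBean does the same job; this is only a sketch of an alternative, not part of the original code:

    // Hypothetical helper inside OrderBean: field-by-field copy from another bean
    public void copyFrom(OrderBean other) {
        this.setId(other.getId());
        this.setPid(other.getPid());
        this.setNum(other.getNum());
        this.setAdress(other.getAdress());
        this.setName(other.getName());
        this.setType(other.getType());
    }

With that in place, the two copyProperties calls in the reducer become orderBean1.copyFrom(orderBean) and orderBeansTmp.copyFrom(orderBean); the object-reuse caveat in the comment above still applies.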

ReduceJoinDriver: the driver class

package com.xing.MapReduce.ReduceJoin;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

public class ReduceJoinDriver {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {

        System.setProperty("hadoop.home.dir", "E:\\hadoop-2.7.1");
        Configuration configuration = new Configuration();
        FileSystem fs = FileSystem.get(configuration);
        Job job = Job.getInstance(configuration);

        job.setJobName("ReduceJoin");

        job.setJarByClass(ReduceJoinDriver.class);
        job.setMapperClass(ReduceJoinMapper.class);
        job.setReducerClass(ReduceJoinReducer.class);

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(OrderBean.class);

        job.setOutputKeyClass(OrderBean.class);
        job.setOutputValueClass(NullWritable.class);

        Path input = new Path("E:\\hdfs\\data\\reducejoin\\input");
        Path output = new Path("E:\\hdfs\\data\\reducejoin\\output");
        // Delete the output directory if it already exists
        if (fs.exists(output)){
            fs.delete(output,true );
        }
        FileInputFormat.setInputPaths(job,input);
        FileOutputFormat.setOutputPath(job,output );

        boolean b = job.waitForCompletion(true);
        System.exit(b?0:-1);
    }
}

Output: every order record with the matching product name filled in, printed in the OrderBean.toString() field order (id, pid, num, adress, name, type).

Drawbacks

With ReduceJoin all of the merging happens in the reduce phase, so the reducers carry a heavy load while the map nodes do very little work. Resource utilization is poor, and the reduce side is prone to data skew.
How do we fix that?
Do the merging on the Map side instead - that is MapJoin.

MapJoin

Prerequisite: the cached table must be a small file, because it is loaded entirely into memory; a file that is too large can run the task out of memory.
Advantage:
Caching the small table(s) on the Map side lets the join logic run early. This shifts work to the mappers, relieves pressure on the reduce side, and reduces data skew as much as possible.
How to do it:
Use the distributed cache (DistributedCache).
(1) In the Mapper's setup method, read the cached file into an in-memory collection.
(2) In the driver, register the file with the cache (a cluster-style example follows below):
job.addCacheFile(new URI("XXXXX"));
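The driver below registers a local path converted with Path.toUri(), which is fine for a local run. On a real cluster the small table would normally sit on HDFS; a sketch of that call (host and path are hypothetical) looks like this, keeping in mind that new URI(...) requires java.net.URI and throws URISyntaxException, which the driver's main method must declare or catch:

        // Hypothetical HDFS location for the small side table
        job.addCacheFile(new URI("hdfs://namenode:9000/cache/pd.txt"));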

Example

MapJoinDriver: the driver class

package com.xing.MapReduce.MapJoin;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

public class MapJoinDriver {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {

        System.setProperty("hadoop.home.dir", "E:\\hadoop-2.7.1");
        Configuration configuration = new Configuration();
        FileSystem fs = FileSystem.get(configuration);
        Job job = Job.getInstance(configuration);

        job.setJobName("MapJoin");
        job.setJarByClass(MapJoinDriver.class);
        job.setMapperClass(MapJoinMapper.class);
        // Map-only job: the join happens in the mapper, so no reducer class is set

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(NullWritable.class);
        // Add the small table (pd.txt) to the distributed cache
        job.addCacheFile(new Path("E:\\hdfs\\data\\reducejoin\\input1\\pd.txt").toUri());

        // No reduce phase: the mapper writes the joined records straight to the output
        job.setNumReduceTasks(0);

        Path input = new Path("E:\\hdfs\\data\\reducejoin\\input");
        Path output = new Path("E:\\hdfs\\data\\reducejoin\\output");
        // Delete the output directory if it already exists
        if (fs.exists(output)){
            fs.delete(output,true );
        }
        FileInputFormat.setInputPaths(job,input);
        FileOutputFormat.setOutputPath(job,output );

        boolean b = job.waitForCompletion(true);
        System.exit(b?0:-1);
    }
}

MapJoinMapper: the map-side join logic

package com.xing.MapReduce.MapJoin;

import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.*;
import java.net.URI;
import java.util.HashMap;
import java.util.Map;

public class MapJoinMapper extends Mapper<LongWritable,Text,Text,NullWritable> {

    private Map<String,String> map = new HashMap<String, String>();
    private Text k = new Text();
    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
        // Read the cached pd.txt into a pid -> name map
        URI[] cacheFiles = context.getCacheFiles();
        String cacheFile = cacheFiles[0].getPath();
        BufferedReader reader = new BufferedReader(new InputStreamReader(new FileInputStream(cacheFile),"UTF-8"));
        String line;
        // isNotEmpty is false for null (end of file) and for blank lines, either of which ends the loop
        while (StringUtils.isNotEmpty(line = reader.readLine())){
            String[] strings = line.split("\t",-1);
            map.put(strings[0],strings[1]);
        }
        reader.close();

    }

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        String s = value.toString();
        String[] split = s.split("\t", -1);
        // Look up the product name by pid (the second column of order.txt) and append it
        String name = map.get(split[1]);
        s =s.concat("\t"+name);
        k.set(s);
        context.write(k,NullWritable.get());
    }
}

Focus on the setup method: it loads the cached file into memory once, before any map() call runs.
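One portability note on that setup method: it opens the cache file through its absolute local path, which works when the job runs locally. When the job runs on a cluster, the framework localizes cache files into each task's working directory, so a common variant (a sketch, assuming org.apache.hadoop.fs.Path is also imported) resolves the file by its name instead:

        // Portable variant of the cache lookup in setup()
        URI[] cacheFiles = context.getCacheFiles();
        String pdFile = new Path(cacheFiles[0].getPath()).getName();   // "pd.txt" in the task working directory
        BufferedReader reader = new BufferedReader(
                new InputStreamReader(new FileInputStream(pdFile), "UTF-8"));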
