MapJoin and ReduceJoin in MapReduce

First, look at the sample data. There are two tables, a product table and an order table; the column whose values start with "p" is the product ID. We want to join the two tables on the product ID, and MapReduce offers two ways to do this: MapJoin and ReduceJoin.

product.txt
p0001,小米5,1000,2000
p0002,锤子T1,1000,3000
order.txt
1001,20150710,p0001,2
1002,20150710,p0002,3
1003,20150710,p0001,3
  • ReduceJoin
    Let's look at ReduceJoin first. The overall data flow is shown in the figure below:
    (Figure: ReduceJoin data flow)
    Data in HDFS passes through the InputFormat into the MapTask. Because the pid sits in a different column in each table, each record has to be split differently, so the mapper must know which file a record came from; it reads the file name from the input split via the context object (see the code below). Once the source is known, the record is split accordingly, and the MapTask outputs <pid, record> key-value pairs (product or order lines keyed by pid). During shuffle the data is partitioned by key (the default), so all records with the same pid reach the same ReduceTask, where they are concatenated and written to the output. The implementation is as follows:
package cn.edu.lingnan.ReducerJoinTest;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class JoinMapper extends Mapper<LongWritable, Text, Text, Text> {
  @Override
  protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
      //Determine which input file this record came from
      FileSplit inputSplit = (FileSplit) context.getInputSplit();
      String fileName = inputSplit.getPath().getName();

      //If the record comes from the product table, pid is the first field
      if(fileName.equals("product.txt")){

          String[] splitString = value.toString().split(",");
          String pid = splitString[0];
          context.write(new Text(pid), value);
      }else {
          //Otherwise it is an order record; pid is the third field
          String[] split = value.toString().split(",");
          String pid = split[2];
          context.write(new Text(pid), value);
      }
  }
}
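A small robustness note (an aside, not part of the original code): the reducer below distinguishes product records from order records by checking whether the value starts with "p", which only works as long as no order ID begins with "p". A common alternative, sketched here with hypothetical "P#"/"O#" tags, is to label each value in the mapper so the reducer can check the tag instead:

    //Hypothetical tagging variant of the map() body above: prefix each value with its
    //source before emitting, so the reducer can split on the tag rather than guessing
    //from the first character.
    if (fileName.equals("product.txt")) {
        String pid = value.toString().split(",")[0];
        context.write(new Text(pid), new Text("P#" + value.toString()));
    } else {
        String pid = value.toString().split(",")[2];
        context.write(new Text(pid), new Text("O#" + value.toString()));
    }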
package cn.edu.lingnan.ReducerJoinTest;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class JoinReducer extends Reducer<Text, Text, Text, Text> {

    @Override
    protected void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
        //The product record starts with "p"; all order records for this pid are concatenated
        String first = "";
        String second = "";
        for (Text value : values) {
            if(value.toString().startsWith("p")){
                first = value.toString();
            }else {
                second = second +"\t" + value.toString();
            }
        }

        //Emit the pid along with the joined product and order information
        context.write(key, new Text(first + "\t" + second));
    }
}
package cn.edu.lingnan.ReducerJoinTest;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

public class JobMain {

    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        //1. Get a Job instance
        Job job = Job.getInstance(new Configuration());

        //2. Set the jar classpath
        job.setJarByClass(JobMain.class);

        //3. Set the Mapper and Reducer
        job.setMapperClass(JoinMapper.class);
        job.setReducerClass(JoinReducer.class);

        //4. Set the output types of the Mapper and Reducer
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        //5. Set the input and output paths
        FileInputFormat.setInputPaths(job, new Path("file:///D:\\in\\join"));
        FileOutputFormat.setOutputPath(job, new Path("file:///D:\\out\\join"));

        //6. Submit the job and wait for it to finish
        boolean b = job.waitForCompletion(true);
        System.exit(b ? 0 : 1);

    }

}
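Given the sample data above and the concatenation logic in JoinReducer, the part file written to D:\out\join should look roughly like this (fields are tab-separated in the actual file; the order of values within one pid group is not guaranteed):

p0001	p0001,小米5,1000,2000		1001,20150710,p0001,2	1003,20150710,p0001,3
p0002	p0002,锤子T1,1000,3000		1002,20150710,p0002,3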

  • MapJoin
    In production, data transferred between the MapTask and the ReduceTask travels over the network, which becomes very expensive when the data volume is large. With MapJoin, all of the join logic runs inside the MapTask and no ReduceTask is involved, so let's implement the join this way as well. MapJoin relies on the distributed cache: the small table to be joined is distributed to the nodes of the Hadoop cluster and held in memory. The data flow is shown below:
    (Figure: MapJoin data flow)
    The implementation is as follows:
package cn.edu.lingnan.MapJoin;

import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.URI;
import java.util.HashMap;

public class JoinMapper extends Mapper<LongWritable, Text, Text, Text> {

    private HashMap<String, String> hashMap = new HashMap<>();

    //Read the small table (product.txt) from the distributed cache into an in-memory map
    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
        //1. Get the list of distributed cache files
        URI[] cacheFiles = context.getCacheFiles();

        //2. Get the file system that holds the cached file
        FileSystem fileSystem = FileSystem.get(cacheFiles[0], context.getConfiguration());

        //3. Open an input stream for the file
        FSDataInputStream inputStream = fileSystem.open(new Path(cacheFiles[0]));

        //4. Read the file line by line and put each record into the map, keyed by pid
        BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(inputStream));
        String line = null;

        while ((line=bufferedReader.readLine()) != null){
            String[] split = line.split(",");
            hashMap.put(split[0], line);
        }

        //5. Close the streams
        bufferedReader.close();
        fileSystem.close();

    }

    //Business logic: join each big-table (order) record with the small table
    private HashMap<String, String> result = new HashMap<>();
    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        //1. Extract the pid from the order line; it becomes the key
        String pid = value.toString().split(",")[2];

        //2. Look up the product information for this pid in the in-memory map
        String productInfo = hashMap.get(pid);

        //Append the order line to the product info, accumulating all orders per pid
        if(!result.containsKey(pid)){
            result.put(pid, productInfo + "\t" + value.toString());
        }else {
            result.put(pid, result.get(pid) + "\t" + value.toString());
        }

    }

    @Override
    protected void cleanup(Context context) throws IOException, InterruptedException {
        //3. Write each pid and its joined record to the context once all input is processed
        for (String s : result.keySet()) {
            context.write(new Text(s), new Text(result.get(s)));
        }
    }
}
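Buffering every joined record in the result map works for this small example, but the map grows with the number of distinct pids in the order file. A simpler variant (a sketch under the same assumptions, not the original code) joins and emits each record directly from map(), so nothing has to be held until cleanup():

    //Sketch of a map() that emits the joined record immediately instead of buffering it.
    //It reuses the hashMap built in setup(); orders whose pid is missing from the small
    //table are skipped, i.e. this sketch assumes inner-join semantics.
    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        String pid = value.toString().split(",")[2];
        String productInfo = hashMap.get(pid);
        if (productInfo != null) {
            context.write(new Text(pid), new Text(productInfo + "\t" + value.toString()));
        }
    }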

package cn.edu.lingnan.MapJoin;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import java.net.URI;

/**
 * Performs the join on the map side. This suits a small table joined with a big table,
 * because it avoids shuffling the big table across the network.
 */

public class JobMain extends Configured implements Tool {
    @Override
    public int run(String[] strings) throws Exception {

        //1. Get the job object
        Job job = Job.getInstance(super.getConf(), "MapJoinTest");

        //2. Configure the job
        //Put the small table into the distributed cache
        job.addCacheFile(new URI("hdfs://node01:8020/cache_file/product.txt"));

        //Set the input format and input path
        job.setInputFormatClass(TextInputFormat.class);
        TextInputFormat.addInputPath(job, new Path("file:///D:\\in\\mapjoin"));

        //Set the Mapper class and its output types
        job.setMapperClass(JoinMapper.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        //The join finishes in the map phase, so run the job without any ReduceTask
        job.setNumReduceTasks(0);

        //Set the output format and output path
        job.setOutputFormatClass(TextOutputFormat.class);
        TextOutputFormat.setOutputPath(job, new Path("file:///D:\\out\\map_join"));


        //3. Wait for the job to finish
        boolean b = job.waitForCompletion(true);


        return b ? 0 : 1;
    }

    public static void main(String[] args) throws Exception {
        Configuration configuration = new Configuration();
        int run = ToolRunner.run(configuration, new JobMain(), args);
        System.exit(run);
    }
}
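With the sample data above, the map-side join should produce output along these lines in D:\out\map_join (fields are tab-separated; exact spacing depends on the separators used when building the value):

p0001	p0001,小米5,1000,2000	1001,20150710,p0001,2	1003,20150710,p0001,3
p0002	p0002,锤子T1,1000,3000	1002,20150710,p0002,3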
