Join Applications

1、Reduce Join

Data:

order.txt

id      pid     amount
1001    01      1
1002    02      2
1003    03      3
1004    01      4
1005    02      5
1006    03      6

pd.txt

pid     pname
01      小米
02      华为
03      格力
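
The join replaces each order's pid with the matching pname. For the sample data above, the expected result (id, pname, amount; row order may differ between the two implementations below) is:

1001    小米    1
1004    小米    4
1002    华为    2
1005    华为    5
1003    格力    3
1006    格力    6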

Analysis:

Main work on the Map side: tag each key/value record according to the table or file it comes from, so that records from different sources can be told apart. Then emit the join field (pid) as the key, with the remaining fields plus the new flag as the value.

Main work on the Reduce side: by the time data reaches the Reducer, grouping by the join field is already done. Within each group we only need to separate the records by their source (using the flag set in the Map phase) and then merge them. For example, the group for pid 01 contains one record flagged "pd" (pname 小米) and two records flagged "order" (orders 1001 and 1004); the pd record supplies the pname that is copied into each order record before output.

  1. Prepare a TableBean wrapper object that can hold the fields of both tables and record which table each record came from (a small serialization round-trip sketch appears after this list).

    package com.hpu.review.rj;
    
    import org.apache.hadoop.io.Writable;
    
    import java.io.DataInput;
    import java.io.DataOutput;
    import java.io.IOException;
    
    public class TableBean implements Writable {
        private String id;
        private String pid;
        private Integer amount;
        private String pname;
        // flag used to tell which table this record came from
        private String flag;
    
        public TableBean(){}
    
        public String getId() {
            return id;
        }
    
        public void setId(String id) {
            this.id = id;
        }
    
        public String getPid() {
            return pid;
        }
    
        public void setPid(String pid) {
            this.pid = pid;
        }
    
        public Integer getAmount() {
            return amount;
        }
    
        public void setAmount(Integer amount) {
            this.amount = amount;
        }
    
        public String getPname() {
            return pname;
        }
    
        public void setPname(String pname) {
            this.pname = pname;
        }
    
        public String getFlag() {
            return flag;
        }
    
        public void setFlag(String flag) {
            this.flag = flag;
        }
    
        @Override
        public String toString() {
            // used for the reduce-phase output: return only the fields we need
            return id+"\t"+pname+"\t"+amount;
        }
    
        @Override
        public void write(DataOutput out) throws IOException {
            out.writeUTF(id);
            out.writeUTF(pid);
            out.writeInt(amount);
            out.writeUTF(pname);
            out.writeUTF(flag);
        }
    
        @Override
        public void readFields(DataInput in) throws IOException {
            this.id = in.readUTF();
            this.pid = in.readUTF();
            this.amount = in.readInt();
            this.pname = in.readUTF();
            this.flag = in.readUTF();
        }
    }
    
    
  2. Write the Mapper: extend Mapper and declare the output key as Text (the pid) and the output value as a TableBean object;

  3. Get the input split from the context. Because the abstract InputSplit class does not expose the file path or file name, cast it to FileSplit and read that information from it.

  4. Emit key/value pairs according to which file the record came from.

    package com.hpu.review.rj;
    
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.InputSplit;
    import org.apache.hadoop.mapreduce.Mapper;
    import org.apache.hadoop.mapreduce.lib.input.FileSplit;
    
    import java.io.IOException;
    
    public class MyMapper extends Mapper<LongWritable, Text, Text, TableBean> {
        private Text outK;
        private TableBean outV;
        private String name;
    
        @Override
        protected void setup(Context context) throws IOException, InterruptedException {
            outK = new Text();
            outV = new TableBean();
            // get the name of the file this split belongs to
            InputSplit inputSplit = context.getInputSplit();
            FileSplit fileSplit = (FileSplit)inputSplit;
            name = fileSplit.getPath().getName();
    
        }
    
        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            String line = value.toString();
            // check which file this record came from and handle it accordingly
            if (name.contains("order")){ // handle the order table
                String[] orderInfo = line.split("\t");
                outV.setId(orderInfo[0]);
                outV.setPid(orderInfo[1]);
                outV.setAmount(Integer.parseInt(orderInfo[2]));
                outV.setPname("");
                outV.setFlag("order");
                outK.set(orderInfo[1]);
                context.write(outK,outV);
            } else { // handle the product (pd) table
                String[] pdInfo = line.split("\t");
                outV.setId("");
                outV.setPid(pdInfo[0]);
                outV.setAmount(0);
                outV.setPname(pdInfo[1]);
                outV.setFlag("pd");
                outK.set(pdInfo[0]);
                context.write(outK,outV);
            }
        }
    }
    
    
  5. For MyReducer, first extend the Reducer class;

  6. Declare the output key as TableBean and the output value as NullWritable; all that is needed here is to fill in the pname on the matching order beans;

  7. In the values iterator, each pid is unique in the pd table, so there is exactly one pd record per group and all the remaining records are orders. This is why pdBean is a single object while tableBeans is a collection.

    package com.hpu.review.rj;
    
    import org.apache.commons.beanutils.BeanUtils;
    import org.apache.hadoop.io.NullWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Reducer;
    
    import java.io.IOException;
    import java.lang.reflect.InvocationTargetException;
    import java.util.ArrayList;
    
    public class MyReducer extends Reducer<Text,TableBean,TableBean, NullWritable> {
    
        // many orders correspond to one product name
        private ArrayList<TableBean> tableBeans;
        private TableBean pdBean;
    
        @Override
        protected void setup(Context context) throws IOException, InterruptedException {
            tableBeans = new ArrayList<>();
            pdBean = new TableBean();
    
        }
    
        @Override
        protected void reduce(Text key, Iterable<TableBean> values, Context context) throws IOException, InterruptedException {
            // the list must be cleared for every key
            tableBeans.clear();
            for (TableBean value : values) {
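                // Hadoop reuses one TableBean instance for every value in this iterator,
                // so each record is copied into its own object before being kept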
                TableBean tmpBean = new TableBean();
                // check which table this record came from
                if ("order".equals(value.getFlag())){
                    try {
                        BeanUtils.copyProperties(tmpBean,value);
                        // add the temporary TableBean to the tableBeans list
                        tableBeans.add(tmpBean);
                    } catch (IllegalAccessException e) {
                        e.printStackTrace();
                    } catch (InvocationTargetException e) {
                        e.printStackTrace();
                    }
                }else {
                    try {
                        BeanUtils.copyProperties(tmpBean,value);
                        pdBean = tmpBean;
                    } catch (IllegalAccessException e) {
                        e.printStackTrace();
                    } catch (InvocationTargetException e) {
                        e.printStackTrace();
                    }
                }
            }
            // iterate over tableBeans, fill in each bean's pname from pdBean, then write it out
            for (TableBean tableBean : tableBeans) {
                tableBean.setPname(pdBean.getPname());
                context.write(tableBean,NullWritable.get());
            }
        }
    }
    
    
  8. There is not much to say about the Driver:

    package com.hpu.review.rj;
    
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.NullWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
    import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
    
    import java.io.IOException;
    
    public class MyDriver {
        public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
            Configuration conf = new Configuration();
            Job job = Job.getInstance(conf);
    
            job.setJarByClass(MyDriver.class);
            job.setMapperClass(MyMapper.class);
            job.setReducerClass(MyReducer.class);
    
            job.setMapOutputKeyClass(Text.class);
            job.setMapOutputValueClass(TableBean.class);
            job.setOutputKeyClass(TableBean.class);
            job.setOutputValueClass(NullWritable.class);
    
            FileInputFormat.setInputPaths(job,new Path("E:\\Test\\input\\inputtable"));
            FileOutputFormat.setOutputPath(job,new Path("E:\\Test\\rj1"));
    
            job.waitForCompletion(true);
        }
    }
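
Before moving on, here is the serialization round-trip sketch referenced in step 1. It is a minimal standalone check (the TableBeanRoundTrip class and its values are only for illustration, not part of the job) of what Hadoop does to TableBean during the shuffle: write and readFields must handle the fields in the same order, and the no-argument constructor is required because Hadoop instantiates the bean by reflection before calling readFields.

    package com.hpu.review.rj;
    
    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    
    public class TableBeanRoundTrip {
        public static void main(String[] args) throws IOException {
            // build a bean the way the order branch of the Mapper does
            TableBean in = new TableBean();
            in.setId("1001");
            in.setPid("01");
            in.setAmount(1);
            in.setPname("");
            in.setFlag("order");
    
            // serialize the bean to bytes, as the shuffle would
            ByteArrayOutputStream buffer = new ByteArrayOutputStream();
            in.write(new DataOutputStream(buffer));
    
            // deserialize into a fresh instance, as the reduce side would
            TableBean out = new TableBean();
            out.readFields(new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())));
    
            // prints "1001\t\t1": pname is still empty at this point
            System.out.println(out);
        }
    }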
    

2、Map Join

Approach: use DistributedCache.

(1) In the Mapper's setup phase, read the cached file into an in-memory collection.

(2) In the Driver class, register the file in the cache:

    // cache an ordinary file on the nodes where the tasks run
    job.addCacheFile(new URI("file:///e:/cache/pd.txt"));
    // when running on a cluster, use an HDFS path instead
    job.addCacheFile(new URI("hdfs://hadoop102:8020/cache/pd.txt"));

  1. First, add the cache file in the driver class (MyDriver here):

    package com.hpu.review.mj;
    
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.NullWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
    import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
    
    import java.io.IOException;
    import java.net.URI;
    import java.net.URISyntaxException;
    
    public class MyDriver {
        public static void main(String[] args) throws IOException, URISyntaxException, ClassNotFoundException, InterruptedException {
            Configuration conf = new Configuration();
            Job job = Job.getInstance(conf);
    
            job.setJarByClass(MyDriver.class);
            job.setMapperClass(MyMapper.class);
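            // with 0 reduce tasks this is a map-only job: the Mapper output is written
            // directly to the output directory and no shuffle or sort takes place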
            job.setNumReduceTasks(0);
    
            job.setMapOutputKeyClass(Text.class);
            job.setMapOutputValueClass(NullWritable.class);
            job.setOutputKeyClass(Text.class);
            job.setOutputValueClass(NullWritable.class);
    
            // add pd.txt to the distributed cache
            job.addCacheFile(new URI("file:///E:/Test/input/inputtablecache/pd.txt"));
    
            FileInputFormat.setInputPaths(job,new Path("E:\\Test\\input\\inputtable2"));
            FileOutputFormat.setOutputPath(job,new Path("E:\\Test\\mj1"));
            job.waitForCompletion(true);
        }
    }
    
    
  2. Read the cached file in the setup method of the Mapper (MyMapper):

    package com.hpu.review.mj;
    
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.IOUtils;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.NullWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Mapper;
    
    import java.io.BufferedReader;
    import java.io.IOException;
    import java.io.InputStreamReader;
    import java.net.URI;
    import java.util.HashMap;
    
    public class MyMapper extends Mapper<LongWritable, Text,Text, NullWritable> {
        private HashMap<String,String> pdmap = new HashMap<String,String>();
        private Text text = new Text();
    
        // before the task starts, cache the pd data into pdmap
        @Override
        protected void setup(Context context) throws IOException, InterruptedException {
            // get the small table pd.txt via the cache files
            URI[] cacheFiles = context.getCacheFiles();
            URI cacheFile = cacheFiles[0];
    
            // open a stream to the cached file
            FileSystem fileSystem = FileSystem.get(context.getConfiguration());
            FSDataInputStream open = fileSystem.open(new Path(cacheFile));
    
            BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(open, "UTF-8"));
            String line;
            while ((line = bufferedReader.readLine())!=null){
                String[] split = line.split("\t");
                pdmap.put(split[0],split[1]);
            }
            IOUtils.closeStreams(bufferedReader,open);
        }
    
        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            String[] line = value.toString().split("\t");
            text.set(line[0]+"\t"+pdmap.get(line[1])+"\t"+line[2]);
            context.write(text,NullWritable.get());
        }
    }
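
Running this map-only job over order.txt produces the same joined rows shown near the top of this article, written directly by the Mapper rather than by a Reducer.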
    

3、Summary

With Reduce Join, the merge happens in the Reduce phase, so the Reduce side carries most of the processing load while the Map nodes do very little work. Resource utilization is poor, and the Reduce phase is highly prone to data skew.

Solution: perform the merge on the Map side.

Map Join is suitable when one table is very small and the other is very large.

By caching the small table(s) on the Map side and applying the join logic there in advance, more work is done in the Map phase, the Reduce side handles far less data, and data skew is reduced as much as possible.
