How Reduce Join Works
Map-side work in a Reduce Join:
(1) Tag every record with a label identifying its source, i.e. which table (file) it came from;
(2) Use the join field as the key, and the remaining fields plus the source tag as the value.
Reduce-side work in a Reduce Join:
(1) In reduce(), split the group of records that share the same key according to the source tag;
(2) Then join the order-side records with the product-side record and write out the result (a minimal sketch follows).
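To make the pattern concrete before the full Bean-based code below, here is a minimal sketch of my own (not part of the original implementation), assuming tab-separated input files whose names contain "order" and "pd", and using a plain tagged Text value instead of a custom bean; the class names TagJoinMapper/TagJoinReducer are illustrative only:
// TagJoinSketch.java -- both classes kept package-private so they can live in one file
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;

class TagJoinMapper extends Mapper<LongWritable, Text, Text, Text> {
    private String fileName;

    @Override
    protected void setup(Context context) {
        // the source file name is the "tag" telling the Reducer where each record came from
        fileName = ((FileSplit) context.getInputSplit()).getPath().getName();
    }

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        String[] f = value.toString().split("\t");
        if (fileName.contains("order")) {
            // order record: id  pid  amount  ->  key = pid, value = tag + id + amount
            context.write(new Text(f[1]), new Text("order\t" + f[0] + "\t" + f[2]));
        } else {
            // product record: pid  pname    ->  key = pid, value = tag + pname
            context.write(new Text(f[0]), new Text("product\t" + f[1]));
        }
    }
}

class TagJoinReducer extends Reducer<Text, Text, Text, NullWritable> {
    @Override
    protected void reduce(Text pid, Iterable<Text> values, Context context)
            throws IOException, InterruptedException {
        List<String[]> orders = new ArrayList<>();
        String pname = "";
        // (1) separate the group by its source tag
        for (Text v : values) {
            String[] f = v.toString().split("\t");
            if ("order".equals(f[0])) {
                orders.add(new String[]{f[1], f[2]}); // id, amount
            } else {
                pname = f[1];
            }
        }
        // (2) join: attach the product name to every order sharing this pid
        for (String[] o : orders) {
            context.write(new Text(o[0] + "\t" + pname + "\t" + o[1]), NullWritable.get());
        }
    }
}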
Reduce Join in Practice
1. Input data
Order table t_order
id pid amount
1001 01 1
1002 02 2
1003 03 3
1004 01 4
1005 02 5
1006 03 6
Product table t_product
pid pname
01 小米
02 华为
03 格力
Expected output after the join
id pname amount
1001 小米 1
1004 小米 4
1002 华为 2
1005 华为 5
1003 格力 3
1006 格力 6
2. Processing flow
(1) Encapsulate all fields of both tables, plus a source tag, into one Bean class.
(2) Mapper
- Get the source file name from the input split;
- In the Mapper, wrap records from either file into the Bean; fields a record does not have are filled with default values;
- The Mapper emits the join key as the key and the Bean as the value.
(3) Reducer
- Because the join field is the Map output key, the records from both tables that satisfy the join condition, each carrying its source-file tag, all arrive at the same ReduceTask, where they are stitched together.
- The two tables are bridged by pid: pid is the key emitted by map().
Implementation
(1) The Bean (TableBean)
package com.fantasy.mapreduce.ReduceJoin;
import org.apache.hadoop.io.Writable;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
public class TableBean implements Writable {
private String id; //order id
private String pid; //product id
private int amount; //quantity
private String pname; //product name
private String flag; //tag marking whether the record comes from the order table or the product (pd) table
@Override
public String toString() {
return id + "\t" + pname + "\t" + amount;
}
public TableBean() {
}
@Override
public void write(DataOutput out) throws IOException {
out.writeUTF(id);
out.writeUTF(pid);
out.writeInt(amount);
out.writeUTF(pname);
out.writeUTF(flag);
}
@Override
public void readFields(DataInput in) throws IOException {
id = in.readUTF();
pid = in.readUTF();
amount = in.readInt();
pname = in.readUTF();
flag = in.readUTF();
}
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public String getPid() {
return pid;
}
public void setPid(String pid) {
this.pid = pid;
}
public int getAmount() {
return amount;
}
public void setAmount(int amount) {
this.amount = amount;
}
public String getPname() {
return pname;
}
public void setPname(String pname) {
this.pname = pname;
}
public String getFlag() {
return flag;
}
public void setFlag(String flag) {
this.flag = flag;
}
}
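An optional sanity check, not part of the original post: the Writable contract requires write() and readFields() to handle the fields in exactly the same order, otherwise deserialization silently scrambles the data. A minimal round-trip sketch, assuming the TableBean above is on the classpath (the class name TableBeanRoundTrip is hypothetical):
package com.fantasy.mapreduce.ReduceJoin;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Hypothetical helper: serialize a TableBean and read it back, confirming the field order matches.
public class TableBeanRoundTrip {
    public static void main(String[] args) throws IOException {
        TableBean in = new TableBean();
        in.setId("1001");
        in.setPid("01");
        in.setAmount(1);
        in.setPname("");
        in.setFlag("order");

        // serialize exactly as the MapReduce framework would
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        in.write(new DataOutputStream(bytes));

        // deserialize into a fresh bean
        TableBean out = new TableBean();
        out.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));

        // expected output: 1001 01 1 order -- all fields survive the round trip intact
        System.out.println(out.getId() + " " + out.getPid() + " " + out.getAmount() + " " + out.getFlag());
    }
}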
(2)Mapper
package com.fantasy.mapreduce.ReduceJoin;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import java.io.IOException;
public class TableMapper extends Mapper<LongWritable, Text,Text,TableBean> {
private String fileName;
private Text outK = new Text();
private TableBean outV = new TableBean();
//Called once at the beginning of the task.
@Override
protected void setup(Context context) throws IOException, InterruptedException {
//todo get the source file path from the input split
FileSplit split = (FileSplit) context.getInputSplit();
//only FileSplit (not the generic InputSplit) has a getPath() method
fileName = split.getPath().getName();
}
@Override
protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
//read one line
String line = value.toString();
//decide which file the record came from
if(fileName.contains("order")){
//order table
//id pid amount
//1001 01 1
String[] split = line.split("\t");
outK.set(split[1]);
outV.setId(split[0]);
outV.setPid(split[1]);
outV.setAmount(Integer.parseInt(split[2]));
//fields the order table lacks still need default values
outV.setPname("");
outV.setFlag("order");
}else{
//product table
//pid pname
//01 小米
String[] split = line.split("\t");
outK.set(split[0]);
outV.setId("");
outV.setPid(split[0]);
//default values for the fields the product table lacks
outV.setAmount(0);
outV.setPname(split[1]);
outV.setFlag("product");
}
//pid as the key
//the Bean as the value
context.write(outK,outV);
}
}
(3) Reducer
package com.fantasy.mapreduce.ReduceJoin;
import org.apache.commons.beanutils.BeanUtils;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.util.ArrayList;
public class TableReducer extends Reducer<Text,TableBean,TableBean, NullWritable> {
@Override
protected void reduce(Text key, Iterable<TableBean> values, Reducer<Text, TableBean, TableBean, NullWritable>.Context context) throws IOException, InterruptedException {
//todo 1. Notes
// the incoming key is pid; the values are Bean objects
// pid repeats in the order table, so the order-side beans are collected in a list
// pid is unique in the product table, so a single bean is enough
ArrayList<TableBean> orderBeans = new ArrayList<TableBean>();
TableBean pdBean = new TableBean();
//todo 2. Notes
// the framework reuses one value object while iterating, so the iterator keeps handing back the same reference
// adding that reference to the list directly would fill the list with copies of the last record
// therefore create a temporary object and copy the current record's data into it
// BeanUtils is an Apache Commons utility class (commons-beanutils), not part of Hadoop
for (TableBean value : values) {
if("order".equals(value.getFlag())){
TableBean tmpBean = new TableBean();
//copy the data out of the reused value object into a fresh bean
try {
BeanUtils.copyProperties(tmpBean,value);
} catch (IllegalAccessException e) {
e.printStackTrace();
} catch (InvocationTargetException e) {
e.printStackTrace();
}
orderBeans.add(tmpBean);
}else{
try {
BeanUtils.copyProperties(pdBean,value);
} catch (IllegalAccessException e) {
e.printStackTrace();
} catch (InvocationTargetException e) {
e.printStackTrace();
}
}
}
//todo 3. Notes
// iterate over the order beans and perform the join
for (TableBean orderBean : orderBeans) {
//attach the product name to each order
orderBean.setPname(pdBean.getPname());
context.write(orderBean,NullWritable.get());
}
}
}
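The "todo 2" note above deserves a standalone illustration. Hadoop reuses a single value object while iterating over values, so adding that object straight into a collection stores the same reference repeatedly. A small, self-contained sketch of the pitfall (plain Java, no MapReduce context needed; the class name ObjectReuseDemo is hypothetical):
package com.fantasy.mapreduce.ReduceJoin;

import java.util.ArrayList;
import java.util.List;

// Hypothetical demo: mimics the framework reusing one value object per iteration.
public class ObjectReuseDemo {
    public static void main(String[] args) {
        TableBean reused = new TableBean();          // stands in for the reused iterator value
        List<TableBean> wrong = new ArrayList<>();
        List<TableBean> right = new ArrayList<>();

        String[][] rows = {{"1001", "1"}, {"1004", "4"}};
        for (String[] r : rows) {
            reused.setId(r[0]);
            reused.setAmount(Integer.parseInt(r[1]));

            wrong.add(reused);                        // same reference added each time

            TableBean copy = new TableBean();         // fresh object with the data copied over
            copy.setId(reused.getId());
            copy.setAmount(reused.getAmount());
            right.add(copy);
        }

        System.out.println(wrong.get(0).getId());     // prints 1004 -- the first entry was overwritten
        System.out.println(right.get(0).getId());     // prints 1001 -- the copy kept its data
    }
}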
(4)Driver
package com.fantasy.mapreduce.ReduceJoin;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;
public class TableDriver {
public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
Job job = Job.getInstance(new Configuration());
job.setJarByClass(TableDriver.class);
job.setMapperClass(TableMapper.class);
job.setReducerClass(TableReducer.class);
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(TableBean.class);
job.setOutputKeyClass(TableBean.class);
job.setOutputValueClass(NullWritable.class);
FileInputFormat.setInputPaths(job, new Path("D:\\input"));
FileOutputFormat.setOutputPath(job, new Path("D:\\output"));
boolean b = job.waitForCompletion(true);
System.exit(b ? 0 : 1);
}
}
Drawbacks of Reduce Join
(1) The Map side does very little work (each MapTask only tags the records of its own split), while the whole join is carried out on the Reduce side;
(2) If some key has a very large number of records, data skew easily arises on the Reduce side.
Map Join
- Because Reduce Join is prone to data skew, the join can instead be done on the Map side.
- Since FileInputFormat creates splits file by file, records from different files never reach the same Mapper, so the small table has to be delivered to every MapTask another way.
How it works
(1) Put the smaller table into the distributed cache, so that it sits in every MapTask's memory.
(2) While a MapTask processes the big table's records, it looks up each key in the cached data; a hit means the record can be joined, and the joined record is assembled and written out.
When to use it
One big table joined with one small table; if both tables were large, the cached table would blow up the MapTask's memory. (For a rough sense of scale: a small table with a million short rows takes on the order of tens to a few hundred MB once loaded into a HashMap, which a task heap can usually absorb; a table of hundreds of millions of rows cannot be cached this way.)
Implementation steps
(1) In the Mapper's setup() phase, read the small table's data file into an in-memory map.
(2) In the Driver class, register the small table as a cache file so it is shipped to the MapTask nodes:
//1. cache an ordinary (local) file on the task nodes
job.addCacheFile(new URI("file:///e:/cache/pd.txt"));
//2. when running on a cluster, the file must be given as an HDFS path
job.addCacheFile(new URI("hdfs://hadoop102:9820/cache/pd.txt"));
(3) In the Driver class, set the number of ReduceTasks to 0.
(1) Driver
package com.atguigu.mapreduce.mapjoin;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
public class MapJoinDriver {
public static void main(String[] args) throws IOException, URISyntaxException, ClassNotFoundException, InterruptedException {
Configuration conf = new Configuration();
Job job = Job.getInstance(conf);
job.setJarByClass(MapJoinDriver.class);
job.setMapperClass(MapJoinMapper.class);
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(NullWritable.class);
//(1) Note: with no Reducer stage, these two settings must match the Mapper's output types
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(NullWritable.class);
//(2) ship the smaller data source into the MapTask nodes' memory
job.addCacheFile(new URI("file:///D:/input/inputcache/pd.txt"));
//(3) the Map-side join needs no Reduce stage, so set the number of ReduceTasks to 0
job.setNumReduceTasks(0);
//(4) the job reads only the big table's data file, which is split to form MapTasks
FileInputFormat.setInputPaths(job, new Path("D:\\input"));
FileOutputFormat.setOutputPath(job, new Path("D:\\output"));
boolean b = job.waitForCompletion(true);
System.exit(b ? 0 : 1);
}
}
(2)Mapper
package com.atguigu.mapreduce.mapjoin;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.URI;
import java.util.HashMap;
import java.util.Map;
public class MapJoinMapper extends Mapper<LongWritable, Text, Text, NullWritable> {
//holds the small table's data; the map key is the join key (pid)
private Map<String, String> pdMap = new HashMap<>();
private Text text = new Text();
//todo 1. before the task starts, cache the pd data into pdMap
@Override
protected void setup(Context context) throws IOException, InterruptedException {
//1. locate the cached small-table file pd.txt
URI[] cacheFiles = context.getCacheFiles();
Path path = new Path(cacheFiles[0]);
//2. get a FileSystem object and open an input stream
FileSystem fs = FileSystem.get(context.getConfiguration());
FSDataInputStream fis = fs.open(path);
//3. wrap the stream in a BufferedReader for convenient line-by-line reading
BufferedReader reader = new BufferedReader(new InputStreamReader(fis, "UTF-8"));
//read and process the file line by line
String line;
while (StringUtils.isNotEmpty(line = reader.readLine())) {
//split the line
//pid pname
//01 小米
String[] split = line.split("\t");
pdMap.put(split[0], split[1]);
}
//close the stream
IOUtils.closeStream(reader);
}
//process the big table's records
@Override
protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
//order id pid amount
//1001 01 1
String[] fields = value.toString().split("\t");
//use the pid of each big-table record to look up pname in pdMap
String pname = pdMap.get(fields[1]);
//replace the pid in the record with pname
text.set(fields[0] + "\t" + pname + "\t" + fields[2]);
//write out
context.write(text,NullWritable.get());
}
}
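One edge case the Mapper above does not handle: if a pid from the big table has no entry in pdMap, get() returns null and the output line will literally contain "null". An optional defensive tweak (not in the original code) would be:
//fall back to a placeholder when the pid has no match in the cached small table
String pname = pdMap.getOrDefault(fields[1], "NULL");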