1. Requirement Overview
In real-world work you may run into a requirement like this: several related tables need to be combined into a single table. The example implemented below joins an order table with a product table on the product id (pid), so that every order record ends up carrying the product name.
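For example (the file names, column layouts, and values below are made up for illustration, but they match what the code in section 3 expects: tab-separated columns, order records as id/pid/amount and product records as pid/pname):

order.txt (id, pid, amount)
1001  01  1
1002  02  2
1003  03  3

pd.txt (pid, pname)
01  Apple
02  Banana
03  Orange

Desired result, joined on pid (id, amount, pname)
1001  1  Apple
1002  2  Banana
1003  3  Orange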
2. Solution Approach
The main work on the Map side: for the key/value pairs coming from the different tables (files), attach a tag that marks which source each record came from. Then use the join field as the key, the remaining fields plus the newly added tag as the value, and emit the pair.
The main work on the Reduce side: by the time records reach the Reduce side, they are already grouped by the join field (the key). Within each group we only need to separate the records that came from different files (using the tags added in the Map phase) and then merge them.
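With the hypothetical sample data from section 1, the Map phase would emit pairs like these (key = pid, value = the tagged record), so the shuffle brings an order record and its matching product record into the same group:

key "01", value (id=1001, pid=01, amount=1, pname="", flag="order")   from order.txt
key "01", value (id="", pid=01, amount=0, pname=Apple, flag="pd")     from pd.txt

In the Reduce phase the group for key "01" then contains both records; the reducer copies pname=Apple into the order record and outputs 1001, 1, Apple.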
3. Code Implementation
3.1 Writing the TableBean class
package com.yangmin.mapreduce.reduceJoin;
import org.apache.hadoop.io.Writable;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
public class TableBean implements Writable {
private String id; //order id
private String pid; //product pid
private int amount; //product quantity
private String pname; //product name
private String flag; //tag marking which table the record came from
public TableBean() {
}
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public String getPid() {
return pid;
}
public void setPid(String pid) {
this.pid = pid;
}
public int getAmount() {
return amount;
}
public void setAmount(int amount) {
this.amount = amount;
}
public String getPname() {
return pname;
}
public void setPname(String pname) {
this.pname = pname;
}
public String getFlag() {
return flag;
}
public void setFlag(String flag) {
this.flag = flag;
}
@Override
public void write(DataOutput out) throws IOException {
out.writeUTF(this.id);
out.writeUTF(this.pid);
out.writeInt(amount);
out.writeUTF(this.pname);
out.writeUTF(this.flag);
}
@Override
public void readFields(DataInput in) throws IOException {
this.id = in.readUTF();
this.pid = in.readUTF();
this.amount = in.readInt();
this.pname = in.readUTF();
this.flag = in.readUTF();
}
@Override
public String toString() {
return id + "\t" + amount + "\t" + pname;
}
}
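One thing worth noting about the class above: write() and readFields() must handle the fields in exactly the same order, otherwise deserialization silently scrambles the data. A minimal standalone round-trip check (a hypothetical snippet for local testing, not part of the MapReduce job) could look like this:

package com.yangmin.mapreduce.reduceJoin;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
public class TableBeanRoundTrip {
public static void main(String[] args) throws Exception {
//build a sample bean (hypothetical values)
TableBean in = new TableBean();
in.setId("1001");
in.setPid("01");
in.setAmount(1);
in.setPname("");
in.setFlag("order");
//serialize it with write()
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
in.write(new DataOutputStream(buffer));
//deserialize it with readFields() and print the result
TableBean out = new TableBean();
out.readFields(new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())));
System.out.println(out); //prints: 1001	1 (pname is empty here)
}
}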
3.2 Writing the Mapper class
package com.yangmin.mapreduce.reduceJoin;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import java.io.IOException;
public class TableMapper extends Mapper<LongWritable, Text,Text,TableBean> {
private String filename; //name of the file the current split comes from
private Text outK = new Text(); //output key
private TableBean outV = new TableBean(); //output value
@Override
protected void setup(Context context) throws IOException, InterruptedException {
//determine which file the current split comes from and get its file name
InputSplit split = context.getInputSplit();
FileSplit fileSplit = (FileSplit) split;
filename = fileSplit.getPath().getName();
}
@Override
protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
//read one line
String s = value.toString();
//decide which file the line came from, then handle it accordingly
if (filename.contains("order")){ //order table
//split the line
String[] split = s.split("\t");
//populate the output key (join field: pid)
outK.set(split[1]);
//populate the output value
outV.setId(split[0]);
outV.setPid(split[1]);
outV.setAmount(Integer.parseInt(split[2]));
outV.setPname("");
outV.setFlag("order");
}else { //product table
//split the line
String[] split = s.split("\t");
//populate the output key (join field: pid)
outK.set(split[0]);
//populate the output value
outV.setId("");
outV.setPid(split[0]);
outV.setPname(split[1]);
outV.setAmount(0);
outV.setFlag("pd");
}
//emit
context.write(outK, outV);
}
}
3.3 Writing the Reducer class
package com.yangmin.mapreduce.reduceJoin;
import org.apache.commons.beanutils.BeanUtils;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.util.ArrayList;
public class TableReducer extends Reducer<Text,TableBean,TableBean,NullWritable> {
@Override
protected void reduce(Text key, Iterable<TableBean> values, Context context) throws IOException, InterruptedException {
//initialize: a list for the order records and a single bean for the product record of this group
ArrayList<TableBean> orderBean = new ArrayList<>();
TableBean pdBean = new TableBean();
// iterate over all values in the group
for (TableBean value : values) {
if ("order".equals(value.getFlag())){
TableBean tmpBean = new TableBean(); //create a new object: Hadoop reuses the iterator's value object, so it must be copied before being stored
try {
BeanUtils.copyProperties(tmpBean, value); //copy the fields into the new object
} catch (IllegalAccessException e) {
e.printStackTrace();
} catch (InvocationTargetException e) {
e.printStackTrace();
}
orderBean.add(tmpBean);
}else {
try {
BeanUtils.copyProperties(pdBean, value);
} catch (IllegalAccessException e) {
e.printStackTrace();
} catch (InvocationTargetException e) {
e.printStackTrace();
}
}
}
//iterate over the collected order records, fill in pname, and write them out
for (TableBean bean : orderBean) {
bean.setPname(pdBean.getPname());
context.write(bean, NullWritable.get());
}
}
}
3.4 Writing the Driver class
package com.yangmin.mapreduce.reduceJoin;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;
public class TableDriver {
public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
//get the Job instance
Configuration configuration = new Configuration();
Job job = Job.getInstance(configuration);
//wire up the Mapper and the Reducer
job.setMapperClass(TableMapper.class);
job.setReducerClass(TableReducer.class);
//set the map output key/value types
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(TableBean.class);
//set the final output key/value types (matching the Reducer's declared output types)
job.setOutputKeyClass(TableBean.class);
job.setOutputValueClass(NullWritable.class);
//set the input and output paths
FileInputFormat.setInputPaths(job, new Path("C:\\ZProject\\bigdata\\input\\inputtable"));
FileOutputFormat.setOutputPath(job,new Path("C:\\ZProject\\bigdata\\output\\Join_Table"));
//submit the job and wait for completion
boolean b = job.waitForCompletion(true);
System.exit(b ? 0 : 1);
}
}
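Assuming the hypothetical sample files from section 1 are placed under the input directory configured above, the part-r-00000 file in the output directory would contain the joined records in the id / amount / pname layout produced by TableBean.toString():

1001  1  Apple
1002  2  Banana
1003  3  Orange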
4. Summary
Drawback: with this approach the join itself is performed in the Reduce phase, so the Reduce side bears most of the processing load while the Map nodes do very little computation. Resource utilization is therefore poor, and the Reduce phase is very prone to data skew, since all records with the same join key go to a single reducer.
The next post will solve this problem. Stay tuned!