1) Requirement:
Order table t_order (the expected output and the mapper walkthrough below use six orders, so the input has six rows):
id   | pid | amount
1001 | 01  | 1
1002 | 02  | 2
1003 | 03  | 3
1004 | 01  | 4
1005 | 02  | 5
1006 | 03  | 6
Product table t_product:
pid | pname
01  | 小米
02  | 华为
03  | 格力
Merge the product table into the order table by product pid.
Final data form:
id   | pname | amount
1001 | 小米  | 1
1004 | 小米  | 4
1002 | 华为  | 2
1005 | 华为  | 5
1003 | 格力  | 3
1006 | 格力  | 6
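Both mappers below split each line on a tab, and the file-name variant expects the order file's name to start with "order". The two inputs would therefore look like this on disk (the exact file names order.txt and pd.txt are my assumption):

order.txt (tab-separated):
1001	01	1
1002	02	2
1003	03	3
1004	01	4
1005	02	5
1006	03	6

pd.txt (tab-separated):
01	小米
02	华为
03	格力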
Approach
Requirement 1: reduce-side join (vulnerable to data skew)
Use the join column as the map output key, tag each record with the table (file) it came from, and send every record that shares a key to the same reduce task; the actual join is stitched together in the reducer.
Core question: what is the key? Both t_order and t_product use pid as the key; the map side splits and tags the records, and the reduce side merges the tables.
The merged bean needs five fields:
private int id;        // order id
private int pid;       // product id
private int amount;    // quantity ordered
private String pname;  // product name
private boolean flag;  // true = order record, false = product record
1) Create the bean class for the merged order and product record
package com.lzz.mapreduce.reducejoin;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.Writable;

public class TableBean implements Writable {
    private int id;        // order id
    private int pid;       // product id
    private int amount;    // quantity ordered
    private String pname;  // product name
    private boolean flag;  // true = order record, false = product record

    public TableBean() {
        super();
    }

    public TableBean(int id, int pid, int amount, String pname, boolean flag) {
        super();
        this.id = id;
        this.pid = pid;
        this.amount = amount;
        this.pname = pname;
        this.flag = flag;
    }

    @Override
    public void write(DataOutput out) throws IOException {
        out.writeInt(id);
        out.writeInt(pid);
        out.writeInt(amount);
        out.writeUTF(pname); // DataOutput has no writeString; use writeUTF for a String
        out.writeBoolean(flag);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        // must read the fields in exactly the order write() wrote them
        this.id = in.readInt();
        this.pid = in.readInt();
        this.amount = in.readInt();
        this.pname = in.readUTF();
        this.flag = in.readBoolean();
    }

    public int getId() { return id; }
    public void setId(int id) { this.id = id; }
    public int getPid() { return pid; }
    public void setPid(int pid) { this.pid = pid; }
    public int getAmount() { return amount; }
    public void setAmount(int amount) { this.amount = amount; }
    public String getPname() { return pname; }
    public void setPname(String pname) { this.pname = pname; }
    public boolean isFlag() { return flag; }
    public void setFlag(boolean flag) { this.flag = flag; }

    // final output form: id  pname  amount
    @Override
    public String toString() {
        return id + "\t" + pname + "\t" + amount;
    }
}
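Writable's contract is that readFields() must consume the fields in exactly the order write() produced them; a quick local round-trip (my test sketch, not part of the original post) verifies the pair stays in sync:

package com.lzz.mapreduce.reducejoin;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class TableBeanTest {
    public static void main(String[] args) throws IOException {
        TableBean in = new TableBean(1001, 1, 1, "小米", true);

        // serialize through write()
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        in.write(new DataOutputStream(bos));

        // deserialize through readFields()
        TableBean out = new TableBean();
        out.readFields(new DataInputStream(new ByteArrayInputStream(bos.toByteArray())));

        System.out.println(out); // expected: 1001	小米	1
    }
}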
2) Write the TableMapper program (distinguish the two tables by the number of fields per line)
package com.lzz.mapreduce.reducejoin;

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

// output key is pid, output value is the bean
public class TableMapper extends Mapper<LongWritable, Text, Text, TableBean> {
    Text k = new Text();
    TableBean tableBean = new TableBean();

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        String line = value.toString();
        String[] words = line.split("\t");
        if (words.length == 3) {
            // order record: id  pid  amount
            k.set(words[1]);
            tableBean.setId(Integer.parseInt(words[0]));
            tableBean.setPid(Integer.parseInt(words[1]));
            tableBean.setAmount(Integer.parseInt(words[2]));
            tableBean.setPname("");
            tableBean.setFlag(true);
        } else {
            // product record: pid  pname
            // (reset id/amount too: the bean object is reused across calls)
            k.set(words[0]);
            tableBean.setId(0);
            tableBean.setAmount(0);
            tableBean.setPid(Integer.parseInt(words[0]));
            tableBean.setPname(words[1]);
            tableBean.setFlag(false);
        }
        context.write(k, tableBean);
        // After the shuffle sorts by key (pid), each reduce group looks like:
        //   key=01: (1001,01,1,"",true) (1004,01,4,"",true) (0,01,0,小米,false)
        //   key=02: (1002,02,2,"",true) (1005,02,5,"",true) (0,02,0,华为,false)
        //   key=03: (1003,03,3,"",true) (1006,03,6,"",true) (0,03,0,格力,false)
    }
}
2) Alternative TableMapper (distinguish the two tables by the input file name)
package com.lzz.mapreduce.reducejoin;

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;

public class TableMapper2 extends Mapper<LongWritable, Text, Text, TableBean> {
    Text k = new Text();

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // tell the two tables apart by the name of the file this split came from
        FileSplit split = (FileSplit) context.getInputSplit();
        String name = split.getPath().getName();

        String line = value.toString();
        String[] words = line.split("\t");

        TableBean v;
        if (name.startsWith("order")) {
            // order record: id  pid  amount
            k.set(words[1]);
            v = new TableBean(Integer.parseInt(words[0]), Integer.parseInt(words[1]),
                    Integer.parseInt(words[2]), "", true);
        } else {
            // product record: pid  pname
            k.set(words[0]);
            v = new TableBean(0, Integer.parseInt(words[0]), 0, words[1], false);
        }
        context.write(k, v);
    }
}
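All lines in one input split come from the same file, so the file-name check only needs to run once per split rather than once per line. A small variant (my sketch, not from the original post; TableMapper3 is a hypothetical name) moves the check into setup():

package com.lzz.mapreduce.reducejoin;

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;

public class TableMapper3 extends Mapper<LongWritable, Text, Text, TableBean> {
    Text k = new Text();
    private boolean isOrderFile;

    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
        // one split belongs to one file, so resolve the table type only once
        FileSplit split = (FileSplit) context.getInputSplit();
        isOrderFile = split.getPath().getName().startsWith("order");
    }

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        String[] words = value.toString().split("\t");
        if (isOrderFile) {
            k.set(words[1]);
            context.write(k, new TableBean(Integer.parseInt(words[0]),
                    Integer.parseInt(words[1]), Integer.parseInt(words[2]), "", true));
        } else {
            k.set(words[0]);
            context.write(k, new TableBean(0, Integer.parseInt(words[0]), 0, words[1], false));
        }
    }
}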
3) Write the TableReducer program (the core)
package com.lzz.mapreduce.reducejoin;

import java.io.IOException;
import java.util.ArrayList;

import org.apache.commons.beanutils.BeanUtils;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class TableReducer extends Reducer<Text, TableBean, TableBean, NullWritable> {
    // reduce() handles one pid group at a time, e.g. for key=01:
    //   id    pid  amount  pname  flag
    //   1001  01   1       ""     true   -> orderBeans
    //   1004  01   4       ""     true   -> orderBeans
    //   0     01   0       小米    false  -> pdBean
    // and replaces the pid in each order record with the product's pname.
    @Override
    protected void reduce(Text key, Iterable<TableBean> values,
            Context context) throws IOException, InterruptedException {
        // 1. collect all order records for this pid
        ArrayList<TableBean> orderBeans = new ArrayList<>();
        // 2. one bean to hold the single product record for this pid, e.g. "01 小米"
        //    (if the product record is missing, pname simply stays empty)
        TableBean pdBean = new TableBean();

        for (TableBean value : values) {
            if (value.isFlag()) { // order record
                TableBean orderBean = new TableBean();
                try {
                    // Hadoop reuses the object behind the values iterator, so each
                    // record must be copied out; adding `value` directly would leave
                    // the list full of references to one object holding the last record
                    BeanUtils.copyProperties(orderBean, value);
                } catch (Exception e) {
                    e.printStackTrace();
                }
                orderBeans.add(orderBean);
            } else { // product record
                try {
                    BeanUtils.copyProperties(pdBean, value);
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }
        }

        // 3. join: fill in the product name on every order record
        for (TableBean bean : orderBeans) {
            bean.setPname(pdBean.getPname());
            // 4. emit; TableBean.toString() formats it as "id  pname  amount"
            context.write(bean, NullWritable.get());
        }
    }
}
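BeanUtils.copyProperties gets the copy done, but it relies on reflection for every order record and forces the try/catch boilerplate. A lighter alternative (a sketch; the copyOf helper is my addition, not part of the original class) is a plain copy method on TableBean:

// could be added to TableBean as a copy helper
public static TableBean copyOf(TableBean src) {
    // field-by-field copy, no reflection, no checked exceptions
    return new TableBean(src.getId(), src.getPid(), src.getAmount(),
            src.getPname(), src.isFlag());
}

With it, the order branch of the reducer loop shrinks to orderBeans.add(TableBean.copyOf(value)); and the product branch to pdBean = TableBean.copyOf(value);.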
4) Write the TableDriver program
package com.lzz.mapreduce.reducejoin;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class TableDriver {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        // hardcoded local paths for testing; remove when submitting to a cluster
        args = new String[] { "g:/input/reducejoin", "g:/output3/reducejoin" };

        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        job.setJarByClass(TableDriver.class);

        job.setMapperClass(TableMapper2.class);
        job.setReducerClass(TableReducer.class);

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(TableBean.class);
        job.setOutputKeyClass(TableBean.class);
        job.setOutputValueClass(NullWritable.class);

        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        boolean res = job.waitForCompletion(true);
        System.exit(res ? 0 : 1);
    }
}
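To run on a cluster instead of the hardcoded local paths, a typical submission (assuming the hardcoded args line is removed and the classes are packaged as reducejoin.jar; the jar name is my placeholder) looks like:

hadoop jar reducejoin.jar com.lzz.mapreduce.reducejoin.TableDriver /input/reducejoin /output/reducejoin

Note that the output directory must not already exist, or FileOutputFormat will fail the job.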
Run result