hadoop_mapreduce06 - ReduceJoin Case Study
(1) Requirements
Order data table order.txt (fields are separated by a single tab; the file is used here unmodified)
id pid amount
1001 01 1
1002 02 2
1003 03 3
1001 01 1
1002 02 2
1003 03 3
Product information table pd.txt
pid pname
01 小米
02 华为
03 格力
Final data: merge the pname column from the product table into the order table, joining on pid
id pname amount
1001 小米 1
1001 小米 1
1002 华为 2
1002 华为 2
1003 格力 3
1003 格力 3
(2) Requirements Analysis
The Map side reads both files, uses the file name obtained in setup() to tag every record with its source table (order or pd), and emits pid as the key and an OrderPdBean as the value. All records with the same pid reach the same reduce call, so the Reduce side caches the order records, keeps the single pd record, copies its pname into each cached order record, and writes the joined records out.
(3) Code Implementation
OrderPdBean.java
package com.art.mapreduce.reducejoin;
import org.apache.hadoop.io.Writable;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
/**
 * Input data
 * order.txt
 * id pid amount
 * 1001 01 1
 * <p>
 * pd.txt
 * pid pname
 * 01 小米
 * <p>
 * Expected (tagged) intermediate data
 * orderId pid amount pname tname
 * 1002 02 2 null order
 * 1003 03 3 null order
 * null 01 0 小米 pd
 * null 02 0 华为 pd
 */
public class OrderPdBean implements Writable {
    // 1. Fields
    private String orderId;
    private String pid;
    private int amount;
    private String pname;
    private String tname;

    // 2. A no-arg constructor is required for deserialization (cmd+N to generate); super() calls the parent constructor
    public OrderPdBean() {
        super();
    }

    // Parameterized constructor
    public OrderPdBean(String orderId, String pid, int amount, String pname, String tname) {
        super();
        this.orderId = orderId;
        this.pid = pid;
        this.amount = amount;
        this.pname = pname;
        this.tname = tname;
    }

    // 3. Serialization method; the Writable interface requires overriding it
    @Override
    public void write(DataOutput out) throws IOException {
        out.writeUTF(orderId);
        out.writeUTF(pid);
        out.writeInt(amount);
        out.writeUTF(pname);
        out.writeUTF(tname);
    }

    // 4. Deserialization method; the read order must match the write order exactly (FIFO)
    @Override
    public void readFields(DataInput in) throws IOException {
        this.orderId = in.readUTF();
        this.pid = in.readUTF();
        this.amount = in.readInt();
        this.pname = in.readUTF();
        this.tname = in.readUTF();
    }

    // 5. Getters and setters (cmd+N)
    public String getOrderId() {
        return orderId;
    }

    public void setOrderId(String orderId) {
        this.orderId = orderId;
    }

    public String getPid() {
        return pid;
    }

    public void setPid(String pid) {
        this.pid = pid;
    }

    public int getAmount() {
        return amount;
    }

    public void setAmount(int amount) {
        this.amount = amount;
    }

    public String getPname() {
        return pname;
    }

    public void setPname(String pname) {
        this.pname = pname;
    }

    public String getTname() {
        return tname;
    }

    public void setTname(String tname) {
        this.tname = tname;
    }

    // 6. toString (cmd+N)
    @Override
    public String toString() {
        return "OrderPdBean{" +
                "orderId='" + orderId + '\'' +
                ", pid='" + pid + '\'' +
                ", amount=" + amount +
                ", pname='" + pname + '\'' +
                ", tname='" + tname + '\'' +
                '}';
    }
}
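The write/readFields pairing above can be sanity-checked locally without running a job. A minimal round-trip sketch, not part of the original project (the class name and sample field values are illustrative):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;

public class OrderPdBeanRoundTripTest {
    public static void main(String[] args) throws Exception {
        // serialize a sample bean with write()
        OrderPdBean original = new OrderPdBean("1001", "01", 1, "", "order");
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        DataOutputStream dataOut = new DataOutputStream(buffer);
        original.write(dataOut);
        dataOut.flush();

        // deserialize into a fresh bean with readFields()
        OrderPdBean restored = new OrderPdBean();
        restored.readFields(new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())));

        // the two toString() results should match if the read order mirrors the write order
        System.out.println(original);
        System.out.println(restored);
    }
}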
ReduceJoinMapper.java
package com.art.mapreduce.reducejoin;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import java.io.IOException;
import static java.lang.System.exit;
/**
 * The Map side tags every line with a tname marker for the table it came from.
 * Output: key = pid, value = OrderPdBean.
 * <p>
 * Input data
 * order.txt
 * id pid amount
 * 1001 01 1
 * <p>
 * pd.txt
 * pid pname
 * 01 小米
 * <p>
 * Expected (tagged) output of the map stage
 * orderId pid amount pname tname
 * 1002 02 2 null order
 * 1003 03 3 null order
 * null 01 0 小米 pd
 * null 02 0 华为 pd
 */
public class ReduceJoinMapper extends Mapper<LongWritable, Text, Text, OrderPdBean> {
    // 2.0 Fields, mainly the key and value objects written out
    String tname;
    OrderPdBean opBean = new OrderPdBean();
    Text k = new Text();

    // 2.1 Get the file name of the current split in setup()
    @Override
    protected void setup(Context context) throws IOException, InterruptedException { // worth debugging to see what each context actually holds
        // super.setup(context);
        FileSplit fileSplit = (FileSplit) context.getInputSplit(); // note: 1. use getInputSplit 2. the declared type is FileSplit, not InputSplit
        tname = fileSplit.getPath().getName();
    }

    // 2.2 Tag each line of data in map()
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // 2.2.1 Read one line
        String line = value.toString();
        String[] fields;
        if (tname.contains("order")) { // the file-name markers are hardcoded here; a configurable alternative is sketched after this class
            fields = line.split("\t");
            k.set(fields[1]);
            opBean.setOrderId(fields[0]);
            opBean.setPid(fields[1]);
            opBean.setAmount(Integer.parseInt(fields[2]));
            opBean.setPname("");
            opBean.setTname("order");
        } else if (tname.contains("pd")) {
            fields = line.split("\t");
            k.set(fields[0]);
            opBean.setOrderId("");
            opBean.setPid(fields[0]);
            opBean.setAmount(0);
            opBean.setPname(fields[1]);
            opBean.setTname("pd");
        } else {
            System.out.println("The input file is neither an order file nor a pd file");
            exit(1);
        }
        context.write(k, opBean);
    }
}
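As the comment in map() notes, the "order"/"pd" checks are hardcoded. One way to remove the hardcoding is to pass the markers through the job Configuration; a sketch only, where the keys join.order.file and join.pd.file are made-up names and the rest of the Mapper stays as above:

// In ReduceJoinDriver, before submitting the job (hypothetical keys):
// conf.set("join.order.file", "order");
// conf.set("join.pd.file", "pd");

// In ReduceJoinMapper, two extra fields and an extended setup():
private String orderMarker;
private String pdMarker;

@Override
protected void setup(Context context) throws IOException, InterruptedException {
    FileSplit fileSplit = (FileSplit) context.getInputSplit();
    tname = fileSplit.getPath().getName();
    // read the markers from the configuration instead of hardcoding them in map()
    orderMarker = context.getConfiguration().get("join.order.file", "order");
    pdMarker = context.getConfiguration().get("join.pd.file", "pd");
}

// map() would then test tname.contains(orderMarker) / tname.contains(pdMarker)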
ReduceJoinReducer.java
package com.art.mapreduce.reducejoin;
import org.apache.commons.beanutils.BeanUtils;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.util.ArrayList;
import static java.lang.System.exit;
/**
 * Goal of the reduce side:
 * the map stage delivers key = pid, value = OrderPdBean, grouped by key;
 * the Reducer should write out the order-tagged beans (as the key, with NullWritable as the value),
 * with the placeholder pname replaced by the real product name.
 * <p>
 * Arriving from the map stage:
 * key = 02
 * values =
 * orderId pid amount pname tname
 * 1002 02 2 null order
 * 1002 02 2 null order
 * null 02 0 华为 pd
 * <p>
 * Written out by reduce:
 * key =
 * orderId pid amount pname tname
 * 1002 02 2 华为 order
 * 1002 02 2 华为 order
 * value = NullWritable
 */
public class ReduceJoinReducer extends Reducer<Text, OrderPdBean, OrderPdBean, NullWritable> {
    // 3.1 Fields
    // 3.1.1 List caching all order-side beans for the current key
    ArrayList<OrderPdBean> orderBeans = new ArrayList<OrderPdBean>();
    // 3.1.2 Bean holding the pd-side record (observation: all records with the same key reach one reduce call,
    //       and pd is a dimension table, so there is only one pd record per key)
    OrderPdBean pdBean = new OrderPdBean();

    // 3.2 Override the reduce method
    @Override
    protected void reduce(Text key, Iterable<OrderPdBean> values, Context context) throws IOException, InterruptedException {
        // super.reduce(key, values, context);
        // Clear the cache left over from the previous key, otherwise order beans pile up across keys
        orderBeans.clear();
        for (OrderPdBean orderPdBean : values) {
            // 3.2.1 Cache order-table records in memory
            //       (optimization thought: under data skew or memory pressure, consider doing the join on the map side; see section (4))
            if ("order".equals(orderPdBean.getTname())) {
                // Hadoop reuses the value object across loop iterations, so each record must be copied
                // into a freshly created bean; adding the same reference repeatedly would lose earlier records
                OrderPdBean tempBean = new OrderPdBean();
                try {
                    BeanUtils.copyProperties(tempBean, orderPdBean);
                    orderBeans.add(tempBean);
                } catch (IllegalAccessException e) {
                    e.printStackTrace();
                } catch (InvocationTargetException e) {
                    e.printStackTrace();
                }
            // 3.2.2 Put the product-table record into the pd bean
            } else if ("pd".equals(orderPdBean.getTname())) {
                try {
                    BeanUtils.copyProperties(pdBean, orderPdBean);
                } catch (IllegalAccessException e) {
                    e.printStackTrace();
                } catch (InvocationTargetException e) {
                    e.printStackTrace();
                }
            } else {
                System.out.println("Record is tagged neither order nor pd");
                exit(1);
            }
        }
        // 3.2.3 Walk the orderBeans list and replace each pname with the pname from pdBean
        for (OrderPdBean orderBean : orderBeans) {
            try {
                BeanUtils.copyProperty(orderBean, "pname", pdBean.getPname());
                // 3.2.4 Write the joined record out
                context.write(orderBean, NullWritable.get());
            } catch (IllegalAccessException e) {
                e.printStackTrace();
            } catch (InvocationTargetException e) {
                e.printStackTrace();
            }
        }
    }
}
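As an aside, the copy in the order branch does not have to go through BeanUtils; the bean's parameterized constructor works just as well. A small sketch of that branch only, using the variable names from the reduce method above:

// copy the reused value object field by field via the constructor, then cache the copy
OrderPdBean copy = new OrderPdBean(
        orderPdBean.getOrderId(),
        orderPdBean.getPid(),
        orderPdBean.getAmount(),
        orderPdBean.getPname(),
        orderPdBean.getTname());
orderBeans.add(copy);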
ReduceJoinDriver.java
package com.art.mapreduce.reducejoin;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;
public class ReduceJoinDriver {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        args = new String[]{"/Users/art/Documents/#Java_original_script/hadoop_mr_0000/mr_reducejoin_0000/src/main/java/com/art/mapreduce/reducejoin/datas/inputs", "/Users/21/Documents/#Java_original_script/hadoop_mr_0000/mr_reducejoin_0000/src/main/java/com/art/mapreduce/reducejoin/datas/outputs"};
        // 1. Create a job
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        // 2. Set the Mapper class and its output key/value classes
        job.setMapperClass(ReduceJoinMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(OrderPdBean.class);
        // 3. Set the Reducer class and its output key/value classes
        job.setReducerClass(ReduceJoinReducer.class);
        job.setOutputKeyClass(OrderPdBean.class);
        job.setOutputValueClass(NullWritable.class);
        // 4. Specify the class used to locate this program's jar
        job.setJarByClass(ReduceJoinDriver.class);
        // 5. Set the input and output paths
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        // 6. Submit the job configuration and the jar containing the job's classes to YARN and run
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
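Note that main() overwrites args with local debugging paths, so any paths passed on the command line are ignored. For a cluster run, a sketch of an argument guard that keeps the command-line paths (the usage string is illustrative; the rest of the driver stays the same):

// Replace the hardcoded args line with a guard and keep the paths passed on the command line
if (args.length < 2) {
    System.err.println("Usage: ReduceJoinDriver <input path> <output path>");
    System.exit(2);
}
// FileInputFormat.setInputPaths(job, new Path(args[0]));
// FileOutputFormat.setOutputPath(job, new Path(args[1]));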
/**
 * outputs result -- considered from the data: could we do it the other way round and copy orderId and amount
 * from the order beans into pdBean? No, because each pid has a single pd record but possibly many order records.
 * OrderPdBean{orderId='1001', pid='01', amount=1, pname='小米', tname='order'}
 * OrderPdBean{orderId='1001', pid='01', amount=1, pname='小米', tname='order'}
 * OrderPdBean{orderId='1002', pid='02', amount=2, pname='华为', tname='order'}
 * OrderPdBean{orderId='1002', pid='02', amount=2, pname='华为', tname='order'}
 * OrderPdBean{orderId='1003', pid='03', amount=3, pname='格力', tname='order'}
 * OrderPdBean{orderId='1003', pid='03', amount=3, pname='格力', tname='order'}
 */
/**
 Error encountered:
 Error: java.lang.ClassCastException: org.apache.hadoop.mapreduce.lib.input.FileSplit cannot be cast to org.apache.hadoop.mapred.FileSplit
 Hadoop aborts the job with a java.lang.ClassCastException,
 which means an invalid class cast:
 org.apache.hadoop.mapreduce.lib.input.FileSplit
 cannot be cast to
 org.apache.hadoop.mapred.FileSplit
 Cause: the wrong package was imported when using FileSplit.
 Fix: replace the import org.apache.hadoop.mapred.FileSplit
 with org.apache.hadoop.mapreduce.lib.input.FileSplit
 */
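In code, the fix is a single import line in ReduceJoinMapper.java:

// use the new-API FileSplit that matches org.apache.hadoop.mapreduce.Mapper
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
// not: import org.apache.hadoop.mapred.FileSplit;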
log4j.properties
log4j.rootLogger=INFO, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d %p [%c] - %m%n
log4j.appender.logfile=org.apache.log4j.FileAppender
log4j.appender.logfile.File=target/spring.log
log4j.appender.logfile.layout=org.apache.log4j.PatternLayout
log4j.appender.logfile.layout.ConversionPattern=%d %p [%c] - %m%n
pom.xml
<dependencies>
    <dependency>
        <groupId>org.apache.logging.log4j</groupId>
        <artifactId>log4j-core</artifactId>
        <version>2.8.2</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-common</artifactId>
        <version>2.7.2</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-client</artifactId>
        <version>2.7.2</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-hdfs</artifactId>
        <version>2.7.2</version>
    </dependency>
</dependencies>
(4) Performance Optimization: Join on the Map Side
Only outlined here: when the pd dimension table is small enough to hold in memory, the join can be done entirely on the map side by distributing pd.txt to every map task, which removes the shuffle and the risk of reduce-side data skew. A sketch follows.
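A minimal sketch of that map-side join, not part of the original project. The class name MapJoinMapper is made up, and it assumes the driver registers the dimension table with job.addCacheFile(new URI(".../pd.txt")), sets job.setNumReduceTasks(0), and uses Text/NullWritable as the output key/value classes:

package com.art.mapreduce.reducejoin;

import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.URI;
import java.util.HashMap;
import java.util.Map;

/**
 * Map-side join sketch: pd.txt is shipped to every map task as a cache file,
 * loaded into a HashMap in setup(), and each order line is joined in map().
 * No Reducer is needed, so there is no shuffle and no reduce-side skew.
 */
public class MapJoinMapper extends Mapper<LongWritable, Text, Text, NullWritable> {

    private final Map<String, String> pdMap = new HashMap<String, String>();
    private final Text k = new Text();

    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
        // read the cached pd.txt (registered by the driver via job.addCacheFile) into memory
        URI[] cacheFiles = context.getCacheFiles();
        FileSystem fs = FileSystem.get(context.getConfiguration());
        FSDataInputStream fis = fs.open(new Path(cacheFiles[0]));
        BufferedReader reader = new BufferedReader(new InputStreamReader(fis, "UTF-8"));
        String line;
        while ((line = reader.readLine()) != null) {
            if (line.trim().isEmpty()) {
                continue;
            }
            String[] fields = line.split("\t"); // pid \t pname
            pdMap.put(fields[0], fields[1]);
        }
        reader.close();
    }

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // order line: id \t pid \t amount -> output: id \t pname \t amount
        String[] fields = value.toString().split("\t");
        String pname = pdMap.get(fields[1]);
        k.set(fields[0] + "\t" + pname + "\t" + fields[2]);
        context.write(k, NullWritable.get());
    }
}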