Reduce Join
- Map side: use the field the tables/files share (the join key) as the output key, tag each record with its source, and pack the remaining fields into the value sent to the reducer;
- Reduce side: within each key group, separate the records by source file, then merge them into the final joined rows;
Problem it solves
Combining records that are spread across different files (here, an order table and a product table) into one joined result.
Example
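As a hypothetical illustration (the post itself does not list the data files; the values below are made up, but the tab-separated layout matches what the mapper code parses):

order.txt (id, pid, amount):
1001    01    1
1002    02    2
1003    03    3

pd.txt (pid, pname):
01    xiaomi
02    huawei
03    gree

Expected joined output (id, pname, amount):
1001    xiaomi    1
1002    huawei    2
1003    gree      3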
Writing the TableBean class
package ReduceJoin;

import org.apache.hadoop.io.Writable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

/**
 * @author 公羽
 * date: 2021/8/21
 * desc: bean holding the union of fields from the order and pd tables
 */
public class TableBean implements Writable {
    private String id;      // order id
    private String pid;     // product id (the join key)
    private int amount;     // order amount
    private String pname;   // product name
    private String flag;    // which table the record came from: "order" or "pd"

    public TableBean() {
    }

    public String getId() {
        return id;
    }

    public void setId(String id) {
        this.id = id;
    }

    public String getPid() {
        return pid;
    }

    public void setPid(String pid) {
        this.pid = pid;
    }

    public int getAmount() {
        return amount;
    }

    public void setAmount(int amount) {
        this.amount = amount;
    }

    public String getPname() {
        return pname;
    }

    public void setPname(String pname) {
        this.pname = pname;
    }

    public String getFlag() {
        return flag;
    }

    public void setFlag(String flag) {
        this.flag = flag;
    }

    @Override
    public String toString() {
        return id + "\t" + pname + "\t" + amount;
    }

    // Serialization: readFields must read the fields in exactly the order write wrote them
    @Override
    public void write(DataOutput dataOutput) throws IOException {
        dataOutput.writeUTF(id);
        dataOutput.writeUTF(pid);
        dataOutput.writeInt(amount);
        dataOutput.writeUTF(pname);
        dataOutput.writeUTF(flag);
    }

    @Override
    public void readFields(DataInput dataInput) throws IOException {
        this.id = dataInput.readUTF();
        this.pid = dataInput.readUTF();
        this.amount = dataInput.readInt();
        this.pname = dataInput.readUTF();
        this.flag = dataInput.readUTF();
    }
}
Writing the TableMapper class
package ReduceJoin;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;

import java.io.IOException;

/**
 * @author 公羽
 * date: 2021/8/21
 * desc: tags each record with its source file and emits pid as the join key
 */
public class TableMapper extends Mapper<LongWritable, Text, Text, TableBean> {

    private String filename;
    private Text outK = new Text();
    private TableBean outV = new TableBean();

    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
        // Remember which input file this map task is reading
        InputSplit inputSplit = context.getInputSplit();
        FileSplit fileSplit = (FileSplit) inputSplit;
        filename = fileSplit.getPath().getName();
    }

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        String line = value.toString();
        if (filename.contains("order")) {
            // order line: id \t pid \t amount
            String[] split = line.split("\t");
            outK.set(split[1]);
            outV.setId(split[0]);
            outV.setPid(split[1]);
            outV.setAmount(Integer.parseInt(split[2]));
            outV.setPname("");
            outV.setFlag("order");
        } else {
            // pd line: pid \t pname
            String[] split = line.split("\t");
            outK.set(split[0]);
            outV.setId("");
            outV.setPid(split[0]);
            outV.setAmount(0);
            outV.setPname(split[1]);
            outV.setFlag("pd");
        }
        context.write(outK, outV);
    }
}
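Writing the TableReduce class
The driver below registers a TableReduce reducer whose source is not shown in the post. The following is a minimal sketch consistent with the mapper's output (pid as the Text key, TableBean as the value) and the driver's final types (TableBean key, NullWritable value); the manual field copy is an assumption added here because Hadoop reuses the value object across iterations.

package ReduceJoin;

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

public class TableReduce extends Reducer<Text, TableBean, TableBean, NullWritable> {

    @Override
    protected void reduce(Text key, Iterable<TableBean> values, Context context) throws IOException, InterruptedException {
        List<TableBean> orderBeans = new ArrayList<>();
        String pname = "";
        for (TableBean value : values) {
            if ("order".equals(value.getFlag())) {
                // Hadoop reuses "value" across iterations, so copy the fields into a fresh bean
                TableBean orderBean = new TableBean();
                orderBean.setId(value.getId());
                orderBean.setPid(value.getPid());
                orderBean.setAmount(value.getAmount());
                orderBeans.add(orderBean);
            } else {
                // the pd record in this group carries the product name
                pname = value.getPname();
            }
        }
        // Fill in the product name and emit one joined row per order record
        for (TableBean orderBean : orderBeans) {
            orderBean.setPname(pname);
            context.write(orderBean, NullWritable.get());
        }
    }
}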
Writing the TableDriver class
package ReduceJoin;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

/**
 * @author 公羽
 * date: 2021/8/21
 * desc: wires up the reduce-side join job
 */
public class TableDriver {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        // TODO 1. Get the configuration and create the job
        Configuration configuration = new Configuration();
        Job job = Job.getInstance(configuration);
        // TODO 2. Set the jar by class
        job.setJarByClass(TableDriver.class);
        // TODO 3. Set the mapper and reducer classes
        job.setMapperClass(TableMapper.class);
        job.setReducerClass(TableReduce.class);
        // TODO 4. Set the map output types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(TableBean.class);
        // TODO 5. Set the final output key/value types
        job.setOutputKeyClass(TableBean.class);
        job.setOutputValueClass(NullWritable.class);
        // TODO 6. Set the input and output paths
        FileInputFormat.setInputPaths(job, new Path("E:\\input\\"));
        FileOutputFormat.setOutputPath(job, new Path("E:\\output"));
        // TODO 7. Submit
        boolean b = job.waitForCompletion(true);
        System.exit(b ? 0 : 1);
    }
}
Run result:
Map Join
Map Join suits the scenario where one table is very small and the other is very large. The small table is shipped to every map task, so the join finishes entirely on the map side and the big table never has to be shuffled to a reducer. Both steps appear in the code below:
- In the Mapper's setup phase, read the cached small file into an in-memory collection
- In the Driver class, register the small file as a cache file
Example
Writing the MapJoinMapper class
package MapJoin;

import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.URI;
import java.util.HashMap;
import java.util.Map;

/**
 * @author 公羽
 * date: 2021/8/21
 * desc: joins the big order table against the cached pd table on the map side
 */
public class MapJoinMapper extends Mapper<LongWritable, Text, Text, NullWritable> {

    private Map<String, String> pdMap = new HashMap<>();
    private Text text = new Text();

    // Cache the pd data into pdMap before the task starts
    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
        // Get the small table pd.txt from the cache files
        URI[] cacheFiles = context.getCacheFiles();
        Path path = new Path(cacheFiles[0]);
        // Get a FileSystem object and open the stream
        FileSystem fileSystem = FileSystem.get(context.getConfiguration());
        FSDataInputStream fis = fileSystem.open(path);
        // Wrap the stream in a reader so we can read line by line
        BufferedReader reader = new BufferedReader(new InputStreamReader(fis, "UTF-8"));
        // Read and process one line at a time
        String line;
        while (StringUtils.isNotEmpty(line = reader.readLine())) {
            String[] split = line.split("\t");
            pdMap.put(split[0], split[1]);
        }
        // Close the stream
        IOUtils.closeStream(reader);
    }

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // Read a line of the big table
        String[] fields = value.toString().split("\t");
        // Look up the pname for this line's pid in pdMap
        String pname = pdMap.get(fields[1]);
        // Replace the line's pid with pname
        text.set(fields[0] + "\t" + pname + "\t" + fields[2]);
        // Write out
        context.write(text, NullWritable.get());
    }
}
Writing the MapJoinDriver class
package MapJoin;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

/**
 * @author 公羽
 * date: 2021/8/21
 * desc: wires up the map-side join job; no reducer is needed
 */
public class MapJoinDriver {
    public static void main(String[] args) throws IOException, URISyntaxException, ClassNotFoundException, InterruptedException {
        // TODO 1. Get the configuration and create the job
        Configuration configuration = new Configuration();
        Job job = Job.getInstance(configuration);
        // TODO 2. Set the jar by class
        job.setJarByClass(MapJoinDriver.class);
        // TODO 3. Set the mapper class (map-only job, so no reducer)
        job.setMapperClass(MapJoinMapper.class);
        // TODO 4. Set the map output types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(NullWritable.class);
        // TODO 5. Set the final output key/value types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(NullWritable.class);
        // Load the small table into the distributed cache
        job.addCacheFile(new URI("file:///E:/input/pd.txt"));
        // Map Join needs no reduce phase
        job.setNumReduceTasks(0);
        // TODO 6. Set the input and output paths
        FileInputFormat.setInputPaths(job, new Path("E:\\input1"));
        FileOutputFormat.setOutputPath(job, new Path("E:\\output"));
        // TODO 7. Submit
        boolean b = job.waitForCompletion(true);
        System.exit(b ? 0 : 1);
    }
}