1) Use case
Map Join is suited to scenarios where one table is very small and the other is very large.
2) Advantages
Consider: joining too many tables on the Reduce side makes data skew very likely. What can be done?
Cache the small table(s) on the Map side and apply the join logic there ahead of time. This adds work to the Map side, relieves the Reduce side of data pressure, and minimizes data skew as far as possible.
3) Concrete approach: use DistributedCache
(1) In the Mapper's setup phase, read the cached file into an in-memory collection.
(2) In the Driver class, load the file into the cache:
// Cache a regular file on the nodes where the tasks run
job.addCacheFile(new URI("file:///e:/cache/pd.txt"));
// If running on a cluster, use an HDFS path instead
job.addCacheFile(new URI("hdfs://hadoop102:9820/cache/pd.txt"));
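Note that for the cluster case, pd.txt must already exist in HDFS; it would typically be uploaded beforehand (for example, with hdfs dfs -put pd.txt /cache). The file:/// form only works when the job runs locally.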
Map Join Hands-On Example
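The example assumes two tab-separated inputs (inferred from the sample lines quoted in the code comments below): a small product table pd.txt whose lines look like 01\t小米 (pid \t pname), and a large order table whose lines look like 1001\t01\t1, where the second field is the pid to join on.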
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
/**
 * @author zrr
 * @Create 2020/7/24 20:29
 */
public class MapjoinDriver {

    public static void main(String[] args) throws Exception {
        // 1 Get the Job instance
        Configuration configuration = new Configuration();
        Job job = Job.getInstance(configuration);
        // 2 Set the jar by class
        job.setJarByClass(MapjoinDriver.class);
        // 3 Associate the Mapper
        job.setMapperClass(MapJoinMapper.class);
        // 4 Set the Map output key/value types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(NullWritable.class);
        // 5 Set the final output key/value types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(NullWritable.class);
        // Load the cached data
        job.addCacheFile(new URI("file:///D:/input/inputcache/pd.txt")); // the small table
        // Map-side join needs no Reduce phase, so set the number of ReduceTasks to 0
        job.setNumReduceTasks(0);
        // 6 Set the input and output paths
        FileInputFormat.setInputPaths(job, new Path("D:\\input\\inputtable2")); // the large table as input
        FileOutputFormat.setOutputPath(job, new Path("D:\\hadoop\\mapjoin077887"));
        // 7 Submit the job
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
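Because the number of ReduceTasks is 0, this is a map-only job: each map task writes its output directly as part-m-xxxxx files, with no shuffle or sort. With the sample data above, the input record 1001\t01\t1 would be emitted as 1001\t小米\t1.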
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.URI;
import java.util.HashMap;
/**
 * @author zrr
 * @Create 2020/7/24 20:30
 */
public class MapJoinMapper extends Mapper<LongWritable, Text, Text, NullWritable> {

    private final HashMap<String, String> pdMap = new HashMap<>();
    private final Text outK = new Text();

    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
        // Get the cache files added in the Driver
        URI[] cacheFiles = context.getCacheFiles();
        // Get a FileSystem object and open the cached file
        FileSystem fs = FileSystem.get(context.getConfiguration());
        FSDataInputStream fis = fs.open(new Path(cacheFiles[0]));
        BufferedReader reader = new BufferedReader(new InputStreamReader(fis, "UTF-8"));
        String line;
        // Each line of pd.txt looks like: 01\t小米 (pid \t pname)
        while (StringUtils.isNotEmpty(line = reader.readLine())) {
            String[] split = line.split("\t");
            pdMap.put(split[0], split[1]);
        }
        IOUtils.closeStream(reader);
        // fs.close(); // not closed on purpose: FileSystem.get() returns a shared, cached instance
    }

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // Read one line of the large table, e.g.: 1001\t01\t1
        String line = value.toString();
        // Split the line into fields
        String[] split = line.split("\t");
        // Use the pid (second field) to look up the pname in pdMap
        String pname = pdMap.get(split[1]);
        // Assemble the output: replace the pid with the pname
        outK.set(split[0] + "\t" + pname + "\t" + split[2]);
        // Write out; no value is needed
        context.write(outK, NullWritable.get());
    }
}
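One caveat worth noting: if an order references a pid that is absent from pd.txt, pdMap.get returns null and the output line would contain the literal string "null". A minimal defensive variant (a sketch, not part of the original example; the "NA" placeholder is an arbitrary choice) would substitute a default value instead:
// Sketch: fall back to a placeholder when the pid is absent from the cached table
String pname = pdMap.getOrDefault(split[1], "NA");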