MapReduce in Hadoop is a programming model for parallel computation over large-scale data sets. This post belongs to my MapReduce blog series, and the posts go down best when read together!
I. Drawbacks of Reduce Join and Advantages of Map Join
In the previous post, Reduce Join was used to analyze and merge two tables, with the actual merging done in the Reduce phase. On a cluster, the number of ReduceTasks is smaller than the number of MapTasks, so the Reduce side bears too much of the processing load while the Map nodes carry very little computation; resource utilization is poor, and the Reduce phase is highly prone to data skew.
The remedy is to perform the merge on the Map side. Map Join suits the scenario where one table is very small and the other is very large: the small table(s) are cached on the Map side and the business logic is handled there ahead of time. This shifts work onto the Map side, relieves the data pressure on the Reduce side, and reduces data skew as much as possible.
II. Steps to Implement Map Join
Use DistributedCache (the distributed cache):
- In the Mapper's setup() initialization phase, read the cached file into an in-memory collection.
- In the Driver class, register the cache file:
//Cache a local (Windows) file onto the node running the Task
job.addCacheFile(new URI("file:///<real path of the file>"));
//If the job runs on a cluster, use an HDFS path instead
job.addCacheFile(new URI("hdfs://hadoop102:8020/<path of the file>"));
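For reference when reading the case study below: it reuses the two tables from the Reduce Join post, whose contents are not repeated here. Judging from the field indexes used in the Mapper, the tab-separated layouts look roughly like this (the sample values are made-up placeholders, not the original data):
pd.txt (small product table):    pid  pname            e.g.  01    xiaomi
order file (large order table):  id  pid  amount       e.g.  1001  01  1
joined output:                   id  pname  amount     e.g.  1001  xiaomi  1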
III. Hands-on Case Study
1. The Driver class
package com.wzq.mapreduce.mapjoin;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
public class TableDriver {
    public static void main(String[] args) throws IOException, URISyntaxException, InterruptedException, ClassNotFoundException {
        Job job = Job.getInstance(new Configuration());
        job.setJarByClass(TableDriver.class);
        job.setMapperClass(TableMapper.class);

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(NullWritable.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(NullWritable.class);

        //Map Join needs no Reduce phase: set the number of ReduceTasks to 0,
        //so each MapTask writes its joined records straight to the output
        job.setNumReduceTasks(0);

        //Register the cache file (the small table pd.txt)
        job.addCacheFile(new URI("file:///D:/BigData_workspace/input/tablecache/pd.txt"));

        FileInputFormat.setInputPaths(job, new Path("D:\\BigData_workspace\\input\\inputtable2"));
        FileOutputFormat.setOutputPath(job, new Path("D:\\BigData_workspace\\output\\outputtable2"));

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
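Note that this driver is configured for a local run on Windows: both the cached pd.txt and the input/output paths point at the local file system. For a cluster run, as noted in section II, the cache file and the input/output locations would be HDFS paths instead, and the packaged jar would be submitted with the usual hadoop jar command.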
2. The Mapper class
package com.wzq.mapreduce.mapjoin;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.URI;
import java.util.HashMap;
public class TableMapper extends Mapper<LongWritable, Text, Text, NullWritable> {

    private HashMap<String, String> pd = new HashMap<>();
    private Text outK = new Text();

    //Before any map() call, read the cached small table into an in-memory map
    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
        URI[] cacheFiles = context.getCacheFiles();
        Path path = new Path(cacheFiles[0]);

        //Get the file system object and open an input stream
        FileSystem fs = FileSystem.get(context.getConfiguration());
        FSDataInputStream fis = fs.open(path);

        //Wrap the stream in a reader so the file can be read line by line
        BufferedReader reader = new BufferedReader(new InputStreamReader(fis, "UTF-8"));

        String line;
        while (StringUtils.isNotEmpty(line = reader.readLine())) {
            String[] split = line.split("\t");
            //index 0 is the pid, index 1 is the product name
            pd.put(split[0], split[1]);
        }

        //Close the stream
        IOUtils.closeStream(reader);
    }

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        //Read one line of the big (order) table
        String[] fields = value.toString().split("\t");

        //Look up the product name for this line's pid in the cached pd map
        String pname = pd.get(fields[1]);

        //Assemble the joined record: order id, product name, amount
        outK.set(fields[0] + "\t" + pname + "\t" + fields[2]);

        //Write it out
        context.write(outK, NullWritable.get());
    }
}
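Each line the Mapper writes has the form order id \t product name \t amount. Because the job runs with zero ReduceTasks, there is no shuffle or reduce phase and the joined lines go directly into part-m-XXXXX files in the output directory. One caveat: if a pid from the order file has no match in the cached pd table, pd.get(...) returns null and the literal string "null" ends up in place of the product name.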
Test results: