If too many table records have to be joined on the Reduce side, data skew occurs easily. Instead, we can cache the small table on the Map side and apply the join logic there in advance, which relieves the Reduce side of that data and mitigates the skew.
Map Join suits scenarios where one table is very small and the other is very large.
In the Mapper's setup (initialization) method, the cached file is loaded into an in-memory collection; the file itself is registered with the distributed cache in the Driver:
job.addCacheFile(new URI("file:///D:/input/tablecache/pd.txt"));
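For reference, the code below assumes two tab-separated input files. The layouts follow from how the fields are used in the Mapper; the sample rows here are purely illustrative and not taken from the source:

pd.txt (product id, product name):
01	ProductA
02	ProductB

order.txt (order id, product id, amount):
1001	01	1
1002	02	2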
The code is as follows:
Create MapJoinMapper (no Reducer is needed):
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.URI;
import java.util.HashMap;

public class MapJoinMapper extends Mapper<LongWritable, Text, Text, NullWritable> {

    private final HashMap<String, String> pdMap = new HashMap<>();
    private final Text outK = new Text();

    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
        // Open the cached file (pd.txt) and load its contents into the map
        URI[] cacheFiles = context.getCacheFiles();
        FileSystem fs = FileSystem.get(context.getConfiguration());
        FSDataInputStream fis = fs.open(new Path(cacheFiles[0]));

        // Read the stream line by line
        BufferedReader reader = new BufferedReader(new InputStreamReader(fis, "UTF-8"));
        String line;
        while (StringUtils.isNotEmpty(line = reader.readLine())) {
            // Split the line and store pid -> pname
            String[] fields = line.split("\t");
            pdMap.put(fields[0], fields[1]);
        }

        // Close the stream
        IOUtils.closeStream(reader);
    }

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // Process one line of order.txt
        String line = value.toString();
        String[] fields = line.split("\t");

        // Look up the product name by pid
        String pname = pdMap.get(fields[1]);

        // Emit: order id \t product name \t amount
        outK.set(fields[0] + "\t" + pname + "\t" + fields[2]);
        context.write(outK, NullWritable.get());
    }
}
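With inputs shaped like the illustrative rows above, each map() call emits one already-joined line in the form order id, product name, amount separated by tabs (e.g. 1001	ProductA	1). Because the join is completed on the Map side, the map output is the final output.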
Create MapJoinDriver. The two key additions compared with an ordinary Driver are loading the cache file and setting the number of reduce tasks to 0:
// Load the cached file
job.addCacheFile(new URI("file:///D:/input/tablecache/pd.txt"));
// Map-side join needs no Reduce phase, so set the number of reduce tasks to 0
job.setNumReduceTasks(0);
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

public class MapJoinDriver {

    public static void main(String[] args)
            throws IOException, URISyntaxException, ClassNotFoundException, InterruptedException {
        // 1 Get the job instance
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);

        // 2 Set the jar path
        job.setJarByClass(MapJoinDriver.class);

        // 3 Associate the Mapper
        job.setMapperClass(MapJoinMapper.class);

        // 4 Set the Map output key/value types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(NullWritable.class);

        // 5 Set the final output key/value types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(NullWritable.class);

        // Load the cached file
        job.addCacheFile(new URI("file:///D:/input/tablecache/pd.txt"));

        // Map-side join needs no Reduce phase, so set the number of reduce tasks to 0
        job.setNumReduceTasks(0);

        // 6 Set the input and output paths
        FileInputFormat.setInputPaths(job, new Path("D:\\input\\inputtable2"));
        FileOutputFormat.setOutputPath(job, new Path("D:\\hadoop\\output8888"));

        // 7 Submit the job
        boolean b = job.waitForCompletion(true);
        System.exit(b ? 0 : 1);
    }
}
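One caveat on the cache path: a file:/// URI like the one above only works when the job runs locally (for example, directly from the IDE on Windows). When the job is submitted to a cluster, the cached small table must be readable by every node, so it is typically uploaded to HDFS first and referenced by an HDFS URI, roughly as sketched below; the namenode address and path are assumptions for illustration, not from the source.

// Assumed HDFS location of the small table when running on a cluster
job.addCacheFile(new URI("hdfs://namenode:8020/cache/pd.txt"));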