In a reduce-side join, the join key is used as the map output key, and each record that satisfies the join condition is tagged with the file it came from. Records from both tables that share a key are routed to the same reduce task, where the data is stitched together.
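For concreteness, suppose the two inputs look like the rows below (hypothetical sample data; only the comma-separated layout matters, and it is what the code that follows assumes: the product ID is field 1 of product.txt and field 3 of orders.txt):

product.txt  (productId,productName,category,price)
p0001,xiaomi,c1,2000
p0002,apple,c2,5000

orders.txt   (orderId,date,productId,amount)
1001,20150710,p0001,2
1002,20150710,p0002,3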
Step 1: Define the Mapper:
package com.hadoop;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;

import java.io.IOException;

public class ReduceMapper extends Mapper<LongWritable, Text, Text, Text> {
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // 1. Determine which file this record came from
        FileSplit fileSplit = (FileSplit) context.getInputSplit();
        String fileName = fileSplit.getPath().getName();
        String[] split = value.toString().split(",");
        if (fileName.equals("product.txt")) {
            // Record from the product table: the product ID is the first field
            String productId = split[0];
            context.write(new Text(productId), value);
        } else {
            // Record from the order table: the product ID is the third field
            String productId = split[2];
            context.write(new Text(productId), value);
        }
    }
}
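With the hypothetical sample above, this mapper emits (p0001, "p0001,xiaomi,c1,2000") for the product line and (p0001, "1001,20150710,p0001,2") for the matching order line; because both pairs share the key p0001, the shuffle delivers them to the same reduce call.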
Step 2: Define the Reducer
package com.hadoop;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class ReduceJoinReducer extends Reducer<Text, Text, Text, Text> {
    @Override
    protected void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
        // 1. Iterate over the grouped values and separate the two sides of the join
        String first = "";
        String second = "";
        for (Text value : values) {
            // Product lines start with the product ID (e.g. "p0001"); order lines start with a numeric order ID
            if (value.toString().startsWith("p")) {
                first = value.toString();
            } else {
                second = value.toString();
            }
        }
        // 2. Write the joined key and value (K3, V3) to the context
        context.write(key, new Text(first + "\t" + second));
    }
}
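Note two fragilities in this reducer: startsWith("p") works only because product IDs happen to begin with "p", and the single second variable keeps just the last order when several orders match one product. A more robust variant is sketched below (hypothetical class TaggedJoinReducer, not part of the original code; it assumes the mapper prefixes each value with an explicit "P#" or "O#" tag):

package com.hadoop;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

// Hypothetical variant: assumes the mapper writes values tagged
// "P#<line>" for products and "O#<line>" for orders.
public class TaggedJoinReducer extends Reducer<Text, Text, Text, Text> {
    @Override
    protected void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
        String product = "";
        StringBuilder orders = new StringBuilder();
        for (Text value : values) {
            String v = value.toString();
            if (v.startsWith("P#")) {
                product = v.substring(2);                    // product side: strip the tag
            } else {
                orders.append(v.substring(2)).append("\t");  // keep every matching order
            }
        }
        context.write(key, new Text(product + "\t" + orders.toString().trim()));
    }
}

Explicit tags decouple the join from the shape of the IDs and let the reducer collect every matching order instead of only the last one.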
Step 3: Define the main class
package com.hadoop;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class JobMain extends Configured implements Tool {
    @Override
    public int run(String[] strings) throws Exception {
        // 1. Get the job object
        Job job = Job.getInstance(super.getConf(), "reduceMap");
        // 2. Configure the job
        // Step 1: set the input format class and input path
        job.setInputFormatClass(TextInputFormat.class);
        TextInputFormat.addInputPath(job, new Path("file:///E:\\input\\map_join"));
        // Step 2: set the Mapper class and its output key/value types
        job.setMapperClass(ReduceMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        // Steps 3-6 (partitioning, sorting, combining, grouping) keep the defaults
        // Step 7: set the Reducer class and its output key/value types
        job.setReducerClass(ReduceJoinReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        // Step 8: set the output format class and output path
        job.setOutputFormatClass(TextOutputFormat.class);
        TextOutputFormat.setOutputPath(job, new Path("file:///E:\\out\\map_join_out"));
        // 3. Wait for the job to finish
        boolean b = job.waitForCompletion(true);
        return b ? 0 : 1;
    }

    public static void main(String[] args) throws Exception {
        Configuration configuration = new Configuration();
        int run = ToolRunner.run(configuration, new JobMain(), args);
        System.exit(run);
    }
}
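With the hypothetical sample rows above, each output line would hold the key, the product line, and the order line separated by tabs, e.g. "p0001", then "p0001,xiaomi,c1,2000", then "1001,20150710,p0001,2". Since the driver runs through ToolRunner, generic options (for example -D mapreduce.job.reduces=2) can be passed on the command line without touching the code.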
3. Case Study: Implementing the JOIN on the Map Side
3.1 Overview
This approach is suitable when one of the tables being joined is small. Using the distributed cache, the small table can be distributed to every map node; each map task can then join the big-table records it reads locally and emit the final result directly, which greatly increases the parallelism of the join and speeds up processing.
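The pattern rests on two API calls, both visible in the code below: the driver registers the small file with job.addCacheFile(...), and each map task retrieves it once via context.getCacheFiles() in its setup() method.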
3.2 Implementation Steps
First, preload the small table inside the mapper class and perform the join there. This mirrors the solution used in real scenarios: load the lookup data once, from a database or via the distributed cache.
Step 1: Define the Mapper:
package com.hadoop;

import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.URI;
import java.util.HashMap;

public class ReduceMap extends Mapper<LongWritable, Text, Text, Text> {

    private HashMap<String, String> map = new HashMap<String, String>();

    // First task: read the small table from the distributed cache into a local
    // Map collection. This only needs to happen once per map task.
    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
        // 1. Get the list of distributed-cache files
        URI[] cacheFiles = context.getCacheFiles();
        // 2. Get the file system the cached file lives on
        FileSystem fileSystem = FileSystem.get(cacheFiles[0], context.getConfiguration());
        // 3. Open an input stream on the file
        FSDataInputStream inputStream = fileSystem.open(new Path(cacheFiles[0]));
        // 4. Read the file contents and store them in the Map collection
        // 4.1 Wrap the byte stream in a buffered character reader
        BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(inputStream));
        // 4.2 Read the small file line by line, keyed by product ID
        String line = null;
        while ((line = bufferedReader.readLine()) != null) {
            String[] split = line.split(",");
            map.put(split[0], line);
        }
        bufferedReader.close();
        // Note: FileSystem.get() returns a shared, cached instance, so it is not closed here
    }

    // Second task: process the big table's records and join each one against the small table
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // 1. Extract the product ID (K2) from the line of text
        String[] split = value.toString().split(",");
        String productId = split[2];
        // 2. Look up the matching product record; skip orders with no match (inner-join semantics)
        String productLine = map.get(productId);
        if (productLine == null) {
            return;
        }
        String valueLine = productLine + "\t" + value.toString();
        // 3. Write K2 and V2 to the context
        context.write(new Text(productId), new Text(valueLine));
    }
}
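Two design points are worth noting: setup() runs once per map task, so the small table is read a single time regardless of how many big-table records the task processes; and because the entire product table is held in an in-heap HashMap, this approach only works while the small table genuinely fits in each mapper's memory.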
Step 2: Define the main class:
package com.hadoop;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

import java.net.URI;

public class JobMain extends Configured implements Tool {
    @Override
    public int run(String[] strings) throws Exception {
        // 1. Get the job object
        Job job = Job.getInstance(super.getConf(), "hashJob");
        // 2. Configure the job
        // Put the small table into the distributed cache
        job.addCacheFile(new URI("hdfs://192.168.154.100:8020/cache_file/product.txt"));
        // Step 1: set the input format class and input path
        job.setInputFormatClass(TextInputFormat.class);
        TextInputFormat.addInputPath(job, new Path("file:///E:\\input\\mapper_join_hashmap"));
        // Step 2: set the Mapper class and its output key/value types
        job.setMapperClass(ReduceMap.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        // The join completes in the map phase, so the reduce phase can be disabled
        job.setNumReduceTasks(0);
        // Step 8: set the output format class and output path
        job.setOutputFormatClass(TextOutputFormat.class);
        TextOutputFormat.setOutputPath(job, new Path("file:///E:\\out\\mapper_join_out_hashmap"));
        // 3. Wait for the job to finish
        boolean b = job.waitForCompletion(true);
        return b ? 0 : 1;
    }

    public static void main(String[] args) throws Exception {
        Configuration configuration = new Configuration();
        int run = ToolRunner.run(configuration, new JobMain(), args);
        System.exit(run);
    }
}
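Unlike the reduce-side version, this driver configures no Reducer: the join completes in the map phase, and with job.setNumReduceTasks(0) each mapper's output is written straight to the output directory, skipping the shuffle and sort entirely.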