Case study:
Requirement: we need to work with a product table and an order table, and work out the orders belonging to each product in the product table.
Raw data:
//Product table: commodity.txt
p0001,小米5,1000,2000
p0002,锤子T1,1000,3000
p0003,华为,1000,5000
p0004,红米K30,1999,2500
//Order table: order.txt
1001,20150710,p0001,2
1002,20150710,p0002,3
1003,20110501,p0004,5
1004,20200502,p0003,9
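For reference, the fields appear to be product id, product name, category id, and price in commodity.txt, and order id, order date, product id, and quantity in order.txt; the join key is the product id, which sits in column 0 of the product table and column 2 of the order table.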
Method 1: JOIN on the Reduce side
Analysis: use the join key as the map output key, tag each record that satisfies the join condition with the file it came from, and send both tables' records to the same reduce task, where the actual joining of the data happens.
Step 1: Define the Mapper
package org.example.mapreduce.ReduceJoin;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;

import java.io.IOException;

public class ReduceJoinMapper extends Mapper<LongWritable, Text, Text, Text> {
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        //Approach 1:
        //Determine which file the record comes from via the input split
        FileSplit fileSplit = (FileSplit) context.getInputSplit();
        //Get the file name
        String fileName = fileSplit.getPath().getName();
        //Split the current line on commas
        String[] split = value.toString().split(",");
        if (fileName.equals("commodity.txt")) {
            //The record comes from the product table
            //Turn K1,V1 into K2,V2 (keyed by product id, column 0) and write to the context
            context.write(new Text(split[0]), value);
        } else {
            //The record comes from the order table
            //Turn K1,V1 into K2,V2 (keyed by product id, column 2) and write to the context
            context.write(new Text(split[2]), value);
        }
        //Approach 2: decide by the line prefix instead (product ids start with "p")
        // String[] split = value.toString().split(",");
        // if (value.toString().startsWith("p")) {
        //     context.write(new Text(split[0]), value);
        // } else {
        //     //Turn K1,V1 into K2,V2 and write to the context
        //     context.write(new Text(split[2]), value);
        // }
    }
}
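As a design note: instead of inspecting the file name inside one mapper, Hadoop's MultipleInputs class (org.apache.hadoop.mapreduce.lib.input.MultipleInputs) can bind a dedicated mapper to each input path. A minimal sketch of the driver-side wiring, where CommodityMapper and OrderMapper are hypothetical mappers, one per table:

//Sketch: replaces setInputFormatClass, addInputPath and setMapperClass in run().
//Each table gets its own mapper, so no file-name check is needed inside map().
MultipleInputs.addInputPath(job,
        new Path("file:///C:\\Myprogram\\IN\\reduce_join\\commodity.txt"),
        TextInputFormat.class, CommodityMapper.class);
MultipleInputs.addInputPath(job,
        new Path("file:///C:\\Myprogram\\IN\\reduce_join\\order.txt"),
        TextInputFormat.class, OrderMapper.class);

One caveat: with MultipleInputs the input split handed to a mapper is a wrapper type, so the (FileSplit) cast used in Approach 1 above would throw a ClassCastException; the per-path mappers make that check unnecessary anyway.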
Step 2: Define the Reducer
package org.example.mapreduce.ReduceJoin;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class ReduceJoinReduce extends Reducer<Text, Text, Text, Text> {
    @Override
    protected void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
        String first = "";
        String second = "";
        for (Text value : values) {
            if (value.toString().startsWith("p")) {
                //Records starting with "p" come from the product table
                first = value.toString();
            } else {
                //A product may have several orders, so append each order record,
                //separated by tabs
                second += value.toString() + "\t";
            }
        }
        context.write(key, new Text(first + "\t" + second));
    }
}
Step 3: Define the driver class
package org.example.mapreduce.ReduceJoin;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class ReduceJoinMain extends Configured implements Tool {
    @Override
    public int run(String[] strings) throws Exception {
        //Create a job object
        Job job = Job.getInstance(super.getConf(), "Reduce_join");
        //Configure the job
        //Specify how and from where the input is read
        job.setInputFormatClass(TextInputFormat.class);
        TextInputFormat.addInputPath(job, new Path("file:///C:\\Myprogram\\IN\\reduce_join"));
        //Specify the mapper class
        job.setMapperClass(ReduceJoinMapper.class);
        //Specify the K2 type emitted at the end of the map phase
        job.setMapOutputKeyClass(Text.class);
        //Specify the V2 type emitted at the end of the map phase
        job.setMapOutputValueClass(Text.class);
        //Specify the reducer class
        job.setReducerClass(ReduceJoinReduce.class);
        //Specify the K3 type emitted at the end of the reduce phase
        job.setOutputKeyClass(Text.class);
        //Specify the V3 type emitted at the end of the reduce phase
        job.setOutputValueClass(Text.class);
        //Set the output format
        job.setOutputFormatClass(TextOutputFormat.class);
        //Set the output path (the directory must not exist yet, or the job fails)
        TextOutputFormat.setOutputPath(job, new Path("file:///C:\\Myprogram\\OUT\\reduce_join_out"));
        boolean bl = job.waitForCompletion(true);
        return bl ? 0 : 1;
    }

    public static void main(String[] args) throws Exception {
        Configuration configuration = new Configuration();
        int run = ToolRunner.run(configuration, new ReduceJoinMain(), args);
        System.exit(run);
    }
}
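Note that because both the input and output paths use the file:/// scheme, the job runs in Hadoop's local job runner, which is convenient for debugging in an IDE; to run on a cluster, point both paths at hdfs:// locations instead.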
Result file:
p0001 p0001,小米5,1000,2000 1001,20150710,p0001,2
p0002 p0002,锤子T1,1000,3000 1002,20150710,p0002,3
p0003 p0003,华为,1000,5000 1004,20200502,p0003,9
p0004 p0004,红米K30,1999,2500 1003,20110501,p0004,5
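Strictly speaking, the original requirement asks for the number of orders per product, and the joined lines above are one step short of that. Below is a minimal variant of the reducer that emits a count instead; this is a sketch rather than part of the original code, and the driver would also need job.setOutputValueClass(LongWritable.class):

package org.example.mapreduce.ReduceJoin;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

//Hypothetical variant: count order records per product id instead of joining lines
public class OrderCountReducer extends Reducer<Text, Text, Text, LongWritable> {
    @Override
    protected void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
        long orders = 0;
        for (Text value : values) {
            //Records that do not start with "p" come from order.txt
            if (!value.toString().startsWith("p")) {
                orders++;
            }
        }
        context.write(key, new LongWritable(orders));
    }
}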
Method 2: JOIN on the Map side
Overview: suitable when one of the joined tables is small. Using the distributed cache, the small table can be shipped to every map node, so each map task joins the big-table records it reads locally and emits the final result directly. This greatly increases the parallelism of the join and speeds up processing.
Analysis: load the small table (the product table) into the Mapper ahead of time, then perform the join there.
Step 1: Define the Mapper
package org.example.mapreduce.MapperJoin;

import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.URI;
import java.util.HashMap;

public class MapperJoinMapper extends Mapper<LongWritable, Text, Text, Text> {
    private HashMap<String, String> map = new HashMap<>();

    //Read the small table from the distributed cache into an in-memory map (runs once per task)
    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
        //Get the list of distributed-cache files
        URI[] cacheFiles = context.getCacheFiles();
        //Get the FileSystem that holds the cached file
        FileSystem fileSystem = FileSystem.get(cacheFiles[0], context.getConfiguration());
        //Open an input stream on the file
        FSDataInputStream inputStream = fileSystem.open(new Path(cacheFiles[0]));
        //Wrap the byte stream in a buffered character reader so it can be read line by line
        BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(inputStream));
        String line = null;
        //Read line by line
        while ((line = bufferedReader.readLine()) != null) {
            String[] split = line.split(",");
            //Store the product line keyed by its product id
            map.put(split[0], line);
        }
        //Close the reader; the FileSystem instance returned by FileSystem.get is
        //cached and shared by Hadoop, so it is not closed here
        bufferedReader.close();
    }

    //Process the big table and join each record against the cached small table
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        String[] split = value.toString().split(",");
        //Product id of this order
        String commodityID = split[2];
        //Product line + order line
        String commodityLine = map.get(commodityID) + "\t" + value.toString();
        context.write(new Text(commodityID), new Text(commodityLine));
    }
}
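As an aside, when the job runs on an actual cluster, cached files are localized and symlinked into each task's working directory under their file name, so setup() can also read the small table with plain java.io instead of going through FileSystem (local-mode runners may not create the symlink, so this is cluster-only). A sketch, assuming the cache URI used in the driver below:

//Alternative setup() body: read the localized cache file via its symlink
@Override
protected void setup(Context context) throws IOException, InterruptedException {
    BufferedReader bufferedReader = new BufferedReader(new java.io.FileReader("commodity.txt"));
    String line = null;
    while ((line = bufferedReader.readLine()) != null) {
        String[] split = line.split(",");
        map.put(split[0], line);
    }
    bufferedReader.close();
}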
Step 2: Define the driver class
package org.example.mapreduce.MapperJoin;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

import java.net.URI;

public class MapperJoinMain extends Configured implements Tool {
    @Override
    public int run(String[] strings) throws Exception {
        //Get a job object
        Job job = Job.getInstance(super.getConf(), "Mapper_Join");
        //Configure the job
        //Put the small table into the distributed cache
        job.addCacheFile(new URI("hdfs://192.168.32.132:8020/MapperJoin/commodity.txt"));
        //Specify the input format and input path (the big table only)
        job.setInputFormatClass(TextInputFormat.class);
        TextInputFormat.addInputPath(job, new Path("file:///C:\\Myprogram\\IN\\mapper_join"));
        //Specify the mapper class and its output types
        job.setMapperClass(MapperJoinMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        //Specify the output format and output path
        job.setOutputFormatClass(TextOutputFormat.class);
        TextOutputFormat.setOutputPath(job, new Path("file:///C:\\Myprogram\\OUT\\mapper_join_out"));
        boolean bl = job.waitForCompletion(true);
        return bl ? 0 : 1;
    }

    public static void main(String[] args) throws Exception {
        Configuration configuration = new Configuration();
        int run = ToolRunner.run(configuration, new MapperJoinMain(), args);
        System.exit(run);
    }
}
Result file:
p0001 p0001,小米5,1000,2000 1001,20150710,p0001,2
p0002 p0002,锤子T1,1000,3000 1002,20150710,p0002,3
p0003 p0003,华为,1000,5000 1004,20200502,p0003,9
p0004 p0004,红米K30,1999,2500 1003,20110501,p0004,5
Perfect, time to throw the confetti!