import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Vector;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
/**
 * Reduce-side join of two text inputs: "dianxin" call records (tab-separated,
 * 8 columns) and a "city_id" lookup table (comma-separated). Records are
 * matched on the city id and the reducer emits the Cartesian product of the
 * matching rows from each side.
 *
 * <p>Usage: DX_Demo2 &lt;dianxin input&gt; &lt;city_id input&gt; &lt;output&gt;
 */
public class DX_Demo2 {

    /**
     * Tags each input record by the file it came from, then emits the join key
     * (city id) as the map output key and the tagged payload as the value.
     */
    public static class Mymap extends Mapper<LongWritable, Text, Text, Text> {

        // Reused Writable instances — standard Hadoop idiom to avoid
        // allocating a new Text per record.
        private final Text outKey = new Text();
        private final Text outValue = new Text();

        @Override
        protected void map(LongWritable k1, Text v1, Context context)
                throws IOException, InterruptedException {
            // Decide which table this record belongs to from its file path.
            FileSplit inputSplit = (FileSplit) context.getInputSplit();
            String path = inputSplit.getPath().toString();

            if (path.contains("dianxin")) {
                // Tab-separated; only well-formed 8-column rows take part in the join.
                String[] split = v1.toString().split("\t");
                if (split.length == 8) {
                    // Column 2 is the join key (city id); keep columns 0,1,3,4 as payload.
                    outKey.set(split[2]);
                    outValue.set("dianxin" + split[0] + "," + split[1] + ","
                            + split[3] + "," + split[4]);
                    context.write(outKey, outValue);
                }
            }
            if (path.contains("city_id")) {
                // Comma-separated lookup rows: <city_id>,<city_name>.
                String[] splits = v1.toString().split(",");
                // Guard against malformed rows (original code could throw
                // ArrayIndexOutOfBoundsException on a line without a comma).
                if (splits.length >= 2) {
                    outKey.set(splits[0]);
                    outValue.set("city_id" + splits[1]);
                    context.write(outKey, outValue);
                }
            }
        }
    }

    /**
     * Buffers both sides of the join for each key, then writes the Cartesian
     * product of the "dianxin" values with the "city_id" values — an inner
     * join on the map output key.
     */
    public static class Myreduce extends Reducer<Text, Text, Text, Text> {

        // Reused output value to avoid per-pair allocation.
        private final Text outValue = new Text();

        @Override
        protected void reduce(Text k2, Iterable<Text> v2s, Context context)
                throws IOException, InterruptedException {
            // ArrayList replaces the legacy synchronized Vector — no
            // concurrent access happens inside a single reduce() call.
            List<String> dianxinSide = new ArrayList<>();
            List<String> citySide = new ArrayList<>();
            for (Text t : v2s) {
                String s = t.toString();
                if (s.startsWith("dianxin")) {
                    dianxinSide.add(s);
                } else if (s.startsWith("city_id")) {
                    citySide.add(s);
                }
            }
            // Inner join: emit every pairing of the two sides for this key.
            for (String a : dianxinSide) {
                for (String b : citySide) {
                    outValue.set(a + b);
                    context.write(k2, outValue);
                }
            }
        }
    }

    /**
     * Configures and submits the join job.
     *
     * @param args args[0]/args[1] are the two input paths (dianxin and
     *             city_id), args[2] is the output path
     */
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        // Fail fast with a usage message instead of an ArrayIndexOutOfBoundsException.
        if (args.length < 3) {
            System.err.println("Usage: DX_Demo2 <dianxin input> <city_id input> <output>");
            System.exit(2);
        }
        Configuration conf = new Configuration();
        conf.set("mapreduce.job.queuename", "sparktest");
        Job job = Job.getInstance(conf, DX_Demo2.class.getSimpleName());
        job.setJarByClass(DX_Demo2.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileInputFormat.addInputPath(job, new Path(args[1]));
        job.setMapperClass(Mymap.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        job.setReducerClass(Myreduce.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        FileOutputFormat.setOutputPath(job, new Path(args[2]));
        // Propagate job success/failure to the shell (original always exited 0).
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
/*
 * Notes:
 * - The mapper inspects the input file name to decide which table a record
 *   belongs to; the field shared by both tables (the city id) becomes the map
 *   output key, and the fields to be joined are tagged and written as the value.
 * - For each key, the reducer takes the Cartesian product of the values from
 *   the two tables and writes the joined rows. (No deduplication is performed
 *   by this code, despite the original note claiming it.)
 * - main requires three path arguments: the two input file paths and one
 *   output path for the result.
 */