Hadoop single-table and multi-table joins

Here is a supplementary example of a single-table join:

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import com.google.common.collect.Lists;

public class Single {

    private static class SingleMapper extends
            Mapper<LongWritable, Text, Text, Text> {

        @Override
        protected void map(LongWritable key, Text value,
                Mapper<LongWritable, Text, Text, Text>.Context context)
                throws IOException, InterruptedException {

            String string = value.toString();
            // skip the header line ("child parent")
            if (!string.contains("child")) {

                String[] strings = string.split(" ");
                // key = child, value = its parent tagged ":1"
                context.write(new Text(strings[0]), new Text(strings[1] + ":1"));
                // key = parent, value = its child tagged ":2"
                context.write(new Text(strings[1]), new Text(strings[0] + ":2"));

            }
        }
    }

    // reduce is invoked once for each distinct key
    private static class SingleReduce extends Reducer<Text, Text, Text, Text> {

        @Override
        protected void reduce(Text key, Iterable<Text> values,
                Reducer<Text, Text, Text, Text>.Context context)
                throws IOException, InterruptedException {

            List<String> left = Lists.newArrayList();
            List<String> right = Lists.newArrayList();

            for (Text value : values) {

                String[] strings = value.toString().split(":");

                // tag "1" marks a parent of the key, tag "2" marks a child of the key
                if (strings[1].equals("1")) {
                    right.add(strings[0]);
                } else {
                    left.add(strings[0]);
                }
            }

            // Cartesian product: (child of key, parent of key) = (grandchild, grandparent)
            for (String lef : left) {
                for (String rig : right) {
                    context.write(new Text(lef), new Text(rig));
                }
            }

        }

    }

    public static void main(String[] args) throws IOException,
            ClassNotFoundException, InterruptedException {

        // HadoopConfig is a helper from the original project that supplies the cluster Configuration
        Configuration configuration = HadoopConfig.getConfiguration();
        Job job = Job.getInstance(configuration, "Single Table Join");

        job.setJarByClass(Single.class);
        job.setMapperClass(SingleMapper.class);

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        job.setReducerClass(SingleReduce.class);
        FileInputFormat.addInputPath(job, new Path("/data"));
        FileOutputFormat.setOutputPath(job, new Path("/single"));
        job.waitForCompletion(true);

    }
}
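
For reference, here is a hypothetical input file in the "child parent" format that SingleMapper expects, together with the output the job would produce (the names are made up purely for illustration):

    Input (space-separated, first line is the header):

    child parent
    Tom Lucy
    Tom Jack
    Lucy Mary
    Lucy Ben

    Expected output (grandchild, grandparent; tab-separated by the default TextOutputFormat):

    Tom Mary
    Tom Ben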

Here is a supplementary example of a multi-table join:

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class MTjoin {

    public static class Map extends Mapper<LongWritable, Text, Text, Text>{
        private static Text k = new Text();
        private static Text v = new Text();

        protected void map(LongWritable key, Text value, Context context)
                throws java.io.IOException ,InterruptedException {
            String[] splits = value.toString().split("\t");
            if(splits.length != 2){
                return ;
            }

            // get the input file name: a.txt holds (factory name, id), b.txt holds (id, address)
            String fileName = ((FileSplit)context.getInputSplit()).getPath().getName();
            if("a.txt".equals(fileName)){
                k.set(splits[1]);
                v.set("1"+splits[0]);
            }else if("b.txt".equals(fileName)){
                k.set(splits[0]);
                v.set("2"+splits[1]);
            }else{
                return ;
            }
            context.write(k, v);
        };
    }
    public static class Reduce extends Reducer<Text, Text, Text, Text>{
        private static List<String> names = new ArrayList<String>();
        private static List<String> addrs = new ArrayList<String>();
        private static Text name = new Text();
        private static Text addr = new Text();

        protected void reduce(Text key, Iterable<Text> values, Context context)
                throws java.io.IOException ,InterruptedException {
            for (Text value : values) {
                String temp = value.toString();
                if(temp.startsWith("1")){
                    names.add(temp.substring(1));
                }else{
                    addrs.add(temp.substring(1));
                }
            }
            for (String n : names) {
                for (String a : addrs) {
                    name.set(n);
                    addr.set(a);
                    context.write(name, addr);
                }
            }
            names.clear();
            addrs.clear();
        };
    }
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        String[] otherArgs = new GenericOptionsParser(conf,args).getRemainingArgs();
        if(otherArgs.length != 2){
            System.err.println("Usage:MTjoin");
            System.exit(2);
        }
        Job job = new Job(conf, "MTjoin");
        job.setJarByClass(MTjoin.class);

        job.setMapperClass(Map.class);
        job.setReducerClass(Reduce.class);

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        System.exit(job.waitForCompletion(true) ? 0 : 1);

    }
}
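
A hypothetical pair of input files for this job (columns are tab-separated, matching the split("\t") in the mapper; the contents are made up) and the joined output:

    a.txt (factory name, id):
    Beijing Red Star    1
    Guangzhou Honda     2

    b.txt (id, address):
    1    Beijing
    2    Guangzhou

    Expected output (factory name, address):
    Beijing Red Star    Beijing
    Guangzhou Honda     Guangzhou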

Design approach (single-table join)

Analysing this example, it clearly calls for a single-table join: the parent column of the left table is joined with the child column of the right table, and the left table and the right table are the same table.
Dropping the two join columns from the join result leaves exactly the output we need. To solve this with MapReduce, we first have to work out how to express the self-join, then how to set up the join key, and finally how to assemble the result.
Since the MapReduce shuffle groups records with the same key together, we can make the join column the key of the map output; rows with equal values in that column are then brought together automatically. Connecting this with the analysis above:

We are joining the parent column of the left table with the child column of the right table, and both are the same table. So in the map phase, after splitting a line into child and parent, the mapper emits the pair once with parent as the key and child as the value, playing the role of the left table, and once more with child as the key and parent as the value, playing the role of the right table. To tell the two tables apart, a left/right tag is added to each value. The shuffle then completes the join, and the value list the reducer receives for each key contains the grandchild-grandparent relationships. The reducer parses each value list, puts the child fields of left-table records into one array and the parent fields of right-table records into another, and the Cartesian product of the two arrays is the final result.
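
A small, hypothetical illustration of this keying scheme (the names are made up): for the input pairs "Tom Lucy" and "Lucy Mary", the map emits the records below, the shuffle groups them by key, and the STjoin implementation that follows realizes exactly this.

    Map output (value format: tag + "+" + child + "+" + parent):

    (Lucy, "1+Tom+Lucy")     left table:  key = parent
    (Tom,  "2+Tom+Lucy")     right table: key = child
    (Mary, "1+Lucy+Mary")
    (Lucy, "2+Lucy+Mary")

    After the shuffle, key "Lucy" receives {"1+Tom+Lucy", "2+Lucy+Mary"}: the tag-1 record
    contributes grandchild Tom, the tag-2 record contributes grandparent Mary, so the
    reducer emits (Tom, Mary).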

import java.io.IOException;
import java.util.*;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class STjoin {

    public static int time = 0;

    public static class Map extends Mapper<Object, Text, Text, Text> {

        // map function
        public void map(Object key, Text value, Context context)
                throws IOException, InterruptedException {
            String childname = new String();
            String parentname = new String();
            String relationtype = new String(); // left/right table tag

            // split the input line into its two columns
            StringTokenizer itr = new StringTokenizer(value.toString());
            String[] values = new String[2];
            int i = 0;
            while (itr.hasMoreTokens()) {
                values[i] = itr.nextToken();
                i++;
            }
            // skip the header line ("child parent")
            if (values[0].compareTo("child") != 0) {
                childname = values[0];
                parentname = values[1];
                // emit the left table: key = parent, tag "1"
                relationtype = "1";
                context.write(new Text(values[1]), new Text(relationtype + "+" + childname + "+" + parentname));
                // emit the right table: key = child, tag "2"
                relationtype = "2";
                context.write(new Text(values[0]), new Text(relationtype + "+" + childname + "+" + parentname));
            }
        }
    }

    public static class Reduce extends Reducer<Text, Text, Text, Text> {

        // reduce function
        public void reduce(Text key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            // write the header line once
            if (0 == time) {
                context.write(new Text("grandchild"), new Text("grandparent"));
                time++;
            }
            int grandchildnum = 0;
            String[] grandchild = new String[10];
            int grandparentnum = 0;
            String[] grandparent = new String[10];
            Iterator<Text> ite = values.iterator();
            while (ite.hasNext()) {
                String record = ite.next().toString();
                int len = record.length();
                int i = 2;
                if (0 == len) {
                    continue;
                }

                // get the left/right table tag
                char relationtype = record.charAt(0);

                // child and parent fields of this record
                String childname = new String();
                String parentname = new String();

                // read the child from the value
                while (record.charAt(i) != '+') {
                    childname += record.charAt(i);
                    i++;
                }

                i = i + 1;

                // read the parent from the value
                while (i < len) {
                    parentname += record.charAt(i);
                    i++;
                }

                // left table: put the child into the grandchild array
                if ('1' == relationtype) {
                    grandchild[grandchildnum] = childname;
                    grandchildnum++;
                }

                // right table: put the parent into the grandparent array
                if ('2' == relationtype) {
                    grandparent[grandparentnum] = parentname;
                    grandparentnum++;
                }
            }

            // Cartesian product of the grandchild and grandparent arrays
            if (0 != grandchildnum && 0 != grandparentnum) {
                for (int m = 0; m < grandchildnum; m++) {
                    for (int n = 0; n < grandparentnum; n++) {
                        // emit the result
                        context.write(new Text(grandchild[m]), new Text(grandparent[n]));
                    }
                }
            }
        }
    }


    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("mapred.job.tracker", "192.168.224.100");
        String[] ioArgs = new String[] { "STjoin_in", "STjoin_out" };
        String[] otherArgs = new GenericOptionsParser(conf, ioArgs).getRemainingArgs();
        if (otherArgs.length != 2) {
            System.err.println("Usage: Single Table Join <in> <out>");
            System.exit(2);
        }

        Job job = new Job(conf, "Single Table Join");
        job.setJarByClass(STjoin.class);

        // set the Mapper and Reducer classes
        job.setMapperClass(Map.class);
        job.setReducerClass(Reduce.class);

        // set the output types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        // set the input and output directories
        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
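
The full multi-table join below (joining factory names to addresses over a shared address ID) follows the same pattern: the map tags each value with the table it came from, and the reduce builds the per-key Cartesian product. A hypothetical illustration of one reduce group (the data is made up; note the trailing space the mapper appends to each name):

    Map output for the lines "Beijing Red Star 1" (factory file) and "1 Beijing" (address file):

    (1, "1+Beijing Red Star ")    tag 1: factory name, keyed by the address ID
    (1, "2+Beijing ")             tag 2: address, keyed by the address ID

    The reducer for key "1" strips the tags and emits the pair (Beijing Red Star, Beijing).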
import java.io.IOException;
import java.util.*;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class MTjoin {

    public static int time = 0;

    /*
     * In the map, first decide whether the input line belongs to the left or
     * the right table, then split it into its two columns: the join column is
     * stored in the key, the remaining column plus a left/right tag in the value.
     */
    public static class Map extends Mapper<Object, Text, Text, Text> {

        // map function
        public void map(Object key, Text value, Context context)
                throws IOException, InterruptedException {
            String line = value.toString(); // one input line
            String relationtype = new String(); // left/right table tag

            // skip the header line of either input file
            if (line.contains("factoryname") == true
                    || line.contains("addressed") == true) {
                return;
            }

            // tokenize the input line
            StringTokenizer itr = new StringTokenizer(line);
            String mapkey = new String();
            String mapvalue = new String();
            int i = 0;
            while (itr.hasMoreTokens()) {
                // read one token
                String token = itr.nextToken();
                // a token starting with a digit is the address ID: use it as the key
                if (token.charAt(0) >= '0' && token.charAt(0) <= '9') {
                    mapkey = token;
                    if (i > 0) {
                        relationtype = "1";
                    } else {
                        relationtype = "2";
                    }
                    continue;
                }

                // accumulate the factory name (or the address)
                mapvalue += token + " ";
                i++;
            }

            // emit the tagged record
            context.write(new Text(mapkey), new Text(relationtype + "+" + mapvalue));
        }
    }

    /*
     * The reduce parses the map output, stores the values separately for the
     * left and right tables, then emits their Cartesian product.
     */
    public static class Reduce extends Reducer<Text, Text, Text, Text> {

        // reduce function
        public void reduce(Text key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {

            // write the header line once
            if (0 == time) {
                context.write(new Text("factoryname"), new Text("addressname"));
                time++;
            }

            int factorynum = 0;
            String[] factory = new String[10];
            int addressnum = 0;
            String[] address = new String[10];

            Iterator<Text> ite = values.iterator();
            while (ite.hasNext()) {
                String record = ite.next().toString();
                int len = record.length();
                int i = 2;
                if (0 == len) {
                    continue;
                }

                // get the left/right table tag
                char relationtype = record.charAt(0);

                // left table: factory name
                if ('1' == relationtype) {
                    factory[factorynum] = record.substring(i);
                    factorynum++;
                }

                // right table: address
                if ('2' == relationtype) {
                    address[addressnum] = record.substring(i);
                    addressnum++;
                }
            }

            // Cartesian product
            if (0 != factorynum && 0 != addressnum) {
                for (int m = 0; m < factorynum; m++) {
                    for (int n = 0; n < addressnum; n++) {
                        // emit the result
                        context.write(new Text(factory[m]),
                                new Text(address[n]));
                    }
                }
            }
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // this line matters: it points the job at the cluster's JobTracker
        conf.set("mapred.job.tracker", "192.168.1.2:9001");

        String[] ioArgs = new String[] { "MTjoin_in", "MTjoin_out" };
        String[] otherArgs = new GenericOptionsParser(conf, ioArgs).getRemainingArgs();
        if (otherArgs.length != 2) {
            System.err.println("Usage: Multiple Table Join <in> <out>");
            System.exit(2);
        }

        Job job = new Job(conf, "Multiple Table Join");
        job.setJarByClass(MTjoin.class);

        // set the Mapper and Reducer classes
        job.setMapperClass(Map.class);
        job.setReducerClass(Reduce.class);

        // set the output types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        // set the input and output directories
        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}