spark的join和leftJoin使用union和reduce实现

在 Spark 中直接使用自带的 join 与 leftJoin 时,两个 RDD 通常会各自触发一次 shuffle(co-group),容易引发性能问题;可以改写为先 union 再 reduceByKey 的方式,把两次 shuffle 合并为一次,从而提升性能。

 

import scala.reflect.ClassTag
/**
 * Left outer join of two pair RDDs, implemented as a single union + reduceByKey
 * instead of Spark's built-in `leftOuterJoin`, so only one shuffle is incurred.
 *
 * Each value is tagged with its origin (0 = left, 1 = right); after merging per
 * key, the tags are separated again and recombined with left-join semantics.
 *
 * @param left  left-side RDD of key/value pairs (all its values are preserved)
 * @param right right-side RDD of key/value pairs
 * @return RDD of (K, (V, Option[V])): every left value paired with every
 *         matching right value, or with None when the key has no right match —
 *         the same row multiplicity as Spark's `leftOuterJoin`.
 */
def leftJoin[K: ClassTag, V: ClassTag](left: RDD[(K, V)], right: RDD[(K, V)]) = {
    left.mapValues(v => Array((v, 0))).union(right.mapValues(v => Array((v, 1))))
      .reduceByKey(_ ++ _)
      .flatMapValues { tagged =>
        val lefts  = tagged.filter(_._2 == 0).map(_._1)
        val rights = tagged.filter(_._2 == 1).map(_._1)
        if (lefts.isEmpty) {
            // Key exists only on the right: a left join emits nothing for it.
            Nil
        } else if (rights.isEmpty) {
            // No right-side match: keep every left value, paired with None.
            lefts.map(lv => (lv, None)).toList
        } else {
            // BUG FIX: the original paired each left value with only
            // `rights.head`, dropping duplicate right-side values for the key.
            // A left outer join must emit the full per-key cross product.
            (for (lv <- lefts; rv <- rights) yield (lv, Some(rv))).toList
        }
      }
}

    /**
     * Inner join of two pair RDDs via a single union + reduceByKey, avoiding the
     * extra shuffle of Spark's built-in `join`.
     *
     * Values are tagged with their origin (0 = left, 1 = right), merged per key,
     * then only keys present on BOTH sides produce output.
     *
     * @param left  left-side RDD of key/value pairs
     * @param right right-side RDD of key/value pairs
     * @return RDD of (K, (V, V)): one row per matching left/right value pair —
     *         the same row multiplicity as Spark's `join`.
     */
    def join[K: ClassTag, V: ClassTag](left: RDD[(K, V)], right: RDD[(K, V)]) = {
        left.mapValues(v => Array((v, 0))).union(right.mapValues(v => Array((v, 1))))
          .reduceByKey(_ ++ _)
          .flatMapValues { tagged =>
            val lefts  = tagged.filter(_._2 == 0).map(_._1)
            val rights = tagged.filter(_._2 == 1).map(_._1)
            if (lefts.isEmpty || rights.isEmpty) {
                // Inner join: a key missing from either side yields no rows.
                Nil
            } else {
                // BUG FIX: the original paired each left value with only
                // `rights.head`, losing duplicate right-side matches. An inner
                // join must emit the full per-key cross product.
                (for (lv <- lefts; rv <- rights) yield (lv, rv)).toList
            }
          }
    }

    /**
     * Full outer join of two pair RDDs via a single union + reduceByKey.
     *
     * Values are tagged with their origin (0 = left, 1 = right) and merged per
     * key; every key from either side produces output.
     *
     * @param left  left-side RDD of key/value pairs
     * @param right right-side RDD of key/value pairs
     * @return RDD of (K, Option[V], Option[V]): one row per matching value pair,
     *         plus (key, Some(v), None) / (key, None, Some(v)) rows for values
     *         whose key is absent from the other side — the same row
     *         multiplicity as Spark's `fullOuterJoin` (flattened to a triple).
     */
    def outerJoin[K: ClassTag, V: ClassTag](left: RDD[(K, V)], right: RDD[(K, V)]) = {
        left.mapValues(v => Array((v, 0))).union(right.mapValues(v => Array((v, 1))))
          .reduceByKey(_ ++ _)
          .flatMap { case (key, tagged) =>
            val lefts  = tagged.filter(_._2 == 0).map(_._1)
            val rights = tagged.filter(_._2 == 1).map(_._1)
            if (lefts.isEmpty) {
                // Right-only key: one row per right value, left side is None.
                rights.map(rv => (key, None, Some(rv))).toList
            } else if (rights.isEmpty) {
                // Left-only key: one row per left value, right side is None.
                lefts.map(lv => (key, Some(lv), None)).toList
            } else {
                // BUG FIX: the original used l.head/r.head and emitted exactly one
                // row per key, silently dropping every duplicate value on either
                // side. A full outer join must emit the per-key cross product.
                (for (lv <- lefts; rv <- rights) yield (key, Some(lv), Some(rv))).toList
            }
          }
    }

 

©️2020 CSDN 皮肤主题: 大白 设计师:CSDN官方博客 返回首页