import org.apache.spark.SparkContext
import org.apache.spark.SparkConf

object Cogroup {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext("local", "Cogroup", new SparkConf())
    cogroupTrans(sc)
    sc.stop()
  }

  def cogroupTrans(sc: SparkContext): Unit = {
    // Student IDs paired with names
    val stuNames = Array(
      Tuple2(1, "Spark"),
      Tuple2(2, "Tecc"),
      Tuple2(3, "Hadoop"))
    // Student IDs paired with scores; ID 1 appears twice
    val stuScores = Array(
      Tuple2(1, 100),
      Tuple2(1, 99),
      Tuple2(2, 95),
      Tuple2(3, 65))
    val names = sc.parallelize(stuNames)
    val scores = sc.parallelize(stuScores)
    // cogroup: for each key, collect all values from `names` and all values from `scores`
    val stuNameAndScore = names.cogroup(scores)
    stuNameAndScore.collect().foreach(println)
  }
}
Output:
(1,(CompactBuffer(Spark),CompactBuffer(100, 99)))
(3,(CompactBuffer(Hadoop),CompactBuffer(65)))
(2,(CompactBuffer(Tecc),CompactBuffer(95)))
The two RDDs are each grouped by key first, and the groups are then combined: for every key, the result pairs an iterable of that key's values from the first RDD with an iterable of its values from the second.
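
Building on that, here is a minimal sketch of consuming the cogroup result, reusing the names and scores RDDs from the example above (the value stuAvg and the averaging step are illustrative additions, not part of the original code):

  // cogroup yields (id, (Iterable[String], Iterable[Int])); a key missing from
  // one side would get an empty iterable there, so guard against dividing by zero.
  val stuAvg = names.cogroup(scores).map { case (id, (ns, ss)) =>
    val avg = if (ss.isEmpty) 0.0 else ss.sum.toDouble / ss.size
    (id, (ns.mkString(","), avg))
  }
  stuAvg.collect().foreach(println)
  // e.g. (1,(Spark,99.5)), (2,(Tecc,95.0)), (3,(Hadoop,65.0))

Unlike join, which would emit one row per name/score combination, cogroup keeps each side's values together per key, which is why a single map over the grouped iterables is enough here.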