语法
val newRdd = oldRdd.groupByKey()
源码
def groupByKey(partitioner : org.apache.spark.Partitioner) : org.apache.spark.rdd.RDD[scala.Tuple2[K, scala.Iterable[V]]] = { /* compiled code */ }
作用
对K-V类型的RDD按照Key对value分组。
例子
package com.day1
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
object oper {
  /** Demonstrates the `groupByKey` operator: groups the values of a K-V RDD by key,
    * then sums each group's values to produce a per-key count (a word count).
    */
  def main(args: Array[String]): Unit = {
    val config: SparkConf = new SparkConf().setMaster("local[*]").setAppName("wordCount")
    // Create the Spark context
    val sc = new SparkContext(config)
    // groupByKey operator demo: build an RDD of names, pair each with 1
    val arrayRdd = sc.makeRDD(Array("张三","李四","王五","刘六","张三","李四","张三","刘六"))
    val mapRdd = arrayRdd.map(word => (word,1))
    // Group the (word, 1) pairs by key -> RDD[(String, Iterable[Int])]
    val groupByKeyRdd = mapRdd.groupByKey()
    groupByKeyRdd.collect().foreach(println)
    // Sum each group's values to get the per-word count
    val mapRdd2 = groupByKeyRdd.map(t => (t._1,t._2.sum))
    mapRdd2.collect().foreach(println)
    // Fix: the original never stopped the context; release its resources explicitly
    sc.stop()
  }
}
输入
"张三" "李四" "王五" "刘六" "张三" "李四" "张三" "刘六"
输出（注意：groupByKey 不保证结果按 Key 有序，实际输出顺序可能与下面不同）
(张三,CompactBuffer(1, 1, 1))
(刘六,CompactBuffer(1, 1))
(李四,CompactBuffer(1, 1))
(王五,CompactBuffer(1))
(张三,3)
(刘六,2)
(李四,2)
(王五,1)