Spark aggregateByKey: functionality
For records that share the same key, aggregateByKey applies one rule for the computation within each partition and a different rule for the computation across partitions.
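For reference, the relevant overload in PairRDDFunctions has roughly this shape (K and V are the RDD's key and value types, U is the aggregated type; other overloads also take a Partitioner or a number of partitions):

def aggregateByKey[U: ClassTag](zeroValue: U)(seqOp: (U, V) => U, combOp: (U, U) => U): RDD[(K, U)]

seqOp runs within each partition, starting from zeroValue for every key; combOp then merges the per-partition results for the same key.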
Example
sc.makeRDD(List(("a", 3), ("a", 2), ("c", 4), ("b", 3), ("c", 6), ("c", 8)), numSlices = 2)
Requirement: for each key, take the maximum value within each partition, then sum those per-partition maxima across partitions.
package com.xcu.bigdata.spark.core.pg02_rdd.pg022_rdd_transform

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * @Desc : apply intra-partition and inter-partition logic per key
 */
object Spark15_AggregateByKey {
  def main(args: Array[String]): Unit = {
    // Create the Spark configuration
    val conf: SparkConf = new SparkConf().setAppName("Spark15_AggregateByKey").setMaster("local[*]")
    // Create the SparkContext, the entry point for submitting the job
    val sc = new SparkContext(conf)
    // Create the RDD with 2 partitions
    val rdd: RDD[(String, Int)] = sc.makeRDD(List(("a", 3), ("a", 2), ("c", 4), ("b", 3), ("c", 6), ("c", 8)), numSlices = 2)
    // Per key: maximum within each partition, then sum across partitions
    // zeroValue = 0 is the initial value combined with each key's values inside a partition
    val resRDD: RDD[(String, Int)] = rdd.aggregateByKey(zeroValue = 0)(
      // intra-partition: take the maximum
      (x: Int, y: Int) => math.max(x, y),
      // inter-partition: sum the per-partition results
      (a: Int, b: Int) => a + b
    )
    // Print the result
    resRDD.collect().foreach(println)
    // Release resources
    sc.stop()
  }
}
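To see which records land in which partition (and hence what the intra-partition maxima will be), the per-partition contents can be inspected with glom(). A minimal sketch, assuming it is placed before sc.stop() in the example above so that rdd is still in scope:

// glom() turns each partition into an Array, so collect() returns one array per partition
val parts: Array[Array[(String, Int)]] = rdd.glom().collect()
parts.zipWithIndex.foreach { case (part, idx) =>
  println(s"partition $idx: ${part.mkString(", ")}")
}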
Result:
(b,3)
(a,3)
(c,12)

Why this is the result: with 6 records and numSlices = 2, makeRDD puts the first three records ("a", 3), ("a", 2), ("c", 4) into partition 0 and the last three into partition 1. The intra-partition maxima are a -> 3 and c -> 4 in partition 0, and b -> 3 and c -> 8 in partition 1; summing the maxima across partitions gives (a,3), (b,3), (c,12).
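Note that zeroValue takes part in the intra-partition seqOp for every key. A hypothetical variation for illustration (reusing the rdd from the example above, placed before sc.stop()):

// Same logic, but with zeroValue = 10 instead of 0
val resRDD10: RDD[(String, Int)] = rdd.aggregateByKey(zeroValue = 10)(
  (x: Int, y: Int) => math.max(x, y), // intra-partition maximum, now starting from 10
  (a: Int, b: Int) => a + b           // inter-partition sum
)
resRDD10.collect().foreach(println)
// Every value in this data set is below 10, so each partition's maximum becomes 10,
// and the output contains (a,10), (b,10), (c,20) (c appears in both partitions, so 10 + 10).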