Spark combineByKey: overview
Its three important parameters, in brief:
createCombiner: V => C : turns the first value seen for a key in a partition into the combiner structure C
mergeValue: (C, V) => C : within one partition, merges each further value of the same key into its existing combiner
mergeCombiners: (C, C) => C : across partitions, merges the combiners built for the same key
(A minimal sketch of the three parameters follows the sample data below.)
Sample data:
val data: RDD[(String, Int)] = sc.parallelize(List(
  ("zhangsan", 80),
  ("zhangsan", 88),
  ("zhangsan", 91),
  ("zhangsan", 89),
  ("lisi", 80),
  ("lisi", 82),
  ("lisi", 86),
  ("wangwu", 89),
  ("wangwu", 82)
))
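Before the full program, here is a minimal sketch (my own illustration, not from the original) of the three parameters in action: collecting each key's values into a List[Int] with combineByKey.

// createCombiner: the first value of a key in a partition starts a one-element list
// mergeValue: each further value of that key in the same partition is prepended
// mergeCombiners: the per-partition lists of the same key are concatenated
val asList: RDD[(String, List[Int])] = data.combineByKey(
  (v: Int) => List(v),
  (c: List[Int], v: Int) => v :: c,
  (c1: List[Int], c2: List[Int]) => c1 ::: c2
)
asList.foreach(println) // e.g. (lisi,List(86, 82, 80)); element order depends on partitioning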
Data flow of spark combineByKey: createCombiner runs once for the first occurrence of each key in a partition; mergeValue then folds every subsequent value of that key within the same partition into the combiner; finally, mergeCombiners merges the per-partition combiners for the same key across partitions.
Example code
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
object spark_rdd_aggregate {
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setAppName("aggregate").setMaster("local")
    val sc = new SparkContext(conf)
    sc.setLogLevel("ERROR")
    val data: RDD[(String, Int)] = sc.parallelize(List(
      ("zhangsan", 80),
      ("zhangsan", 88),
      ("zhangsan", 91),
      ("zhangsan", 89),
      ("lisi", 80),
      ("lisi", 82),
      ("lisi", 86),
      ("wangwu", 89),
      ("wangwu", 82)
    ))
println("*****************聚合函数groupByKey************************")
// key-value 按照key进行聚合,value为集合
val group: RDD[(String, Iterable[Int])] = data.groupByKey()
group.foreach(println)
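    // Note: groupByKey shuffles every value over the network; when the goal is an
    // aggregate, reduceByKey/combineByKey below pre-combine within each partition first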
println("*****************聚合函数 行转列************************")
val res01: RDD[(String, Int)] = group.flatMap(s => s._2.map(x => (s._1, x)))
res01.foreach(println)
println("*****************聚合函数 value扁平化************************")
group.flatMapValues(e => e.iterator).foreach(println)
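    // Same result as the flatMap above; flatMapValues keeps the key untouched
    // (and preserves the partitioner), so only the values need to be spelled out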
println("*****************聚合函数 mapValue************************")
group.mapValues(p =>p.toList.sorted.take(2)).foreach(println)
println("*****************聚合函数 flatMapValues************************")
group.flatMapValues(v=>v.toList.sorted.take(2)).foreach(println)
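    // mapValues above keeps the two lowest scores as a List per key, e.g. (zhangsan,List(80, 88));
    // flatMapValues flattens them back into individual pairs: (zhangsan,80), (zhangsan,88)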
println("*****************聚合函数 sum,count,min,max,avg************************")
println("*****************聚合函数 sum************************")
val sum: RDD[(String, Int)] = data.reduceByKey(_ + _)
sum.foreach(println)
println("*****************聚合函数 方式1:count************************")
val count01: RDD[(String, Int)] = data.map(s => (s._1, 1)).reduceByKey(_ + _)
count01.foreach(println)
println("*****************聚合函数 方式2:count************************")
val count02: RDD[(String, Int)] = data.mapValues(x => 1).reduceByKey(_ + _)
count02.foreach(println)
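    // Alternative (not in the original): countByKey() is an action that brings
    // the counts back to the driver as a Map[String, Long]
    // println(data.countByKey())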
println("*****************聚合函数 max************************")
val max: RDD[(String, Int)] = data.reduceByKey((ov, nv) => if (ov > nv) ov else nv)
max.foreach(println)
println("*****************聚合函数 min************************")
val min: RDD[(String, Int)] = data.reduceByKey((ov, nv) => if (ov < nv) ov else nv)
min.foreach(println)
println("*****************聚合函数 avg************************")
// createCombiner: V => C
// ,
// mergeValue: (C, V) => C
// ,
// mergeCombiners: (C, C) =>
    val resRDD: RDD[(String, (Int, Int))] = data.combineByKey(
      // createCombiner: conceptually wraps each value of a key as (value, 1),
      // i.e. (running sum, count); it actually runs only for the first value of
      // each key seen in a partition, the rest go through mergeValue
      //   ("zhangsan", 80)        (80, 1)
      //   ("zhangsan", 88)        (88, 1)
      //   ("zhangsan", 89)        (89, 1)
      //   ("zhangsan", 91)  ==>   (91, 1)
      //   ("lisi", 80)            (80, 1)
      //   ("lisi", 86)            (86, 1)
      //   ("lisi", 82)            (82, 1)
      //   ("wangwu", 89)          (89, 1)
      //   ("wangwu", 82)          (82, 1)
      (value: Int) => (value, 1),
      // mergeValue: within a partition, merges each further value of the same key
      // into the combiner: oldValue._1 accumulates the sum (+ newValue) and
      // oldValue._2 bumps the count (+ 1)
      (oldValue: (Int, Int), newValue: Int) => (oldValue._1 + newValue, oldValue._2 + 1),
      // mergeCombiners: across partitions, adds up the (sum, count) combiners of the same key
      (v1: (Int, Int), v2: (Int, Int)) => (v1._1 + v2._1, v1._2 + v2._2)
    )
    // Divide as Double; Int division (x._1 / x._2) would truncate, e.g. 171 / 2 = 85
    val avg: RDD[(String, Double)] = resRDD.mapValues(x => x._1.toDouble / x._2)
    avg.foreach(println)
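    // For comparison (a sketch, not in the original): the same average via
    // aggregateByKey, whose zero value (0, 0) plays the role of createCombiner
    val avg2: RDD[(String, Double)] = data
      .aggregateByKey((0, 0))(
        (acc, v) => (acc._1 + v, acc._2 + 1),
        (a, b) => (a._1 + b._1, a._2 + b._2))
      .mapValues { case (s, c) => s.toDouble / c }
    avg2.foreach(println)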
  }
}
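Note: with setMaster("local") (a single core) the sample RDD typically lands in one partition, so mergeCombiners is never actually invoked here; create the RDD with an explicit slice count such as sc.parallelize(list, 2), or inspect the layout with data.glom().collect(), to see it fire. With this data the averages come out to zhangsan 348/4 = 87.0, lisi 248/3 ≈ 82.67, and wangwu 171/2 = 85.5 (foreach prints them in no guaranteed order).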