Spark RDD operators

map and mapPartitions

scala> val rdd1=sc.parallelize(List(1,2,3,4,5,6),2)
scala> val rdd2 = rdd1.map(_ * 10)
scala> rdd2.collect
res0: Array[Int] = Array(10, 20, 30, 40, 50, 60)

scala> val rdd2 = rdd1.mapPartitions(_.map(_*10))
scala> rdd2.collect
res1: Array[Int] = Array(10, 20, 30, 40, 50, 60)
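
The map example and the mapPartitions example above produce the same result; the difference is that mapPartitions hands the whole partition to the function as one iterator. A minimal sketch of why that matters (the local multiplier stands in for any expensive per-partition setup, a hypothetical example):

rdd1.mapPartitions { iter =>
  val multiplier = 10        // stands in for an expensive resource created once per partition
  iter.map(_ * multiplier)   // reused for every element of the partition
}.collect
// expected: Array(10, 20, 30, 40, 50, 60), the same result as above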

mapWith and flatMapWith

scala> val rdd1 = sc.makeRDD(List(1,2,3,4,5,6), 2)
scala> rdd1.mapWith(i => i*10)((a, b) => b+2).collect
res0: Array[Int] = Array(2, 2, 2, 12, 12, 12)
//mapWith produces exactly one output element per input element, while flatMapWith can produce several per input element
//Process: partition index * 10 + 2; the element a is not used
scala> rdd1.flatMapWith(i => i, true)((x, y) => List((y, x))).collect
res2: Array[(Int, Int)] = Array((0,1), (0,2), (0,3), (1,4), (1,5), (1,6))
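
mapWith and flatMapWith were deprecated in later Spark releases and eventually removed; mapPartitionsWithIndex provides the partition index directly and covers the same use cases. A sketch of equivalents for the two examples above:

// same as the mapWith example: partition index * 10 + 2, element ignored
rdd1.mapPartitionsWithIndex((idx, iter) => iter.map(_ => idx * 10 + 2)).collect
// expected: Array(2, 2, 2, 12, 12, 12)

// same as the flatMapWith example: pair each element with its partition index
rdd1.mapPartitionsWithIndex((idx, iter) => iter.map(x => (idx, x))).collect
// expected: Array((0,1), (0,2), (0,3), (1,4), (1,5), (1,6))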

aggregate

scala> val rdd1 = sc.makeRDD(List(1,2,3,4,5,6),2)
scala> rdd1.aggregate(0)(math.max(_,_), _+_)
res7: Int = 9
//Process: partition 0: max(max(max(0,1),2),3)=3; partition 1: likewise = 6; combine: 0+3+6=9

scala> rdd1.aggregate(2)(math.max(_,_), _+_)
res10: Int = 11
//Process: partition 0: max(max(max(2,1),2),3)=3; partition 1: likewise = 6; combine: 2+3+6=11
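
The zero value participates once per partition and once more in the final combine, which is why the result grows from 9 to 11 above. A common use of aggregate is an accumulator of a different type than the elements, e.g. a (sum, count) pair for the average (a minimal sketch):

val (sum, cnt) = rdd1.aggregate((0, 0))(
  (acc, x) => (acc._1 + x, acc._2 + 1),     // seqOp: fold elements within a partition
  (a, b)   => (a._1 + b._1, a._2 + b._2))   // combOp: merge the partition results
sum.toDouble / cnt                          // 21 / 6 = 3.5 for List(1,2,3,4,5,6)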

val rdd1 = sc.makeRDD(List("a","b","c","d","e","f"),2)
def func2(index: Int, iter: Iterator[(String)]) : Iterator[String] = {
  iter.toList.map(x => "[partID:" +  index + ", val: " + x + "]").iterator
}
scala> rdd1.mapPartitionsWithIndex(func2).collect
res11: Array[String] = Array([partID:0, val: a], [partID:0, val: b], [partID:0, val: c], [partID:1, val: d], [partID:1, val: e], [partID:1, val: f])

scala> rdd1.aggregate("")(_ + _, _ + _)
res12: String = abcdef
//Process: partition 0: a+b+c=abc; partition 1: likewise = def; combine: abc+def=abcdef

scala> rdd1.aggregate("*")(_ + _, _ + _)
res15: String = **abc*def
//Process: partition 0: *+a+b+c=*abc; partition 1: likewise = *def; combine: *+*abc+*def=**abc*def

scala> val rdd3 = sc.parallelize(List("12","23","345","4567"),2)
scala> rdd3.aggregate("")((x,y) => math.max(x.length, y.length).toString, (x,y) => x + y)
res16: String = 24
//Process: partition 0: max("".length=0, 2)=2 -> "2", then max("2".length=1, 2)=2 -> "2"; partition 1: likewise -> "4"; combine: ""+2+4 = 24 (or 42, partition order is not guaranteed)

scala> val rdd4 = sc.parallelize(List("12","23","345",""),2)
scala> rdd4.aggregate("")((x,y) => math.min(x.length, y.length).toString, (x,y) => x + y)
res17: String = 10
//Process: partition 0: min("".length=0, 2)=0 -> "0", then min("0".length=1, 2)=1 -> "1"; partition 1: min(0, 3)=0 -> "0", then min(1, "".length=0)=0 -> "0"; combine: 1+0 = 10 (or 01)

scala> val rdd5 = sc.parallelize(List("12","23","","345"),2)
scala> rdd5.aggregate("")((x,y) => math.min(x.length, y.length).toString, (x,y) => x + y)
res18: String = 11
//Process: partition 0: min(0, 2)=0 -> "0", then min(1, 2)=1 -> "1"
//partition 1: min(0, "".length=0)=0 -> "0", then min(1, 3)=1 -> "1"; combine: 1+1 = 11

aggregateByKey


scala> val pairRDD = sc.parallelize(List(("mouse", 2),("cat",2), ("cat", 5), ("mouse", 4),("cat", 12), ("dog", 12)), 2)
pairRDD: org.apache.spark.rdd.RDD[(String, Int)] = ParallelCollectionRDD[14] at parallelize at <console>:24

scala> def func2(index: Int, iter: Iterator[(String, Int)]) : Iterator[String] = { iter.toList.map(x => "[partID:" +  index + ", val: " + x + "]").iterator }

scala> pairRDD.mapPartitionsWithIndex(func2).collect
res19: Array[String] = Array([partID:0, val: (mouse,2)], [partID:0, val: (cat,2)], [partID:0, val: (cat,5)], [partID:1, val: (mouse,4)], [partID:1, val: (cat,12)], [partID:1, val: (dog,12)])

scala> pairRDD.aggregateByKey(0)(math.max(_, _), _ + _).collect
res20: Array[(String, Int)] = Array((dog,12), (cat,17), (mouse,6))
//Process: partition 0: ("mouse", max(0,2)=2), ("cat", max(0,2,5)=5)
//partition 1: ("mouse", max(0,4)=4), ("cat", max(0,12)=12), ("dog", max(0,12)=12)
//combine: ("mouse", 2+4=6), ("cat", 5+12=17), ("dog", 12)

scala> pairRDD.aggregateByKey(100)(math.max(_, _), _ + _).collect
res21: Array[(String, Int)] = Array((dog,100), (cat,200), (mouse,200))
//Process: partition 0: the zero value 100 beats every value: ("mouse", 100), ("cat", 100)
//partition 1: ("mouse", 100), ("cat", 100), ("dog", 100)
//combine: ("mouse", 100+100=200), ("cat", 100+100=200), ("dog", 100)

scala> pairRDD.aggregateByKey(3)(math.max(_, _), _ + _).collect
res0: Array[(String, Int)] = Array((dog,12), (cat,17), (mouse,7))
//Process: partition 0: ("mouse", max(3,2)=3), ("cat", max(3,2,5)=5)
//partition 1: ("mouse", max(3,4)=4), ("cat", max(3,12)=12), ("dog", max(3,12)=12)
//combine: ("mouse", 3+4=7), ("cat", 5+12=17), ("dog", 12)
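
Like aggregate, aggregateByKey lets the accumulator type differ from the value type; the zero value is applied once per key per partition. A minimal sketch computing the per-key average with a (sum, count) accumulator:

val avgByKey = pairRDD.aggregateByKey((0, 0))(
  (acc, v) => (acc._1 + v, acc._2 + 1),     // within a partition
  (a, b)   => (a._1 + b._1, a._2 + b._2)    // across partitions
).mapValues { case (sum, cnt) => sum.toDouble / cnt }
avgByKey.collect
// expected: cat -> (2+5+12)/3, mouse -> (2+4)/2, dog -> 12.0; array order may vary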

countByKey and countByValue


scala> val rdd1 = sc.parallelize(List(("a", 1), ("b", 2), ("b", 2), ("c", 2), ("c", 1)))
rdd1: org.apache.spark.rdd.RDD[(String, Int)] = ParallelCollectionRDD[30] at parallelize at <console>:24

scala> rdd1.countByKey
res6: scala.collection.Map[String,Long] = Map(a -> 1, b -> 2, c -> 2)
//counts how many times each key occurs

scala> rdd1.countByValue
res7: scala.collection.Map[(String, Int),Long] = Map((b,2) -> 2, (c,2) -> 1, (a,1) -> 1, (c,1) -> 1)
//counts how many times each (key, value) pair occurs
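
Both countByKey and countByValue return a Scala Map on the driver, so they only suit small result sets. A distributed alternative that keeps the counts as an RDD (a sketch):

rdd1.mapValues(_ => 1L).reduceByKey(_ + _).collect   // same counts as countByKey
rdd1.map((_, 1L)).reduceByKey(_ + _).collect         // same counts as countByValue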

filterByRange

scala> val rdd1 = sc.parallelize(List(("e", 5), ("c", 3), ("d", 4), ("c", 2), ("a", 1)))

scala> rdd1.filterByRange("c", "d").collect
res8: Array[(String, Int)] = Array((c,3), (d,4), (c,2))
//returns only the pairs whose key falls within the given range
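
filterByRange works on pair RDDs with an ordered key and keeps the pairs whose key lies in the inclusive range [lower, upper]. It can skip whole partitions when the RDD is range-partitioned, e.g. after sortByKey (a sketch):

rdd1.sortByKey().filterByRange("c", "d").collect
// expected: the same three pairs as above, now in key order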

flatMapValues

scala> val rdd3 = sc.parallelize(List(("a", "1 2"), ("b", "3 4")))

scala> rdd3.flatMapValues(_.split(" ")).collect
res12: Array[(String, String)] = Array((a,1), (a,2), (b,3), (b,4))
//applies the function to each value and flattens the results, keeping the original key
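
A sketch of the same call that also parses the split tokens to Int while keeping the key:

rdd3.flatMapValues(_.split(" ").map(_.toInt)).collect
// expected: Array((a,1), (a,2), (b,3), (b,4)) with Int values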

foldByKey

scala> val rdd1 = sc.parallelize(List("dog", "wolf", "cat", "bear"), 2)

scala> val rdd2 = rdd1.map(x => (x.length, x))
scala> rdd2.collect
res13: Array[(Int, String)] = Array((3,dog), (4,wolf), (3,cat), (4,bear))
scala> rdd2.foldByKey("*")(_+_).collect
res17: Array[(Int, String)] = Array((4,*wolf*bear), (3,*dog*cat))
//folds the values of each key starting from the zero value "*", concatenating values that share a key

scala> sc.textFile("hdfs://hadoop02:8020/wc").flatMap(_.split(" ")).map((_, 1)).foldByKey(0)(_+_).collect
res19: Array[(String, Int)] = Array((a,b,c,1), (d,a,c,2))
//Process: source data --> Array((a,b,c,1), (d,a,c,1), (d,a,c,1)) --> Array((a,b,c,0+1), (d,a,c,0+2))

foreachPartition

scala> val rdd1 = sc.makeRDD(List(1, 2, 3, 4, 5, 6, 7, 8, 9), 3)
scala> rdd1.foreachPartition(x => println(x.reduce(_ + _)))
6
15
24
//computes each partition separately and prints the per-partition sum
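
foreachPartition is typically used for writing to an external system, because any per-partition setup (a connection, a batch buffer) is paid once per partition rather than once per element. A sketch with the setup only hinted at in comments (the connection helpers are hypothetical):

rdd1.foreachPartition { iter =>
  // val conn = createConnection()   // hypothetical: expensive setup, once per partition
  iter.foreach(x => println(x))      // stand-in for conn.write(x)
  // conn.close()
}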

keyBy

scala> val rdd1 = sc.parallelize(List("dog", "salmon", "salmon", "rat", "elephant"), 3)

scala> rdd1.keyBy(_.length).collect
res25: Array[(Int, String)] = Array((3,dog), (6,salmon), (6,salmon), (3,rat), (8,elephant))
//uses the length of each element as its key

keys values

scala> val rdd1 = sc.parallelize(List("dog", "tiger", "lion", "cat", "panther", "eagle"), 2)
scala> val rdd2 = rdd1.map(x => (x.length, x))

scala> rdd2.keys.collect
res26: Array[Int] = Array(3, 5, 4, 3, 7, 5)

scala> rdd2.values.collect
res27: Array[String] = Array(dog, tiger, lion, cat, panther, eagle)

collectAsMap

scala> val rdd = sc.parallelize(List(("a", 1), ("b", 2)))

scala> rdd.collectAsMap
res28: scala.collection.Map[String,Int] = Map(b -> 2, a -> 1)
//collects the pair RDD to the driver as a Map
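
Note that collectAsMap materializes the whole result on the driver, and if a key appears more than once only one of its values is kept (a sketch):

sc.parallelize(List(("a", 1), ("a", 2), ("b", 3))).collectAsMap
// expected: a Map with a single "a" entry (whether 1 or 2 survives is not guaranteed) and b -> 3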

repartition, coalesce, partitionBy

scala> val rdd1 = sc.parallelize(1 to 10, 3)
rdd1: org.apache.spark.rdd.RDD[Int] = ParallelCollectionRDD[77] at parallelize at <console>:24

scala> val rdd2 = rdd1.coalesce(2, false)
rdd2: org.apache.spark.rdd.RDD[Int] = CoalescedRDD[78] at coalesce at <console>:25
//merges the 3 original partitions down to 2 without a shuffle (shuffle = false)
scala> rdd2.partitions.length
res29: Int = 2
//number of partitions
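
Only coalesce is shown above; a minimal sketch of the other two operators named in this section (repartition is coalesce with shuffle = true, partitionBy needs a pair RDD and an explicit Partitioner):

import org.apache.spark.HashPartitioner

rdd1.repartition(5).partitions.length                                              // expected: 5
rdd1.map(x => (x % 3, x)).partitionBy(new HashPartitioner(3)).partitions.length    // expected: 3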

checkpoint

//Marks this RDD for checkpointing. The RDD's data is written as binary files under the checkpoint directory set with SparkContext.setCheckpointDir(), and once the checkpoint completes all references to the RDD's parents (its lineage) are removed. Calling checkpoint does not execute anything immediately; the checkpoint is only materialized when an action is triggered on the RDD.

scala> sc.setCheckpointDir("hdfs://hadoop02:8020/cp")

scala> val rdd = sc.textFile("hdfs://hadoop02:8020/wc").flatMap(_.split(" ")).map((_, 1)).reduceByKey(_+_)
rdd: org.apache.spark.rdd.RDD[(String, Int)] = ShuffledRDD[88] at reduceByKey at <console>:24

scala> rdd.checkpoint

scala> rdd.isCheckpointed
res38: Boolean = false

scala> rdd.count
res39: Long = 2

scala> rdd.isCheckpointed
res40: Boolean = true

scala> rdd.getCheckpointFile
res41: Option[String] = Some(hdfs://hadoop02:8020/cp/d009afb3-cff4-40f0-8fa6-89fde0259228/rdd-88)
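
Because the checkpoint is written by a separate job after the first action, it is usually recommended to cache the RDD before checkpointing so its lineage is not computed twice (a sketch):

rdd.cache()
rdd.checkpoint()
rdd.count()   // first action: computes the RDD, caches it, then the checkpoint job reuses the cached data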
