If I Can Learn RDD Too

1. What is an RDD

An RDD is a fault-tolerant, read-only data structure that supports parallel operations; it is a collection of elements distributed across the nodes of a cluster.
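A minimal sketch of the two common ways to create an RDD, assuming a local SparkContext like the ones used in the listings below (the object name and file path here are hypothetical):

package com.tipdm.sparkDemo
import org.apache.spark.{SparkConf, SparkContext}
object rddIntro {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("RddIntro").setMaster("local")
    val sc = new SparkContext(conf)
    // 1) From an in-memory collection: the elements are spread across partitions.
    val nums = sc.parallelize(List(1, 2, 3, 4))
    // 2) From an external file (hypothetical path): one element per line.
    // val lines = sc.textFile("D:\\some_file.txt")
    // RDDs are read-only: a transformation returns a new RDD rather than
    // modifying the original one.
    val doubled = nums.map(_ * 2)
    println(doubled.collect().mkString(", ")) // 2, 4, 6, 8
  }
}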

2. Textbook p. 62 (3.3.5)

package com.tipdm.sparkDemo
import org.apache.spark.{SparkConf, SparkContext}
object rz {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("WordCount").setMaster("local")
    val sc = new SparkContext(conf)
    val first_half = sc.textFile("D:\\Employee_salary_first_half.csv")
    val second_half = sc.textFile("D:\\Employee_salary_second_half.csv")
    // Drop the header row: only the first partition (index 0) contains it.
    val drop_first = first_half.mapPartitionsWithIndex((ix, it) => {
      if (ix == 0) it.drop(1) else it
    })
    val drop_second = second_half.mapPartitionsWithIndex((ix, it) => {
      if (ix == 0) it.drop(1) else it
    })
    // Keep the employee name (column 1) and salary (column 6) from each CSV line.
    val split_first = drop_first.map(
      line => {val data = line.split(","); (data(1), data(6).toInt)}
    )
    val split_second = drop_second.map(
      line => {val data = line.split(","); (data(1), data(6).toInt)}
    )
    // Keep only the names of employees earning more than 200,000.
    val filter__first = split_first.filter(x => x._2 > 200000).map(x => x._1)
    val filter__second = split_second.filter(x => x._2 > 200000).map(x => x._1)
    // Merge the two halves and remove duplicate names.
    val name = filter__first.union(filter__second).distinct()
    name.collect.foreach(println)
  }
}

3. Transformation methods

package com.tipdm.sparkDemo
import org.apache.spark.{SparkConf, SparkContext}
object mg {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("WordCount").setMaster("local")
    val sc = new SparkContext(conf)
    val rdd1 = sc.parallelize(List(1, 2, 3))
    val rdd2 = sc.parallelize(List(3, 4, 5, 7))
    val rdd3 = sc.parallelize(List('a', 'b', 'c'))
    // Merge rdd1 and rdd2, keep the values >= 3, remove duplicates,
    // then print the Cartesian product with rdd3.
    val rdd4 = rdd1.union(rdd2)
    val rdd5 = rdd4.filter(_ >= 3)
    val rdd6 = rdd5.distinct()
    rdd6.cartesian(rdd3).collect.foreach(println)
  }
}

4. Code walkthrough

Create rdd1 and rdd2, then use the union() method to merge rdd1 and rdd2 into rdd4.

Create rdd5 by using the filter() method to drop the values below 3.

Create rdd6 by first removing duplicates with the distinct() method, then print its Cartesian product with rdd3 using the cartesian() method (a quick sanity check is sketched below).
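A quick sanity check of this pipeline, to be appended at the end of the main method of object mg above (the names rdd3 and rdd6 come from that listing; the expected counts follow from the input lists):

    // rdd4 = (1, 2, 3, 3, 4, 5, 7)  after union()
    // rdd5 = (3, 3, 4, 5, 7)        after filter(_ >= 3)
    // rdd6 = {3, 4, 5, 7}           after distinct()
    // so the Cartesian product with ('a', 'b', 'c') has 4 * 3 = 12 pairs.
    val pairs = rdd6.cartesian(rdd3).collect().toSet
    assert(pairs.size == 12)
    assert(pairs.contains((3, 'a')) && pairs.contains((7, 'c')))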

package com.tipdm.sparkDemo
import org.apache.spark.{SparkConf, SparkContext}

object a4 {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("WordCount").setMaster("local")
    val sc = new SparkContext(conf)
    val rdd1 = sc.parallelize(
      List(('a', 1), ('a', 2), ('b', 1), ('c', 1), ('c', 1))
    )
    // Sum the values that share the same key.
    val re_rdd1 = rdd1.reduceByKey((a, b) => a + b)
    re_rdd1.collect.foreach(println)
    // Group the values that share the same key, then count them.
    val g_rdd1 = rdd1.groupByKey()
    g_rdd1.collect.foreach(println)
    g_rdd1.map(x => (x._1, x._2.size)).collect.foreach(println)
  }
}

Create re_rdd1 with the reduceByKey() method, which merges the values that share a key by adding them together, and print the result.

Create g_rdd1 with the groupByKey() method, which groups the values that share the same key, and print the number of values per key (the two methods are compared in the sketch below).
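To see how the two methods relate, the sketch below (to be run with the same rdd1 as in object a4) sums the grouped values and compares the totals with reduceByKey(); reduceByKey() is generally preferred on large data because it combines values within each partition before shuffling:

    // Summing after groupByKey() gives the same totals as reduceByKey(_ + _).
    val sumViaGroup = rdd1.groupByKey().map(x => (x._1, x._2.sum))
    val sumViaReduce = rdd1.reduceByKey(_ + _)
    // Both produce (a,3), (b,1), (c,2); compare as maps to ignore ordering.
    assert(sumViaGroup.collect().toMap == sumViaReduce.collect().toMap)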

package com.tipdm.sparkDemo
import org.apache.spark.{SparkConf, SparkContext}
object a3 {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("WordCount").setMaster("local")
    val sc = new SparkContext(conf)
    val first_half = sc.textFile("D:\\Employee_salary_first_half.csv")
    val second_half = sc.textFile("D:\\Employee_salary_second_half.csv")
    // Drop the header row in the first partition of each file.
    val drop_first = first_half.mapPartitionsWithIndex((ix, it) => {
      if (ix == 0) it.drop(1) else it
    })
    val drop_second = second_half.mapPartitionsWithIndex((ix, it) => {
      if (ix == 0) it.drop(1) else it
    })
    val split_first = drop_first.map(
      line => {val data = line.split(",");(data(1), data(6).toInt)}
    )
    val split_second = drop_second.map(
      line => {val data = line.split(",");(data(1), data(6).toInt)}
    )
    val filter_first = split_first.filter(x => x._2 > 200000).map(x => x._1)
    val filter_second = split_second.filter(x => x._2 > 200000).map(x => x._1)
    val name = filter_first.union(filter_second).distinct()
    name.collect.foreach(println)
  }
}
Use the textFile() method to create an RDD from each file's contents (the header row is then dropped with mapPartitionsWithIndex; a simpler alternative is sketched after this list).

Use the map() method to split each line on commas and keep the name and salary fields.

Use the filter() method to keep only employees earning more than 200,000, keeping just their names.

Use the distinct() method to remove duplicate names.

Finally, print the result.
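As mentioned above, a simpler way to drop the CSV header than mapPartitionsWithIndex() is to read the first line and filter it out; a minimal sketch, assuming the same first_half RDD as in object a3:

    // Take the header line once on the driver, then filter it out of the RDD.
    val header = first_half.first()
    val withoutHeader = first_half.filter(line => line != header)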
