Spark DSL: Data Sources, RDD <-> DataFrame Conversion, Window Functions (Key Points)

Spark DSL

Data sources for the DSL

package com.sql

import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

object Demo4DataSource {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder
      .master("local")
      .appName("sql")
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    import org.apache.spark.sql.functions._
    import spark.implicits._

    /**
     * CSV format:
     * specify the table schema, the separator (the default for CSV is ','), and the file path
     */
    val csvDF: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema("id STRING,name STRING,age INT,gender STRING,clazz STRING")
      .load("data/students.txt")

    //Save the aggregated result in CSV format
    csvDF
      .groupBy($"clazz")
      .agg(count($"clazz") as "count_stu")
      .write
      .format("csv")
      .option("sep", ",")
      .mode(SaveMode.Overwrite)
      .save("data/sco_count")

    /**
     * JSON format:
     * Spark automatically infers the field names and field types from the JSON
     *
     * JSON takes up more space than CSV, so it is not a good fit for big-data scenarios
     */
    val jsonDF: DataFrame = spark
      .read
      .format("json")
      .load("data/students.json")


    //Save the aggregated result in JSON format
    jsonDF
      .groupBy($"clazz")
      .agg(count($"gender") as "gender_num")
      .write
      .format("json")
      .mode(SaveMode.Overwrite)
      .save("data/gender_num")

    /**
     * Parquet: a compressed format that carries the table schema
     * compression trades CPU time for storage space
     * the compression ratio depends on the information entropy of the data
     */
    //Save the data in Parquet format
    jsonDF
      .write
      .format("parquet")
      .mode(SaveMode.Overwrite)
      .save("data/parquet_sj")
    //Read the Parquet data back
    val parquetDF: DataFrame = spark
      .read
      .format("parquet")
      .load("data/parquet_sj")

    //parquetDF.show(22)

    /**
     * Read data from a relational database over JDBC
     */
    val jdbcDF: DataFrame = spark
      .read
      .format("JDBC")
      .option("url", "jdbc:mysql://master:3306")
      .option("dbtable", "bigdata17.dept")
      .option("user", "root")
      .option("password", "123456")
      .load()

    jdbcDF.printSchema()
    jdbcDF.show()
  }
}
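
A side note on the CSV reader: when the file carries a header line, the column names and types can also be picked up from the data instead of being declared in a schema string. A minimal sketch, assuming the SparkSession `spark` from Demo4DataSource above and a hypothetical file data/students_with_header.csv whose first line is id,name,age,gender,clazz:

    // Hypothetical input: a CSV file whose first line is the header row
    val csvHeaderDF: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .option("header", "true")      // take the column names from the first line
      .option("inferSchema", "true") // extra pass over the file to guess column types
      .load("data/students_with_header.csv")

    csvHeaderDF.printSchema()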

Spark DSL: converting an RDD to a DataFrame

package com.sql
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}

object Demo5RDDToDF {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder
      .master("local")
      .config("spark.sql.shuffle.partitions", 1)
      .appName("rdd")
      .getOrCreate()

    import spark.implicits._

    /**
     * Get the SparkContext so that the RDD API can be used
     */
    val sc: SparkContext = spark.sparkContext

    val linesRDD: RDD[String] = sc.textFile("data/students.txt")

    val studentsRDD: RDD[(String, String, String, String, String)] = linesRDD
      .map(_.split(","))
      .map {
        case Array(id: String, name: String, age: String, gender: String, clazz: String) =>
          (id, name, age, gender, clazz)
      }

    /**
     * Convert the RDD into a DataFrame
     */

    val studentDF: DataFrame = studentsRDD.toDF("id", "name", "age", "gender", "clazz")

    studentDF.printSchema()
    studentDF.show()   
  }
}
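
An alternative worth knowing, assuming the same `linesRDD` and SparkSession as in Demo5RDDToDF: map each line to a case class and call toDF() with no arguments, so the column names and types come from the case class itself. The `Student` case class below is my own illustration; it has to live at the top level (outside main) so that spark.implicits._ can derive an encoder for it.

// Defined at the top level (outside main) so that an implicit Encoder[Student] can be derived
case class Student(id: String, name: String, age: Int, gender: String, clazz: String)

// Inside main, reusing linesRDD from Demo5RDDToDF
val caseClassDF: DataFrame = linesRDD
  .map(_.split(","))
  .map {
    case Array(id, name, age, gender, clazz) =>
      Student(id, name, age.toInt, gender, clazz)
  }
  .toDF() // column names and types come from the Student case class

caseClassDF.printSchema()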

Spark DSL: converting a DataFrame to an RDD

package com.sql

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

object Demo6DFToRDD {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession= SparkSession
      .builder
      .appName("df")
      .master("local")
      .getOrCreate()

    import spark.implicits._

    val studentDF: DataFrame = spark
      .read
      .format("json")
      .load("data/students.json")


//    studentDF.printSchema()
//    studentDF.show()

    //Convert the DF into an RDD
    val studentsRDD: RDD[Row] = studentDF.rdd

    val stuRDD: RDD[(String, String, Long, String, String)] = studentsRDD.map((row: Row) => {
      //Retrieve each value by column name
      val id: String = row.getAs[String]("id")
      val name: String = row.getAs[String]("name")
      val age: Long = row.getAs[Long]("age")
      val gender: String = row.getAs[String]("gender")
      val clazz: String = row.getAs[String]("clazz")
      (id, name, age, gender, clazz)
    })

    stuRDD.foreach(println)

    val caseRDD: RDD[(String, String, Long, String, String)] = stuRDD.map {
      //Note the field order: Row fields follow the schema order (alphabetical here, since the schema was inferred from JSON)
      case Row(age: Long, clazz: String, gender: String, id: String, name: String) =>
        (id, name, age, gender, clazz)
    }

    caseRDD.foreach(println)
  }
}
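
A related note, assuming the same `studentDF` and SparkSession as in Demo6DFToRDD: converting the DataFrame to a typed Dataset with as[...] avoids the row.getAs calls entirely. The `Student` case class is again my own illustration; age is declared as Long because that is what Spark infers from the JSON.

import org.apache.spark.sql.Dataset

// Top-level case class; field names must match the DataFrame columns (age, clazz, gender, id, name)
case class Student(id: String, name: String, age: Long, gender: String, clazz: String)

// as[Student] matches columns by name; each row becomes a plain case-class instance
val studentDS: Dataset[Student] = studentDF.as[Student]

studentDS
  .map(s => (s.id, s.name, s.age, s.gender, s.clazz))
  .show()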

Spark SQL window functions

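The window functions are the key point named in the title. Below is a minimal DSL sketch of the API: Window.partitionBy/orderBy builds the window specification and row_number().over(...) numbers the rows inside each partition. The object name Demo7Window and the top-3-oldest-per-class query are my own illustration, reusing the students.txt schema from Demo4DataSource.

package com.sql

import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.{DataFrame, SparkSession}

object Demo7Window {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder
      .master("local")
      .appName("window")
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    import org.apache.spark.sql.functions._
    import spark.implicits._

    val studentDF: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema("id STRING,name STRING,age INT,gender STRING,clazz STRING")
      .load("data/students.txt")

    //Window spec: one partition per class, rows ordered by age descending
    val byClazz = Window.partitionBy($"clazz").orderBy($"age".desc)

    studentDF
      //row_number() assigns 1, 2, 3, ... inside each class; rank()/dense_rank() are used the same way
      .withColumn("rn", row_number().over(byClazz))
      //keep the 3 oldest students of every class
      .where($"rn" <= 3)
      .show()
  }
}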