SparkDay03

1. DataFrame tables

package com.shujia.spark.sql

import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

object Demo1WordCount {
  def main(args: Array[String]): Unit = {

    /**
     * Create the Spark environment
     */
    val spark: SparkSession = SparkSession
      .builder()
      .appName("wc")
      .master("local")
      .getOrCreate()

    /**
     * 1. Read the data
     */

    /**
     * DataFrame: a table structure.
     * On top of an RDD it adds column names and column types.
     */

    val linesDF: DataFrame = spark
      .read
      .format("csv") //读取数据的格式
      .option("sep", "\t") //指定字段的分割方式 默认是逗号
      .schema("line String") //指定字段名和字段类型
      .load("data/words.txt")

    //Register the DF as a temporary view
    linesDF.createOrReplaceTempView("lines")

    /**
     * Write SQL against the registered view
     */

    val wordCountDF: DataFrame = spark.sql(
      """
        |select word
        |,count(1) as c
        |from
        |(select explode(split(line,",")) word
        |from lines
        |) as a
        |group by word
        |
        |""".stripMargin)

    wordCountDF.show()

    //Save the DF's data to HDFS
    wordCountDF
      .write
      .format("csv")
      .option("sep","\t")
      .mode(SaveMode.Overwrite) //automatically overwrite if the output path already exists
      .save("data/wc")
  }
}
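
The same word count can also be written with the DataFrame DSL instead of SQL. A minimal sketch (it reuses spark and linesDF from the example above; split, explode, count and lit are standard functions from org.apache.spark.sql.functions):

    //DSL version of the same word count (sketch; reuses spark and linesDF from above)
    import spark.implicits._
    import org.apache.spark.sql.functions._

    linesDF
      .select(explode(split($"line", ",")) as "word") //one row per word
      .groupBy($"word")
      .agg(count(lit(1)) as "c")
      .show()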

2. Creating DataFrames

package com.shujia.spark.sql

import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

object Demo2CreateDataFrame {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("df")
      .getOrCreate()

    /**
     * 1. Read CSV data to create a DF
     */

    val studentDF: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",") //列的分割方式
      //指定表结构 必须按照顺序指定
      .schema("id STRING, name STRING, age INT, gender STRING, clazz STRING")
      .load("data/students.txt")//指定读取的路径

    //Print the schema
    studentDF.printSchema()

    //Show the data; prints the first 20 rows by default
    studentDF.show()


    /**
     * 2. Read JSON data to build a DF
     * Spark parses the JSON schema automatically
     */

    val studentJsonDF: DataFrame = spark
      .read
      .format("json")
      .load("data/students.json")

    studentJsonDF.printSchema()
    studentJsonDF.show()


    /**
     * 3. Read data over JDBC
     *
     * Read MySQL data remotely over the network
     */

    val jdbcDF: DataFrame = spark
      .read
      .format("jdbc")
      .option("url", "jdbc:mysql://master:3306")
      .option("dbtable", "bigdata.students")
      .option("user", "root")
      .option("password", "123456")
      .load()

    jdbcDF.printSchema()
    jdbcDF.show()

    /**
     * 4. Read Parquet data
     * Parquet files carry the column names and types themselves
     * Parquet compresses the data: files are smaller, but compressing and decompressing takes time
     */

    //Save a Parquet file
    studentDF
      .write
      .format("parquet")
      .mode(SaveMode.Overwrite)
      .save("data/parquet")

    //Read the Parquet data back
    val parquetDF: DataFrame = spark
      .read
      .format("parquet")
      .load("data/parquet")

    parquetDF.printSchema()
    parquetDF.show()
  }

}
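
Instead of the DDL string passed to schema(...), the same schema can be built explicitly with StructType/StructField. A minimal sketch (the field names follow the students example above; studentSchema and studentDF2 are illustrative names, not part of the original notes):

    import org.apache.spark.sql.types._

    //Explicit schema equivalent to "id STRING, name STRING, age INT, gender STRING, clazz STRING"
    val studentSchema: StructType = StructType(Seq(
      StructField("id", StringType),
      StructField("name", StringType),
      StructField("age", IntegerType),
      StructField("gender", StringType),
      StructField("clazz", StringType)
    ))

    val studentDF2: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema(studentSchema) //pass the StructType instead of a DDL string
      .load("data/students.txt")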

3. DataFrame API

package com.shujia.spark.sql

import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.{DataFrame, SparkSession}

object Demo3DFApi {
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("api")
      //Set the number of partitions a DF has after a shuffle; the default is 200
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    //1. Read the data
    val studentDF: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema("id STRING,name STRING,age INT,gender STRING,clazz STRING")
      .load("data/students.txt")

    /**
     * show: behaves like an action operator
     */
    //Prints the first 20 rows by default
    studentDF.show()
    //Specify how many rows to print
    studentDF.show(100)
    //Show full column values without truncation
    studentDF.show(false)

    /**
     * select: choose columns, same as SELECT in SQL
     */

    //Select columns by name
    studentDF.select("id", "age").show()
    //Transform columns inside select, same syntax as SQL
    studentDF.selectExpr("id", "age + 1 as age").show()

    //Import implicit conversions
    import spark.implicits._

    //Select columns as column objects
    studentDF.select($"id", $"age" + 2 as "age").show()

    /**
     * where: filter rows
     */

    //Filter with a string expression
    studentDF.where("gender = '男'").show()

    //Filter with column objects
    studentDF.where($"gender" === "女").show()

    /**
     * group by
     *
     */
    studentDF
      .groupBy($"clazz")
      .count()
      .show()

    /**
     * agg: aggregate after grouping
     */

    //Import all of the DSL functions
    import org.apache.spark.sql.functions._

    studentDF
      .groupBy($"clazz")
      //Aggregate after grouping
      .agg(count($"clazz") as "c", avg($"age") as "avgAge")
      .show()

    /**
     * join
     */

    val scoreDF: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema("id STRING, cid STRING, sco INT")
      .load("data/score.txt")

    //When the join columns have different names, pass an explicit join condition
    //    val joinDF: DataFrame = studentDF.join(scoreDF, $"id" === $"id", "inner")

    //When the join columns have the same name, pass the column name
    val joinDF: DataFrame = studentDF.join(scoreDF, "id")

    joinDF.show()

    /**
     * Top 2 students by total score in each class
     *
     * withColumn: add a new column to a DF
     */

    joinDF
      //Group by id and class
      .groupBy($"id", $"clazz")
      //Sum the scores
      .agg(sum($"sco") as "sumSco")
      //Use a window function
      .select($"id", $"clazz", row_number() over Window.partitionBy($"clazz").orderBy($"sumSco".desc) as "r")
      //Alternatively, add the column on top of the previous DF
      //.withColumn("r", row_number() over Window.partitionBy($"clazz").orderBy($"sumSco".desc))
      //Keep the top 2
      .where($"r" <= 2)
      .show()


    /**
     * sql
     *
     */
    joinDF.createOrReplaceTempView("stu_sco")

    spark.sql(
      """
        |select * from(
        |select
        |id
        |,clazz,
        |sumSco,
        |row_number() over(partition by clazz order by sumSco desc) as r
        |from(
        |select id,clazz,sum(sco) as sumSco
        |from stu_sco
        |group by id,clazz
        |) as a
        |) as b
        |where r <= 2
        |
        |""".stripMargin).show()


    /**
     * orderBy
     *
     */

    scoreDF
      .groupBy($"id")
      .agg(sum($"sco") as "sumSco")
      .orderBy($"sumSco".desc)
      .show()

  }

}
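
For the join in Demo3DFApi, when the join key has a different name on each side (or the shared name would be ambiguous, as in the commented-out form above), a common approach is to alias the two DataFrames and qualify the columns. A minimal sketch, assuming the studentDF and scoreDF defined above (the alias names s and sc are illustrative):

    //Sketch: join with qualified columns, reusing studentDF and scoreDF from above
    val joined: DataFrame = studentDF.as("s")
      .join(scoreDF.as("sc"), $"s.id" === $"sc.id", "inner")
      .select($"s.id", $"s.name", $"sc.cid", $"sc.sco")

    joined.show()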

4. DataFrame exercises

1. Cumulative monthly income per company per year

package com.shujia.spark.sql

import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.{Column, DataFrame, SparkSession}

object Demo4Burk {
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("bur1")
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    import spark.implicits._
    import org.apache.spark.sql.functions._

    //1. Read the data
    val burks: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema("burk STRING,year STRING,tsl01 DOUBLE,tsl02 DOUBLE,tsl03 DOUBLE,tsl04 DOUBLE,tsl05 DOUBLE,tsl06 DOUBLE,tsl07 DOUBLE,tsl08 DOUBLE,tsl09 DOUBLE,tsl10 DOUBLE,tsl11 DOUBLE,tsl12 DOUBLE")
      .load("data/income.txt")
    burks.show()

    burks.createOrReplaceTempView("burks")

    /**
     * 1. Cumulative monthly income per company per year: unpivot the month columns into rows --> sum window function
     *
     * Output:
     * company code, year, month, income for the month, cumulative income
     */

    spark.sql(
      """
        |
        |select explode(Array(1,2,3,4,5,6,7))
        |""".stripMargin)
      .show()

    spark.sql(
      """
        |select explode(map('001',23,'002',24,'003',25))
        |""".stripMargin)
      .show()

    /**
     * sum over (partition by ...): sums over the whole partition, a global sum
     * sum over (partition by ... order by ...): a cumulative (running) sum
     */

    spark.sql(
      """
        select
        |burk,year,month,pic,
        |sum(pic) over(partition by burk,year order by month) as sumPic,
        |sum(pic) over(partition by burk,year) as sumP
        |from(
        |select burk,year,month,pic
        |from burks
        |lateral view explode(map(1,tsl01,2,tsl02,3,tsl03,4,tsl04,5,tsl05,6,tsl06,7,tsl07,8,tsl08,9,tsl09,10,tsl10,11,tsl11,12,tsl12)) T as month,pic
        |) as a
        |
        |""".stripMargin)
      .show()

    val mapCol: Column = map(
      expr("1"), $"tsl01",
      expr("2"), $"tsl02",
      expr("3"), $"tsl03",
      expr("4"), $"tsl04",
      expr("5"), $"tsl05",
      expr("6"), $"tsl06",
      expr("7"), $"tsl07",
      expr("8"), $"tsl08",
      expr("9"), $"tsl09",
      expr("10"), $"tsl10",
      expr("11"), $"tsl11",
      expr("12"), $"tsl12"
    )
    val burksMonthDF: DataFrame = burks
      //Turn each row into multiple rows (one row per month)
      .select($"burk", $"year", explode(mapCol) as Array("month", "pic"))
    //Compute the cumulative income by month
    burksMonthDF
      .withColumn("sumPic", sum($"pic") over Window.partitionBy($"burk", $"year").orderBy("month"))
      .show()

    /**
     * 2. Growth rate per company versus the same month last year: unpivot ---> lag window function
     * Output: company code, year, month, growth rate (income this month / income same month last year - 1)
     *
     * coalesce: returns the first non-null column
     */

    burksMonthDF
      //Get the income for the same month of the previous year
      .withColumn("lagPic", lag($"pic", 1, 0.0) over Window.partitionBy($"burk", $"month").orderBy($"year"))

      //Compute the growth rate
      .withColumn("p", coalesce($"pic" / $"lagPic" - 1, expr("1.0")))
      .show()
  }
}
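
The same year-over-year growth rate can also be expressed in SQL against the burks view registered above. A sketch, assuming the same columns (burk, year, tsl01..tsl12) as in the example:

    //SQL sketch of the growth-rate calculation (uses the burks view from above)
    spark.sql(
      """
        |select burk, year, month, pic,
        |coalesce(pic / lag(pic, 1) over(partition by burk, month order by year) - 1, 1.0) as p
        |from(
        |select burk, year, month, pic
        |from burks
        |lateral view explode(map(1,tsl01,2,tsl02,3,tsl03,4,tsl04,5,tsl05,6,tsl06,7,tsl07,8,tsl08,9,tsl09,10,tsl10,11,tsl11,12,tsl12)) T as month, pic
        |) as a
        |""".stripMargin)
      .show()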

2. Build each person's employment history

package com.shujia.spark.sql

import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.{DataFrame, SparkSession}

object Demo5Shebao {
  def main(args: Array[String]): Unit = {

    /**
     * 1. Build each person's employment history
     *
     * Expected result:
     * 91330000733796106P,杭州海康威视数字技术股份有限公司,2020-02-01 00:00:00,2020-05-01 00:00:00
     * 91330000733796106P,阿里云计算有限公司,2020-06-01 00:00:00,2020-09-01 00:00:00
     * 91330000733796106P,杭州海康威视数字技术股份有限公司,2020-10-01 00:00:00,2021-03-01 00:00:00
     */
    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("shebao")
      .config("spark.sql.seuffle.partitions", 1)
      .getOrCreate()

    import spark.implicits._
    import org.apache.spark.sql.functions._

    //Read the data
    val dataDF: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema("id STRING,burk STRING,sdate STRING")
      .load("data/data.txt")
    //      .show()

    dataDF
      //1. Get the company from the previous month's record
      .withColumn("lastBurk", lag($"burk", 1) over Window.partitionBy($"id").orderBy("sdate"))
      //2. Flag 0 if the current month is at the same company as the previous month, otherwise flag 1
      .withColumn("flag", when($"burk" === $"lastBurk", 0).otherwise(1))
      //      .show()
      //3. Cumulatively sum the flags to number each continuous stint
      .withColumn("clazz", sum($"flag") over Window.partitionBy($"id").orderBy($"sdate"))
      //      .show()
      //4. Group by id, company and stint number (clazz)
      .groupBy($"id", $"burk", $"clazz")
      .agg(min($"sdate") as "start_date", max($"sdate") as "end_date")
      .show()
  }

}
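
The same gaps-and-islands logic can be written in SQL. A sketch, assuming dataDF is registered as a temporary view named data (the view name is illustrative; the original notes only use the DSL):

    dataDF.createOrReplaceTempView("data")

    spark.sql(
      """
        |select id, burk, min(sdate) as start_date, max(sdate) as end_date
        |from(
        |select id, burk, sdate,
        |sum(flag) over(partition by id order by sdate) as clazz
        |from(
        |select id, burk, sdate,
        |case when burk = lag(burk, 1) over(partition by id order by sdate) then 0 else 1 end as flag
        |from data
        |) as a
        |) as b
        |group by id, burk, clazz
        |""".stripMargin)
      .show()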

5. submit: running the project on the cluster

package com.shujia.spark.sql

import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

object Demo06Submit {
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession
      .builder()
      //      .master("local")
      .appName("submit")
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    import spark.implicits._
    import org.apache.spark.sql.functions._

    //1. Read the data from HDFS
    val studentDF: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema("id STRING,name STRING,age INT,gender STRING,clazz STRING")
      .load("/data/students.txt")

    //Process the data
    val genderNumDF: DataFrame = studentDF
      .groupBy($"gender")
      .agg(count($"gender") as "gender_sum")

    //Save the result to HDFS
    genderNumDF
      .write
      .format("csv")
      .option("sep",",")
      .mode(SaveMode.Overwrite)
      .save("/data/gender_sum")

    //Package the project, upload the jar to the server, and run it with spark-submit
    // spark-submit --master yarn --deploy-mode client --class com.shujia.spark.sql.Demo06Submit spark-1.0.jar
  }

}
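
To run the same jar both locally and on the cluster, the paths can be left out of the code and supplied through args at submit time. A minimal sketch inside main (the argument convention and default paths are illustrative, not part of the original notes; it reuses spark, SaveMode and the imports above):

    //Sketch: args(0) = input path, args(1) = output path (illustrative convention)
    val inputPath  = if (args.length > 0) args(0) else "data/students.txt"
    val outputPath = if (args.length > 1) args(1) else "data/gender_sum"

    spark.read
      .format("csv")
      .option("sep", ",")
      .schema("id STRING,name STRING,age INT,gender STRING,clazz STRING")
      .load(inputPath)
      .groupBy($"gender")
      .agg(count($"gender") as "gender_sum")
      .write
      .format("csv")
      .mode(SaveMode.Overwrite)
      .save(outputPath)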

6. Caching a DF that is used multiple times

package com.shujia.spark.sql

import org.apache.spark.sql.{DataFrame, SparkSession}

object Demo07DFCache {
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("submit")
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    import spark.implicits._
    import org.apache.spark.sql.functions._

    //1. Read the data
    val studentDF: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema("id STRING,name STRING,age INT,gender STRING,clazz STRING")
      .load("data/students.txt")


    studentDF.show()
    /**
     * Cache a DF that is used multiple times
     */

    studentDF.cache()

    studentDF
      .groupBy($"clazz")
      .agg(count($"clazz") as "c")
      .show()

    studentDF
      .groupBy($"gender")
      .agg(count($"gender") as "c")
      .show()

    val scoreDF: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema("id STRING, cid STRING, sco INT")
      .load("data/score.txt")

    scoreDF.show()


    /**
     *
     * mapjoin
     */
    studentDF
      //Broadcast the small table
      .hint("broadcast")
      .join(scoreDF,"id")
      .show()

    //Keep the application alive so the Spark web UI can still be inspected
    while (true) {

    }
  }

}
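
cache() is shorthand for persist() with the default storage level; when a DF is no longer needed, the cached data can be released. A short sketch of the related calls (standard Spark API, shown here as an alternative to the cache() call in the demo above):

    import org.apache.spark.storage.StorageLevel

    //Same as cache() for a DataFrame: persist with the default MEMORY_AND_DISK level
    studentDF.persist(StorageLevel.MEMORY_AND_DISK)

    // ... reuse studentDF in several jobs ...

    //Release the cached data once it is no longer needed
    studentDF.unpersist()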

7. Window functions (important)

package com.shujia.spark.sql

import org.apache.spark.sql.{DataFrame, SparkSession}

object Demo08Window {
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("window")
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    import spark.implicits._
    import org.apache.spark.sql.functions._

    val studentDF: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",") //列的分割方式
      //指定表结构 必须按照顺序指定
      .schema("id STRING,name STRING,age INT,gender STRING,clazz STRING")
      //指定读取的路径
      .load("data/students.txt")

    studentDF.createOrReplaceTempView("student")

    val scoreDF: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema("id STRING,cid STRING,sco INT")
      .load("data/score.txt")

    scoreDF.createOrReplaceTempView("score")

    /**
     * Window functions: ordered vs. unordered windows
     * Without order by: a global result per partition; every row in the same group gets the same value
     * With order by: a running (cumulative) result
     */

    /**
     * count: count within the window
     */

    spark.sql(
      """
        |select
        |id,name,age,gender,clazz,
        |count(1) over(partition by clazz) as c
        |from student
        |""".stripMargin)
//      .show(1000)

    /**
     * Hive SQL logical execution order:
     * from -> join -> on -> where -> group by -> having -> select -> order by -> limit
     */

    /**
     * avg: window function
     */

    //Find students whose total score is above the average

    spark.sql(
      """
        |select * from (
        |select
        |a.id,b.name,b.age,b.gender,b.clazz,sumSco
        |,avg(sumSco) over(partition by subStr(clazz,0,2)) as avgSco
        |from (
        |select id
        |,sum(sco) as sumSco
        |from score
        |group by id
        |) as a
        |join student as b
        |on a.id=b.id
        |) as c
        |where sumSco > avgSco
        |""".stripMargin)
//      .show(2000)

    /**
     * max: maximum within the window
     * min: minimum within the window
     */

    //Gap between each student's total score and the highest total in the class
    spark.sql(
      """
        |select *,
        |maxSco-sumSco from(
        |select
        |a.id,a.sumSco,b.name,b.age,b.clazz,b.gender,
        |max(sumSco) over (partition by clazz) as maxSco
        |from (
        |select id,sum(sco) as sumSco from score
        |group by id
        |) as a
        |join student as b
        |on a.id=b.id
        |)as c
        |""".stripMargin)
//      .show(1000)

    /**
     * Median: compute the median within a window
     *
     * sum(case when round(s/2) = r then sco else 0 end)
     * only counts the row where the condition is true
     * case when works like if / else
     */
    spark.sql(
      """
        |select *,
        |sum(case when round(s/2)=r then sco else 0 end) over (partition by cid) as med
        |from (
        |select
        |*,
        |row_number() over (partition by cid order by sco) as r,
        |count(1) over (partition by cid) as s
        |from score
        |)as a
        |""".stripMargin)
      .show(10000)

    /**
     * DENSE_RANK: consecutive ranks, no gaps after ties
     * RANK: ranks with gaps after ties
     */

    spark.sql(
      """
        |select *,
        |DENSE_RANK() over (partition by cid order by sco) as r1,
        |RANK() over (partition by cid order by sco) as r2
        |from score
        |""".stripMargin)
//      .show(1000)

    /**
     * lag: value from the row N rows before the current row
     * lead: value from the row N rows after the current row
     */

    //Compute each student's total score and the gap from the previous student's total

    spark.sql(
      """
        |select *,
        |sumSco-lagSco as chaSco
        |from (
        |select
        |*,
        |lag(sumSco,1) over (order by sumSco) lagSco
        |from (
        |select id,sum(sco) as sumSco from score
        |group by id
        |) as b
        |) as c
        |""".stripMargin)
      .show()
  }

}
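
The same window queries can be written with the DSL Window spec used in earlier sections. A short sketch of the RANK / DENSE_RANK query over scoreDF (assumes the imports and scoreDF from the demo above):

    import org.apache.spark.sql.expressions.Window

    //DSL equivalent of the RANK / DENSE_RANK query above
    val w = Window.partitionBy($"cid").orderBy($"sco")

    scoreDF
      .withColumn("r1", dense_rank() over w)
      .withColumn("r2", rank() over w)
      .show()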

8. DF to RDD & RDD to DF

package com.shujia.spark.sql

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

object Demo9DFToRDD {
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("rddondf")
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    import spark.implicits._

    /**
     * Once the SparkSession exists, the SparkContext can be taken from it directly
     */

    val sc: SparkContext = spark.sparkContext

    //Read a file to get an RDD
    val lineRDD: RDD[String] = sc.textFile("data/students.txt")

    //1. Split each line into fields
    val studentRDD: RDD[(String, String, Int, String, String)] = lineRDD.map(line => {
      val split: Array[String] = line.split(",")
      (split(0), split(1), split(2).toInt, split(3), split(4))
    })

    /**
     * Convert the RDD into a DF
     * Mind the number and order of the fields
     */

    val studentDF: DataFrame = studentRDD.toDF("id", "name", "age", "gender", "clazz")
    studentDF.printSchema()
    studentDF.show()

    /**
     * Convert the DF back into an RDD
     */

    val stuRDD: RDD[Row] = studentDF.rdd

    //1. Get each value by field name and type
    val kvRDD: RDD[(String, String, Int, String, String)] = stuRDD.map(row => {
      val id: String = row.getAs[String]("id")
      val name: String = row.getAs[String]("name")
      val age: Int = row.getAs[Int]("age")
      val gender: String = row.getAs[String]("gender")
      val clazz: String = row.getAs[String]("clazz")
      (id, name, age, gender, clazz)
    })

    kvRDD.foreach(println)

    //2. Parse each Row with pattern matching
    val caseRDD: RDD[(String, String, Int, String, String)] = stuRDD.map {
      case Row(id: String, name: String, age: Int, gender: String, clazz: String) =>
        (id, name, age, gender, clazz)
    }

    caseRDD.foreach(println)

  }


}
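
An RDD of a case class can also be converted to a DF, in which case the column names and types are derived from the case class fields. A minimal sketch (the Student case class is illustrative, not part of the original notes; it reuses lineRDD and import spark.implicits._ from above):

    //Define the case class at the object or package level so Spark can derive the encoder
    case class Student(id: String, name: String, age: Int, gender: String, clazz: String)

    //Inside main:
    val caseStudentDF: DataFrame = lineRDD
      .map(line => {
        val split = line.split(",")
        Student(split(0), split(1), split(2).toInt, split(3), split(4))
      })
      .toDF() //column names come from the case class fields

    caseStudentDF.printSchema()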

9. User-defined functions (UDF)

package com.shujia.spark.sql

import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.sql.expressions.UserDefinedFunction

object Demo10UDF {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("udf")
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    import spark.implicits._
    import org.apache.spark.sql.functions._

    /**
     * Define a UDF
     */

    //Can be used directly in the DSL
    val ageAdd: UserDefinedFunction = udf((age: Int) => {
      age + 1
    })

    //Register the UDF so it can be used in SQL
    spark.udf.register("ageAdd",ageAdd)

    val studentDF: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",") //列的分割方式
      //指定表结构 必须按照顺序指定
      .schema("id STRING,name STRING, age INT, gender STRING, clazz STRING")
      .load("data/students.txt") //指定读的路径

    studentDF.createOrReplaceTempView("student")

    studentDF
      //Use the UDF in the DSL
      .select($"id",ageAdd($"age") as "age")
      .show()

    spark.sql(
      """
        |select id,ageAdd(age) from student
        |
        |""".stripMargin)
      .show()
  }

}
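
A UDF can also be registered for SQL directly from a Scala function, without building a UserDefinedFunction first. A short sketch (the function mirrors the ageAdd example above; ageAdd2 is an illustrative name, and the student view is the one registered in the demo):

    //Register a SQL-only UDF straight from a Scala function
    spark.udf.register("ageAdd2", (age: Int) => age + 1)

    spark.sql(
      """
        |select id, ageAdd2(age) as age from student
        |""".stripMargin)
      .show()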

10. DF exercises

package com.shujia.spark.sql

import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

object Demo11CarBon {
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("udf")
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    import spark.implicits._
    import org.apache.spark.sql.functions._

    //1. Read the data
    val lowCarBon: DataFrame = spark
      .read
      .format("csv")
      .option("sep", "\t")
      .schema("user_id STRING, data_dt STRING, low_carbon INT")
      .load("data/ant_user_low_carbon.txt")

    val plantCarBon: DataFrame = spark
      .read
      .format("csv")
      .option("sep", "\t")
      .schema("plant_id STRING, plant_name STRING, plant_carbon INT")
      .load("data/ant_plant_carbon.txt")

    lowCarBon.show()
    plantCarBon.show()

    /**
     * Ant Forest plant redemption
     * Problem: assume low-carbon records (user_low_carbon) start on 2017-01-01, and every user who met the
     * requirement before 2017-10-01 has already redeemed one p004 (胡杨); all remaining energy goes to p002 (沙柳).
     * Find the top 10 users by number of p002 (沙柳) redeemed as of 10-01, and how many more each redeemed
     * than the next-ranked user.
     *
     * crossJoin: Cartesian product
     */

    //Grams needed for one 胡杨 (p004)
    val huyang: DataFrame = plantCarBon.where($"plant_id" === "p004")
    //Grams needed for one 沙柳 (p002)
    val shaliu: DataFrame = plantCarBon.where($"plant_id" === "p002")

    lowCarBon
      //Keep the qualifying records
      .where($"data_dt" < "2017/10/1" and $"data_dt" >= "2017/1/1")
      //Total carbon saved per user over the period
      .groupBy($"user_id")
      .agg(sum($"low_carbon") as "sum_low_carbon")
      //Cartesian product with the 胡杨 row
      .crossJoin(huyang)
      //Remaining grams per user after redeeming the 胡杨
      .select($"user_id", $"sum_low_carbon" - $"plant_carbon" as "sy_low_carbon")
      //Cartesian product with the 沙柳 row
      .crossJoin(shaliu)
      //Number of 沙柳 the user can redeem
      .withColumn("plant_count", floor($"sy_low_carbon" / $"plant_carbon"))
      //Number redeemed by the next-ranked user
      .withColumn("less_count", lead($"plant_count", 1, 0) over Window.orderBy($"plant_count".desc))
      .withColumn("less_num", $"plant_count" - $"less_count")
      .limit(10)
      .select($"user_id", $"plant_count", $"less_num")
      .show()


    /**
     * Problem: from the user_low_carbon records, return the daily low-carbon entries of users who,
     * in 2017, saved more than 100g of carbon per day on three or more consecutive days
     */

    lowCarBon
      .where(year(regexp_replace($"data_dt", "/", "-")) === "2017")
      //Total grams per user per day
      .groupBy($"user_id", $"data_dt")
      .agg(sum($"low_carbon") as "sum_low_carbon")
      //Keep days with more than 100g
      .where($"sum_low_carbon" > 100)
      //Add a row number per user ordered by date
      .withColumn("r", row_number() over Window.partitionBy($"user_id").orderBy($"data_dt"))
      //Subtract the row number from the date: consecutive days end up with the same value
      .withColumn("clazz", expr("date_sub(regexp_replace(data_dt,'/','-'),r)"))
      //Count how many days are in each run
      .withColumn("clazz_count", count($"user_id") over Window.partitionBy($"user_id", $"clazz"))
      //Keep runs of 3 days or more
      .where($"clazz_count" >= 3)
      //Join back to the original records
      .join(lowCarBon, Seq("user_id", "data_dt"))
      .select($"user_id", $"data_dt", $"low_carbon")
      .show()
  }

}
