Scala105 - Using collect_list in Spark SQL

import org.apache.spark.sql.functions._
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
val builder = SparkSession
      .builder()
      .appName("learningScala")
      .config("spark.executor.heartbeatInterval","60s")
      .config("spark.network.timeout","120s")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .config("spark.kryoserializer.buffer.max","512m")
      .config("spark.dynamicAllocation.enabled", false)
      .config("spark.sql.inMemoryColumnarStorage.compressed", true)
      .config("spark.sql.inMemoryColumnarStorage.batchSize", 10000)
      .config("spark.sql.broadcastTimeout", 600)
      .config("spark.sql.autoBroadcastJoinThreshold", -1)
      .config("spark.sql.crossJoin.enabled", true)
      .master("local[*]") 
val spark = builder.getOrCreate()
spark.sparkContext.setLogLevel("ERROR")
import spark.implicits._
builder: org.apache.spark.sql.SparkSession.Builder = org.apache.spark.sql.SparkSession$Builder@2b380850
spark: org.apache.spark.sql.SparkSession = org.apache.spark.sql.SparkSession@7b0a688
var df1 = Seq(
(1,"2019-04-01 11:45:50",11.15,"2019-04-02 11:45:49"),
(2,"2019-05-02 11:56:50",10.37,"2019-05-02 11:56:51"),
(3,"2019-07-21 12:45:50",12.11,"2019-08-21 12:45:50"),
(2,"2019-08-01 12:40:50",14.50,"2020-08-03 12:40:50"),
(5,"2019-01-06 10:00:50",16.39,"2019-01-05 10:00:50")
).toDF("id","startTimeStr", "payamount","endTimeStr")
df1 = df1.withColumn("startTime",$"startTimeStr".cast("Timestamp"))
         .withColumn("endTime",$"endTimeStr".cast("Timestamp"))
df1.printSchema
df1.show()
root
 |-- id: integer (nullable = false)
 |-- startTimeStr: string (nullable = true)
 |-- payamount: double (nullable = false)
 |-- endTimeStr: string (nullable = true)
 |-- startTime: timestamp (nullable = true)
 |-- endTime: timestamp (nullable = true)

+---+-------------------+---------+-------------------+-------------------+-------------------+
| id|       startTimeStr|payamount|         endTimeStr|          startTime|            endTime|
+---+-------------------+---------+-------------------+-------------------+-------------------+
|  1|2019-04-01 11:45:50|    11.15|2019-04-02 11:45:49|2019-04-01 11:45:50|2019-04-02 11:45:49|
|  2|2019-05-02 11:56:50|    10.37|2019-05-02 11:56:51|2019-05-02 11:56:50|2019-05-02 11:56:51|
|  3|2019-07-21 12:45:50|    12.11|2019-08-21 12:45:50|2019-07-21 12:45:50|2019-08-21 12:45:50|
|  2|2019-08-01 12:40:50|     14.5|2020-08-03 12:40:50|2019-08-01 12:40:50|2020-08-03 12:40:50|
|  5|2019-01-06 10:00:50|    16.39|2019-01-05 10:00:50|2019-01-06 10:00:50|2019-01-05 10:00:50|
+---+-------------------+---------+-------------------+-------------------+-------------------+

df1: org.apache.spark.sql.DataFrame = [id: int, startTimeStr: string ... 4 more fields]
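
The plain .cast("Timestamp") above works because the source strings already follow the default yyyy-MM-dd HH:mm:ss layout. For other layouts, to_timestamp with an explicit pattern is the safer choice. A minimal sketch (dfParsed is an illustrative name, not from the original):

// Parse with an explicit pattern instead of relying on the default cast.
val dfParsed = df1
  .withColumn("startTime", to_timestamp($"startTimeStr", "yyyy-MM-dd HH:mm:ss"))
  .withColumn("endTime", to_timestamp($"endTimeStr", "yyyy-MM-dd HH:mm:ss"))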
df1.createOrReplaceTempView("temp1")
val sql = s"""
SELECT *,
collect_list(payamount) over(partition BY id ORDER BY startTimeStr) payamount_array
FROM temp1
"""
sql: String =
"
SELECT *,
collect_list(payamount) over(partition BY id ORDER BY startTimeStr) payamount_array
FROM temp1
"
val dfCollect = spark.sql(sql)
dfCollect: org.apache.spark.sql.DataFrame = [id: int, startTimeStr: string ... 5 more fields]
dfCollect.show()
+---+-------------------+---------+-------------------+-------------------+-------------------+---------------+
| id|       startTimeStr|payamount|         endTimeStr|          startTime|            endTime|payamount_array|
+---+-------------------+---------+-------------------+-------------------+-------------------+---------------+
|  1|2019-04-01 11:45:50|    11.15|2019-04-02 11:45:49|2019-04-01 11:45:50|2019-04-02 11:45:49|        [11.15]|
|  3|2019-07-21 12:45:50|    12.11|2019-08-21 12:45:50|2019-07-21 12:45:50|2019-08-21 12:45:50|        [12.11]|
|  5|2019-01-06 10:00:50|    16.39|2019-01-05 10:00:50|2019-01-06 10:00:50|2019-01-05 10:00:50|        [16.39]|
|  2|2019-05-02 11:56:50|    10.37|2019-05-02 11:56:51|2019-05-02 11:56:50|2019-05-02 11:56:51|        [10.37]|
|  2|2019-08-01 12:40:50|     14.5|2020-08-03 12:40:50|2019-08-01 12:40:50|2020-08-03 12:40:50|  [10.37, 14.5]|
+---+-------------------+---------+-------------------+-------------------+-------------------+---------------+

The basic logic of the SQL: partition the rows by id, order each partition by startTimeStr, and collect payamount into an array. Because the window has an ORDER BY clause, the default frame runs from the start of the partition to the current row, so the array accumulates row by row: for id = 2, the earlier row gets [10.37] and the later row gets [10.37, 14.5]. Within each array, the elements are therefore in ascending startTimeStr order.
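For reference, a minimal sketch of the same query written with the DataFrame API instead of SQL (w, wFull, dfApi, and dfFull are illustrative names, not from the original):

import org.apache.spark.sql.expressions.Window

// Same cumulative array as the SQL: partition by id, order by startTimeStr.
val w = Window.partitionBy("id").orderBy("startTimeStr")
val dfApi = df1.withColumn("payamount_array", collect_list($"payamount").over(w))

// With ORDER BY present, the default frame is RANGE BETWEEN UNBOUNDED
// PRECEDING AND CURRENT ROW, which is what makes the array grow row by row.
// Widening the frame gives every row the complete per-id array instead:
val wFull = w.rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)
val dfFull = df1.withColumn("payamount_array", collect_list($"payamount").over(wFull))
dfApi.show()
dfFull.show()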

                              2020-05-28, at Jiulong Lake, Jiangning District, Nanjing
