在滑动的事件时间窗口（sliding event-time window）上进行聚合操作（Spark 3.0.0）
完整demo:
package structured_streaming
import java.sql.Timestamp
import java.util.Date
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.{count, current_timestamp, window}
object WordCount2 {
  /**
   * Streaming word count over a sliding event-time window.
   *
   * Reads lines from a local socket (nc -lk 9998), stamps each line with its
   * arrival time, splits it into words, and counts words per 10-minute window
   * sliding every 5 minutes, printing the running totals to the console.
   */
  def main(args: Array[String]): Unit = {
    System.setProperty("HADOOP_USER_NAME", "hdfs")

    val spark = SparkSession.builder()
      .appName("WordCount")
      .master("local")
      .getOrCreate()

    // Unbounded streaming DataFrame: one row per line received on the socket.
    val socket = spark.readStream.format("socket")
      .option("host", "127.0.0.1")
      .option("port", "9998")
      .load()

    import spark.implicits._

    // BUG FIX: the original used map(), producing one (timestamp, Array[String])
    // row per LINE, so the "word" column held the whole split array (the console
    // showed values like [aa]) and counts were per-line, not per-word.
    // flatMap emits one (timestamp, word) row per word instead.
    // NOTE(review): new Date() is the arrival (processing) time, not an event
    // time carried inside the data — fine for a demo, but the window is really
    // over ingestion time.
    val words = socket.flatMap { row =>
      val ts = new Timestamp(new Date().getTime)
      row.getString(0).split(" ").map(word => (ts, word))
    }
    val df = words.toDF("timestamp", "word")

    // Sliding window: 10-minute windows advancing every 5 minutes, so each
    // word contributes to two overlapping windows.
    val windowedCounts = df.groupBy(
      window($"timestamp", "10 minutes", "5 minutes"),
      $"word"
    ).count()

    // "complete" mode re-emits the full aggregation table on every trigger.
    val query = windowedCounts.writeStream
      .outputMode("complete")
      .format("console")
      .start()
    query.awaitTermination()
  }
}
运行结果:
+--------------------+----+-----+
| window|word|count|
+--------------------+----+-----+
|[2020-08-17 16:55...| [r]| 1|
|[2020-08-17 16:50...| [e]| 2|
|[2020-08-17 16:50...|[aa]| 1|
|[2020-08-17 16:50...|[55]| 1|
|[2020-08-17 17:00...| [r]| 1|
|[2020-08-17 16:45...|[aa]| 1|
|[2020-08-17 16:45...|[55]| 1|
|[2020-08-17 16:55...| [e]| 2|
+--------------------+----+-----+