package cn.kgc.day0819.test06
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession
import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}
import org.apache.spark.streaming.{Seconds, StreamingContext}
object SparkSQLSparkStreamingDemo extends App {
  // TODO: create the Spark StreamingContext with a 5-second batch interval.
  // local[2] matters: one thread is taken by the socket receiver, so at least
  // one more is needed to actually process the batches.
  val conf: SparkConf = new SparkConf().setMaster("local[2]").setAppName("DEMO01")
  val ssc = new StreamingContext(conf, Seconds(5))
  // TODO: create the SparkSession; getOrCreate picks up the SparkContext
  // already started by the StreamingContext above
  val spark = SparkSession.builder().config(conf).getOrCreate()
  import spark.implicits._
  // TODO: run a word count with Spark Streaming, reading lines from a socket
  val inputDstream: ReceiverInputDStream[String] = ssc.socketTextStream("hadoop101", 5678)
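  // To feed the stream for a quick test, one option (assuming netcat is
  // available on hadoop101) is to run `nc -lk 5678` there and type lines
  // such as "hadoop spark kafka".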
  // TODO: transform the input stream
  // e.g. an input line: "hadoop spark kafka"
  val wordDstream: DStream[String] = inputDstream.flatMap(_.split(" "))
  // Pure-DStream word count, kept for reference:
  // val wordAndOneDstream: DStream[(String, Int)] = wordDstream.map((_, 1))
  // val wordcounts: DStream[(String, Int)] = wordAndOneDstream.reduceByKey(_ + _)
  // wordcounts.print()
  wordDstream.foreachRDD(
    rdd => {
      // Skip empty batches; isEmpty avoids a full count of the RDD
      if (!rdd.isEmpty()) {
        // Wrap each word in a case class so toDF() can infer the schema
        val df1 = rdd.map(x => Word(x)).toDF()
        df1.createOrReplaceTempView("words")
        spark.sql(
          """
            |select word, count(*) as cnt
            |from words
            |group by word
          """.stripMargin).show()
      }
    }
  )
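  // The same aggregation inside foreachRDD could also be written with the
  // DataFrame API instead of SQL, e.g.: df1.groupBy("word").count().show()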
  // TODO: start() launches data collection and processing
  ssc.start()
  // TODO: block until the streaming job is terminated
  ssc.awaitTermination()
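  // For a clean shutdown one could instead call
  // ssc.stop(stopSparkContext = true, stopGracefully = true)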
}
// Row wrapper so the RDD[String] can be converted to a DataFrame via toDF()
case class Word(word: String)