transform
Spark Streaming receives text data from a socket and uses the transform operator to find the 3 most frequent words within each batch.
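transform applies a user-supplied RDD-to-RDD function to every micro-batch of a DStream and returns a new DStream built from the results. Its signature on DStream[T] is essentially:

def transform[U: ClassTag](transformFunc: RDD[T] => RDD[U]): DStream[U]

(an overload also passes the batch Time). This makes any RDD operation available per batch; the example below uses it to sort each batch's word counts and take the top 3.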
import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}
import org.apache.spark.streaming.{Seconds, StreamingContext}
/**
 * Prints the 3 most frequent words in each batch.
 */
object TransformTop3 {
  def main(args: Array[String]): Unit = {
    // Silence Spark's INFO logging so the batch output stays readable.
    Logger.getLogger("org").setLevel(Level.ERROR)
    val sparkConf: SparkConf = new SparkConf().setAppName("TransformTop3").setMaster("local[2]")
    // One micro-batch every 2 seconds.
    val ssc = new StreamingContext(sparkConf, Seconds(2))
    // Checkpoint directory; not required by transform itself, only by stateful operators.
    ssc.checkpoint("hdfs://node01:8020/ck")
    // Receive lines of text from the socket server on node01:9999.
    val socketTextStream: ReceiverInputDStream[String] = ssc.socketTextStream("node01", 9999)
    // Per-batch word count.
    val result: DStream[(String, Int)] = socketTextStream.flatMap(_.split(" ")).map((_, 1)).reduceByKey(_ + _)
    // transform exposes each batch as an RDD, so RDD-only operations
    // such as sortBy and take can be used here.
    val sortedDstream: DStream[(String, Int)] = result.transform { rdd =>
      val sortedRDD: RDD[(String, Int)] = rdd.sortBy(_._2, ascending = false)
      // take(3) collects to the driver, so these printlns run on the driver once per batch.
      val top3: Array[(String, Int)] = sortedRDD.take(3)
      println("top3 start printing")
      top3.foreach(println)
      println("top3 print ended")
      sortedRDD
    }
    // Print this batch's word counts in descending order (first 10 entries).
    sortedDstream.print()
    // Start the streaming computation and block until it is terminated.
    ssc.start()
    ssc.awaitTermination()
  }
}
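Beyond per-batch sorting, transform is also the standard way to mix a DStream with ordinary RDDs, for example joining each batch against a static dataset. A minimal sketch, assuming a hypothetical blacklist of words to drop (blacklistRDD and filtered are illustrative names, not part of the example above):

// Hypothetical static blacklist: words to exclude from the counts.
val blacklistRDD: RDD[(String, Boolean)] =
  ssc.sparkContext.parallelize(Seq(("the", true), ("a", true)))

val filtered: DStream[(String, Int)] = result.transform { rdd =>
  // Join each batch's counts against the static blacklist,
  // then keep only the words that were not flagged.
  rdd.leftOuterJoin(blacklistRDD)
    .filter { case (_, (_, flagged)) => flagged.isEmpty }
    .mapValues { case (count, _) => count }
}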
Input: whitespace-separated words sent to node01 port 9999, e.g. via a netcat session started with nc -lk 9999 on node01.
Result: for every 2-second batch, the driver first prints that batch's top 3 words, then sortedDstream.print() shows up to the first 10 word counts in descending order.
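An illustrative session (hypothetical input and timestamp; actual counts depend on what is typed into the socket): sending hadoop spark spark hello hello hello within one 2-second batch would print something like:

top3 start printing
(hello,3)
(spark,2)
(hadoop,1)
top3 print ended
-------------------------------------------
Time: 1717401600000 ms
-------------------------------------------
(hello,3)
(spark,2)
(hadoop,1)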