1. Requirement: use the netcat tool to continuously send data to port 9999, read the port data with Spark Streaming, and count how many times each word appears.
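A simple way to produce this data is to start a netcat listener on the host the job connects to (hadoop102 in the first example below) and type words into it, e.g. `nc -lk 9999`.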
2. Add the dependency:
```xml
<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-streaming_2.11</artifactId>
    <version>2.1.1</version>
</dependency>
```
3. Write the code. This version of wordCount is stateless: it only counts the words in the current batch (see the sample output after the code).
```scala
package com.atguigu

import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}
import org.apache.spark.streaming.{Seconds, StreamingContext}

object StreamWordCount {

  def main(args: Array[String]): Unit = {
    // 1. Initialize the Spark configuration
    val sparkConf = new SparkConf().setMaster("local[*]").setAppName("StreamWordCount")
    // 2. Initialize the StreamingContext with a 5-second batch interval
    val ssc = new StreamingContext(sparkConf, Seconds(5))
    // 3. Create a DStream by monitoring the port; the data comes in line by line
    val lineStreams = ssc.socketTextStream("hadoop102", 9999)
    // Split each line into individual words
    val wordStreams = lineStreams.flatMap(_.split(" "))
    // Map each word to a tuple (word, 1)
    val wordAndOneStreams = wordStreams.map((_, 1))
    // Sum the counts of identical words within the batch
    val wordAndCountStreams = wordAndOneStreams.reduceByKey(_ + _)
    // Print each batch's result
    wordAndCountStreams.print()
    // Start the StreamingContext (i.e. start the receiver)
    ssc.start()
    // Block until the computation is terminated
    ssc.awaitTermination()
  }
}
```
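As an illustration (timestamps and input are made up for this example): if you type `hello world hello` into the netcat session during one 5-second batch and `hello world` during the next, the `print()` output would look roughly like the block below. Note that the second batch's counts do not include the first batch, because this version keeps no state.

```
-------------------------------------------
Time: 1504685403000 ms
-------------------------------------------
(hello,2)
(world,1)

-------------------------------------------
Time: 1504685408000 ms
-------------------------------------------
(hello,1)
(world,1)
```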
4. Write the code: a stateful wordCount that accumulates counts across batches with updateStateByKey (see the note after the code).
```scala
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.{Duration, StreamingContext}

object WordCountV2 {

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local[*]").setAppName("sparkStreaming1217")
    val streamingContext = new StreamingContext(conf, Duration(5000))
    streamingContext.sparkContext.setLogLevel("ERROR")
    // updateStateByKey needs a checkpoint directory to persist state between batches;
    // the local path here is only for testing, use a reliable store such as HDFS in production
    streamingContext.checkpoint("./checkpoint")

    val dstream1: DStream[String] = streamingContext.socketTextStream("hadoop-101", 9999)
    // Split each line into words
    val dstream2: DStream[String] = dstream1.flatMap(line => line.split(" "))
    // Map each word to a tuple (word, 1)
    val wordsAndOneDstream: DStream[(String, Int)] = dstream2.map(t => (t, 1))
    // seq:    the values of the same key in the current batch
    // buffer: the count accumulated over previous batches
    val stateDstream: DStream[(String, Int)] = wordsAndOneDstream.updateStateByKey[Int](
      (seq: Seq[Int], buffer: Option[Int]) => {
        // add the current batch's sum to the historical total
        val sum = buffer.getOrElse(0) + seq.sum
        // return the new total, which becomes the buffer for the next batch
        Option(sum)
      }
    )
    stateDstream.print()
    // Start the receiver
    streamingContext.start()
    // Wait for the receiver to terminate; this keeps the main method (and the program) alive.
    // A plain while(true) loop would not work here, because the waiting has to be tied to the receiver.
    streamingContext.awaitTermination()
  }
}
```
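With the same illustrative input as before, the stateful version would print roughly the following for the second batch, because each batch's sum is added to the state accumulated so far. Also note that updateStateByKey requires the checkpoint directory set above; without it the job fails at startup.

```
-------------------------------------------
Time: 1504685408000 ms
-------------------------------------------
(hello,3)
(world,2)
```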