Practice 1: wordcount
1.1 Running the wordcount code locally (stateless):
package com.badou.streaming

import org.apache.spark.SparkConf
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.{Seconds, StreamingContext}

object wordCount {
  def main(args: Array[String]) {
    if (args.length < 2) {
      System.err.println("Usage: wordCount <hostname> <port>")
      System.exit(1)
    }
    // local[2]: one thread runs the socket receiver, the other processes batches
    val sparkConf = new SparkConf().setMaster("local[2]").setAppName("wordCount")
    // batch interval: one micro-batch every 5 seconds
    val ssc = new StreamingContext(sparkConf, Seconds(5))
    val lines = ssc.socketTextStream(args(0), args(1).toInt, StorageLevel.MEMORY_AND_DISK_SER)
    val words = lines.flatMap(_.split(" "))
    // stateless: each batch is counted independently of all previous batches
    val wordCounts = words.map(x => (x, 1)).reduceByKey(_ + _)
    wordCounts.print()
    wordCounts.saveAsTextFiles("hdfs://master:9000/stream_out", "doc")
    ssc.start()
    ssc.awaitTermination()
  }
}
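Note that saveAsTextFiles(prefix, suffix) does not write one single file: every 5-second batch creates its own output directory named prefix-<timestamp in ms>.suffix. Assuming the HDFS path used in the code above, the per-batch directories can be listed with:

# list the output directory created for each batch
hdfs dfs -ls hdfs://master:9000/stream_out-*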
Test and results: a quick smoke test with netcat is sketched below.
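A minimal way to exercise the job, assuming netcat (nc) is available; the hostname localhost and port 9999 are placeholders and must match the program arguments:

# terminal 1: start a plain text server and type words, one line per message
nc -lk 9999

Then start wordCount with the arguments localhost 9999 (for example as IDE program arguments, since setMaster("local[2]") is still in the code); every 5 seconds the counts for that batch are printed.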

1.2 Packaging a jar and running it on local Spark:
Remove the setMaster call from SparkConf and specify the master in the run.sh script instead:
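A minimal sketch of what run.sh could contain (the jar name, main class, host, and port below are assumptions):

#!/bin/sh
# --master here replaces the setMaster(...) call removed from the code
spark-submit \
  --master "local[2]" \
  --class com.badou.streaming.wordCount \
  ./spark-streaming-demo.jar \
  master 9999 \
  1> run.log 2> err.log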
Run the script with sh run.sh. In the redirects on its last line:
1> redirects standard output (the normal log goes to run.log)
2> redirects standard error (the error log goes to err.log)
Practice 2: wordcount (stateful)
Stateful code: the per-batch reduceByKey from Practice 1 is replaced by updateStateByKey, which folds each batch's counts into state kept across batches.
Full code:
WordCountState.scala:
package com.badou.streaming

import org.apache.spark.{HashPartitioner, SparkConf}
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.{Seconds, StreamingContext}

object WordCountState {
  // merge this batch's counts (currentValues) into the accumulated state (preValues)
  def updateFunction(currentValues: Seq[Int], preValues: Option[Int]): Option[Int] = {
    val current = currentValues.sum
    val pre = preValues.getOrElse(0)
    Some(current + pre)
  }

  def main(args: Array[String]) {
    if (args.length < 2) {
      System.err.println("Usage: WordCountState <hostname> <port>")
      System.exit(1)
    }
    val sparkConf = new SparkConf().setMaster("local[2]").setAppName("NetworkWordCount")
    val ssc = new StreamingContext(sparkConf, Seconds(5))
    // updateStateByKey requires a checkpoint directory to persist the state
    ssc.checkpoint("hdfs://master:9000/hdfs_checkpoint")
    val lines = ssc.socketTextStream(args(0), args(1).toInt, StorageLevel.MEMORY_AND_DISK_SER)
    // the remainder mirrors the stateless version, with reduceByKey replaced
    // by updateStateByKey (an overload also accepts a HashPartitioner)
    val wordCounts = lines.flatMap(_.split(" "))
      .map(x => (x, 1))
      .updateStateByKey[Int](updateFunction _)
    wordCounts.print()
    ssc.start()
    ssc.awaitTermination()
  }
}
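With updateStateByKey the printed counts accumulate across batches: if the first 5-second batch contains "hello hello", the output is (hello,2); if the next batch contains one more "hello", the output becomes (hello,3). The stateless version from Practice 1 would report (hello,1) for that second batch instead. The checkpoint directory is mandatory here, because Spark periodically persists the running state to it so that the state can be recovered after a failure.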
