In the earlier Spark programs we applied our operations to RDDs; in Spark Streaming we operate on DStreams instead. DStream is short for discretized stream, and the official documentation explains it as follows:
DStreams can be created either from input data streams from sources such as Kafka, Flume, and Kinesis, or by applying high-level operations on other DStreams. Internally, a DStream is represented as a sequence of RDDs.
In short, a DStream is an ordered sequence of RDDs.
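To make this concrete, here is a minimal sketch (assuming an existing StreamingContext named `ssc`; the host and port are placeholders) showing that each batch interval materializes as one ordinary RDD:

```scala
// Each batch interval yields exactly one RDD; foreachRDD exposes it directly.
val lines = ssc.socketTextStream("localhost", 9999)
lines.foreachRDD { rdd =>
  // rdd is a plain RDD[String] containing only this batch's records
  println(s"batch of ${rdd.count()} records in ${rdd.partitions.length} partitions")
}
```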
- Below is a simple Spark Streaming WordCount demo:
The main code is as follows:
package cn.lijie
import org.apache.log4j.{Level, Logger}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{HashPartitioner, Logging, SparkConf, SparkContext}
/**
* User: lijie
* Date: 2017/8/2
* Time: 15:01
*/
object SparkStreamingWC {
/**
 * Update function for updateStateByKey.
 * Iterator[(String, Seq[Int], Option[Int])]
 * String: the key (a word)
 * Seq[Int]: the values for this key in the current batch, e.g. Seq(1,1,1,1,1,1)
 * Option[Int]: the accumulated count from previous batches
 */
val updateFunc = (it: Iterator[(String, Seq[Int], Option[Int])]) => {
it.map(x => {
(x._1, x._2.sum + x._3.getOrElse(0))
})
}
def main(args: Array[String]): Unit = {
// Set the log level
myLog.setLogLevel(Level.ERROR)
val conf = new SparkConf().setAppName("st").setMaster("local[2]")
val sc = new SparkContext(conf)
val ssc = new StreamingContext(sc, Seconds(5))
// Pull lines from the socket source
val dStream = ssc.socketTextStream("192.168.80.123", 10086)
// updateStateByKey requires a checkpoint directory to be set
sc.setCheckpointDir("C:\\Users\\Administrator\\Desktop\\checkpoint")
// Cumulative word count across all batches
val res = dStream.flatMap(_.split(" ")).map((_, 1)).updateStateByKey(updateFunc, new HashPartitioner(sc.defaultParallelism), true)
// Per-batch word count (no state carried across batches)
// val res = dStream.flatMap(_.split(" ")).map((_, 1)).reduceByKey(_+_)
res.print()
ssc.start()
ssc.awaitTermination()
}
}
object myLog extends Logging {
/**
 * Set the root logger's level (only when no appenders have been configured yet).
 *
 * @param level the log level to apply
 */
def setLogLevel(level: Level): Unit = {
val flag = Logger.getRootLogger.getAllAppenders.hasMoreElements
if (!flag) {
logInfo("set log level ->" + level)
Logger.getRootLogger.setLevel(level)
}
}
}
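To see what the update function does on its own, here is a quick standalone check (the sample data is invented for illustration; no cluster is needed): for each key, the counts from the current batch are summed and added to its previous accumulated value, if any.

```scala
// "hello" was seen 3 times in earlier batches and 3 more times in this batch;
// "world" appears for the first time in this batch.
val sample = Iterator(("hello", Seq(1, 1, 1), Some(3)), ("world", Seq(1), None))
println(SparkStreamingWC.updateFunc(sample).toList) // List((hello,6), (world,1))
```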
POM file:
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>sparkstreaming-demo</groupId>
<artifactId>sparkstreaming-demo</artifactId>
<version>1.0-SNAPSHOT</version>
<properties>
<maven.compiler.source>1.7</maven.compiler.source>
<maven.compiler.target>1.7</maven.compiler.target>
<encoding>UTF-8</encoding>
<scala.version>2.10.6</scala.version>
<spark.version>1.6.1</spark.version>
<hadoop.version>2.6.4</hadoop.version>
</properties>
<dependencies>
<dependency>
<groupId>org.scala-lang</groupId>
<artifactId>scala-library</artifactId>
<version>${scala.version}</version>
</dependency>
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-core_2.10</artifactId>
<version>${spark.version}</version>
</dependency>
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-streaming_2.10</artifactId>
<version>${spark.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
<version>5.1.38</version>
</dependency>
</dependencies>
<build>
<sourceDirectory>src/main/scala</sourceDirectory>
<testSourceDirectory>src/test/scala</testSourceDirectory>
<plugins>
<plugin>
<groupId>net.alchim31.maven</groupId>
<artifactId>scala-maven-plugin</artifactId>
<version>3.2.2</version>
<executions>
<execution>
<goals>
<goal>compile</goal>
<goal>testCompile</goal>
</goals>
<configuration>
<args>
<arg>-dependencyfile</arg>
<arg>${project.build.directory}/.scala_dependencies</arg>
</args>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-shade-plugin</artifactId>
<version>2.4.3</version>
<executions>
<execution>
<phase>package</phase>
<goals>
<goal>shade</goal>
</goals>
<configuration>
<filters>
<filter>
<artifact>*:*</artifact>
<excludes>
<exclude>META-INF/*.SF</exclude>
<exclude>META-INF/*.DSA</exclude>
<exclude>META-INF/*.RSA</exclude>
</excludes>
</filter>
</filters>
<transformers>
<transformer
implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
<mainClass>cn.lijie.SparkStreamingWC</mainClass>
</transformer>
</transformers>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>
Notes:
When using the updateStateByKey operator, a checkpoint directory must be set, otherwise Spark reports an error.
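As an alternative to the sc.setCheckpointDir call used in the demo, the checkpoint directory can also be set on the StreamingContext itself. A minimal sketch follows (the HDFS path is a placeholder; production jobs should checkpoint to a fault-tolerant filesystem rather than a local desktop directory):

```scala
// Enable checkpointing via the StreamingContext; the path is illustrative only.
ssc.checkpoint("hdfs://namenode:9000/spark/checkpoint")
```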
The socket data source (e.g. a netcat server started with nc -lk 10086) looks like this:
The Spark Streaming computation output looks like this: