package com.test.sparkStreaming
import java.sql.{DriverManager, PreparedStatement}
import com.typesafe.config.{Config, ConfigFactory}
import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.{Seconds, StreamingContext}
object MyNetWorkWordCountMysqlState {
def main(args: Array[String]): Unit = {
Logger.getLogger("org.apache.spark").setLevel(Level.OFF)
//Load the configuration; picks up config files under resources,
// default precedence: application.conf -> application.json -> application.properties
val config: Config = ConfigFactory.load()
//Create the StreamingContext object
val conf = new SparkConf().setAppName("MyNetWorkWordCountMysqlState").setMaster("local[2]")
//Define the batch interval: collect data once every 2 seconds; this interval should not be chosen arbitrarily
val ssc: StreamingContext = new StreamingContext(conf,Seconds(2))
//Create a discretized stream (DStream)
val lines = ssc.soc
Big Data Learning Path 107 — Spark Streaming stateful word count backed by MySQL historical state
Latest recommended article published 2020-12-06 11:40:47