/**
- Spark Streaming integrated with Flume, pull mode (Poll)
- Put spark-streaming-flume-sink_2.11-2.0.2.jar into Flume's lib directory
Configuration file flume-poll.conf:
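The Spark application itself also needs the Flume integration on its classpath; a
minimal sketch, assuming an sbt build (match the version to your Spark/Scala build):
  libraryDependencies += "org.apache.spark" %% "spark-streaming-flume" % "2.0.2"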
a1.sources = r1
a1.sinks = k1
a1.channels = c1
#source
a1.sources.r1.channels = c1
a1.sources.r1.type = spooldir
a1.sources.r1.spoolDir = /opt/flume-1.7.0/data
a1.sources.r1.fileHeader = true
#channel
a1.channels.c1.type = memory
a1.channels.c1.capacity = 20000
a1.channels.c1.transactionCapacity = 5000
#sinks
a1.sinks.k1.channel = c1
a1.sinks.k1.type = org.apache.spark.streaming.flume.sink.SparkSink
a1.sinks.k1.hostname = <VM IP>
a1.sinks.k1.port = 8888
a1.sinks.k1.batchSize = 2000
Start Flume:
bin/flume-ng agent -n a1 -c /opt/flume-1.7.0/conf/ -f /opt/flume-1.7.0/conf/flume-poll.conf -Dflume.root.logger=INFO,console
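Once the agent and the Spark job are running, drop a plain-text file into the spool
directory to feed the stream (the file name words.txt is only an illustration):
cp /tmp/words.txt /opt/flume-1.7.0/data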
*/
import java.net.InetSocketAddress

import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.flume.FlumeUtils
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}

object SparkStreamingFlumePoll {
  // newValues: all the 1s from the (word, 1) pairs of the same word in the current batch
  // runningCount: the accumulated historical count for the same key
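  // e.g. newValues = Seq(1, 1, 1) with runningCount = Some(2) returns Some(5)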
  def updateFunction(newValues: Seq[Int], runningCount: Option[Int]): Option[Int] = {
    val newCount = runningCount.getOrElse(0) + newValues.sum
    Some(newCount)
  }
  def main(args: Array[String]): Unit = {
    // Configure SparkConf
    val sparkConf = new SparkConf().setAppName("SparkStreaming_Flume_Poll").setMaster("local[2]")
    // Build the SparkContext
    val sc = new SparkContext(sparkConf)
    sc.setLogLevel("WARN")
    // Build the StreamingContext with the batch interval
    val scc = new StreamingContext(sc, Seconds(5))
    // Set the checkpoint directory (required by updateStateByKey)
    scc.checkpoint("./")
    // Flume sink address(es); multiple agents can be listed here
    val address = Seq(new InetSocketAddress("<VM IP>", 8888))
    // Pull data from Flume
    val flumeStream = FlumeUtils.createPollingStream(scc, address, StorageLevel.MEMORY_AND_DISK)
    // The payload lives in the event body; convert it to String
    val lineStream = flumeStream.map(x => new String(x.event.getBody.array()))
    // Word count with running state across batches
    val result = lineStream.flatMap(_.split(" ")).map((_, 1)).updateStateByKey(updateFunction)
    result.print()
    scc.start()
    scc.awaitTermination()
  }
}
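
// To run against a cluster instead of local mode, package the job and submit it.
// A sketch only: the jar path is illustrative, and --packages pulls the Flume
// integration from Maven Central rather than bundling it yourself:
// spark-submit --class SparkStreamingFlumePoll \
//   --packages org.apache.spark:spark-streaming-flume_2.11:2.0.2 \
//   target/scala-2.11/sparkstreaming-flume-poll.jar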