Code for reading data from Kafka and doing a word count
package SparkStream

import org.apache.spark.{HashPartitioner, SparkConf}
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.{Seconds, StreamingContext}

object LoadKafkaDataAndWC {
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setAppName("LoadKafkaDataAndWC").setMaster("local[2]")
    val ssc = new StreamingContext(conf, Seconds(5))
    // Read the Kafka connection parameters from the command line
    val Array(zkQuorum, group, topics, numThreads) = args
    // Set a checkpoint directory; updateStateByKey needs it to persist state
    ssc.checkpoint("hdfs://hadoop02:9000/ck-2018-8-19-002")
    // Map each topic to the number of receiver threads
    val topicMap: Map[String, Int] = topics.split(" ").map((_, numThreads.toInt)).toMap
    // Use the KafkaUtils helper to create a receiver-based Kafka stream
    val data: ReceiverInputDStream[(String, String)] = KafkaUtils.createStream(ssc, zkQuorum, group, topicMap, StorageLevel.MEMORY_AND_DISK_SER)
    // Each element is (message key, message value); we only need the value
    val lines: DStream[String] = data.map(_._2)
    val tup = lines.flatMap(_.split(" ")).map((_, 1))
    // Accumulate counts across batches with updateStateByKey
    val res: DStream[(String, Int)] = tup.updateStateByKey(func, new HashPartitioner(ssc.sparkContext.defaultParallelism), true)
    res.print()
    ssc.start()
    ssc.awaitTermination()
  }

  // For each key: sum the new values of this batch and add the previously saved total
  val func = (it: Iterator[(String, Seq[Int], Option[Int])]) => {
    it.map(x => {
      (x._1, x._2.sum + x._3.getOrElse(0))
    })
  }
}
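The update function receives, for every key, the new counts from the current batch (Seq[Int]) together with the previously accumulated total (Option[Int]). A minimal standalone sketch of how it behaves, using made-up sample data, looks like this:

object UpdateFuncDemo {
  def main(args: Array[String]): Unit = {
    // Same update function as in LoadKafkaDataAndWC
    val func = (it: Iterator[(String, Seq[Int], Option[Int])]) => {
      it.map(x => (x._1, x._2.sum + x._3.getOrElse(0)))
    }
    // Hypothetical batch: "spark" appeared twice and had a previous total of 3, "kafka" is new
    val it: Iterator[(String, Seq[Int], Option[Int])] =
      Iterator(("spark", Seq(1, 1), Some(3)), ("kafka", Seq(1), None))
    func(it).foreach(println) // prints (spark,5) and (kafka,1)
  }
}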
Parameter list: zkQuorum, group, topics, numThreads
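These four program arguments are destructured in main. One possible argument line (the ZooKeeper hosts and topic name here are assumptions; replace them with your own cluster's values):

hadoop02:2181,hadoop03:2181,hadoop04:2181 group01 wordcount 2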
As long as a Kafka producer keeps generating data, this program will read it and keep accumulating the word counts.
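To try it, keep typing lines into a Kafka console producer, for example (the broker address and topic name are assumptions for illustration):

kafka-console-producer.sh --broker-list hadoop02:9092 --topic wordcount

Type lines such as "hello spark hello kafka"; res.print() then shows the running totals every 5 seconds.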