// 一、SparkRDD和SparkStream的区别
// 二、SparkStream从Kafka上获取信息最后保存到数据库中
package com.stream.com
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{HashPartitioner, SparkConf, SparkContext}
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.streaming.kafka010._
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe
/**
* TODO
*
* @author 徐磊
* @email wc199608203213@136.com
* @date 2020/02/04 上午 11:43
*/
object SparkStream extends App {
//累加函数
val lj=(it:Iterator[(String,Seq[Int],Option[Int])])=>{
it.flatMap{
case(x,y,z)=>Some(y.sum+z.getOrEl