Integrating Spark Streaming with Kafka

Spark Streaming integration with Kafka
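
Both code examples below use the Kafka 0.8 connector (package org.apache.spark.streaming.kafka). A minimal build sketch, assuming Spark 2.x on Scala 2.11 (the version numbers are assumptions; adjust them to your cluster):

// build.sbt (sketch)
libraryDependencies ++= Seq(
  "org.apache.spark" %% "spark-streaming"           % "2.2.0" % "provided",
  "org.apache.spark" %% "spark-streaming-kafka-0-8" % "2.2.0"
)
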
Integrating Kafka with the Direct approach

package spark.com.test.day04

import kafka.serializer.StringDecoder
import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.{Seconds, StreamingContext}

//Kafka integration based on the Direct (no-receiver) approach
object SparkStreamingWithDirectOps {
  def main(args: Array[String]): Unit = {
    Logger.getLogger("org.apache.spark").setLevel(Level.WARN)
    Logger.getLogger("org.apache.hadoop").setLevel(Level.WARN)
    Logger.getLogger("org.spark_project").setLevel(Level.WARN)
    val conf=new SparkConf()
      .setAppName("SparkStreamingWithDirctOps")
      .setMaster("local[*]")
    //Create the StreamingContext: the first argument is the SparkConf, the second is the batch interval
    val ssc=new StreamingContext(conf,Seconds(2))

    val kafkaparams=Map[String,String](
      "bootstrap.servers"->
      "haddoop01:9092,hadoop02:9092,hadoop03:9092",
      "auto.offset.reset"->"largest",//消费方式从最大偏移量开始读取数据
      "group.id"->"bd-1901-gropu-3"
    )
    val topics="spark".split(",").toSet//创建一个集合topics
    val message:InputDStream[(String,String)]=KafkaUtils
      .createDirectStream[String,String,StringDecoder,StringDecoder](ssc,kafkaparams,topics)
    message.print()
    ssc.start()
    ssc.awaitTermination()
    /*
    awaitTermination() blocks the current thread until the StreamingContext
    is stopped, either by an explicit call to ssc.stop() or because an error
    occurred during processing.

    The timed variant awaitTerminationOrTimeout(timeout) waits at most
    `timeout` milliseconds and returns true if the context stopped within
    that time, or false if the timeout elapsed first.
     */


  }
}
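
With the Direct approach there is no receiver and offsets are not committed to ZooKeeper, so if consumption progress needs to be tracked it has to be read from each batch's RDD. A minimal sketch of that pattern (printing only; persisting the offsets to an external store is left out):

import org.apache.spark.streaming.kafka.HasOffsetRanges

// place inside main(), after `message` is created and before ssc.start()
message.foreachRDD { rdd =>
  // only the RDD produced directly by createDirectStream carries offset ranges
  val ranges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
  ranges.foreach { r =>
    println(s"topic=${r.topic} partition=${r.partition} from=${r.fromOffset} until=${r.untilOffset}")
  }
}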

Spark Streaming integration with Kafka using the Receiver approach


package spark.com.test.day04

import kafka.serializer.StringDecoder
import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkConf
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.dstream.ReceiverInputDStream
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.{Seconds, StreamingContext}

//Kafka integration based on the Receiver approach
object SparkStreamingWithReceiver2KafkaOps {
  def main(args: Array[String]): Unit = {
    Logger.getLogger("org.apache.spark").setLevel(Level.WARN)
    Logger.getLogger("org.apache.hadoop").setLevel(Level.WARN)
    Logger.getLogger("org.spark_project").setLevel(Level.WARN)
    //KafkaUtils is the entry point for the Kafka integration
    val conf = new SparkConf()
      .setAppName("SparkStreamingWithReceiver2KafkaOps")
      .setMaster("local[*]")
    //Create the StreamingContext: the first argument is the SparkConf, the second is the batch interval
    val ssc = new StreamingContext(conf, Seconds(2))
    //Kafka connection parameters
    val kafkaParams = Map[String, String](
      "zookeeper.connect" ->
        "hadoop01:2181,hadoop02:2181,hadoop03:2181/kafka", //集群入口
      "group.id" -> "bd-1901-group-2", //消费组
      "auto.offset.reset" -> "smallest" //消费方式从头开始读
    )
    //topics map: topic name -> number of consumer threads used to read it
    val topics = Map[String, Int]("spark" -> 3)

    val message: ReceiverInputDStream[(String, String)] = KafkaUtils
      .createStream[String, String, StringDecoder, StringDecoder](ssc, kafkaParams, topics, StorageLevel.MEMORY_ONLY)
    message.print()
    ssc.start()
    ssc.awaitTermination()
  }

  private def readfromKafka(ssc: StreamingContext) = {
    //Receive data from Kafka (an alternative, simpler overload; not invoked from main)
    val zkQuorum = "hadoop01:2181,hadoop02:2181,hadoop03:2181/kafka"
    val groupId = "bd-1901-group-2"
    val topics = Map[String, Int](
      "spark" -> 3
    )

    /**
      * Each element of the returned ReceiverInputDStream is a (key, value) pair:
      * the key is the Kafka record's key and the value is the record's value.
      * This simplified overload takes no explicit kafkaParams; offsets are
      * tracked in ZooKeeper for the given consumer group.
      * KafkaUtils is the entry point whenever Kafka is integrated this way.
      */
    val inputStream: ReceiverInputDStream[(String, String)] = KafkaUtils
      .createStream(ssc, zkQuorum, groupId, topics)
    inputStream.print()
  }
}
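
A receiver keeps incoming blocks in executor memory until the batch has been processed, so data can be lost if an executor fails. A common hardening step (not shown in the example above) is to enable the write-ahead log together with a serialized, disk-backed storage level; a minimal sketch, assuming a fault-tolerant checkpoint directory is available (the HDFS path below is an assumption):

val conf = new SparkConf()
  .setAppName("SparkStreamingWithReceiver2KafkaOps")
  .setMaster("local[*]")
  // write received blocks to a write-ahead log before acknowledging them
  .set("spark.streaming.receiver.writeAheadLog.enable", "true")
val ssc = new StreamingContext(conf, Seconds(2))
// the WAL is stored under the checkpoint directory
ssc.checkpoint("hdfs://hadoop01:9000/spark/checkpoint")
// with the WAL enabled, in-memory replication is redundant; a disk-backed level suffices
val message = KafkaUtils.createStream[String, String, StringDecoder, StringDecoder](
  ssc, kafkaParams, topics, StorageLevel.MEMORY_AND_DISK_SER)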

