Integrating Spark Streaming with Kafka on CDH 6.3.2 (offsets stored in ZooKeeper, Spark 2.4 + kafka 0.10.x)

Dependencies

    <properties>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
        <maven.compiler.source>1.8</maven.compiler.source>
        <maven.compiler.target>1.8</maven.compiler.target>
        <java.version>1.8</java.version>
        <scala.binary.version>2.11</scala.binary.version>
        <scala.version>2.11.8</scala.version>
        <spark.version>2.4.0-cdh6.3.2</spark.version>
        <hadoop.version>3.0.0-cdh6.3.2</hadoop.version>
        <kafka.version>2.2.1-cdh6.3.2</kafka.version>
        <!-- Scope used by the Scala/Spark artifacts below: "provided" when submitting to a cluster
             (assumed here), "compile" when running locally from the IDE -->
        <jar.scope>provided</jar.scope>
    </properties>

 

    <dependencies>

        <dependency>
            <groupId>com.emg</groupId>
            <artifactId>etp-common</artifactId>
            <version>0.0.1-SNAPSHOT</version>
        </dependency>

        <dependency>
            <groupId>org.scala-lang</groupId>
            <artifactId>scala-library</artifactId>
            <version>${scala.version}</version>
            <scope>${jar.scope}</scope>
        </dependency>

        <dependency>
            <groupId>org.scala-lang</groupId>
            <artifactId>scala-compiler</artifactId>
            <version>${scala.version}</version>
            <scope>${jar.scope}</scope>
        </dependency>
        <dependency>
            <groupId>org.scala-lang</groupId>
            <artifactId>scala-reflect</artifactId>
            <version>${scala.version}</version>
            <scope>${jar.scope}</scope>
        </dependency>

        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-core_${scala.binary.version}</artifactId>
            <version>${spark.version}</version>
            <scope>${jar.scope}</scope>
        </dependency>

        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-sql_${scala.binary.version}</artifactId>
            <version>${spark.version}</version>
            <scope>${jar.scope}</scope>
        </dependency>

        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-sql-kafka-0-10_${scala.binary.version}</artifactId>
            <version>${spark.version}</version>
            <scope>${jar.scope}</scope>
        </dependency>

        <!-- https://mvnrepository.com/artifact/org.apache.spark/spark-streaming -->
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-streaming_${scala.binary.version}</artifactId>
            <version>${spark.version}</version>
        </dependency>
        <!-- https://mvnrepository.com/artifact/org.apache.kafka/kafka-streams-test-utils -->
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-streams-test-utils</artifactId>
            <version>${kafka.version}</version>
         <!--   <scope>test</scope>-->
        </dependency>
        <!-- https://mvnrepository.com/artifact/org.apache.kafka/kafka-clients -->
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-clients</artifactId>
            <version>${kafka.version}</version>
        </dependency>

        <!-- https://mvnrepository.com/artifact/org.apache.kafka/kafka -->
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <!-- keep the Kafka artifact on the same Scala binary version (2.11) as the rest of the build -->
            <artifactId>kafka_${scala.binary.version}</artifactId>
            <version>${kafka.version}</version>
        </dependency>

        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-streaming-kafka-0-10_${scala.binary.version}</artifactId>
            <version>${spark.version}</version>
        </dependency>

        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-hive_${scala.binary.version}</artifactId>
            <version>${spark.version}</version>
        </dependency>

        <!-- https://mvnrepository.com/artifact/com.alibaba/fastjson -->
        <dependency>
            <groupId>com.alibaba</groupId>
            <artifactId>fastjson</artifactId>
            <version>1.2.70</version>
        </dependency>

        <dependency>
            <groupId>joda-time</groupId>
            <artifactId>joda-time</artifactId>
            <version>2.9.2</version>
        </dependency>


        <dependency>
            <groupId>com.jolbox</groupId>
            <artifactId>bonecp</artifactId>
            <version>0.8.0.RELEASE</version>
        </dependency>
        <!-- https://mvnrepository.com/artifact/org.postgresql/postgresql -->
        <dependency>
            <groupId>org.postgresql</groupId>
            <artifactId>postgresql</artifactId>
            <version>42.2.12</version>
        </dependency>

        <dependency>
            <groupId>org.apache.commons</groupId>
            <artifactId>commons-collections4</artifactId>
            <version>4.4</version>
        </dependency>

        <dependency>
            <groupId>org.locationtech.jts</groupId>
            <artifactId>jts-core</artifactId>
            <version>1.16.1</version>
        </dependency>

        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
            <version>5.1.38</version>
        </dependency>


        <dependency>
            <groupId>com.uber</groupId>
            <artifactId>h3</artifactId>
            <version>3.6.3</version>
        </dependency>

        <dependency>
            <groupId>org.apache.parquet</groupId>
            <artifactId>parquet-common</artifactId>
            <version>1.11.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.parquet</groupId>
            <artifactId>parquet-encoding</artifactId>
            <version>1.11.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.parquet</groupId>
            <artifactId>parquet-column</artifactId>
            <version>1.11.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.parquet</groupId>
            <artifactId>parquet-hadoop</artifactId>
            <version>1.11.0</version>
        </dependency>
        <dependency>
            <groupId>log4j</groupId>
            <artifactId>log4j</artifactId>
            <version>1.2.17</version>
        </dependency>
<!--

        <dependency>
              <groupId>org.gavaghan</groupId>
              <artifactId>geodesy</artifactId>
              <version>1.1.3</version>
        </dependency>
-->

    </dependencies>
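
The driver code below also uses Apache Curator (CuratorFrameworkFactory, ExponentialBackoffRetry) to talk to ZooKeeper. If Curator does not already arrive transitively through the CDH Spark/Kafka artifacts, a dependency along these lines is needed as well; the 2.13.0 version is only an assumption, so pick one that matches your ZooKeeper 3.4.x ensemble:

        <!-- Apache Curator client, used by the offset bookkeeping code below.
             Version 2.13.0 is an assumption; align it with your ZooKeeper version. -->
        <dependency>
            <groupId>org.apache.curator</groupId>
            <artifactId>curator-framework</artifactId>
            <version>2.13.0</version>
        </dependency>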

Code

On every batch the driver writes each partition's untilOffset to ZooKeeper under /kafka/offsets/<group>/<topic>/<partition> (prefixed by the "mykafka" Curator namespace), and on startup it reads those nodes back so the direct stream resumes from where it left off.

import scala.collection.JavaConversions._
import org.apache.curator.framework.CuratorFrameworkFactory
import org.apache.curator.retry.ExponentialBackoffRetry
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.kafka.common.TopicPartition
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010._
import org.apache.spark.streaming.{Seconds, StreamingContext}
 
import scala.collection.mutable
 
object KafkaZookeeperCheckPoint {
 
 
  // ZK client
  val client = {
    val client = CuratorFrameworkFactory
      .builder
      .connectString("bigdata:2181,bigdata:2182,bigdata:2183")
      .retryPolicy(new ExponentialBackoffRetry(1000, 3))
      .namespace("mykafka")
      .build()
    client.start()
    client
  }
 
  // Root ZK path under which offsets are stored
  val Globe_kafkaOffsetPath = "/kafka/offsets"
 
  // Make sure a path exists in ZK; create it (including parents) if it does not
  def ensureZKPathExists(path: String)={
 
    if (client.checkExists().forPath(path) == null) {
      client.create().creatingParentsIfNeeded().forPath(path)
    }
 
  }
 
 
  // Persist the latest offsets to ZK
  def storeOffsets(offsetRange: Array[OffsetRange], groupName: String) = {
 
    for (o <- offsetRange) {
      val zkPath = s"${Globe_kafkaOffsetPath}/${groupName}/${o.topic}/${o.partition}"
      // make sure the node exists
      ensureZKPathExists(zkPath)
      // write (or update) the offset for this partition
      println("---Writing offset to ZK---\nTopic:" + o.topic + ", Partition:" + o.partition + ", Offset:" + o.untilOffset)
      client.setData().forPath(zkPath, o.untilOffset.toString.getBytes())
    }
  }
 
  def getFromOffset(topic: Array[String], groupName: String): (Map[TopicPartition, Long], Int) = {
 
    // Note the API change between Kafka 0.8 and 0.10: 0.10 uses TopicPartition, 0.8 used TopicAndPartition
    val topic1 = topic(0)
 
    // Read the offsets saved in ZK to use as the DStream's starting position.
    // If the path does not exist yet, create it and let the stream start from the
    // position given by auto.offset.reset instead.
    val zkTopicPath = s"${Globe_kafkaOffsetPath}/${groupName}/${topic1}"
 
    // Make sure the path exists
    ensureZKPathExists(zkTopicPath)
 
    // The children of the topic node are its partitions
    val childrens = client.getChildren().forPath(zkTopicPath)
 
    // Iterate over the partitions
    val offSets: mutable.Buffer[(TopicPartition, Long)] = for (p <- childrens) yield {
      // Read each child node's data, i.e. the saved offset
      val offsetData = client.getData().forPath(s"$zkTopicPath/$p")
      // Convert the offset to Long
      val offSet = java.lang.Long.valueOf(new String(offsetData)).toLong
      // Yield (TopicPartition, offset)
      (new TopicPartition(topic1, Integer.parseInt(p)), offSet)
    }
    println(offSets.toMap)
 
    if (offSets.isEmpty) {
      (offSets.toMap, 0)
    } else {
      (offSets.toMap, 1)
    }
  }
 
//    if (client.checkExists().forPath(zkTopicPath) == null){
//
//      (null, 0)
//    }
//    else {
//      val data = client.getData.forPath(zkTopicPath)
//      println("----------offset info")
//      println(data)
//      println(data(0))
//      println(data(1))
//      val offSets = Map(new TopicPartition(topic1, 0) -> 7332.toLong)
//      println(offSets)
//      (offSets, 1)
//    }
//
//  }
 
  def createMyZookeeperDirectKafkaStream(ssc: StreamingContext, kafkaParams: Map[String, Object], topic: Array[String],
                                         groupName: String): InputDStream[ConsumerRecord[String, String]] = {
 
    // Get the saved offsets. flag = 1 means resume from the stored offsets;
    // flag = 0 means start fresh (earliest or latest, depending on auto.offset.reset)
    val (fromOffsets, flag) = getFromOffset(topic, groupName)
    var kafkaStream: InputDStream[ConsumerRecord[String, String]] = null
    if (flag == 1) {
      // attaching message metadata was only needed with the old 0.8 messageHandler API
      //val messageHandler = (mmd:MessageAndMetadata[String, String]) => (mmd.topic, mmd.message())
      println(fromOffsets)
      kafkaStream = KafkaUtils.createDirectStream[String, String](ssc, LocationStrategies.PreferConsistent,
        ConsumerStrategies.Subscribe[String, String](topic, kafkaParams, fromOffsets))
      println("Resumed streaming from the offsets saved in ZooKeeper")
 
    } else {
      kafkaStream = KafkaUtils.createDirectStream[String, String](ssc, LocationStrategies.PreferConsistent,
        ConsumerStrategies.Subscribe[String, String](topic, kafkaParams))
 
      println("No saved offsets found; starting streaming for the first time")
 
    }
    kafkaStream
  }
 
  def main(args: Array[String]): Unit = {
 
    val processInterval = 5
    val brokers = "bigdata:9092,bigdata:9093,bigdata:9094"
    val topics = Array("zkKafka")
    val conf = new SparkConf().setMaster("local[2]").setAppName("kafka checkpoint zookeeper")
    // kafka params
    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> brokers,
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> "zk_group",
      "auto.offset.reset" -> "latest",
      "enable.auto.commit" -> (false: java.lang.Boolean)
    )
 
    val ssc = new StreamingContext(conf, Seconds(processInterval))
 
    val messages = createMyZookeeperDirectKafkaStream(ssc, kafkaParams, topics, "zk_group")
 
    messages.foreachRDD { rdd =>
      if (!rdd.isEmpty()) {
        println("###################:" + rdd.count())
      }
 
      // persist the new offsets back to ZooKeeper
      storeOffsets(rdd.asInstanceOf[HasOffsetRanges].offsetRanges, "zk_group")
    }
 
    ssc.start()
    ssc.awaitTermination()
 
  }
 
}
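
To sanity-check what storeOffsets actually wrote, the saved offsets can simply be read back through the same getFromOffset helper. Below is a minimal sketch; PrintSavedOffsets is a hypothetical helper object, and it assumes the ZooKeeper ensemble configured in the Curator client above is reachable and that main() has already processed at least one non-empty batch for topic "zkKafka" with group "zk_group".

// Hypothetical helper: read back the offsets that storeOffsets persisted to ZooKeeper
// and print one line per partition. Reuses KafkaZookeeperCheckPoint.getFromOffset as-is.
object PrintSavedOffsets {
  def main(args: Array[String]): Unit = {
    val (offsets, flag) = KafkaZookeeperCheckPoint.getFromOffset(Array("zkKafka"), "zk_group")
    if (flag == 0) {
      println("No offsets stored yet")            // nothing written, first run
    } else {
      offsets.foreach { case (tp, off) =>         // one znode (and map entry) per partition
        println(s"${tp.topic()}-${tp.partition()} -> $off")
      }
    }
  }
}

Each partition ends up as its own child znode under /mykafka/kafka/offsets/zk_group/zkKafka, because the "mykafka" namespace set on the Curator client is prefixed to every path the code writes.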

 
