Kafka + Spark Streaming: getting the offset range of each partition
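
With the direct (receiver-less) Kafka integration in spark-streaming-kafka (the 0.8 API), every RDD produced by createDirectStream implements HasOffsetRanges, so each micro-batch can report exactly which Kafka offsets it consumed, per topic partition. The example below prints topic, partition, fromOffset and untilOffset for every partition of every batch. One pitfall to watch for: only the RDDs coming straight out of the stream carry the offsets, so they must be captured in a transform step before any map is applied.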

package com.kafka.wordcount


import kafka.serializer.StringDecoder
import org.apache.spark.{SparkConf, TaskContext}
import org.apache.spark.streaming.kafka.{HasOffsetRanges, KafkaUtils, OffsetRange}
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
  * Created by root on 2016/5/21.
  */
object KafkaWordCount {

  def main(args: Array[String]) {
    // LoggerLevels is a small project helper that raises the log level to cut console noise
    LoggerLevels.setStreamingLogLevels()
    val sparkConf = new SparkConf().setAppName("KafkaWordCount").setMaster("local[*]")
    val ssc = new StreamingContext(sparkConf, Seconds(5))
    val kafkaParams = Map[String, String](
      "bootstrap.servers" -> "m01:9092,m02:9092,m03:9092" // the 0.8 direct API also accepts "metadata.broker.list"
    )

    // Connect to Kafka with the direct (receiver-less) API
    val topicsSet = "topic_test1".split(",").toSet
    val data = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](
      ssc, kafkaParams, topicsSet)

    // Capture the offset ranges from the stream's own RDDs *before* any transformation:
    // only the RDDs produced directly by createDirectStream implement HasOffsetRanges,
    // so casting a mapped RDD (as data2's RDDs would be) throws a ClassCastException.
    var offsetRanges = Array.empty[OffsetRange]
    val data2 = data.transform { rdd =>
      offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
      rdd
    }.map(msg => msg._1 + msg._2) // concatenate key and value

    data2.foreachRDD { rdd =>
      rdd.foreachPartition { iter =>
        // map() preserves the 1:1 mapping between RDD partitions and Kafka partitions,
        // so the task's partition id indexes directly into offsetRanges
        val o: OffsetRange = offsetRanges(TaskContext.get.partitionId)
        println(s"${o.topic} ${o.partition} ${o.fromOffset} ${o.untilOffset}")
      }
    }
    ssc.start()
    ssc.awaitTermination()
  }
}
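
Printing the ranges is mainly a stepping stone: once a job records each batch's untilOffset somewhere durable, a later run can resume from exactly that point. The 0.8 API provides a createDirectStream overload that takes explicit fromOffsets together with a messageHandler. Below is a minimal sketch, assuming a previous run persisted its offsets; the loadSavedOffsets helper and its placeholder values are hypothetical.

import kafka.common.TopicAndPartition
import kafka.message.MessageAndMetadata
import kafka.serializer.StringDecoder
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka.KafkaUtils

object RestartFromOffsets {
  // Hypothetical helper: load the (topic, partition) -> next-offset map that a
  // previous run persisted (e.g. the untilOffset values printed above).
  def loadSavedOffsets(): Map[TopicAndPartition, Long] =
    Map(TopicAndPartition("topic_test1", 0) -> 0L) // placeholder value

  def createStream(ssc: StreamingContext,
                   kafkaParams: Map[String, String]): InputDStream[(String, String)] = {
    val fromOffsets = loadSavedOffsets()
    // This overload needs a messageHandler that turns each MessageAndMetadata
    // into the stream's record type; here we keep the (key, value) pairs.
    val messageHandler = (mmd: MessageAndMetadata[String, String]) => (mmd.key, mmd.message)
    KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder, (String, String)](
      ssc, kafkaParams, fromOffsets, messageHandler)
  }
}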