offset保存在zookeeper中
package main.scala
object CustomDirectKafkaExample {
  private val conf = ConfigFactory.load()
  private val sparkStreamingConf = conf.getStringList("CustomDirectKafkaExample-List").asScala
  // FIX: the original referenced an undefined CHECKPOINT_DIRECTORY_REQUEST symbol.
  // NOTE(review): key name assumed — confirm against the deployment config file.
  private val CHECKPOINT_DIRECTORY_REQUEST = conf.getString("CHECKPOINT_DIRECTORY_REQUEST")
  val sparkConf = new SparkConf()
  val logger = Logger.getLogger(CustomDirectKafkaExample.getClass)

  /**
   * Entry point. Expects two arguments: <brokers> (comma-separated Kafka broker
   * list) and <topics> (comma-separated topic names).
   */
  def main(args: Array[String]) {
    if (args.length < 2) {
      System.err.println("Usage: CustomDirectKafkaExample <brokers> <topics>")
      System.exit(1)
    }
    sparkConf.setAppName(conf.getString("CustomDirectKafkaExample")) // setting spark conf parameters
    // Apply "key=value" overrides from config.
    // FIX: split with limit 2 so values containing '=' are not truncated.
    sparkStreamingConf.foreach { entry =>
      val Array(key, value) = entry.split("=", 2)
      sparkConf.set(key, value)
    }
    val sc = new SparkContext(sparkConf) // create spark context
    val Array(brokers, topics) = args
    val checkpointDir = CHECKPOINT_DIRECTORY_REQUEST
    val kafkaParams = Map[String, String]("metadata.broker.list" -> brokers)
    val topicsSet = topics.split(",").toSet
    // FIX: the original called setupSsc with 4 arguments but declared it with 2
    // and an undefined `msc`; the signature now matches this call site.
    val ssc = setupSsc(topicsSet, kafkaParams, checkpointDir, sc)
    /* Start the spark streaming */
    ssc.start()
    ssc.awaitTermination();
  } // main() ends

  /**
   * Builds the StreamingContext: wires the custom direct Kafka stream, the
   * line-level filter, and the per-partition processing loop.
   *
   * @param topicsSet     Kafka topics to subscribe to
   * @param kafkaParams   direct-stream Kafka parameters (broker list etc.)
   * @param checkpointDir directory for Spark Streaming checkpoints
   * @param sc            the already-created SparkContext
   */
  def setupSsc(topicsSet: Set[String], kafkaParams: Map[String, String],
               checkpointDir: String, sc: SparkContext): StreamingContext = {
    val ssc = new StreamingContext(sc, Seconds(conf.getInt("application.sparkbatchinterval")))
    // FIX: checkpointDir was passed in but never used by the original.
    ssc.checkpoint(checkpointDir)
    /* create direct kafka stream; offsets are tracked in ZooKeeper */
    val messages = createCustomDirectKafkaStream(ssc, kafkaParams, "localhost", "/kafka", topicsSet)
    // Payload is the message value; one record may carry several newline-separated lines.
    val filterLines = messages.map(_._2)
      .flatMap(_.split("\n"))
      .filter(line => LogFilter.filter(line, "0"))
    filterLines.foreachRDD((rdd: RDD[String], time: Time) => {
      rdd.foreachPartition { partitionOfRecords =>
        if (partitionOfRecords.isEmpty) {
          logger.info("partitionOfRecords FOUND EMPTY ,IGNORING THIS PARTITION")
        } else {
          /* write computation logic here */
        }
      }
    })
    ssc
  } // setupSsc() ends

  /**
   * createDirectStream() wrapper that restores offsets from ZooKeeper on start
   * and persists them after each batch.
   */
  def createCustomDirectKafkaStream(ssc: StreamingContext, kafkaParams: Map[String, String], zkHosts: String
                                    , zkPath: String, topics: Set[String]): InputDStream[(String, String)] = {
    val topic = topics.last // TODO only for single kafka topic right now
    val zkClient = new ZkClient(zkHosts, 30000, 30000)
    val storedOffsets = readOffsets(zkClient, zkHosts, zkPath, topic)
    val kafkaStream = storedOffsets match {
      case None => // no saved state: start from the latest offsets
        KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](ssc, kafkaParams, topics)
      case Some(fromOffsets) => // resume from previously saved offsets
        val messageHandler = (mmd: MessageAndMetadata[String, String]) => (mmd.key, mmd.message)
        KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder
          , (String, String)](ssc, kafkaParams, fromOffsets, messageHandler)
    }
    // Persist the batch's offsets to ZooKeeper after each micro-batch.
    kafkaStream.foreachRDD(rdd => saveOffsets(zkClient, zkHosts, zkPath, rdd))
    kafkaStream
  }

  /**
   * Read the previously saved offsets from ZooKeeper.
   * Format: "partition:offset,partition:offset,..."
   *
   * @return Some(partition -> offset map) or None when nothing (or an empty
   *         string) is stored under zkPath.
   */
  private def readOffsets(zkClient: ZkClient, zkHosts: String, zkPath: String, topic: String):
  Option[Map[TopicAndPartition, Long]] = {
    logger.info("Reading offsets from Zookeeper")
    val stopwatch = new Stopwatch()
    val (offsetsRangesStrOpt, _) = ZkUtils.readDataMaybeNull(zkClient, zkPath)
    offsetsRangesStrOpt match {
      // FIX: guard against an empty stored string, which would previously
      // throw a MatchError in the Array(partitionStr, offsetStr) pattern.
      case Some(offsetsRangesStr) if offsetsRangesStr.nonEmpty =>
        logger.info(s"Read offset ranges: ${offsetsRangesStr}")
        val offsets = offsetsRangesStr.split(",")
          .map(s => s.split(":"))
          .map { case Array(partitionStr, offsetStr) => (TopicAndPartition(topic, partitionStr.toInt) -> offsetStr.toLong) }
          .toMap
        logger.info("Done reading offsets from Zookeeper. Took " + stopwatch)
        Some(offsets)
      case _ =>
        logger.info("No offsets found in Zookeeper. Took " + stopwatch)
        None
    }
  }

  /**
   * Persist the offsets of the current batch to ZooKeeper so a restart can
   * resume where processing left off.
   */
  private def saveOffsets(zkClient: ZkClient, zkHosts: String, zkPath: String, rdd: RDD[_]): Unit = {
    logger.info("Saving offsets to Zookeeper")
    val stopwatch = new Stopwatch()
    val offsetsRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
    offsetsRanges.foreach(offsetRange => logger.debug(s"Using ${offsetRange}"))
    // FIX: save untilOffset (the end of this batch) instead of fromOffset —
    // saving fromOffset made every restart reprocess the last completed batch.
    val offsetsRangesStr = offsetsRanges.map(offsetRange => s"${offsetRange.partition}:${offsetRange.untilOffset}")
      .mkString(",")
    logger.info("chandan Writing offsets to Zookeeper zkClient=" + zkClient + " zkHosts=" + zkHosts + "zkPath=" + zkPath + " offsetsRangesStr:" + offsetsRangesStr)
    ZkUtils.updatePersistentPath(zkClient, zkPath, offsetsRangesStr)
    logger.info("Done updating offsets in Zookeeper. Took " + stopwatch)
  }

  /** Simple wall-clock stopwatch; toString reports elapsed milliseconds. */
  class Stopwatch {
    private val start = System.currentTimeMillis()
    override def toString() = (System.currentTimeMillis() - start) + " ms"
  }
}
offset保存在kafka中
Spark Streaming+Kafka提交offset实现有且仅有一次(exactly-once)
1、提交offset的程序
package com.dkl.leanring.spark.kafka
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.streaming.kafka010._
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.SparkConf
import org.apache.spark.streaming.Seconds
import org.apache.spark.TaskContext
object KafkaOffsetDemo {
  def main(args: Array[String]) {
    // Spark configuration: local mode with two threads, app name for the UI.
    val sparkConf = new SparkConf().setAppName("KafkaOffsetDemo").setMaster("local[2]")
    // StreamingContext with a 1-second batch interval.
    val ssc = new StreamingContext(sparkConf, Seconds(1))
    // Consumer settings: start from the earliest committed offset (or the head
    // of the log when none exists) and disable auto-commit so offsets are
    // committed explicitly after processing.
    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> "ambari.master.com:6667",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> "KafkaOffsetDemo",
      "auto.offset.reset" -> "earliest",
      "enable.auto.commit" -> (false: java.lang.Boolean))
    // Single subscribed topic.
    val topics = Array("top1")
    // Direct stream over the Kafka topic.
    val stream = KafkaUtils.createDirectStream[String, String](
      ssc,
      PreferConsistent,
      Subscribe[String, String](topics, kafkaParams))
    stream.foreachRDD(rdd => {
      // Only act on batches that actually carry records.
      if (rdd.count > 0) {
        println("=============================")
        println("打印获取到的kafka里的内容")
        rdd.foreach(record => {
          println(record.value())
        })
        println("=============================")
        println("打印offset的信息")
        // Offset ranges covered by this batch.
        val offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
        // Print each partition's range from inside its own task.
        rdd.foreachPartition { iter =>
          val o: OffsetRange = offsetRanges(TaskContext.get.partitionId)
          println(s"${o.topic} ${o.partition} ${o.fromOffset} ${o.untilOffset}")
        }
        println("=============================")
        // Commit the offsets only after the output work above has been issued.
        stream.asInstanceOf[CanCommitOffsets].commitAsync(offsetRanges)
      }
    })
    // Start the streaming job and block until termination.
    ssc.start()
    ssc.awaitTermination()
  }
}
-
auto.offset.reset设置为earliest,即当各分区下有已提交的offset时,从提交的offset开始消费;无提交的offset时,从头开始,这样设置的目的是为了一开始可以获取到kafka对应主题下的所有的历史消息。
-
enable.auto.commit
设置为false,如果是true,则这个消费者的偏移量会在后台自动提交,这样设置目的是为了后面自己提交offset,因为如果虽然获取到了消息,但是后面的转化操作并将结果写到如hive中并没有完成程序就挂了的话,这样是不能将这次的offset提交的,这样就可以等程序重启之后接着上次失败的地方继续消费 -
group.id 是不能变的,也就是offset是和topic和group绑定的,如果换一个group的话,程序将从头消费所有的历史数据
-
这个api是将offset存储到kafka的一个指定的topic里,名字为__consumer_offsets,而不是zookeeper中
2、测试程序
-
1、首先创建对应的topic 2、生产几条数据作为历史消息
bin/kafka-console-producer.sh --broker-list ambari.master.com:6667 --topic top1
3、启动上面的程序 4、继续生产几条数据
-
由图可得,这样可以将历史数据全部打印出来,并且后面实时增加的数据,也打印出来了,且可以看到offset是在增加的,最后一个offset是202,那么接下来测试一下程序重启之后是否会接着之前的数据继续消费呢
5、停止程序 6、生产几条数据 7、启动程序
-
可以看出,程序确实是接着上次消费的地方消费的,为了证实这一点,我将earliest和offset圈了起来,从offset可以看到是从上次的202开始消费的。
3、关于offset过期时间
- kafka
offset默认的过期时间是一天,当上面的程序挂掉,一天之内没有重启,也就是一天之内没有保存新的offset的话,那么之前的offset就会被删除,再重启程序,就会从头开始消费kafka里的所有历史数据,这种情况是有问题的,所以可以通过设置offsets.retention.minutes自定义offset过期时间,该设置单位为分钟,默认为1440。
修改kafka的offset过期时间详细信息见:https://dongkelun.com/2018/06/21/modifyKafkaOffsetTime/
4、自己保存offset
- 可以通过自己保存offset的信息到数据库里,然后需要时再取出来,根据得到的offset信息消费kafka里的数据,这样就不用担心offset的过期的问题了,因为没有自己写代码实现,所以先给出官网的示例代码:
// The details depend on your data store, but the general idea looks like this
// Begin from the offsets previously committed to the database.
val fromOffsets = selectOffsetsFromYourDatabase.map { resultSet =>
  new TopicPartition(resultSet.string("topic"), resultSet.int("partition")) -> resultSet.long("offset")
}.toMap
// Assign (rather than Subscribe) pins the stream to exactly these partitions
// starting at the restored offsets.
val stream = KafkaUtils.createDirectStream[String, String](
  streamingContext,
  PreferConsistent,
  Assign[String, String](fromOffsets.keys.toList, kafkaParams, fromOffsets)
)
stream.foreachRDD { rdd =>
  val offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
  val results = yourCalculation(rdd)
  // begin your transaction
  // update results
  // update offsets where the end of existing offsets matches the beginning of this batch of offsets
  // assert that offsets were updated correctly
  // end your transaction
}