1. Test code: three ways to save Kafka offsets (unoptimized version)
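Method one first: the direct stream owns the offsets. Auto-commit is disabled, each batch's offset ranges are captured up front, and commitAsync writes them back to the Kafka consumer group once the batch has been processed.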
package kafka.comsumer
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.kafka010.{CanCommitOffsets, HasOffsetRanges, KafkaUtils, OffsetRange}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import utils.PropUtil
/**
 * @author yanghb
 * @date 2019/7/25 10:03
 * @description: let Kafka itself maintain the offsets; reads one or more topics
 */
object KafkaOffset {
  // Load configuration
  val prop = new PropUtil("config.properties")
  val oracleUrl = prop.getProp("ORACLE_URL")
  val oracleUser = prop.getProp("ORACLE_USER")
  val oraclePassword = prop.getProp("ORACLE_PASSWORD")
  val brokers = prop.getProp("KAFKA_BROKERS")
  // The class name doubles as the consumer group id
  val groupName: String = this.getClass.getName
  def main(args: Array[String]): Unit = {
    // Create the SparkSession (local[4] for testing)
    val spark = SparkSession.builder().appName(groupName).master("local[4]").getOrCreate()
    // val spark = SparkSession.builder().appName("SparkToOracleStatus").getOrCreate() // cluster mode
    val sc = spark.sparkContext
    // Keep the logs quiet
    sc.setLogLevel("WARN")
    // 5-second batch interval
    val ssc = new StreamingContext(sc, Seconds(5))
    // Topics to consume
    // val topics = Array("ll", "oo") // multiple topics also work
    val topics = Array("testTopic2")
    // Kafka consumer parameters
    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> brokers,
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> groupName,
      "auto.offset.reset" -> "latest",
      "enable.auto.commit" -> (false: java.lang.Boolean)
    )
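    // Note: with enable.auto.commit=false the consumer never commits on its own,
    // so offsets only advance when commitAsync below is called. auto.offset.reset
    // applies only when the group has no committed offset yet (first run, or after
    // the stored offsets have expired).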
    // Create the direct stream
    val messages = KafkaUtils.createDirectStream[String, String](
      ssc,
      PreferConsistent,
      Subscribe[String, String](topics, kafkaParams)
    )
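    // (PreferConsistent above spreads the Kafka partitions evenly across the
    // available executors; it is the recommended default location strategy.)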
    // Holds the current batch's offset ranges; a var so each batch can update it
    var offsetRanges = Array[OffsetRange]()
    // The body of messages.foreachRDD executes on the driver
    messages.foreachRDD { kafkaRDD =>
      // Only process non-empty batches
      if (!kafkaRDD.isEmpty()) {
        // Only the RDD coming straight from the Kafka stream can be cast to
        // HasOffsetRanges; capture the offsets before any transformation
        offsetRanges = kafkaRDD.asInstanceOf[HasOffsetRanges].offsetRanges
        // Extract the record values
        val kafkaData: RDD[String] = kafkaRDD.map(_.value())
        // TODO: debug output, this loop can be removed
        for (o <- offsetRanges) {
          println(o)
        }
        // Process the data
        kafkaData.foreachPartition(partition => {
          // In the real job each partition is pushed to Oracle in batches;
          // a println stands in for the write here
          partition.foreach(x => {
            println(x)
          })
        })
        // Commit the offsets back to the Kafka consumer group only after the
        // batch has been processed
        messages.asInstanceOf[CanCommitOffsets].commitAsync(offsetRanges)
      }
    }
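    // commitAsync only queues the offsets; the actual commit is performed on a
    // later batch, so delivery is at-least-once and the downstream writes
    // should be idempotent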
    ssc.start()
    ssc.awaitTermination()
  }
}
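The second listing (its imports begin below) brings in TopicPartition, which points at the usual alternative: keeping the offsets in an external store and seeding the stream with them on startup. The building block is the Subscribe overload that takes an explicit starting-offset map, so on restart the stream resumes from wherever the store says it stopped. A minimal sketch, reusing ssc, topics and kafkaParams from the listing above; readOffsets is a hypothetical helper standing in for however the offsets were persisted (for example an Oracle table):

import org.apache.kafka.common.TopicPartition

// Hypothetical: load the last saved positions from the external store,
// e.g. SELECT topic, part, off FROM kafka_offsets WHERE group_id = ?
def readOffsets(): Map[TopicPartition, Long] =
  Map(new TopicPartition("testTopic2", 0) -> 42L) // placeholder value

val resumed = KafkaUtils.createDirectStream[String, String](
  ssc,
  PreferConsistent,
  // This Subscribe overload seeds the consumer with explicit starting offsets
  Subscribe[String, String](topics, kafkaParams, readOffsets())
)

Saving the new offsets in the same database transaction as the processed data is what upgrades this pattern from at-least-once to effectively exactly-once.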
package kafka.comsumer
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.kafka010.{ConsumerStrategies,