package com.taiji.streaming

import java.io._
import java.text.SimpleDateFormat
import java.util.Properties
import java.util.concurrent.ConcurrentHashMap

import com.taiji.utils.ConfigurationXml
import org.apache.commons.lang.StringUtils
import org.apache.curator.framework.CuratorFrameworkFactory
import org.apache.curator.retry.ExponentialBackoffRetry
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.CarbonSession._
import org.apache.spark.sql._
import org.apache.spark.sql.types.{StringType, StructField, StructType}
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010._
import org.apache.spark.streaming.{Seconds, StreamingContext, Time}
import org.slf4j.LoggerFactory

import scala.collection.mutable
import scala.util.control.Breaks.{break, breakable}

/**
 * Spark Streaming job that consumes data from multiple topics and stores it by category.
 * In testing so far this approach has been the most efficient, consuming roughly
 * 10 million records per minute.
 *
 * spark 2.3.2
 * Kafka server version 2.10, spark-streaming-kafka-0-10_2.11-2.3.2
 *
 * Data is written into CarbonData.
 */
object CoreRealtimeStreaming {

  private val log = LoggerFactory.getLogger("coreStreaming")
  def main(args: Array[String]): Unit = {
    try {
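      // NOTE: the values below are referenced later in main but are not defined anywhere
      // in this section. This is a minimal sketch of how they could be read from the
      // config.xml already loaded into `config`; every property key used here is an
      // assumption, not taken from the original source.
      val topic_brokers = config.getProperty("kafka_brokers") // assumed key: Kafka bootstrap servers
      val tpArray = config.getProperty("kafka_topics", "").split(",", -1) // assumed key: comma-separated topic list
      val time = config.getProperty("batch_interval", "60") // assumed key: batch interval in seconds
      val kafkaMaxRatePerPar = config.getProperty("kafka_max_rate_per_partition", "10000") // assumed key
      val schemaPath = config.getProperty("schema_path") // assumed key: path to the topic-to-columns properties file
      val xmlPath = config.getProperty("xml_path") // assumed key: path to the parser configuration xml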
      val spark = SparkSession
        .builder()
        .appName("Streaming")
        .config("spark.streaming.kafka.maxRatePerPartition", kafkaMaxRatePerPar)
        .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
        //.config("spark.executor.memoryOverhead", 4096)
        //.config("spark.yarn.executor.memoryOverhead", 4096)
        .enableHiveSupport()
        .getOrCreateCarbonSession()

      val ssc = new StreamingContext(spark.sparkContext, Seconds(time.toLong))
spark.sql("use core")//创建carbon表
createCarbonTable(spark)
val sc=spark.sparkContext
val schemaPathBro=sc.broadcast(schemaPath)
val configurationMap=ConfigurationXml.testParserXml(xmlPath)
val colAndPosMap=ConfigurationXml.getColAndPosMapfor (tp
val topic=tpArray(tp)
val groupId= topic.split("_topic", -1)(0) + "_group"val kafkaParams=Map[String, Object]("bootstrap.servers" ->topic_brokers,"key.deserializer" ->classOf[StringDeserializer],"value.deserializer" ->classOf[StringDeserializer],"auto.offset.reset" -> "latest","group.id" ->groupId,"enable.auto.commit" -> "false")
log.info("consumer topic is: " +topic)
val kafkaStream=createDirectKafkaStream(ssc, kafkaParams, topic, groupId)
var offsetRanges=Array[OffsetRange]()
val resourceRdd= kafkaStream.transform { rdd =>offsetRanges= rdd.asInstanceOf[HasOffsetRanges].offsetRanges //得到该 rdd 对应 kafka 的消息的 offset
rdd
}.map(rdd=>{
rdd.value()
})
        resourceRdd.foreachRDD((rdd, time: Time) => {
          if (!rdd.isEmpty()) {
            // build the schema from the configuration file
            val properties = getSchemaProperties(schemaPathBro.value)
            val fields = properties.getProperty(topic)
            val schema = StructType(fields.split(",", -1).map(t => {
              StructField(t, StringType, true)
            }))
            // the actual per-record processing
            val correctBatchData = rdd.map(str => {
              // append the ingestion date (yyyyMMdd) as the last column
              val trksj = new SimpleDateFormat("yyyyMMdd").format(System.currentTimeMillis())
              val appendStr = str + "\t" + trksj
              val dataStr = appendStr.split("\t", -1)
              Row.fromSeq(dataStr)
            })

            val correctDf = spark.createDataFrame(correctBatchData, schema)
            val tableName = "bcp_" + topic.split("_topic", -1)(0)
            // the default storage format would be parquet:
            //errorDataDf.write.mode(SaveMode.Append).partitionBy("TRKSJ").saveAsTable(tableName)
            // carbondata and partitioning cannot be combined; the following is not supported:
            //errorDataDf.write.mode(SaveMode.Append).format("carbondata").partitionBy("TRKSJ").saveAsTable(tableName)
            CarbonSparkStreamingFactory.getStreamSparkStreamingWriter(spark, "default", tableName)
              .mode(SaveMode.Append)
              .writeStreamData(correctDf, time)
          }
          // record the offsets for this batch
          storeOffsets(offsetRanges, groupId)
        })
}
ssc.start()
ssc.awaitTermination()
spark.stop()
    } catch {
      case e: Exception =>
        log.error("CoreRealtimeStreaming failed.", e)
    }
}
  def closeZkClient(): Unit = {
    if (client != null) {
      client.close()
    }
  }
  // load config.xml from the working directory; exit if it is missing or malformed
  val config = {
    val configFile = new File("config.xml")
    if (!configFile.exists || !configFile.isFile) System.exit(-1)
    val configInfo = new Properties
    try {
      configInfo.loadFromXML(new FileInputStream(configFile))
    } catch {
      case e: IOException =>
        log.error("config.xml has an invalid format!")
        System.exit(-1)
    }
    configInfo
  }
  // Curator client for ZooKeeper, namespaced under /kafka
  val client = {
    val zkHost = config.getProperty("zk_host")
    if (zkHost == null || zkHost.length == 0) {
      log.error("zkHost is not configured.")
      System.exit(-1)
    }
    val client = CuratorFrameworkFactory
.builder
.connectString(zkHost)
.retryPolicy(new ExponentialBackoffRetry(1000, 3))
.namespace("kafka")
.build()
client.start()
client
  }

  // base path under which consumer offsets are stored
val Globe_kafkaOffsetPath = "/consumers/streaming"
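  // With the "kafka" namespace set on the Curator client above, offsets end up stored at
  // /kafka/consumers/streaming/<groupName>/<topic>/<partition> in ZooKeeper.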
  // ensure the given ZK path exists, creating it (and its parents) if it does not
  def checkZKPathExists(path: String): Unit = {
    if (client.checkExists().forPath(path) == null) {
      client.create().creatingParentsIfNeeded().forPath(path)
    }
  }

  // persist the latest offset of every partition to ZooKeeper
  def storeOffsets(offsetRange: Array[OffsetRange], groupName: String): Unit = {
    for (o <- offsetRange) {
val zkPath= s"${Globe_kafkaOffsetPath}/${groupName}/${o.topic}/${o.partition}"
//向对应分区第一次写入或者更新Offset 信息
log.info("Topic:" + o.topic + ", Partition:" + o.partition + ", Offset:" + o.untilOffset + " to zk")if (client.checkExists().forPath(zkPath) == null) {
client.create().creatingParentsIfNeeded().forPath(zkPath)
}
client.setData().forPath(zkPath, o.untilOffset.toString.getBytes())
}
}
  def getFromOffset(topic: String, groupName: String): (Map[TopicPartition, Long], Int) = {
    // Kafka 0.8 vs 0.10 difference: 0.10 uses TopicPartition, 0.8 used TopicAndPartition.
    // Read the offsets saved in ZK and use them as the starting position of the DStream;
    // if none exist, create the path and start the DStream from scratch.
    val zkTopicPath = s"${Globe_kafkaOffsetPath}/${groupName}/${topic}"
    // make sure the path exists
    checkZKPathExists(zkTopicPath)
    // the children of the topic node are its partitions
    val childrens = client.getChildren().forPath(zkTopicPath)
    import scala.collection.JavaConversions._
    // iterate over the partitions
    val offSets: mutable.Buffer[(TopicPartition, Long)] = for {
      p <- childrens
    } yield {
      // read the data stored in each child node, i.e. the offset
      val offsetData = client.getData().forPath(s"$zkTopicPath/$p")
      // convert the offset to Long
      val offSet = java.lang.Long.valueOf(new String(offsetData)).toLong
      // return a (TopicPartition, Long) pair
      (new TopicPartition(topic, Integer.parseInt(p)), offSet)
    }
    println(offSets.toMap)
    if (offSets.isEmpty) {
      (offSets.toMap, 0)
    } else {
      (offSets.toMap, 1)
    }
}
  def createDirectKafkaStream(ssc: StreamingContext, kafkaParams: Map[String, Object], topic: String,
                              groupName: String): InputDStream[ConsumerRecord[String, String]] = {
    // get offset: flag = 1 means resume from the stored offsets,
    // flag = 0 means start fresh (earliest or latest, depending on the Kafka config)
    val (fromOffsets, flag) = getFromOffset(topic, groupName)
    var kafkaStream: InputDStream[ConsumerRecord[String, String]] = null
    if (flag == 1) {
      // with the message header added:
      //val messageHandler = (mmd: MessageAndMetadata[String, String]) => (mmd.topic, mmd.message())
      println(fromOffsets)
      kafkaStream = KafkaUtils.createDirectStream(ssc, LocationStrategies.PreferConsistent,
        ConsumerStrategies.Subscribe(Array(topic), kafkaParams, fromOffsets))
      println(fromOffsets)
    } else {
      kafkaStream = KafkaUtils.createDirectStream(ssc, LocationStrategies.PreferConsistent,
        ConsumerStrategies.Subscribe(Array(topic), kafkaParams))
    }
kafkaStream
}
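  // A hypothetical illustration (not from the original source) of the schema properties
  // file read by getSchemaProperties below: each topic name maps to a comma-separated
  // column list, and the order inside the value defines the column order. The last
  // column is assumed to be the ingestion-date column that the job appends per record,
  // e.g.
  //
  //   test_topic=obj_lx,obj_val,tags,CJSJ,TRKSJ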
  def getSchemaProperties(path: String): Properties = {
    // keep the order of what is read consistent with the order in the file: each topic's
    // field list is a single comma-separated value, so its internal order is preserved
    val properties = new Properties()
    // read the properties file through a buffered reader
    val bufferedReader = new BufferedReader(new FileReader(path))
    properties.load(bufferedReader)
    bufferedReader.close()
    properties
  }

  /**
   * create carbondata table
   *
   * @param spark SparkSession
   */
  def createCarbonTable(spark: SparkSession): Unit = {
    val testTable = "test"
    spark.sql(
      s"""
         | CREATE TABLE IF NOT EXISTS $testTable(
         |   obj_lx String,
         |   obj_val String,
         |   tags String,
         |   CJSJ int,
         |   TRKSJ int)
         | STORED BY 'carbondata'
         | TBLPROPERTIES(
         |   'streaming'='true',
         |   'sort_columns'='TRKSJ')
         | """.stripMargin)
  }
}