I've forgotten where this code was originally adapted from, so just treat it as a worked example.
One point to note: when developing locally against a CDH cluster, the dependencies you pull in must match the CDH component versions; otherwise the job will throw errors once it is submitted to the cluster.
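As a rough illustration of that version alignment, the build definition might look like this (a minimal build.sbt sketch; the Cloudera repository URL is the standard one, but the 2.4.0.cloudera2 version strings and the fastjson version are assumptions, so substitute whatever your cluster actually ships):

// build.sbt: version strings below are placeholders, align them with your CDH release
resolvers += "cloudera" at "https://repository.cloudera.com/artifactory/cloudera-repos/"

libraryDependencies ++= Seq(
  "org.apache.spark" %% "spark-sql"                  % "2.4.0.cloudera2" % "provided",
  "org.apache.spark" %% "spark-streaming"            % "2.4.0.cloudera2" % "provided",
  "org.apache.spark" %% "spark-streaming-kafka-0-10" % "2.4.0.cloudera2",
  "com.alibaba"      %  "fastjson"                   % "1.2.83"
)

Marking spark-sql and spark-streaming as provided matches cluster submission: the CDH cluster already ships those jars, while the Kafka integration jar usually has to be bundled into the application assembly.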
package mes.test.com.main
import com.alibaba.fastjson.{JSON, JSONObject}
import mes.kk.com.utils.PropUtil
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.{StringType, StructField, StructType}
import org.apache.spark.sql.{DataFrame, Row, SaveMode, SparkSession}
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.kafka010.{HasOffsetRanges, KafkaUtils, OffsetRange}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import scala.collection.mutable.ArrayBuffer
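// PropUtil (imported from mes.kk.com.utils above) is a project-specific helper that is not
// shown in this post. A minimal hypothetical sketch, assuming it simply wraps
// java.util.Properties loaded from the classpath:
//
//   class PropUtil(resource: String) {
//     private val props = new java.util.Properties()
//     props.load(getClass.getClassLoader.getResourceAsStream(resource))
//     def getProp(key: String): String = props.getProperty(key)
//   }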
/**
 * @author yhb
 * @description Consumes log messages from Kafka with Spark Streaming and writes them into Hive.
 */
object LogsToHive {

  // Load configuration from config.properties
  val prop = new PropUtil("config.properties")
  val brokers: String = prop.getProp("KAFKA_BROKERS")
  // Use the class name (minus the trailing "$" of a Scala object) as the consumer group id
  val groupName: String = this.getClass.getName.replace("$", "")
  def main(args: Array[String]): Unit = {
    // Get the SparkSession (created if it does not exist yet) with Hive support enabled
    // val spark = SparkSession.builder().appName(groupName).master("local[4]").enableHiveSupport().getOrCreate()
    val spark = SparkSession.builder().appName(groupName).enableHiveSupport().getOrCreate()
    val sc = spark.sparkContext
    // Set the log level
    sc.setLogLevel("WARN")
    val ssc = new StreamingContext(sc, Seconds(30))
    // Topic(s) to read
    val topics = Array("logs")
    // Kafka consumer parameters (auto.offset.reset can be latest, earliest or none)
    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> brokers,
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> groupName,
      "auto.offset.reset" -> "latest",
      "enable.auto.commit" -> (false: java.lang.Boolean)
    )
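    // auto.offset.reset only applies when this group.id has no committed offset yet;
    // with enable.auto.commit disabled, offsets must be committed manually after each
    // batch has been persisted, so records are not marked consumed before they land in Hive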
    // Create the direct Kafka stream (PreferConsistent spreads partitions evenly over executors)
    val messages = KafkaUtils.createDirectStream[String, String](
      ssc,
      PreferConsistent,
      Subscribe[String, String](topics, kafkaParams)
    )
    var offsetRanges = Array[OffsetRange]()
    messages.foreachRDD { kafkaRDD =>
      // Only a KafkaRDD can be cast to HasOffsetRanges, which exposes this batch's offsets
      offsetRanges = kafkaRDD.asInstanceOf[HasOffsetRanges].offsetRanges
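      // Once the batch has been written out, the usual pattern (not shown in this
      // truncated listing) is to commit the captured offsets back to Kafka, e.g.
      //   messages.asInstanceOf[CanCommitOffsets].commitAsync(offsetRanges)
      // with CanCommitOffsets imported from org.apache.spark.streaming.kafka010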
      // Extract the actual payload of each message
      val kafkaData: RDD[String] = kafkaRDD.map(_.value())
      val value: RDD[Row] = kafkaData.map