This program is based on an example from the Spark official website.
import java.util.Properties
import org.apache.spark.sql.SparkSession

def WriteToCK(): Unit = {
  // Point Spark at a local Hadoop directory (winutils is needed on Windows)
  System.setProperty("hadoop.home.dir", "D:\\softWare\\ruanjian\\spark\\hadoop-2.7.7")
  val spark = SparkSession
    .builder()
    .master("local")
    .config("spark.sql.caseSensitive", "true")
    .getOrCreate()
  spark.sparkContext.setLogLevel("warn")
  // Read the file and parse it as JSON (one JSON object per line)
  val dataFrame = spark.read.json("D://a.log")
  // Set the user name and password
  val username = "..." // placeholder: your ClickHouse user
  val password = "..." // placeholder: your ClickHouse password
  val connectionProperties = new Properties()
  connectionProperties.setProperty("user", username)
  connectionProperties.setProperty("password", password)
  // Write out; "append" mode adds rows to the existing table
  dataFrame.write.mode("append")
    .option("batchsize", "50000")
    .option("isolationLevel", "NONE")
    .option("numPartitions", "1")
    .jdbc("jdbcUrl", "tableName", connectionProperties) // placeholders: real JDBC URL and table name
}
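
For reference, a minimal sketch of what the two placeholder arguments might look like in practice. The host, port, database, table name, and driver class below are assumptions, not values from the original program: ClickHouse's JDBC URL usually has the form jdbc:clickhouse://<host>:8123/<database>, and the ClickHouse JDBC driver jar must be on the classpath (recent releases expose com.clickhouse.jdbc.ClickHouseDriver; older ones expose ru.yandex.clickhouse.ClickHouseDriver).

// All values below are illustrative placeholders; adjust to your environment.
val jdbcUrl = "jdbc:clickhouse://localhost:8123/default" // assumed host, HTTP port, and database
dataFrame.write.mode("append")
  .option("driver", "com.clickhouse.jdbc.ClickHouseDriver") // assumed driver class
  .option("batchsize", "50000")
  .option("isolationLevel", "NONE") // ClickHouse has no transactions, so disable isolation
  .option("numPartitions", "1")
  .jdbc(jdbcUrl, "events", connectionProperties) // "events" is a hypothetical table name

Note that the method is shown as a bare snippet; in a real project it would live inside an object (for example, one with a main method that calls WriteToCK()).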