1、MysqlAnalysis
使用 Spark 写入 MySQL，无须自己建表（Spark 的 JDBC 写入会在目标表不存在时自动建表）
import java.util.UUID
import net.sf.json.JSONObject
import org.apache.spark.SparkConf
import org.apache.spark.sql.{SaveMode, SparkSession}
import scala.collection.mutable
object MysqlAnalysis {

  /** Entry point: writes the aggregated session statistics into MySQL through
    * Spark's JDBC data source. The target table is created automatically on
    * first write, so no manual DDL is required.
    *
    * NOTE(review): `sessionAggrStat` and `ConfigManager` are defined elsewhere
    * in the project; this method assumes `ConfigManager.config` exposes
    * `jdbc.url` / `jdbc.username` / `jdbc.password` keys — confirm against the
    * project's config file.
    */
  def main(args: Array[String]): Unit = {
    // Local-mode SparkConf using all available cores.
    val sparkConf = new SparkConf().setAppName("session").setMaster("local[*]")
    // SparkSession with Hive support enabled (matches the rest of the project).
    val spark = SparkSession.builder().config(sparkConf).enableHiveSupport().getOrCreate()

    try {
      import spark.implicits._
      // Wrap the single aggregate record in an RDD so it can become a DataFrame.
      val sessionAggrStatRDD = spark.sparkContext.makeRDD(Array(sessionAggrStat))
      sessionAggrStatRDD.toDF().write
        .format("jdbc")
        .option("url", ConfigManager.config.getString("jdbc.url"))
        // Explicit driver class avoids "No suitable driver" failures with
        // mysql-connector-java 5.1.x on some classpaths.
        .option("driver", "com.mysql.jdbc.Driver")
        .option("dbtable", "session_aggr_stat")
        .option("user", ConfigManager.config.getString("jdbc.username"))
        .option("password", ConfigManager.config.getString("jdbc.password"))
        .mode(SaveMode.Append)
        .save()
    } finally {
      // Always release Spark resources, even if the JDBC write throws.
      spark.close()
    }
  }
}
2、pom.xml
<!-- Dependencies for the Spark -> MySQL write job.
     NOTE(review): the Spark artifacts below declare no <version>; this only
     works if a parent POM or <dependencyManagement> section (not shown here)
     supplies versions — confirm. -->
<dependencies>
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-core_2.11</artifactId>
</dependency>
<!-- spark-hive is needed because the job calls enableHiveSupport(). -->
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-hive_2.11</artifactId>
</dependency>
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-sql_2.11</artifactId>
</dependency>
<!-- JDBC driver used by the "jdbc" data source at runtime.
     NOTE(review): 5.1.30 (2014) is very old and has known CVEs; consider
     upgrading to the latest 5.1.x (driver class stays com.mysql.jdbc.Driver). -->
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
<version>5.1.30</version>
</dependency>
</dependencies>
<build>
<plugins>
<!-- Compiles the Scala sources.
     NOTE(review): no <version> and no <executions> binding compile goals are
     declared here — presumably inherited from a parent POM; verify, otherwise
     the Scala code will not be compiled. -->
<plugin>
<groupId>net.alchim31.maven</groupId>
<artifactId>scala-maven-plugin</artifactId>
</plugin>
</plugins>
</build>