1. Bulk-inserting data into MySQL with an RDD function
import java.sql.{Connection, DriverManager, PreparedStatement}

import org.apache.spark.{SparkConf, SparkContext}

object RDDtoMysql {
  def myFun(iterator: Iterator[(String, Int)]): Unit = {
    var conn: Connection = null
    var ps: PreparedStatement = null
    val sql = "insert into sparktomysql(name, age) values (?, ?)"
    try {
      conn = DriverManager.getConnection("jdbc:mysql://127.0.0.1:3306/test_dw", "test_dw", "123456")
      // Prepare the statement once per partition and reuse it for every record,
      // instead of creating (and leaking) a new PreparedStatement per row
      ps = conn.prepareStatement(sql)
      iterator.foreach { data =>
        ps.setString(1, data._1)
        ps.setInt(2, data._2)
        ps.executeUpdate()
      }
    } catch {
      case e: Exception => println("Mysql Exception: " + e.getMessage)
    } finally {
      if (ps != null) {
        ps.close()
      }
      if (conn != null) {
        conn.close()
      }
    }
  }

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("RDDToMysql").setMaster("local")
    val sc = new SparkContext(conf)
    val data = sc.parallelize(List(("www", 10), ("iteblog", 20), ("com", 30)))
    data.foreachPartition(myFun) // bulk import: one connection per partition
    sc.stop()
  }
}
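For larger partitions, true JDBC batching cuts the number of round trips to MySQL. Below is a minimal sketch of the same partition function using addBatch/executeBatch; the name myFunBatched and the flush size of 1000 are illustrative choices, not part of the original:

def myFunBatched(iterator: Iterator[(String, Int)]): Unit = {
  // Same table and credentials as myFun above; rows are accumulated with
  // addBatch and sent to MySQL in groups instead of one executeUpdate per row
  val conn = DriverManager.getConnection("jdbc:mysql://127.0.0.1:3306/test_dw", "test_dw", "123456")
  val ps = conn.prepareStatement("insert into sparktomysql(name, age) values (?, ?)")
  try {
    var count = 0
    iterator.foreach { case (name, age) =>
      ps.setString(1, name)
      ps.setInt(2, age)
      ps.addBatch()
      count += 1
      if (count % 1000 == 0) ps.executeBatch() // flush every 1000 rows (assumed batch size)
    }
    ps.executeBatch() // flush any remaining rows
  } finally {
    ps.close()
    conn.close()
  }
}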
2. Writing to MySQL with the DataFrame API (suitable for creating a new table or replacing its existing data)
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{Row, SQLContext}
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}

object DataFrameToMysql {
  def main(args: Array[String]): Unit = {
    val url = "jdbc:mysql://localhost:3306/spark?user=iteblog&password=iteblog"
    val conf = new SparkConf().setAppName("DataFrameToMysql").setMaster("local")
    val sc = new SparkContext(conf)
    val sqlContext = new SQLContext(sc)
    val schema = StructType(
      StructField("name", StringType) ::
      StructField("age", IntegerType) ::
      Nil)
    val data = sc.parallelize(List(("iteblog", 30), ("iteblog", 29), ("com", 40), ("bt", 33), ("www", 23)))
      .map(item => Row(item._1, item._2))
    val df = sqlContext.createDataFrame(data, schema)
    df.insertIntoJDBC(url, "sparktomysql", true) // true: overwrite (delete existing rows before inserting)
    sc.stop()
  }
}
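insertIntoJDBC was deprecated in Spark 1.4 and removed in later releases; on those versions the equivalent write goes through the DataFrameWriter API. A sketch under that assumption, reusing the df built above, where SaveMode.Overwrite plays the role of the true flag:

import java.util.Properties
import org.apache.spark.sql.SaveMode

val props = new Properties()
props.setProperty("user", "iteblog")
props.setProperty("password", "iteblog")
// Overwrite clears the existing rows of sparktomysql before inserting,
// matching the `true` argument of insertIntoJDBC
df.write.mode(SaveMode.Overwrite).jdbc("jdbc:mysql://localhost:3306/spark", "sparktomysql", props)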