import java.sql.DriverManager

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession

object mappartition写入数据库 extends App {
  val spark = SparkSession.builder()
    .appName("spark test function")
    .master("local[*]")
    .enableHiveSupport()
    .getOrCreate()
  val sc = spark.sparkContext

  val a: RDD[Int] = sc.parallelize(1 to 1000000, 2)

  // Open the JDBC connection inside the partition function, on the executor:
  // java.sql.Connection is not serializable, so a connection created here on
  // the driver can never be shipped to the executors.
  // foreachPartition returns Unit, so there is no count to capture; the
  // custom iterator must be fully consumed for the inserts to run.
  a.foreachPartition(iter => new CustomIterator3(iter).foreach(_ => ()))
  // Wraps a partition's iterator and inserts each element into MySQL as it
  // is pulled: one connection per partition, closed once the partition is
  // exhausted.
  class CustomIterator3(iter: Iterator[Int]) extends Iterator[Int] {
    private val conn = DriverManager.getConnection(
      "jdbc:mysql://localhost:3306/mzf_sn?characterEncoding=utf8",
      "root",
      "root")
    // One PreparedStatement reused for every row, instead of building the
    // SQL by string concatenation and creating a new Statement per element.
    private val stmt = conn.prepareStatement("insert into t_test(id) values (?)")
    println("partition opened")
    override def hasNext: Boolean = {
      val more = iter.hasNext
      if (!more) {     // partition exhausted: release the JDBC resources
        stmt.close()
        conn.close()
      }
      more
    }

    override def next(): Int = {
      val cur = iter.next()
      stmt.setInt(1, cur)
      stmt.executeUpdate()
      cur
    }
}
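
  /*
   * A sketch of a common refinement (not part of the original code): batch
   * the inserts per partition with addBatch/executeBatch inside a single
   * transaction, rather than one round trip per row. The connection settings
   * and the t_test table are the ones used above; the method name and the
   * 1000-row flush interval are illustrative choices.
   */
  def writePartitionBatched(iter: Iterator[Int]): Unit = {
    val conn = DriverManager.getConnection(
      "jdbc:mysql://localhost:3306/mzf_sn?characterEncoding=utf8", "root", "root")
    conn.setAutoCommit(false)                  // commit once per partition
    val stmt = conn.prepareStatement("insert into t_test(id) values (?)")
    try {
      var n = 0
      iter.foreach { v =>
        stmt.setInt(1, v)
        stmt.addBatch()
        n += 1
        if (n % 1000 == 0) stmt.executeBatch() // flush every 1000 rows
      }
      stmt.executeBatch()                      // flush the tail of the batch
      conn.commit()
    } finally {
      stmt.close()
      conn.close()
    }
  }
  // Usage: a.foreachPartition(writePartitionBatched)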
}
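
/*
 * The object name above says mapPartitions, but the code actually uses
 * foreachPartition, which is an action. A mapPartitions version would look
 * roughly like the sketch below (assuming the same connection settings and
 * t_test table; the object and method names are illustrative). Note that
 * mapPartitions is lazy: nothing is written until an action such as sum()
 * forces the RDD.
 */
object MapPartitionsWriteSketch {
  import java.sql.DriverManager
  import org.apache.spark.rdd.RDD

  // Returns the total number of rows written, summed over the partitions.
  def writeAndCount(a: RDD[Int]): Long =
    a.mapPartitions { iter =>
      val conn = DriverManager.getConnection(
        "jdbc:mysql://localhost:3306/mzf_sn?characterEncoding=utf8", "root", "root")
      val stmt = conn.prepareStatement("insert into t_test(id) values (?)")
      var n = 0L
      iter.foreach { v =>
        stmt.setInt(1, v)
        stmt.executeUpdate()
        n += 1
      }
      stmt.close()
      conn.close()
      Iterator.single(n)   // emit the per-partition row count
    }.sum().toLong         // the action that actually triggers the writes
}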