import java.sql.DriverManager
import org.apache.spark.rdd.JdbcRDD
import org.apache.spark.{SparkConf, SparkContext}
/**
 * Demo: load a slice of a MySQL table into Spark via [[org.apache.spark.rdd.JdbcRDD]].
 *
 * JdbcRDD splits the range [lowerBound, upperBound] into `numPartitions`
 * partitions, binds each sub-range into the two `?` placeholders of the SQL,
 * opens one connection per partition via the supplied factory, and maps each
 * ResultSet row through `mapRow`.
 */
object JDBCRDDDemo {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("JDBCRDDDemo").setMaster("local[2]")
    val sc = new SparkContext(conf)
    try {
      // Connection factory: JdbcRDD invokes this once per partition and
      // closes the connection itself when the partition is consumed.
      val connection = () => {
        // Load (and thereby register) the MySQL JDBC driver.
        Class.forName("com.mysql.jdbc.Driver").newInstance()
        // Open a connection to the local `video` database.
        DriverManager.getConnection("jdbc:mysql:///video", "root", "123456")
      }
      // Query must contain exactly two `?` placeholders for the partition's
      // lower and upper bound (both inclusive).
      val jdbcRdd = new JdbcRDD(
        sc,
        connection,
        "select * from user where id >= ? and id <= ?",
        10,   // lowerBound: smallest id fetched
        20,   // upperBound: largest id fetched
        2,    // numPartitions: range is split in two
        r => {
          // Map each row to (id, email) using 1-based column indexes.
          val id = r.getInt(1)
          val email = r.getString(2)
          (id, email)
        })
      val jrdd = jdbcRdd.collect()
      println(jrdd.toBuffer)
    } finally {
      // Always release the SparkContext, even if the job fails.
      sc.stop()
    }
  }
}
// ---------------------
// 作者:小东升职记
// 来源:CSDN
// 原文:https://blog.csdn.net/qq_38704184/article/details/86317131
// 版权声明:本文为博主原创文章,转载请附上博文链接!