1. The error message
java.sql.SQLException: No suitable driver
at java.sql.DriverManager.getDriver(DriverManager.java:315)
at org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions$$anonfun$7.apply(JDBCOptions.scala:84)
at org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions$$anonfun$7.apply(JDBCOptions.scala:84)
at scala.Option.getOrElse(Option.scala:121)
at org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions.&lt;init&gt;(JDBCOptions.scala:83)
at org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions.&lt;init&gt;(JDBCOptions.scala:34)
at org.apache.spark.sql.execution.datasources.jdbc.JdbcRelationProvider.createRelation(JdbcRelationProvider.scala:32)
at org.apache.spark.sql.execution.datasources.DataSource.resolveRelation(DataSource.scala:306)
at org.apache.spark.sql.DataFrameReader.load(DataFrameReader.scala:178)
at org.apache.spark.sql.DataFrameReader.load(DataFrameReader.scala:146)
at com.dataexa.cp.base.datasource.DataBaseToDF.convert(DataBaseToDF.scala:22)
at com.dataexa.cp.base.datasource.DataSourceReader$$anonfun$getResult$1.apply(DataSourceReader.scala:63)
at com.dataexa.cp.base.datasource.DataSourceReader$$anonfun$getResult$1.apply(DataSourceReader.scala:56)
at scala.collection.Iterator$class.foreach(Iterator.scala:893)
at scala.collection.AbstractIterator.foreach(Iterator.scala:1336)
at scala.collection.MapLike$DefaultKeySet.foreach(MapLike.scala:174)
at com.dataexa.cp.base.datasource.DataSourceReader.getResult(DataSourceReader.scala:56)
at com.dataexa.cp.base.datasource.DataSourceReader$.main(DataSourceReader.scala:125)
at com.dataexa.cp.base.datasource.DataSourceReader.main(DataSourceReader.scala)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
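The exception is raised by java.sql.DriverManager.getDriver: as the stack trace shows, when no "driver" option is given, Spark's JDBCOptions falls back (via Option.getOrElse) to asking DriverManager for a driver that accepts the JDBC URL, and that lookup fails with "No suitable driver" if the MySQL connector class has never been registered with DriverManager, typically because the jar was loaded through a classloader that DriverManager's service discovery does not see. A minimal sketch of that lookup, assuming the MySQL Connector/J jar is on the classpath; the URL here is hypothetical:

import java.sql.DriverManager

object DriverCheck {
  def main(args: Array[String]): Unit = {
    val url = "jdbc:mysql://localhost:3306/test"  // hypothetical connection URL
    // Loading the driver class runs its static initializer, which registers it
    // with DriverManager; without such a registration, getDriver(url) throws
    // "No suitable driver".
    Class.forName("com.mysql.jdbc.Driver")
    println(DriverManager.getDriver(url).getClass.getName)
  }
}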
2. The code that throws the error
case class DataBaseToDF(sparkSession: SparkSession) {
  def convert(dataBase: DataBase): DataFrame = {
    val dataFrame = sparkSession.read.format(dataBase.getDbType)
      .options(Map(
        "url" -> dataBase.getUrl,
        "inferschema" -> "true",
        "dbtable" -> dataBase.getTableName,
        "user" -> dataBase.getUsername,
        "password" -> dataBase.getPassword))
      // .option("inferschema", "true")
      // .option("url", dataBase.getUrl)
      // .option("dbtable", dataBase.getTableName)
      // .option("user", dataBase.getUsername)
      // .option("password", dataBase.getPassword)
      .load()
    dataFrame
  }
}
The error occurred while reading a MySQL table with Spark.
Adding the "driver" option to the code fixed the problem. The colleague who wrote it said it wasn't needed, which turned out to be a real trap!
case class DataBaseToDF(sparkSession: SparkSession) {
  def convert(dataBase: DataBase): DataFrame = {
    val dataFrame = sparkSession.read.format(dataBase.getDbType)
      .options(Map(
        "url" -> dataBase.getUrl,
        "inferschema" -> "true",
        "driver" -> "com.mysql.jdbc.Driver",  // explicitly name the JDBC driver class
        "dbtable" -> dataBase.getTableName,
        "user" -> dataBase.getUsername,
        "password" -> dataBase.getPassword))
      // .option("inferschema", "true")
      // .option("url", dataBase.getUrl)
      // .option("dbtable", dataBase.getTableName)
      // .option("user", dataBase.getUsername)
      // .option("password", dataBase.getPassword)
      .load()
    dataFrame
  }
}
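With the "driver" option set, Spark registers the named class itself instead of relying on DriverManager's URL lookup, which is why the error goes away. Note that com.mysql.jdbc.Driver is the class name for MySQL Connector/J 5.x; Connector/J 8.x renames it to com.mysql.cj.jdbc.Driver. On YARN the connector jar also has to be shipped with the job, for example via spark-submit --jars or by putting it on the driver/executor classpath. A hypothetical usage sketch, assuming getDbType returns "jdbc" and that DataBase has setters mirroring the getters used above:

import org.apache.spark.sql.SparkSession

object ReadMySqlDemo {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("DataBaseToDF demo").getOrCreate()

    val db = new DataBase()                          // hypothetical bean
    db.setDbType("jdbc")                             // maps to spark.read.format("jdbc")
    db.setUrl("jdbc:mysql://localhost:3306/test")    // hypothetical URL
    db.setTableName("my_table")
    db.setUsername("root")
    db.setPassword("******")

    val df = DataBaseToDF(spark).convert(db)
    df.printSchema()
    df.show(10)
  }
}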
Source: https://blog.csdn.net/love_zy0216/article/details/89212035