参考链接 (Reference): Spark 通过 JDBC 读取 Hive 事务表 — reading Hive transactional (ACID) tables from Spark via the HiveServer2 JDBC driver
https://github.com/Gowthamsb12/BigData-Blogs/blob/master/Spark_ACID
import org.apache.spark.sql.jdbc.JdbcDialect
/**
 * Custom JDBC dialect for HiveServer2 connections.
 *
 * Spark's default dialect quotes identifiers with double quotes, which
 * Hive rejects; Hive expects backtick-quoted identifiers instead.
 * Registering this dialect lets `spark.read.format("jdbc")` work against
 * `jdbc:hive2://...` URLs.
 */
object HiveDialect extends JdbcDialect {

  /** Claim any connection whose JDBC URL targets HiveServer2. */
  override def canHandle(url: String): Boolean = url.startsWith("jdbc:hive2")

  /**
   * Quote each dot-separated segment with backticks, e.g.
   * `db.table` becomes `` `db`.`table` ``.
   * NOTE(review): a column name that itself contains a dot would be
   * split incorrectly — acceptable for typical `db.table` identifiers.
   */
  override def quoteIdentifier(colName: String): String = {
    val segments = for (segment <- colName.split('.')) yield "`" + segment + "`"
    segments.mkString(".")
  }
}
// Register the scala object
// Side effect: adds HiveDialect to Spark's global dialect registry so that
// any subsequent JDBC read/write against a "jdbc:hive2" URL uses it.
// Must run before the spark.read call below.
import org.apache.spark.sql.jdbc.JdbcDialects
JdbcDialects.registerDialect(HiveDialect)
// Test: read a Hive table over JDBC and count its rows as a smoke test.
// The URL uses ZooKeeper service discovery to locate a live HiveServer2
// instance among the three listed hosts.
val hiveJdbcUrl =
  "jdbc:hive2://hdfs-test03.yingzi.com,hdfs-test04.yingzi.com,hdfs-test05.yingzi.com/;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2"

val jdbcDF = spark.read
  .format("jdbc")
  .option("url", hiveJdbcUrl)
  .option("dbtable", "default.a11")   // table to read (db.table)
  .option("user", "hive")
  .option("password", "hive")         // NOTE(review): plaintext credentials — move to a secure config in real use
  .option("fetchsize", "20")          // rows fetched per round trip
  .load()

jdbcDF.count()