第一步: 添加maven依赖
<repositories>
<repository>
<id>scala-tools.org</id>
<name>Scala-Tools Maven2 Repository</name>
<url>http://scala-tools.org/repo-releases</url>
</repository>
</repositories>
<dependencies>
<dependency>
<groupId>org.scala-lang</groupId>
<artifactId>scala-library</artifactId>
<version>${scala.version}</version>
</dependency>
<!--sparkSQL 依赖-->
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-sql_2.11</artifactId>
<version>${spark.version}</version>
</dependency>
<!--hiveSQL 的依赖-->
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-hive_2.11</artifactId>
<version>${spark.version}</version>
</dependency>
<!--配置hive的依赖-->
<dependency>
<groupId>org.spark-project.hive</groupId>
<artifactId>hive-jdbc</artifactId>
<version>1.2.1.spark2</version>
</dependency>
</dependencies>
第二步: 测试代码
import java.sql.DriverManager
/**
* 通过JDBC的方式来访问
*/
/**
 * Connects to a Spark Thrift Server (HiveServer2-compatible) over JDBC,
 * runs a query, and prints each row.
 *
 * Fixes vs. original: resources are now released in `finally` blocks so the
 * connection/statement/result set are not leaked when the query or row
 * iteration throws; `main` uses an explicit `: Unit =` instead of the
 * deprecated procedure syntax.
 */
object SparkSQLThriftServerApp {
  def main(args: Array[String]): Unit = {
    // Register the Hive JDBC driver with DriverManager.
    Class.forName("org.apache.hive.jdbc.HiveDriver")
    // URL is the beeline endpoint: replace `ip` with the server's IP;
    // username/password are the server login credentials.
    val conn = DriverManager.getConnection("jdbc:hive2://ip:10000", "username", "password")
    try {
      val pstm = conn.prepareStatement("select name,password from ligh")
      try {
        val res = pstm.executeQuery()
        try {
          // Walk the result set and print every row.
          while (res.next()) {
            println("name: " + res.getString("name") + ", password: " + res.getString("password"))
          }
        } finally {
          res.close() // always release the result set
        }
      } finally {
        pstm.close() // always release the statement
      }
    } finally {
      conn.close() // always release the connection, even on failure
    }
  }
}
第三步: 测试结果
第四步: 注意事项
4.1 千万不要忘了开启 thriftserver 服务端, 不然就会报以下错误:
Could not open client transport with JDBC Uri: jdbc:hive2://ip:10000: java.net.ConnectException: Connection refused (Connection refused)
4.2 如果你是阿里云服务器,一定不要忘了开启安全组端口,如果你是自己本机搭建的虚拟机,不要忘记关闭防火墙.
有时间就学习,每天进步一点点…