使用 JDBC 编程方式访问 Spark Thrift Server(即通过 Spark SQL 访问 Hive)
1.启动thriftserver服务 (一定先启动服务,否则会报错)
./start-thriftserver.sh --master local[2] --jars /opt/mysql-connector-java-5.1.22-bin.jar
2.引入hive-jdbc包
<dependency>
<groupId>org.spark-project.hive</groupId>
<artifactId>hive-jdbc</artifactId>
<version>1.2.1.spark2</version>
</dependency>
3.用代码连接
import java.sql.DriverManager
/**
 * Connects to Spark's Thrift Server (HiveServer2-compatible endpoint) over
 * plain JDBC, runs a sample query against the `orders` table, and prints
 * the first 10 rows.
 *
 * Prerequisite: the thriftserver must already be running (see step 1 above),
 * otherwise the connection attempt fails.
 */
object SparkSQLThriftserverApp {

  def main(args: Array[String]): Unit = {
    // Explicitly register the Hive JDBC driver; required for pre-JDBC-4
    // drivers such as hive-jdbc 1.2.1.spark2.
    Class.forName("org.apache.hive.jdbc.HiveDriver")

    // NOTE(review): host/port/user are hard-coded; assumes the thriftserver
    // listens on hadoop01:10000 with passwordless user "root" — confirm.
    val conn = DriverManager.getConnection("jdbc:hive2://hadoop01:10000", "root", "")
    try {
      val pstmt = conn.prepareStatement("select t.order_id ,t.user_id from orders t limit 10")
      try {
        val rs = pstmt.executeQuery()
        try {
          while (rs.next()) {
            println("order_id:" + rs.getString("order_id") + ",user_id:" + rs.getString("user_id"))
          }
        } finally {
          rs.close() // close ResultSet even if next()/getString throws
        }
      } finally {
        pstmt.close() // close Statement even if executeQuery throws
      }
    } finally {
      conn.close() // always release the JDBC connection
    }
  }
}