// Environment: Spark 1.5.1
// The Spark ThriftServer here runs in HA mode. If it does not, set the URL to a single server address instead.
package abc.platform.spark;
import java.sql.*;
import org.apache.hadoop.conf.Configuration;
import ebda.platform.util.LoginUtil;
/**
 * Sample client that runs a Spark SQL query through the Spark ThriftServer
 * (HiveServer2 JDBC protocol) and prints every row of {@code table_Student}
 * tab-separated to stdout.
 */
public class SparkSQLTest01 {

    public static void main(String[] args) throws Exception {
        // "ha-cluster" is the HA service-discovery alias for the ThriftServer;
        // replace with host:port when not running in HA mode.
        String haClusterUrl = "ha-cluster";
        // NOTE(review): the original referenced an undeclared 'securityConfig'
        // (compile error). Presumably it carried Kerberos/ZooKeeper discovery
        // parameters from a security setup step — fill in as needed.
        String securityConfig = "";
        String url = "jdbc:hive2://" + haClusterUrl + "/default;" + securityConfig;
        String sql = "SELECT * FROM table_Student";

        ResultSet result = executeQuery(url, sql);
        try {
            ResultSetMetaData resultMetaData = result.getMetaData();
            int colNum = resultMetaData.getColumnCount();
            // JDBC columns are 1-based.
            while (result.next()) {
                for (int j = 1; j <= colNum; j++) {
                    System.out.print(result.getString(j) + "\t");
                }
                System.out.println();
            }
        } finally {
            // executeQuery hands ownership of the statement/connection to the
            // caller via the ResultSet; close all three here to avoid leaks.
            Statement statement = result.getStatement();
            Connection connection = (statement != null) ? statement.getConnection() : null;
            result.close();
            if (statement != null) {
                statement.close();
            }
            if (connection != null) {
                connection.close();
            }
        }
    }

    /**
     * Runs a Spark SQL / HQL query over JDBC.
     *
     * <p>The returned {@link ResultSet} keeps its statement and connection
     * open; the caller must close them (reachable via
     * {@code ResultSet.getStatement().getConnection()}).
     *
     * @param url    JDBC URL of the ThriftServer (hive2 protocol)
     * @param sqlStr query to execute
     * @return the open result set of the query
     * @throws Exception if the driver is missing or the query fails
     */
    public static ResultSet executeQuery(String url, String sqlStr) throws Exception {
        // Class.forName alone registers the JDBC driver; newInstance() was
        // redundant. Let a missing driver fail fast instead of being swallowed.
        Class.forName("org.apache.hive.jdbc.HiveDriver");

        Connection connection = null;
        PreparedStatement statement = null;
        try {
            connection = DriverManager.getConnection(url);
            statement = connection.prepareStatement(sqlStr);
            // Execute the HQL; on success the caller owns the resources.
            return statement.executeQuery();
        } catch (Exception e) {
            // Clean up the partially opened resources before rethrowing so a
            // failed query does not leak a connection.
            if (statement != null) {
                try {
                    statement.close();
                } catch (SQLException ignored) {
                    // best-effort cleanup; the original exception matters more
                }
            }
            if (connection != null) {
                try {
                    connection.close();
                } catch (SQLException ignored) {
                    // best-effort cleanup; the original exception matters more
                }
            }
            throw e;
        }
    }
}