Preface
The Java connection is not picky about framework versions: I have personally verified JDK 1.7, JDK 1.8, and JDK 17, with Spring Boot versions from 1.5.6 through 2.7.6, and all of them work.
On to the code
Dependencies
There is no need to change the version numbers; every combination I have tried so far works. The exclusions keep jetty-all and the slf4j-log4j12 binding from clashing with Spring Boot's embedded server and logging.
<dependencies>
    <dependency>
        <groupId>org.apache.hive</groupId>
        <artifactId>hive-jdbc</artifactId>
        <version>2.3.7</version>
        <exclusions>
            <exclusion>
                <groupId>org.eclipse.jetty.aggregate</groupId>
                <artifactId>jetty-all</artifactId>
            </exclusion>
        </exclusions>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-common</artifactId>
        <version>2.7.4</version>
        <exclusions>
            <exclusion>
                <groupId>org.slf4j</groupId>
                <artifactId>slf4j-log4j12</artifactId>
            </exclusion>
        </exclusions>
    </dependency>
</dependencies>
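For reference, the jdbc_url the utility class below expects follows the standard hive2 scheme. A minimal sketch, where the host spark-master, port 10000, and database default are placeholder assumptions; although the driver comes from hive-jdbc, the same URL format also reaches a Spark Thrift Server, since it speaks the HiveServer2 protocol:

// Hypothetical host and database; substitute your own Thrift Server address
String jdbcUrl = "jdbc:hive2://spark-master:10000/default";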
Connection utility class
The full code is pasted below, imports included.
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.sql.*;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public class SparkConnection {

    // Run a query and return the result set as a JSON string
    public String getJson(String jdbc_url, String sql) throws SQLException {
        Connection connection = null;
        // The hive-jdbc driver self-registers through JDBC's service loader,
        // so Class.forName("org.apache.hive.jdbc.HiveDriver") is not needed here
        try {
            connection = DriverManager.getConnection(jdbc_url);
            List<Map<String, Object>> list = selectTable(connection, sql);
            // Serialize the list to JSON with Jackson, which ships with Spring Boot
            ObjectMapper mapper = new ObjectMapper();
            return mapper.writeValueAsString(list);
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            if (connection != null)
                connection.close();
        }
        return null;
    }

    // Run a query and return the result set as a List
    public List<Map<String, Object>> getMaps(String jdbc_url, String sql) throws SQLException {
        Connection connection = null;
        try {
            connection = DriverManager.getConnection(jdbc_url);
            return selectTable(connection, sql);
        } catch (SQLException e) {
            e.printStackTrace();
        } finally {
            if (connection != null)
                connection.close();
        }
        return null;
    }

    // Execute the query, mapping each row to lower-cased column name -> value
    public List<Map<String, Object>> selectTable(Connection connection, String sql) {
        Statement stmt = null;
        ResultSet set = null;
        try {
            stmt = connection.createStatement();
            set = stmt.executeQuery(sql);
            List<Map<String, Object>> list = new ArrayList<>();
            ResultSetMetaData metaData = set.getMetaData();
            int columnCount = metaData.getColumnCount();
            while (set.next()) {
                Map<String, Object> map = new HashMap<>();
                for (int i = 1; i <= columnCount; ++i) {
                    String columnName = metaData.getColumnName(i).toLowerCase();
                    map.put(columnName, set.getObject(i));
                }
                list.add(map);
            }
            return list;
        } catch (SQLException e) {
            e.printStackTrace();
        } finally {
            // Close the ResultSet as well as the Statement to avoid leaking resources
            close(set);
            close(stmt);
        }
        return null;
    }

    // Close a Statement, swallowing any SQLException
    private void close(Statement stmt) {
        if (stmt != null) {
            try {
                stmt.close();
            } catch (SQLException e) {
                e.printStackTrace();
            }
        }
    }

    // Close a ResultSet, swallowing any SQLException
    private void close(ResultSet rs) {
        if (rs != null) {
            try {
                rs.close();
            } catch (SQLException e) {
                e.printStackTrace();
            }
        }
    }
}
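A minimal usage sketch of the class above; the wrapper class, connection URL, and table name are hypothetical and only illustrate the call pattern:

public class SparkConnectionDemo {
    public static void main(String[] args) throws Exception {
        SparkConnection spark = new SparkConnection();
        // Hypothetical Thrift Server address; replace with your own host and database
        String jdbcUrl = "jdbc:hive2://spark-master:10000/default";
        // Hypothetical table; any SELECT is executed the same way
        String json = spark.getJson(jdbcUrl, "select * from demo_table limit 10");
        System.out.println(json);
    }
}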
Source code download
Source address:
Java connecting to Spark/Hive