HIVE 和其他数据库不一样。
metaData.getTables 是 Connection 元数据的通用方法,用于获取某个用户在某个数据库下的所有表。
但是!!!这个方法对 HIVE 不管用:Hive 的 JDBC 驱动不会按库/用户过滤,调用后会把所有库的表都取出来,无法区分归属。
代码
/**
 * List the tables (or ES indexes / Kudu tables) of the datasource identified by {@code dbid}.
 * <p>
 * Hive is special-cased: its JDBC driver does not filter
 * {@link java.sql.DatabaseMetaData#getTables} by schema/user, so for Hive we run
 * {@code show tables} directly instead of using the generic metadata call.
 *
 * @param loginUser current session user (injected, unused here)
 * @param type      datasource type (ELASTICSEARCH / KUDU / JDBC types)
 * @param dbid      datasource id used to load connection parameters
 * @return list of table names; empty list when the lookup fails
 */
@PostMapping("/tableListNew")
@ResponseBody
public List<String> tableListNew(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam("type") DbType type,@RequestParam("dbid") int dbid ) throws SQLException {
DataSource dataSource = dataSourceMapper.queryDataSourceByID(dbid);
String cp = dataSource.getConnectionParams();
ConnectionParams connectionParams = JSON.parseObject(cp, ConnectionParams.class);
List<String> tableList = new ArrayList<>();
Connection connection = null;
ResultSet resultSet = null;
try {
if("ELASTICSEARCH".equals(type.name().trim())){
// connect to ES; close the client in finally so it is released even if listIndexs throws
RestHighLevelClient client = DataSourceTools.getEsConnect(connectionParams);
try {
// every ES index is reported as a "table"
tableList = EsUtils.listIndexs(client);
} finally {
EsUtils.close(client);
}
}else if("KUDU".equals(type.name().trim())){
tableList = KuduUtil.getKuduTableList(connectionParams.getKuduMasters());
}else{
// generic JDBC path
connection = dataSourceService.getConnection(type,cp);
if (null != connection) {
DatabaseMetaData metaData = connection.getMetaData();
String typeDb = metaData.getDatabaseProductName().toUpperCase();
if("APACHE HIVE".equals(typeDb)){
// Hive: metaData.getTables ignores the schema filter and returns tables from
// every database, so run "show tables" in the current database instead.
// try-with-resources closes the statement/result set (the original leaked both).
String sql = " show tables ";
try (PreparedStatement ps = connection.prepareStatement(sql);
     ResultSet hiveTables = ps.executeQuery()) {
while (hiveTables.next()) {
tableList.add(hiveTables.getString("tab_name"));
}
}
}else{// all other JDBC databases: use the standard metadata API
String schema = null;
try {
schema = metaData.getConnection().getSchema();
} catch (Throwable t) {
// some drivers throw on getSchema(); the pattern helper below copes with null
}
String dbSchemaPattern = DataSourceTools.getDBSchemaPattern(connectionParams, schema, typeDb);
String db = connectionParams.getDatabase();
resultSet = metaData.getTables(db, dbSchemaPattern, null, new String[] { "TABLE", "VIEW" });
if (null != resultSet) {
while (resultSet.next()) {
tableList.add(resultSet.getString("TABLE_NAME"));
}
}
}
}
}
} catch (Exception e) {
// best effort: log and return whatever was collected so far
// TODO(review): route through the class logger instead of stderr
e.printStackTrace();
} finally {
if(connection!=null){
// closes resultSet (may be null) and the connection
DataSourceTools.close(resultSet,null,connection);
}
}
return tableList;
}
工具方法:根据数据源类型获取 JDBC 连接
/**
 * Open a JDBC {@link Connection} for the given datasource type.
 * <p>
 * Parses {@code parameter} (JSON) into the matching {@code BaseDataSource} subclass,
 * loads the corresponding JDBC driver class, and connects. For HIVE/IMPALA/SPARK a
 * Kerberos login is performed first when Kerberos startup is enabled. ODPS connects
 * with a {@link Properties} bag (access id/key/project) instead of user/password.
 *
 * @param dbType datasource type
 * @param parameter JSON-encoded connection parameters for that type
 * @return an open connection, or {@code null} if the type is unsupported or the
 *         connection attempt failed (the error is logged, not rethrown)
 */
public Connection getConnection(DbType dbType, String parameter) {
Connection connection = null;
BaseDataSource datasource = null;
try {
switch (dbType) {
case GAUSS:
datasource = JSON.parseObject(parameter, GaussDataSource.class);
Class.forName(Constants.COM_GAUSS_JDBC_DRIVER);
break;
case POSTGRESQL:
datasource = JSON.parseObject(parameter, PostgreDataSource.class);
Class.forName(Constants.ORG_POSTGRESQL_DRIVER);
break;
case MYSQL:
case DRDS :
// DRDS is MySQL wire-compatible, so it shares the MySQL datasource/driver
datasource = JSON.parseObject(parameter, MySQLDataSource.class);
Class.forName(Constants.COM_MYSQL_JDBC_DRIVER);
break;
case HIVE:
case IMPALA:
case SPARK:
// side effect: logs in via Kerberos keytab before connecting, when enabled
if (CommonUtils.getKerberosStartupState()) {
System.setProperty(org.apache.dolphinscheduler.common.Constants.JAVA_SECURITY_KRB5_CONF,
getString(org.apache.dolphinscheduler.common.Constants.JAVA_SECURITY_KRB5_CONF_PATH));
Configuration configuration = new Configuration();
configuration.set(org.apache.dolphinscheduler.common.Constants.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
UserGroupInformation.setConfiguration(configuration);
UserGroupInformation.loginUserFromKeytab(getString(org.apache.dolphinscheduler.common.Constants.LOGIN_USER_KEY_TAB_USERNAME),
getString(org.apache.dolphinscheduler.common.Constants.LOGIN_USER_KEY_TAB_PATH));
}
// IMPALA reuses the Hive datasource; all three use the Hive JDBC driver
if (dbType == DbType.HIVE||dbType == DbType.IMPALA){
datasource = JSON.parseObject(parameter, HiveDataSource.class);
}else if (dbType == DbType.SPARK){
datasource = JSON.parseObject(parameter, SparkDataSource.class);
}
Class.forName(Constants.ORG_APACHE_HIVE_JDBC_HIVE_DRIVER);
break;
case CLICKHOUSE:
datasource = JSON.parseObject(parameter, ClickHouseDataSource.class);
Class.forName(Constants.COM_CLICKHOUSE_JDBC_DRIVER);
break;
case ORACLE:
datasource = JSON.parseObject(parameter, OracleDataSource.class);
Class.forName(Constants.COM_ORACLE_JDBC_DRIVER);
break;
case SQLSERVER:
datasource = JSON.parseObject(parameter, SQLServerDataSource.class);
Class.forName(Constants.COM_SQLSERVER_JDBC_DRIVER);
break;
case DB2:
datasource = JSON.parseObject(parameter, DB2ServerDataSource.class);
Class.forName(Constants.COM_DB2_JDBC_DRIVER);
break;
case HANA:
datasource = JSON.parseObject(parameter, HanaDataSource.class);
Class.forName(Constants.COM_GAUSS_HANA_DRIVER);
break;
case DM:
datasource = JSON.parseObject(parameter, DmDataSource.class);
Class.forName(Constants.COM_GAUSS_DM_DRIVER);
break;
case ODPS:
datasource = JSON.parseObject(parameter, OdpsDataSource.class);
Class.forName(Constants.COM_GAUSS_ODPS_DRIVER);
break;
case RDMS:
// RDMS carries its driver class name inside the parameters
datasource = JSON.parseObject(parameter, RdmsDataSource.class);
String driver = datasource.getDriver();
Class.forName(driver);
break;
case KINGBASE:
datasource = JSON.parseObject(parameter, KingBaseDataSource.class);
Class.forName(Constants.COM_KINGBASE_DRIVER);
break;
default:
// unsupported type: datasource stays null and we fall through to return null
break;
}
if(datasource != null&&dbType != DbType.ODPS){
// NOTE: a MySQL 5/8-compatible JDBC URL must carry
// useSSL=false&characterEncoding=utf-8&serverTimezone=GMT%2B8
connection = DriverManager.getConnection(datasource.getJdbcUrl(), datasource.getUser(), datasource.getPassword());
}else if(datasource != null&&dbType == DbType.ODPS){
// ODPS authenticates via access id/key + project name passed as driver properties
Properties config = new Properties();
config.put("access_id", datasource.getUser());
config.put("access_key", datasource.getPassword());
config.put("project_name", datasource.getDatabase());
connection = DriverManager.getConnection(datasource.getJdbcUrl(),config);
}
} catch (Exception e) {
// swallow and return null; callers must null-check (see tableListNew)
logger.error(e.getMessage(),e);
}
return connection;
}