There is one gotcha: Flink's Kerberos login can be configured through its own settings, but HiveCatalog still requires a second, explicit authentication via UserGroupInformation.
Straight to the code:
import com.amihaiemil.eoyaml.*;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.table.catalog.Catalog;
import org.apache.flink.table.catalog.hive.HiveCatalog;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.jupiter.api.Test;
import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import java.util.Properties;
public class FlinkEnvProvider {

    public Properties getFlinkProperties() {
        String krb5Conf = "D:\\configs\\krb5.conf";
        String keytab = "D:\\configs\\xxx.keytab";
        String principal = "xxx@XXXXXX.COM";
        // Point the JVM at krb5.conf so UserGroupInformation can find the KDC.
        System.setProperty("java.security.krb5.conf", krb5Conf);

        Properties flinkProps = new Properties();
        flinkProps.setProperty("security.kerberos.krb5-conf.path", krb5Conf);
        flinkProps.setProperty("security.kerberos.login.keytab", keytab);
        flinkProps.setProperty("security.kerberos.login.principal", principal);
        flinkProps.setProperty("security.kerberos.login.contexts", "Client,KafkaClient");
        flinkProps.setProperty("state.backend", "hashmap");

        // Flink's own Kerberos settings do not authenticate the Hadoop/Hive
        // client that HiveCatalog uses, so log in explicitly via UGI as well.
        try {
            UserGroupInformation.loginUserFromKeytab(principal, keytab);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
        return flinkProps;
    }
    public FlinkCatalog getCatalog() {
        String flinkSqlClientPath = "D:/configs/sql-client-local.yaml";
        YamlMapping mapping;
        try {
            mapping = Yaml.createYamlInput(new File(flinkSqlClientPath)).readYamlMapping();
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
        // Read the first catalog entry from the SQL-client-style YAML.
        YamlSequence catalogs = mapping.yamlSequence("catalogs");
        YamlMapping defaultCatalog = catalogs.yamlMapping(0);
        String catalogName = defaultCatalog.string("name");
        String catalogType = defaultCatalog.string("type");
        String hiveConfDir = defaultCatalog.string("hive-conf-dir");
        String defaultDatabase = defaultCatalog.string("default-database");
        String hadoopConfDir = defaultCatalog.string("hadoop-conf-dir");

        Catalog catalog;
        if ("hive".equalsIgnoreCase(catalogType)) {
            // Passing null as hiveVersion lets Flink auto-detect the Hive version.
            catalog = new HiveCatalog(catalogName, defaultDatabase, hiveConfDir, hadoopConfDir, null);
        } else {
            throw new UnsupportedOperationException("Unsupported catalog type: " + catalogType);
        }
        return new FlinkCatalog(catalogName, catalog);
    }
    public StreamExecutionEnvironment getStreamEnv() {
        Properties flinkProps = getFlinkProperties();
        Configuration flinkConfig = new Configuration();
        // Note: Configuration#addAllToProperties copies in the other direction
        // (Configuration -> Properties), so copy each entry explicitly.
        flinkProps.forEach((k, v) -> flinkConfig.setString(k.toString(), v.toString()));
        return StreamExecutionEnvironment.getExecutionEnvironment(flinkConfig);
    }
    public StreamTableEnvironment getTableEnv() {
        StreamExecutionEnvironment senv = getStreamEnv();
        StreamTableEnvironment tenv = StreamTableEnvironment.create(senv);
        FlinkCatalog catalog = getCatalog();
        tenv.registerCatalog(catalog.getName(), catalog.getCatalog());
        tenv.useCatalog(catalog.getName());
        return tenv;
    }
    @Test
    public void testTableEnv() {
        StreamTableEnvironment tenv = getTableEnv();
        tenv.useDatabase("hudi_db");
        String[] tbls = tenv.listTables();
        Arrays.stream(tbls).forEach(System.out::println);
    }
    @Data
    @AllArgsConstructor
    @NoArgsConstructor
    public static class FlinkCatalog {
        private String name;
        private Catalog catalog;
    }
}
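Outside of JUnit, the provider can be used like this. This is only a minimal sketch: the class name FlinkEnvDemo is made up for illustration, and the commented-out query uses a hypothetical table name (hudi_db is the database from the test above).

import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

public class FlinkEnvDemo {
    public static void main(String[] args) {
        StreamTableEnvironment tenv = new FlinkEnvProvider().getTableEnv();
        // The Hive catalog from sql-client-local.yaml is already the current catalog.
        for (String db : tenv.listDatabases()) {
            System.out.println(db);
        }
        // Hypothetical query; substitute a table that exists in your metastore.
        // tenv.executeSql("SELECT * FROM hudi_db.some_table LIMIT 10").print();
    }
}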
Contents of sql-client-local.yaml:

# Only the relevant parts are kept
# Define catalogs here.
catalogs:
  - name: flink_hive_catalog
    type: hive
    hive-conf-dir: D:/env/conf/hive
    default-database: default
    hadoop-conf-dir: D:/env/conf/hadoop
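One more note: if Hive metastore calls still fail with GSS or "SIMPLE authentication is not enabled" errors despite loginUserFromKeytab, a common workaround is to run the catalog access under the logged-in UGI via doAs. This is only a sketch under that assumption, not something required in every environment:

import java.security.PrivilegedExceptionAction;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.hadoop.security.UserGroupInformation;

public class CatalogDoAsExample {
    public static void main(String[] args) throws Exception {
        StreamTableEnvironment tenv = new FlinkEnvProvider().getTableEnv();
        // Execute the metastore-touching call as the Kerberos-authenticated user.
        String[] tables = UserGroupInformation.getLoginUser()
                .doAs((PrivilegedExceptionAction<String[]>) tenv::listTables);
        for (String t : tables) {
            System.out.println(t);
        }
    }
}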