Hadoop 多节点（HA 集群）客户端的两种初始化方式
第一种：纯编码的方式（所有 HA 参数在代码中硬编码，无需 xml 配置文件）
// Approach 1: configure the HA cluster entirely in code — no xml files required.
Configuration conf = new Configuration();
// Logical nameservice URI; clients address the cluster, not one NameNode host.
conf.set("fs.defaultFS", "hdfs://mycluster");
conf.set("dfs.nameservices", "mycluster");
// The two NameNodes forming the HA pair, and their RPC endpoints.
conf.set("dfs.ha.namenodes.mycluster", "nn1,nn2");
conf.set("dfs.namenode.rpc-address.mycluster.nn1", "192.168.1.7:8020");
conf.set("dfs.namenode.rpc-address.mycluster.nn2", "192.168.1.8:8020");
// Proxy provider that lets the client transparently fail over between nn1 and nn2.
conf.set("dfs.client.failover.proxy.provider.mycluster",
        "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");
// NOTE(review): dfs.ha.automatic-failover.enabled is normally a server-side (ZKFC)
// setting; it is harmless on the client but not required for client failover — confirm
// it is intentional here.
conf.setBoolean("dfs.ha.automatic-failover.enabled", true);
// FileSystem.get throws IOException — the enclosing method must declare or handle it.
fs = FileSystem.get(conf);
第二种：通过配置文件的方式（nn1/nn2 地址与 failover 代理等 HA 参数由 classpath 下的 core-site.xml / hdfs-site.xml 提供，并以指定用户身份访问）
// Approach 2: rely on classpath config files (core-site.xml / hdfs-site.xml) for the
// HA details, and access HDFS as the "root" user via UserGroupInformation.doAs.
UserGroupInformation ugi = UserGroupInformation.createRemoteUser("root");
try {
    // Typed action — the original used the raw PrivilegedExceptionAction type,
    // which produces an unchecked cast; Void is the correct parameterization.
    ugi.doAs((PrivilegedExceptionAction<Void>) () -> {
        try {
            conf = new Configuration();
            // Only the logical nameservice URI is set in code; the nn1/nn2 RPC
            // addresses and the failover proxy provider are expected to come from
            // the xml files on the classpath.
            conf.set("fs.defaultFS", "hdfs://mycluster");
            conf.set("hadoop.job.ugi", "root");
            fs = FileSystem.get(conf);
        } catch (IOException e) {
            // TODO(review): printStackTrace swallows the failure and leaves fs
            // unset; prefer logging and rethrowing (e.g. as UncheckedIOException).
            e.printStackTrace();
        }
        return null;
    });
} catch (Exception e) {
    // TODO(review): same concern — doAs failures are silently printed here.
    e.printStackTrace();
}