import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.io.IOUtils;
// Shared Hadoop configuration. The no-arg constructor uses the default
// boolean parameter loadDefaults = true, so *-default.xml / *-site.xml
// resources on the classpath are loaded automatically.
private static Configuration conf = new Configuration();

// Root HDFS path of the Hive-managed ip_address table.
// (Plain literal instead of the redundant `new String(...)` copy.)
private static String rootPath =
        "hdfs://10.x.x.210:8020/user/hive/warehouse/ip_address.db/ip_address/";
// Alternative root when HDFS HA (nameservice) addressing is used:
// "hdfs://nameservice1/user/hive/warehouse/ip_address.db/ip_address/"

// Shared FileSystem handle; populated once by the static initializer.
private static FileSystem coreSys = null;
// One-time initialization of the shared HDFS FileSystem handle.
static {
    try {
        // Pin the hdfs:// scheme to the HDFS implementation class so the
        // scheme resolves even when service-loader metadata is missing
        // from a shaded/merged classpath.
        conf.set("fs.hdfs.impl", org.apache.hadoop.hdfs.DistributedFileSystem.class.getName());
        conf.set("fs.defaultFS", "hdfs://qd01-cld-master003:8020");
        coreSys = FileSystem.get(URI.create(rootPath), conf);
    } catch (IOException e) {
        // Fail fast with the full cause instead of swallowing the error:
        // the original printed only getLocalizedMessage() and continued,
        // leaving coreSys == null and guaranteeing a later NPE.
        e.printStackTrace();
        throw new ExceptionInInitializerError(e);
    }
}
/**
 * Lists the immediate children of the HDFS root directory ("/") and prints
 * each entry's full path to stdout.
 *
 * @param args unused command-line arguments
 * @throws IOException if the FileSystem cannot be created or the root
 *     directory cannot be listed
 */
public static void main(String[] args) throws IOException {
    // FileSystem.newInstance returns a private (non-cached) instance, so it
    // must be closed by the caller; the original leaked it. Try-with-resources
    // guarantees the close even if listStatus throws.
    try (FileSystem fs = FileSystem.newInstance(conf)) {
        for (FileStatus fileStatus : fs.listStatus(new Path("/"))) {
            System.out.println(fileStatus.getPath());
        }
    }
}