- 默认配置文件优先级<手动配置文件优先级<API set方法
- 先通过手动配置文件上传文件并查看块大小，之后再用 set 方法设置块大小并对比结果
// Build the client-side configuration object.
Configuration conf = new Configuration();
// Connect to the NameNode at the given URI, acting as user "root".
FileSystem fs = FileSystem.get(new URI("hdfs://hadoop100:9000"), conf, "root");
// Release the connection when done.
fs.close();
"hdfs://hadoop100:9000" 这类连接地址和用户名应当定义为静态常量，之后变动时便于统一修改
// FileSystem handle shared by every test; opened in before(), closed in after().
private FileSystem fs = null;
@Before
public void before() {
//open the HDFS connection before each test
fs = HDFSUtils.getFS();
}
@After
public void after() {
//close the HDFS connection after each test
HDFSUtils.close(fs);
}
/**
 * Create the /hadoop100 directory on HDFS and report the outcome.
 */
@Test
public void test01() throws IOException {
    boolean created = fs.mkdirs(new Path("/hadoop100"));
    System.out.println(created ? "创建目录成功" : "创建目录失败");
}
使用工具类连接
工具类代码块
// Configuration keys and values.
// BUG FIX: Hadoop reads the system property "HADOOP_USER_NAME"; the original
// code set the misspelled key "HADOOP_USRE_NAME", so the "root" user was
// never actually applied to the connection.
private static final String DEFAULT_FS_KEY = "fs.defaultFS";
private static final String DEFAULT_FS_VALUE = "hdfs://hadoop100:9000";
private static final String HADOOP_USER_KEY = "HADOOP_USER_NAME";
private static final String HADOOP_USER_VALUE = "root";

/**
 * Builds a FileSystem handle for the configured HDFS cluster.
 *
 * @return the connected FileSystem, or null if the connection failed
 */
public static FileSystem getFS() {
    //1. Configuration holds every setting the FileSystem needs.
    Configuration entries = new Configuration();
    //NameNode address and port.
    entries.set(DEFAULT_FS_KEY, DEFAULT_FS_VALUE);
    //Manual block-size override (API set, highest priority).
    //entries.set("dfs.blocksize", "128m");
    //Run as "root" on the cluster (read via the HADOOP_USER_NAME property).
    System.setProperty(HADOOP_USER_KEY, HADOOP_USER_VALUE);
    //2. Create the FileSystem bound to this configuration.
    FileSystem fs = null;
    try {
        fs = FileSystem.get(entries);
    } catch (IOException e) {
        e.printStackTrace();
        System.out.println("获取系统文件对象失败!");
    }
    return fs;
}
/**
 * Closes the given FileSystem; a null handle is silently ignored.
 *
 * @param fs the handle returned by getFS(); may be null
 */
public static void close(FileSystem fs) {
    if (fs == null) {
        return;
    }
    try {
        fs.close();
    } catch (IOException e) {
        e.printStackTrace();
    }
}
配置文件（hdfs-site.xml）
设置块的大小，优先级：默认配置 < 手动配置文件 < API set 方法
<!-- 注意：dfs.block.size 为旧键名，新版本 Hadoop 推荐使用 dfs.blocksize（与上文 API 示例一致） -->
<property>
<name>dfs.block.size</name>
<value>134217728</value>
<description>Block size</description>
</property>
/**
 * Create the /hadoop101 directory on HDFS and report the outcome.
 * (Method name "kmkdirs" is kept as-is to preserve the public interface.)
 */
@Test
public void kmkdirs() throws IOException {
    boolean created = fs.mkdirs(new Path("/hadoop101"));
    System.out.println(created ? "创建目录成功" : "创建目录失败");
}
/**
 * Delete a path on HDFS and report the outcome.
 */
@Test
public void rmdir() throws IOException {
    //args: target path; whether to delete directories recursively
    boolean deleted = fs.delete(new Path("/test.txt"), true);
    System.out.println(deleted ? "当前目录删除成功" : "当前目录删除失败");
}
/**
 * Rename /user to /data on HDFS and report the outcome.
 */
@Test
public void rname() {
    try {
        if (fs.rename(new Path("/user"), new Path("/data"))) {
            System.out.println("修改成功");
        } else {
            System.out.println("修改失败");
        }
    } catch (IOException e) {
        e.printStackTrace();
        System.out.println("修改失败");
    }
}
/**
 * Write a small string to /a.txt on HDFS.
 *
 * BUG FIX: the original finally block called close() unconditionally, so a
 * failure in fs.create() (stream still null) raised a NullPointerException.
 * try-with-resources closes the stream only when it was actually opened.
 */
@Test
public void writes() {
    try (FSDataOutputStream fsDataOutputStream = fs.create(new Path("/a.txt"))) {
        fsDataOutputStream.write("aaa0".getBytes());
    } catch (IOException e) {
        e.printStackTrace();
        System.out.println("写入失败");
    }
}
/**
 * Recursively list every file under "/" and print its metadata plus the
 * DataNode location of each of its blocks.
 */
@Test
public void listFiles() throws IOException {
    RemoteIterator<LocatedFileStatus> it = fs.listFiles(new Path("/"), true);
    while (it.hasNext()) {
        LocatedFileStatus status = it.next();
        System.out.println(status.getAccessTime());
        System.out.println(status.getBlockSize());
        System.out.println(status.getLen());
        System.out.println(status.getPath().getName());
        System.out.println(status.getReplication());
        //one entry per HDFS block, with the hosts that store it
        for (BlockLocation location : status.getBlockLocations()) {
            System.out.println("bl-length" + location.getLength() +
                    "bl-off" + location.getOffset());
            for (String host : location.getHosts()) {
                System.out.println("host:" + host);
            }
        }
        System.out.println("-------------------------");
    }
}
/**
 * Print overall HDFS capacity and usage statistics.
 */
@Test
public void getSource() throws IOException {
    FsStatus fsStatus = fs.getStatus();
    //total capacity in bytes
    System.out.println("全部内存" + fsStatus.getCapacity());
    //remaining free space
    System.out.println("可用内存" + fsStatus.getRemaining());
    //space already used
    System.out.println("已用内存" + fsStatus.getUsed());
    System.out.println(fsStatus.getClass());
}
/**
 * Print the name and hostname of every DataNode in the cluster.
 */
@Test
public void getNodeInfo() throws IOException {
    //node-level statistics are only exposed by DistributedFileSystem
    DistributedFileSystem dfs = (DistributedFileSystem) this.fs;
    for (DatanodeInfo node : dfs.getDataNodeStats()) {
        System.out.println(node.getName() + ":" + node.getHostName());
    }
}
/**
 * Upload a local zip archive to HDFS by copying streams.
 *
 * BUG FIX: the original only closed the streams after copyBytes returned,
 * leaking both if the copy threw. try-with-resources closes them on every
 * path (original acquisition order — HDFS output first — is preserved).
 */
@Test
public void uploadFile() throws IOException {
    //1. output stream writing into HDFS; 2. input stream reading the local file
    try (FSDataOutputStream fsDataOutputStream = fs.create(new Path("/hadoop-2.7.2.zip"));
         FileInputStream fileInputStream = new FileInputStream("E:\\ZJJ\\01_win10下编译过的hadoop jar包\\hadoop-2.7.2.zip")) {
        //3. copy with a 4096-byte buffer
        IOUtils.copyBytes(fileInputStream, fsDataOutputStream, 4096);
    }
}