Directory structure
Code listing
package com.ny.hdfs;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
public class TestHdfs {
    // Hadoop configuration, loaded from the *-site.xml files on the classpath.
    Configuration conf;
    // HDFS client handle; opened per test in conn() and released in close().
    FileSystem fs;

    /** Runs before each test: load the site configuration and open an HDFS client. */
    @Before
    public void conn() throws Exception {
        // true = read the default configuration resources (core-site.xml, hdfs-site.xml);
        // false would skip them.
        conf = new Configuration(true);
        fs = FileSystem.get(conf);
    }

    /** Runs after each test: release the HDFS client. */
    @After
    public void close() throws Exception {
        fs.close();
    }

    /**
     * Toggle the /java directory: recursively delete it when it already exists,
     * otherwise create it. (Running the test twice returns HDFS to its start state.)
     */
    @Test
    public void mkdir() throws Exception {
        Path catalog = new Path("/java");
        if (fs.exists(catalog)) {
            // true = recursive delete
            fs.delete(catalog, true);
        } else {
            fs.mkdirs(catalog);
        }
    }

    /**
     * Upload a local file to /java/helloworld.txt on HDFS.
     *
     * Fixed: the original swallowed every exception in an empty catch block,
     * silently hiding upload failures, and leaked the HDFS output stream when
     * the local source file was missing. Errors now surface as test failures
     * and both streams are closed by try-with-resources.
     */
    @Test
    public void upload() throws IOException {
        Path file = new Path("/java/helloworld.txt");
        try (InputStream input = new BufferedInputStream(
                     new FileInputStream("D:\\testdata\\phone_data.txt"));
             FSDataOutputStream output = fs.create(file)) {
            // Hadoop copy helper; false = streams are closed by try-with-resources.
            IOUtils.copyBytes(input, output, conf, false);
        }
    }

    /** Download /java/helloworld.txt from HDFS into a local file. */
    @Test
    public void download() throws IOException {
        Path file = new Path("/java/helloworld.txt");
        try (FSDataInputStream open = fs.open(file);
             OutputStream out = new BufferedOutputStream(
                     new FileOutputStream("D:\\testdata\\hello.txt"))) {
            // false = streams are closed by try-with-resources.
            IOUtils.copyBytes(open, out, conf, false);
        }
    }

    /*
     * Cluster setup used for the block() test:
     *   [root@node01 hadoop]# hadoop fs -mkdir -p /user/root
     *   [root@node01 ~]# hadoop fs -D dfs.blocksize=1048576 -put test.txt
     *
     * Expected block layout (offset, length, hosts):
     *   0,       1048576, node03,node02
     *   1048576, 1048576, node03,node04
     *   2097152, 872959,  node03,node02
     */

    /** Print each block's location info (offset, length, hosts) for /user/root/test.txt. */
    @Test
    public void block() {
        try {
            Path path = new Path("/user/root/test.txt");
            FileStatus file = fs.getFileStatus(path);
            BlockLocation[] locations = fs.getFileBlockLocations(file, 0, file.getLen());
            for (BlockLocation location : locations) {
                System.out.println(location);
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /** List every entry directly under the /java directory on HDFS. */
    @Test
    public void listAll() throws Exception {
        FileStatus[] listStatus = fs.listStatus(new Path("/java"));
        for (Path path : FileUtil.stat2Paths(listStatus)) {
            System.out.println(path);
        }
    }
}