pom.xml
<!-- Maven dependencies for the HDFS client demo below. -->
<dependencies>
<!-- Hadoop client libraries: provides FileSystem, Configuration, Path, etc. -->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId>
<version>3.1.3</version>
</dependency>
<!-- JUnit 4 test framework (@Before / @After / @Test annotations).
     NOTE(review): 4.12 is affected by CVE-2020-15250 (TemporaryFolder
     information disclosure); consider upgrading to 4.13.2. -->
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.12</version>
</dependency>
<!-- SLF4J binding that routes logging to Log4j 1.x (configured in
     log4j.properties below). -->
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
<version>1.7.30</version>
</dependency>
<!-- Lombok annotation processor (compile-time code generation). -->
<dependency>
<groupId>org.projectlombok</groupId>
<artifactId>lombok</artifactId>
<version>1.18.28</version>
</dependency>
</dependencies>
log4j.properties
# Root logger: INFO level, writing to both the console and the log file.
# Fix: the original defined the "logfile" appender below but never attached
# it to the root logger, so the file appender was dead configuration.
log4j.rootLogger=INFO, stdout, logfile
# Console appender with a timestamp/level/category pattern.
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d %p [%c] - %m%n
# File appender writing to target/spring.log with the same pattern.
log4j.appender.logfile=org.apache.log4j.FileAppender
log4j.appender.logfile.File=target/spring.log
log4j.appender.logfile.layout=org.apache.log4j.PatternLayout
log4j.appender.logfile.layout.ConversionPattern=%d %p [%c] - %m%n
Create the HdfsClient class
/**
 * HDFS client API demo: connects to a Hadoop NameNode and exercises basic
 * file-system operations — mkdir, upload, download, delete, rename, and
 * file-status listing.
 *
 * <p>NOTE(review): assumes a NameNode is reachable at
 * {@code hdfs://hadoop101:9000} and that user "root" has write permission on
 * the paths used — confirm against the target cluster before running.
 */
public class HdfsClient {

    /** Shared file-system handle: opened in {@link #init()}, closed in {@link #close()}. */
    private FileSystem fs;

    /**
     * Opens the shared HDFS connection before each test.
     *
     * @throws IOException            if the file system cannot be reached
     * @throws URISyntaxException     if the NameNode URI is malformed
     * @throws InterruptedException   if the connection attempt is interrupted
     */
    @Before
    public void init() throws IOException, URISyntaxException, InterruptedException {
        // NameNode address of the target cluster.
        URI uri = new URI("hdfs://hadoop101:9000");
        // Client-side configuration (cluster defaults; individual tests may
        // build their own Configuration to override settings).
        Configuration configuration = new Configuration();
        // User to act as on the cluster.
        String user = "root";
        // 1. Obtain the client object.
        fs = FileSystem.get(uri, configuration, user);
    }

    /** Releases the shared connection after each test. */
    @After
    public void close() throws IOException {
        // 3. Close resources.
        fs.close();
    }

    /** Creates a directory (including missing parents) on HDFS. */
    @Test
    public void testmkdir() throws URISyntaxException, IOException, InterruptedException {
        // 2. Create a directory.
        fs.mkdirs(new Path("/marvel/Avengers"));
    }

    /**
     * Uploads a local file to HDFS with a per-upload replication factor of 2.
     */
    @Test
    public void testCopyFromLocalFile() throws IOException,
            InterruptedException, URISyntaxException {
        // 1. Use a dedicated connection so the dfs.replication override does
        // not leak into the shared handle opened by init().
        Configuration configuration = new Configuration();
        configuration.set("dfs.replication", "2");
        // Fix: try-with-resources guarantees the connection is closed even if
        // the copy throws (the original leaked it on the failure path).
        try (FileSystem uploadFs = FileSystem.get(new URI("hdfs://hadoop101:9000"),
                configuration, "root")) {
            // 2. Upload the file.
            uploadFs.copyFromLocalFile(new Path("F:\\hadoop\\heihie.txt"),
                    new Path("/marvel/Avengers"));
        } // 3. Resource closed automatically.
    }

    /**
     * Downloads a file from HDFS to the local file system.
     */
    @Test
    public void testCopyToLocalFile() throws IOException,
            InterruptedException, URISyntaxException {
        // 1. Obtain a dedicated file-system connection.
        Configuration configuration = new Configuration();
        // Fix: try-with-resources closes the connection even if the download
        // throws (the original leaked it on the failure path).
        try (FileSystem downloadFs = FileSystem.get(new URI("hdfs://hadoop101:9000"),
                configuration, "root")) {
            // 2. Perform the download.
            // Parameters:
            //   delSrc               - whether to delete the source file
            //   src                  - HDFS path to download
            //   dst                  - local destination path
            //   useRawLocalFileSystem - true skips writing the local .crc
            //                           checksum file
            downloadFs.copyToLocalFile(false,
                    new Path("/marvel/Avengers/wahaha.txt"),
                    new Path("F:\\hadoop\\enen.txt"),
                    true);
        } // 3. Resource closed automatically.
    }

    /** Demonstrates deleting files, empty directories, and non-empty directories. */
    @Test
    public void testRm() throws IOException {
        // Parameters: (1) path to delete; (2) whether to delete recursively.
        // Delete a single file (recursion not needed).
        fs.delete(new Path("/jdk-8u333-linux-x64.tar.gz"), false);
        // Delete an empty directory.
        fs.delete(new Path("/Av"), false);
        // Delete a non-empty directory (requires recursive = true).
        fs.delete(new Path("/wa"), true);
    }

    /** Demonstrates renaming and moving files and directories. */
    @Test
    public void testmv() throws IOException {
        // Parameters: (1) source path; (2) destination path.
        // Rename a file in place:
        //fs.rename(new Path("/move/from.txt"), new Path("/move/new.txt"));
        // Move a file and rename it at the same time:
        //fs.rename(new Path("/move/new.txt"),new Path("/to.txt"));
        // Rename a directory.
        fs.rename(new Path("/move"), new Path("/shift"));
    }

    /** Prints status and block-location details for every file under "/". */
    @Test
    public void fileDetail() throws IOException {
        // List all files recursively from the root.
        RemoteIterator<LocatedFileStatus> listFiles = fs.listFiles(new Path("/"), true);
        // Walk the iterator and dump each file's metadata.
        while (listFiles.hasNext()) {
            LocatedFileStatus fileStatus = listFiles.next();
            System.out.println("==========" + fileStatus.getPath() + "=========");
            System.out.println(fileStatus.getPermission());
            System.out.println(fileStatus.getOwner());
            System.out.println(fileStatus.getGroup());
            System.out.println(fileStatus.getLen());
            System.out.println(fileStatus.getModificationTime());
            System.out.println(fileStatus.getReplication());
            System.out.println(fileStatus.getBlockSize());
            System.out.println(fileStatus.getPath().getName());
            // Block location information (hosts holding each block).
            BlockLocation[] blockLocations = fileStatus.getBlockLocations();
            System.out.println(Arrays.toString(blockLocations));
        }
    }
}