package com.lv;
import com.sun.xml.internal.ws.api.pipe.NextAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.codehaus.jackson.annotate.JsonSubTypes;
import org.junit.Test;
import java.io.IOException;
import java.net.URI;
import java.util.Arrays;
/**
 * Demonstrates HDFS client file operations and configuration precedence.
 * <p>
 * Configuration sources, lowest to highest priority:
 * hdfs-default.xml &lt; hdfs-site.xml &lt; hdfs-site.xml in the project's resources directory &lt; values set via {@code Configuration.set()}
 *
 * @author
 * @date 2021/4/16 20:58
 */
public class HDFSClient {

    /** Shared client handle; opened by {@link #init()} and released by {@link #close()}. */
    private static FileSystem fileSystem;

    /**
     * Opens the HDFS client connection against the cluster NameNode.
     *
     * @throws Exception if the URI is malformed or the FileSystem cannot be obtained
     */
    public void init() throws Exception {
        // Cluster NameNode RPC address
        URI uri = new URI("hdfs://hadoop102:8020");
        // Client configuration; an hdfs-site.xml on the classpath is merged in automatically
        Configuration configuration = new Configuration();
        // Remote user to act as on the cluster
        String userName = "lv";
        // Obtain the client object
        fileSystem = FileSystem.get(uri, configuration, userName);
    }

    /**
     * Closes the HDFS client connection.
     *
     * @throws IOException if closing the underlying connection fails
     */
    public void close() throws IOException {
        fileSystem.close();
    }

    /** Creates a directory on HDFS. */
    @Test
    public void test() throws Exception {
        HDFSClient hdfsClient = new HDFSClient();
        hdfsClient.init();
        try {
            fileSystem.mkdirs(new Path("/xiyou/huaguoshan"));
        } finally {
            // Always release the connection, even if the operation throws
            hdfsClient.close();
        }
    }

    /** Uploads a local file to HDFS. */
    @Test
    public void testup() throws Exception {
        HDFSClient hdfsClient = new HDFSClient();
        hdfsClient.init();
        try {
            // copyFromLocalFile(boolean delSrc, boolean overwrite, Path src, Path dst)
            // delSrc: delete the local source; overwrite: replace an existing target;
            // src: local file path; dst: path on HDFS
            fileSystem.copyFromLocalFile(false, false, new Path("d:\\sunwukong.txt"), new Path("/xiyou/huaguoshan"));
        } finally {
            hdfsClient.close();
        }
    }

    /** Downloads an HDFS file to the local file system. */
    @Test
    public void testget() throws Exception {
        HDFSClient hdfsClient = new HDFSClient();
        hdfsClient.init();
        try {
            // copyToLocalFile(boolean delSrc, Path src, Path dst, boolean useRawLocalFileSystem)
            // delSrc: delete the HDFS source; src: HDFS file path; dst: local target path;
            // useRawLocalFileSystem: skip the local CRC checksum file when true
            fileSystem.copyToLocalFile(false, new Path("/xiyou/huaguoshan/sunwukong.txt"), new Path("d:\\"), false);
        } finally {
            hdfsClient.close();
        }
    }

    /** Deletes a path on HDFS. */
    @Test
    public void testdel() throws Exception {
        HDFSClient hdfsClient = new HDFSClient();
        hdfsClient.init();
        try {
            // delete(Path f, boolean recursive)
            // f: HDFS path to delete; recursive: required true for non-empty directories
            fileSystem.delete(new Path("/xiyou"), true);
        } finally {
            hdfsClient.close();
        }
    }

    /** Moves and/or renames a file or directory on HDFS. */
    @Test
    public void testrename() throws Exception {
        HDFSClient hdfsClient = new HDFSClient();
        hdfsClient.init();
        try {
            // rename(Path src, Path dst)
            // src: original path; dst: destination path
            fileSystem.rename(new Path("/sanguo/weiguo.txt"), new Path("/newweiguo.txt"));
        } finally {
            hdfsClient.close();
        }
    }

    /** Prints detailed metadata for every file under the root directory. */
    @Test
    public void testfiledata() throws Exception {
        HDFSClient hdfsClient = new HDFSClient();
        hdfsClient.init();
        try {
            // listFiles(final Path f, final boolean recursive)
            // f: starting path; recursive: descend into subdirectories.
            // Returns a remote iterator over the matching files.
            RemoteIterator<LocatedFileStatus> iterator = fileSystem.listFiles(new Path("/"), true);
            while (iterator.hasNext()) {
                LocatedFileStatus next = iterator.next();
                System.out.println("==================================" + next.getPath().getName());
                System.out.println(next.getPermission());
                System.out.println(next.getOwner());
                System.out.println(next.getGroup());
                System.out.println(next.getLen());
                System.out.println(next.getModificationTime());
                System.out.println(next.getReplication());
                System.out.println(next.getBlockSize());
                // The file name is only available through the Path
                System.out.println(next.getPath().getName());
                // Block locations, e.g. hadoop102 hadoop103 hadoop104
                BlockLocation[] blockLocations = next.getBlockLocations();
                System.out.println(Arrays.toString(blockLocations));
            }
        } finally {
            hdfsClient.close();
        }
    }

    /** Lists the root directory, distinguishing files from directories. */
    @Test
    public void teststatus() throws Exception {
        HDFSClient hdfsClient = new HDFSClient();
        hdfsClient.init();
        try {
            FileStatus[] fileStatuses = fileSystem.listStatus(new Path("/"));
            for (FileStatus status : fileStatuses) {
                if (status.isFile()) {
                    System.out.println("文件" + status.getPath().getName());
                } else {
                    System.out.println("目录" + status.getPath().getName());
                }
            }
        } finally {
            hdfsClient.close();
        }
    }
}
// HDFS API file operations (HDFS-api文件操作)
// Latest recommended article published 2023-04-24 21:00:46