package com.zhangxy.hdfs;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
public class HDFSClient {
    /**
     * Uploads a local file to HDFS.
     *
     * Connects to the NameNode at hdfs://hadoop106:9000 as user "zhangxy"
     * (passing the URI and user name directly instead of setting fs.defaultFS
     * on the Configuration), then copies F:/hello.txt to /hello2.txt.
     *
     * @throws IOException          on HDFS communication or copy failure
     * @throws InterruptedException if the connection attempt is interrupted
     * @throws URISyntaxException   never for this hard-coded URI
     */
    public static void main(String[] args) throws IOException, InterruptedException, URISyntaxException {
        // 1. Get the file system, supplying the NameNode URI and user directly.
        Configuration configuration = new Configuration();
        // try-with-resources guarantees the client is closed even if the copy throws.
        try (FileSystem fs = FileSystem.get(new URI("hdfs://hadoop106:9000"), configuration, "zhangxy")) {
            // 2. Upload the local file to HDFS.
            fs.copyFromLocalFile(new Path("F:/hello.txt"), new Path("/hello2.txt"));
        }
        // 3. Resources are released by the try-with-resources block above.
        System.out.println("over");
    }
}
package com.zhangxy.hdfs;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.Test;
public class HDFSClient {
    /**
     * Entry point intentionally left empty: the upload examples were moved
     * into {@code @Test} methods.
     */
    public static void main(String[] args) throws IOException, InterruptedException, URISyntaxException {
    }

    /**
     * Obtains the FileSystem from the default (classpath) configuration and
     * prints it, verifying that the HDFS client dependencies are wired up.
     *
     * @throws IOException if the file system cannot be obtained
     */
    @Test
    public void initHDFS() throws IOException {
        // 1. Get the file system from the default configuration
        //    (e.g. core-site.xml on the classpath).
        Configuration configuration = new Configuration();
        // try-with-resources closes the client (the original leaked it).
        try (FileSystem fs = FileSystem.get(configuration)) {
            // 2. Print the file system to the console.
            System.out.println(fs.toString());
        }
    }
}
测试参数优先级
参数优先级: 1、客户端代码中设置的值 2、classpath下的用户自定义配置文件 3、服务器的默认配置
@Test
public void initHDFS() throws IOException, InterruptedException, URISyntaxException {
    // 1. Get the file system. dfs.replication set in code takes highest
    //    priority, overriding classpath config files and server defaults.
    Configuration configuration = new Configuration();
    configuration.set("dfs.replication", "1");
    FileSystem fs = FileSystem.get(new URI("hdfs://hadoop106:9000"), configuration, "zhangxy");
    // 2. Upload a file; it should be stored with replication factor 1.
    fs.copyFromLocalFile(new Path("F:/hello.txt"), new Path("/hello5.txt"));
    // 3. Print the file system to the console BEFORE closing it
    //    (the original printed after close, i.e. used the resource post-close).
    System.out.println(fs.toString());
    // 4. Release resources.
    fs.close();
}
下载文件
// Download /hello.txt from HDFS to the local path F:/hello1.txt.
@Test
public void getFileFromHdfs() throws IOException, InterruptedException, URISyntaxException {
    // 1. Connect to the NameNode as user "zhangxy".
    Configuration configuration = new Configuration();
    URI nameNode = new URI("hdfs://hadoop106:9000");
    FileSystem fileSystem = FileSystem.get(nameNode, configuration, "zhangxy");
    // 2. Copy to the local disk; per FileSystem.copyToLocalFile:
    //    delSrc=false keeps the HDFS source, useRawLocalFileSystem=true
    //    avoids writing a local .crc checksum file.
    fileSystem.copyToLocalFile(false, new Path("/hello.txt"), new Path("F:/hello1.txt"), true);
    // 3. Release the client.
    fileSystem.close();
}
// Create a nested directory tree on HDFS.
@Test
public void mkdirAtHdfs() throws IOException, InterruptedException, URISyntaxException {
    // 1. Connect to the NameNode as user "zhangxy".
    Configuration configuration = new Configuration();
    URI nameNode = new URI("hdfs://hadoop106:9000");
    FileSystem fileSystem = FileSystem.get(nameNode, configuration, "zhangxy");
    // 2. mkdirs creates every missing parent directory along the path.
    fileSystem.mkdirs(new Path("/0906/daxian/banzhang/h"));
    // 3. Release the client.
    fileSystem.close();
}
// Recursively delete a directory on HDFS.
@Test
public void deleteAtHdfs() throws IOException, InterruptedException, URISyntaxException {
    // 1. Connect to the NameNode as user "zhangxy".
    Configuration configuration = new Configuration();
    URI nameNode = new URI("hdfs://hadoop106:9000");
    FileSystem fileSystem = FileSystem.get(nameNode, configuration, "zhangxy");
    // 2. recursive=true removes /0906/ together with everything under it.
    fileSystem.delete(new Path("/0906/"), true);
    // 3. Release the client.
    fileSystem.close();
}
// Rename a file on HDFS.
@Test
public void renameAtHdfs() throws IOException, InterruptedException, URISyntaxException {
    // 1. Connect to the NameNode as user "zhangxy".
    Configuration configuration = new Configuration();
    URI nameNode = new URI("hdfs://hadoop106:9000");
    FileSystem fileSystem = FileSystem.get(nameNode, configuration, "zhangxy");
    // 2. Rename /hello2.txt to /helloHdfs.txt.
    fileSystem.rename(new Path("/hello2.txt"), new Path("/helloHdfs.txt"));
    // 3. Release the client.
    fileSystem.close();
}
package com.zhangxy.hdfs;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.rmi.Remote;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.junit.Test;
public class HDFSClient {
    public static void main(String[] args) throws IOException, InterruptedException, URISyntaxException {
    }

    // Recursively list every file under / and print its name, length,
    // permission and group, followed by the hosts holding each block replica.
    @Test
    public void readListFiles() throws IOException, InterruptedException, URISyntaxException {
        // 1. Connect to the NameNode as user "zhangxy".
        Configuration configuration = new Configuration();
        FileSystem fileSystem = FileSystem.get(new URI("hdfs://hadoop106:9000"), configuration, "zhangxy");
        // 2. Recursive listing of all files under the root directory.
        RemoteIterator<LocatedFileStatus> files = fileSystem.listFiles(new Path("/"), true);
        while (files.hasNext()) {
            LocatedFileStatus file = files.next();
            System.out.println(file.getPath().getName()); // file name
            System.out.println(file.getLen());            // length in bytes
            System.out.println(file.getPermission());     // permission bits
            System.out.println(file.getGroup());          // owning group
            // Every host storing a replica, for every block of the file.
            for (BlockLocation location : file.getBlockLocations()) {
                for (String host : location.getHosts()) {
                    System.out.println(host);
                }
            }
            System.out.println("------班长分割线-------");
        }
        // 3. Release the client.
        fileSystem.close();
    }
    // File-vs-directory detection lives in a separate snippet below.
}
guigu
10
rw-r--r--
supergroup
hadoop106
hadoop107
hadoop108
------班长分割线-------
hello.txt
10
rw-r--r--
supergroup
hadoop107
hadoop106
hadoop108
------班长分割线-------
hello3.txt
10
rw-r--r--
supergroup
hadoop106
------班长分割线-------
hello4.txt
10
rw-r--r--
supergroup
hadoop106
hadoop107
hadoop108
------班长分割线-------
hello5.txt
10
rw-r--r--
supergroup
hadoop106
------班长分割线-------
helloHdfs.txt
10
rw-r--r--
supergroup
hadoop106
hadoop108
hadoop107
------班长分割线-------
jinlian.txt
28
rw-r--r--
supergroup
hadoop106
hadoop108
------班长分割线-------
// Decide, for each entry directly under /, whether it is a file or a directory.
@Test
public void readListFiles() throws IOException, InterruptedException, URISyntaxException {
    // 1. Connect to the NameNode as user "zhangxy".
    Configuration configuration = new Configuration();
    FileSystem fileSystem = FileSystem.get(new URI("hdfs://hadoop106:9000"), configuration, "zhangxy");
    // 2. Non-recursive listing of the root directory.
    FileStatus[] entries = fileSystem.listStatus(new Path("/"));
    for (FileStatus entry : entries) {
        // "f:" marks a regular file, "d:" a directory.
        String prefix = entry.isFile() ? "f:" : "d:";
        System.out.println(prefix + entry.getPath().getName());
    }
    // 3. Release the client.
    fileSystem.close();
}