一、运行前提
已经搭建好全分布式Hadoop集群
二、HDFS实例代码
package com.psn.hdfs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.checkerframework.dataflow.qual.TerminatesExecution;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Arrays;
/**
 * JUnit 4 exercises for the HDFS Java client API: mkdir, upload, download,
 * rename, listing, and delete against a running fully-distributed cluster.
 * Requires the NameNode at hdfs://hadoop102:8020 to be reachable.
 */
public class HdfsClient {

    // Shared HDFS handle: opened in before(), closed in after().
    FileSystem fs;

    /**
     * Opens a connection to the NameNode at hadoop102:8020 as user "centos7"
     * before each test, with the client-side replication factor set to 3.
     *
     * @throws URISyntaxException   if the NameNode URI is malformed
     * @throws IOException          if the connection cannot be established
     * @throws InterruptedException if the login as "centos7" is interrupted
     */
    @Before
    public void before() throws URISyntaxException, IOException, InterruptedException {
        URI uri = new URI("hdfs://hadoop102:8020");
        Configuration configuration = new Configuration();
        configuration.set("dfs.replication", "3");
        String user = "centos7";
        fs = FileSystem.get(uri, configuration, user);
    }

    /**
     * Closes the HDFS connection after each test.
     * Null guard: if before() failed before assigning fs, closing would
     * otherwise throw an NPE that masks the original failure.
     */
    @After
    public void after() throws IOException {
        if (fs != null) {
            fs.close();
        }
    }

    /** Creates the /zs directory on HDFS (no-op if it already exists). */
    @Test
    public void testMkdir() throws URISyntaxException, IOException {
        fs.mkdirs(new Path("/zs"));
    }

    /**
     * Uploads a local file into /zs.
     * Flags: delSrc=false keeps the local copy; overwrite=true replaces any
     * existing file at the destination.
     */
    @Test
    public void testput() throws IOException {
        fs.copyFromLocalFile(false, true, new Path("d://tools/node-v14.16.0-x64.msi"), new Path("/zs"));
    }

    /**
     * Downloads /zs to the local D:\ drive.
     * Flags: delSrc=false keeps the HDFS copy; useRawLocalFileSystem=false
     * means a local .crc checksum file is also written.
     */
    @Test
    public void downFile() throws IOException {
        fs.copyToLocalFile(false, new Path("/zs"), new Path("D://"), false);
    }

    /** Renames and moves a file from /zs into /wcinput in a single operation. */
    @Test
    public void rename() throws IOException {
        fs.rename(new Path("/zs/node-v14.16.0-x641.msi"), new Path("/wcinput/node-v14.16.0-x64.msi"));
    }

    /**
     * Recursively lists all files under /wcinput and prints per-file metadata
     * (name, type, owner, permissions, length) plus block locations.
     */
    @Test
    public void testDetailFile() throws IOException {
        RemoteIterator<LocatedFileStatus> listFiles = fs.listFiles(new Path("/wcinput"), true);
        while (listFiles.hasNext()) {
            LocatedFileStatus fileStatus = listFiles.next();
            System.out.println("==================" + fileStatus.getPath().getName() + "=========================");
            System.out.println(fileStatus.isFile());
            System.out.println(fileStatus.getOwner());
            System.out.println(fileStatus.getPermission());
            System.out.println(fileStatus.getLen());
            BlockLocation[] blockLocations = fileStatus.getBlockLocations();
            System.out.println(Arrays.asList(blockLocations));
        }
    }

    /**
     * Recursively deletes /zs (recursive=true removes directory contents).
     * Fix: the @Test annotation was missing, so this test never ran.
     */
    @Test
    public void TestDel() throws IOException {
        fs.delete(new Path("/zs"), true);
    }
}