// There are two ways to operate HDFS from Java; this section uses the Java
// client API to perform create, delete, query, and update operations on HDFS.
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.RemoteBlockReader;

import org.junit.After;
import org.junit.Before;
import org.junit.Test;
public class hdfstest {

    // HDFS client handle; (re)created by init() before each test.
    FileSystem fs = null;

    /**
     * Obtains an HDFS client instance before every test.
     *
     * <p>Connects to the NameNode at {@code hdfs://node1:9000} and acts as the
     * user {@code "hadoop"} so the tests have write permission on the cluster.
     *
     * @throws IOException          if the filesystem cannot be reached
     * @throws URISyntaxException   if the cluster URI is malformed
     * @throws InterruptedException if the connection attempt is interrupted
     */
    @SuppressWarnings("deprecation")
    @Before
    public void init() throws IOException, URISyntaxException, InterruptedException {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://node1:9000");
        // get() returns the client instance bound to this cluster and user.
        fs = FileSystem.get(new URI("hdfs://node1:9000"), conf, "hadoop");
    }

    /**
     * Closes the HDFS client after every test so no connection leaks.
     * Previously only testUpload() closed it; the other tests leaked the client.
     *
     * @throws IOException if closing the filesystem fails
     */
    @After
    public void tearDown() throws IOException {
        if (fs != null) {
            fs.close();
        }
    }

    // Upload a local file to HDFS.
    @Test
    public void testUpload() throws IOException, InterruptedException {
        fs.copyFromLocalFile(new Path("c:/hadoop.lib"), new Path("/hadoop.lib.copy"));
    }

    // Create a new directory (including any missing parents).
    @Test
    public void mkDirs() throws IOException {
        boolean mkdirs = fs.mkdirs(new Path("/testmkdir/aaa"));
        System.out.println(mkdirs);
    }

    // Delete a directory.
    @Test
    public void testdelete() throws IOException {
        // delete(Path) is deprecated; pass the recursive flag explicitly.
        // true = remove the directory and everything beneath it.
        boolean flag = fs.delete(new Path("/testmkdir/aaa"), true);
        System.out.println(flag);
    }

    // List the entries directly under the root path and print block size,
    // owning group, and full path for each.
    @Test
    public void testLs() throws IOException {
        FileStatus[] listStatus = fs.listStatus(new Path("/"));
        for (FileStatus status : listStatus) {
            System.out.println(status.getBlockSize());
            System.out.println(status.getGroup());
            System.out.println(status.getPath());
        }
    }

    public static void main(String[] args) throws IOException, InterruptedException {}
}