/**
 * Demonstrates basic CRUD operations against HDFS (upload, create, copy,
 * delete, mkdir, list) via the Hadoop {@code FileSystem} API.
 *
 * @author Zhaogw&amp;Lss
 * @version 1.0
 * @since 2019/9/17 14:35
 */
import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.io.IOUtils;
public class HdfsCrud {

    /**
     * Demo entry point: lists the immediate subdirectories of {@code /user}.
     * Swap in one of the other helpers to exercise a different operation, e.g.:
     * <pre>
     *   uploadLocalFile2HDFS(args[0], args[1]);
     *   createNewHDFSFile("/test", "做一个测试");
     *   copytoHDFSFile("resut1.txt", "result.txt");
     *   deleteHDFSFile("resut1.txt");
     *   mkdir("/test11");
     *   deleDir("/test11");
     * </pre>
     *
     * @param args unused
     * @throws IOException if the HDFS connection or listing fails
     */
    public static void main(String[] args) throws IOException {
        String dir = "/user";
        listAll(dir);
    }

    /**
     * Builds the client configuration pointing at the HDFS NameNode.
     *
     * @return a Configuration with {@code fs.defaultFS} set
     */
    private static Configuration getConf() {
        Configuration conf = new Configuration();
        // NameNode RPC address; must match the cluster's core-site.xml.
        conf.set("fs.defaultFS", "hdfs://Master:9000");
        return conf;
    }

    /**
     * Prints the host name of every DataNode in the cluster.
     * NOTE(review): assumes {@code fs.defaultFS} really is an HDFS URI —
     * the cast to DistributedFileSystem throws ClassCastException otherwise.
     *
     * @throws IOException if the cluster cannot be reached
     */
    private static void getDateNodeHost() throws IOException {
        // try-with-resources closes the FileSystem exactly once, even on error
        // (the original closed the same object twice and leaked it on failure).
        try (FileSystem fs = FileSystem.get(getConf())) {
            DistributedFileSystem hdfs = (DistributedFileSystem) fs;
            for (DatanodeInfo node : hdfs.getDataNodeStats()) {
                System.out.println("集群上的节点名称为" + node.getHostName());
            }
        }
    }

    /**
     * Uploads a local file to HDFS via {@link FileSystem#copyFromLocalFile}.
     *
     * @param s local source path
     * @param d HDFS destination path
     * @throws IOException if the copy fails
     */
    public static void uploadLocalFile2HDFS(String s, String d) throws IOException {
        try (FileSystem fs = FileSystem.get(getConf())) {
            fs.copyFromLocalFile(new Path(s), new Path(d));
        }
    }

    /**
     * Creates a new file in HDFS and writes {@code content} to it.
     *
     * @param createFilePath full HDFS path of the file to create
     * @param content        text to write, encoded as UTF-8
     * @throws IOException if creation or the write fails
     */
    private static void createNewHDFSFile(String createFilePath, String content) throws IOException {
        try (FileSystem fs = FileSystem.get(getConf());
             FSDataOutputStream pos = fs.create(new Path(createFilePath))) {
            // Explicit UTF-8: the original used the platform-default charset,
            // which corrupts non-ASCII content on non-UTF-8 JVMs.
            pos.write(content.getBytes(StandardCharsets.UTF_8));
        }
    }

    /**
     * Copies a local file to HDFS by streaming its bytes.
     *
     * @param toCreateFilePath HDFS destination path
     * @param localFilePath    local source file
     * @throws IOException if reading the local file or writing to HDFS fails
     */
    public static void copytoHDFSFile(String toCreateFilePath, String localFilePath) throws IOException {
        // try-with-resources closes all three resources (the original leaked
        // the input stream, and leaked everything on an exception).
        try (BufferedInputStream bis =
                     new BufferedInputStream(new FileInputStream(localFilePath));
             FileSystem fs = FileSystem.get(getConf());
             FSDataOutputStream os = fs.create(new Path(toCreateFilePath))) {
            // Modest buffer instead of the original 128 MB allocation; HDFS
            // buffers internally, so a small chunk size is sufficient.
            byte[] bys = new byte[8192];
            int len;
            while ((len = bis.read(bys)) != -1) {
                os.write(bys, 0, len);
            }
            // Single flush after the loop; flushing per chunk defeats batching.
            os.hflush();
        }
    }

    /**
     * Deletes an HDFS file (recursively, so it also works on directories).
     *
     * @param dst HDFS path to delete
     * @throws IOException if the delete fails
     */
    private static void deleteHDFSFile(String dst) throws IOException {
        try (FileSystem fs = FileSystem.get(getConf())) {
            fs.delete(new Path(dst), true);
        }
    }

    /**
     * Creates an HDFS directory (including missing parents).
     *
     * @param str HDFS directory path to create
     * @throws IOException if the mkdir fails
     */
    private static void mkdir(String str) throws IOException {
        try (FileSystem fs = FileSystem.get(getConf())) {
            fs.mkdirs(new Path(str));
        }
    }

    /**
     * Recursively deletes an HDFS directory.
     *
     * @param str HDFS directory path to delete
     * @throws IOException if the delete fails
     */
    private static void deleDir(String str) throws IOException {
        try (FileSystem fs = FileSystem.get(getConf())) {
            fs.delete(new Path(str), true);
        }
    }

    /**
     * Prints the paths of the immediate sub-directories of {@code dir}.
     * Plain files in the directory are skipped.
     *
     * @param dir HDFS directory to list
     * @throws IOException if the listing fails
     */
    private static void listAll(String dir) throws IOException {
        try (FileSystem fs = FileSystem.get(getConf())) {
            for (FileStatus status : fs.listStatus(new Path(dir))) {
                if (status.isDirectory()) {
                    System.out.println(status.getPath().toString());
                }
            }
        }
    }
}
// HDFS programming API examples.
// (Trailing blog-scrape text removed; originally: "最新推荐文章于 2023-10-18 14:06:45 发布")