package com.yht.utils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.IOUtils;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.net.URI;
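/**
 * Utility methods for common HDFS operations, built on the Hadoop
 * {@link org.apache.hadoop.fs.FileSystem} API. Every method opens a fresh
 * connection to the cluster at hdfs://hadoop101:9000 as user "root" and
 * closes it before returning.
 */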
public class HdfsUtil {
    private static final Configuration conf = new Configuration();

    // Create a directory on HDFS
    public static boolean mkdirPath(String path) throws Exception {
        FileSystem fs = FileSystem.get(new URI("hdfs://hadoop101:9000"), conf, "root");
        boolean mkdirs = fs.mkdirs(new Path(path));
        System.out.println(mkdirs ? "SUCCESSFUL" : "FAILED");
        fs.close();
        return mkdirs;
    }

    // Upload a local file to HDFS
    public static void putToHdfs(String localFilePath, String hdfsFilePath) throws Exception {
        // Note: this mutates the shared Configuration, so the replication
        // factor of 3 also applies to later calls in the same JVM.
        conf.set("dfs.replication", "3");
        FileSystem fs = FileSystem.get(new URI("hdfs://hadoop101:9000"), conf, "root");
        fs.copyFromLocalFile(new Path(localFilePath), new Path(hdfsFilePath));
        fs.close();
    }

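    // Variant sketch (not in the original code): the same upload written with
    // try-with-resources, which also closes the FileSystem if the copy throws.
    // The method name putToHdfsSafe is hypothetical; cluster address and user
    // are taken from the methods above.
    public static void putToHdfsSafe(String localFilePath, String hdfsFilePath) throws Exception {
        try (FileSystem fs = FileSystem.get(new URI("hdfs://hadoop101:9000"), conf, "root")) {
            fs.copyFromLocalFile(new Path(localFilePath), new Path(hdfsFilePath));
        }
    }
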
    // Download a file from HDFS to the local file system
    public static void getToHdfs(String hdfsFilePath, String localFilePath) throws Exception {
        FileSystem fs = FileSystem.get(new URI("hdfs://hadoop101:9000"), conf, "root");
        fs.copyToLocalFile(new Path(hdfsFilePath), new Path(localFilePath));
        fs.close();
    }

    // Delete a file or directory on HDFS
    public static void delete(String hdfsPath, boolean isRecursion) throws Exception {
        FileSystem fs = FileSystem.get(new URI("hdfs://hadoop101:9000"), conf, "root");
        // The second argument enables recursive deletion; it must be true
        // when deleting a non-empty directory.
        fs.delete(new Path(hdfsPath), isRecursion);
        fs.close();
    }

    // Rename (or move) a file or directory on HDFS
    public static void rename(String oldName, String newName) throws Exception {
        FileSystem fs = FileSystem.get(new URI("hdfs://hadoop101:9000"), conf, "root");
        fs.rename(new Path(oldName), new Path(newName));
        fs.close();
    }

    // Determine whether a path points to a file or a directory
    public static String judgeFileOrDirectory(String hdfsPath) throws Exception {
        FileSystem fs = FileSystem.get(new URI("hdfs://hadoop101:9000"), conf, "root");
        boolean isFile = fs.getFileStatus(new Path(hdfsPath)).isFile();
        fs.close();
        return isFile ? "File" : "Directory";
    }

    // List the names of all entries directly under a directory
    public static String[] getListHdfsFileName(String hdfsPath) throws Exception {
        FileSystem fs = FileSystem.get(new URI("hdfs://hadoop101:9000"), conf, "root");
        FileStatus[] listStatus = fs.listStatus(new Path(hdfsPath));
        String[] names = new String[listStatus.length];
        for (int i = 0; i < listStatus.length; i++) {
            names[i] = listStatus[i].getPath().getName();
        }
        fs.close();
        return names;
    }

    // Upload a file to HDFS using raw I/O streams
    public static void putToHdfsByIO(String localFilePath, String hdfsFilePath) throws Exception {
        FileSystem fs = FileSystem.get(new URI("hdfs://hadoop101:9000"), conf, "root");
        FileInputStream fis = new FileInputStream(new File(localFilePath));
        FSDataOutputStream fss = fs.create(new Path(hdfsFilePath));
        // This IOUtils.copyBytes overload closes both streams when it finishes,
        // so only the FileSystem handle still needs to be closed.
        IOUtils.copyBytes(fis, fss, conf);
        fs.close();
    }

    // Download a file from HDFS using raw I/O streams
    public static void getToHdfsByIO(String hdfsFilePath, String localFilePath) throws Exception {
        FileSystem fs = FileSystem.get(new URI("hdfs://hadoop101:9000"), conf, "root");
        FSDataInputStream fsis = fs.open(new Path(hdfsFilePath));
        FileOutputStream fos = new FileOutputStream(new File(localFilePath));
        // As above, copyBytes closes both streams itself.
        IOUtils.copyBytes(fsis, fos, conf);
        fs.close();
    }

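    // A minimal usage sketch. It requires a reachable cluster at the address
    // used above; the local and HDFS paths here are hypothetical placeholders,
    // not paths from the original code.
    public static void main(String[] args) throws Exception {
        mkdirPath("/demo");                                 // create /demo on HDFS
        putToHdfs("/tmp/local.txt", "/demo/local.txt");     // upload a local file
        System.out.println(judgeFileOrDirectory("/demo"));  // prints "Directory"
        for (String name : getListHdfsFileName("/demo")) {  // list entries under /demo
            System.out.println(name);
        }
        rename("/demo/local.txt", "/demo/renamed.txt");     // rename on HDFS
        getToHdfs("/demo/renamed.txt", "/tmp/copy.txt");    // download it back
        delete("/demo", true);                              // recursive delete
    }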
}