// Common HDFS Java client operations (HDFS 的 Java 客户端常用操作)

package com.yht.utils;


import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.IOUtils;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.net.URI;


/**
 * Static helper methods for common HDFS client operations (mkdir, upload,
 * download, delete, rename, listing, and stream-based copies).
 *
 * <p>Every operation opens its own {@link FileSystem} handle against the
 * cluster at {@code hdfs://hadoop101:9000} as user {@code root} and closes it
 * via try-with-resources, so no handle leaks even when an operation throws.
 */
public class HdfsUtil {

    /** NameNode URI every operation connects to. */
    private static final String HDFS_URI = "hdfs://hadoop101:9000";

    /** User all operations are performed as. */
    private static final String HDFS_USER = "root";

    /** Shared Hadoop configuration; kept public for backward compatibility. */
    public static Configuration conf = new Configuration();

    /** Utility class — not instantiable. */
    private HdfsUtil() {
    }

    /**
     * Opens a {@link FileSystem} handle to the configured NameNode.
     * The caller must close it (use try-with-resources).
     */
    private static FileSystem openFs() throws Exception {
        return FileSystem.get(new URI(HDFS_URI), conf, HDFS_USER);
    }

    /**
     * Creates a directory (including any missing parents) on HDFS.
     *
     * @param path HDFS directory path to create
     * @return {@code true} if the directory was created
     * @throws Exception on connection or filesystem errors
     */
    public static boolean mkdirPath(String path) throws Exception {
        // try-with-resources closes the FileSystem even if mkdirs throws,
        // unlike the previous bare fs.close() at the end of the method.
        try (FileSystem fs = openFs()) {
            boolean created = fs.mkdirs(new Path(path));
            System.out.println((created) ? "SUCCESSFUL" : "DEFEAT");
            return created;
        }
    }

    /**
     * Uploads a local file to HDFS.
     *
     * @param localFilePath path of the local source file
     * @param hdfsFilePath  destination path on HDFS
     * @throws Exception on connection or copy errors
     */
    public static void putToHdfs(String localFilePath, String hdfsFilePath) throws Exception {
        // NOTE: mutates the shared static conf (preserved for compatibility);
        // every subsequent operation in this JVM will also use replication=3.
        conf.set("dfs.replication", "3");
        try (FileSystem fs = openFs()) {
            fs.copyFromLocalFile(new Path(localFilePath), new Path(hdfsFilePath));
        }
    }

    /**
     * Downloads an HDFS file to the local filesystem.
     *
     * @param hdfsFilePath  source path on HDFS
     * @param localFilePath destination path on the local filesystem
     * @throws Exception on connection or copy errors
     */
    public static void getToHdfs(String hdfsFilePath, String localFilePath) throws Exception {
        try (FileSystem fs = openFs()) {
            fs.copyToLocalFile(new Path(hdfsFilePath), new Path(localFilePath));
        }
    }

    /**
     * Deletes a file or directory on HDFS.
     *
     * @param hdfsPath    path to delete
     * @param isRecursion whether to delete recursively; must be {@code true}
     *                    when deleting a non-empty directory
     * @throws Exception on connection or filesystem errors
     */
    public static void delete(String hdfsPath, boolean isRecursion) throws Exception {
        try (FileSystem fs = openFs()) {
            fs.delete(new Path(hdfsPath), isRecursion);
        }
    }

    /**
     * Renames (moves) a file or directory on HDFS.
     *
     * @param oldName existing HDFS path
     * @param newName new HDFS path
     * @throws Exception on connection or filesystem errors
     */
    public static void rename(String oldName, String newName) throws Exception {
        try (FileSystem fs = openFs()) {
            fs.rename(new Path(oldName), new Path(newName));
        }
    }

    /**
     * Reports whether the given HDFS path is a file or a directory.
     *
     * @param hdfsPath path to inspect
     * @return {@code "File"} if the path is a regular file, otherwise
     *         {@code "Directory"}
     * @throws Exception on connection or filesystem errors
     */
    public static String judgeFileOrDirectory(String hdfsPath) throws Exception {
        // Fixed: the original never closed this FileSystem handle at all.
        try (FileSystem fs = openFs()) {
            // fs.isFile is deprecated but kept: it returns false (=> "Directory")
            // for nonexistent paths, whereas getFileStatus(...).isFile() throws,
            // and callers may depend on the original behavior.
            boolean isFile = fs.isFile(new Path(hdfsPath));
            return isFile ? "File" : "Directory";
        }
    }

    /**
     * Lists the names of all immediate children of an HDFS directory.
     *
     * @param hdfsPath HDFS directory to list
     * @return the child entry names (last path component only), in the order
     *         returned by {@link FileSystem#listStatus}
     * @throws Exception on connection or filesystem errors
     */
    public static String[] getListHdfsFileName(String hdfsPath) throws Exception {
        try (FileSystem fs = openFs()) {
            FileStatus[] statuses = fs.listStatus(new Path(hdfsPath));
            String[] names = new String[statuses.length];
            for (int i = 0; i < statuses.length; i++) {
                names[i] = statuses[i].getPath().getName();
            }
            return names;
        }
    }

    /**
     * Uploads a local file to HDFS by streaming its bytes.
     *
     * @param localFilePath path of the local source file
     * @param hdfsFilePath  destination path on HDFS
     * @throws Exception on connection or I/O errors
     */
    public static void putToHdfsByIO(String localFilePath, String hdfsFilePath) throws Exception {
        // try-with-resources closes in reverse declaration order (streams before
        // fs). The original closed fs *first*, invalidating the still-open
        // output stream, and leaked everything on an exception mid-copy.
        try (FileSystem fs = openFs();
             FileInputStream in = new FileInputStream(new File(localFilePath));
             FSDataOutputStream out = fs.create(new Path(hdfsFilePath))) {
            // close=false: the 3-arg copyBytes overload closes both streams
            // itself, which would double-close the resources managed here.
            IOUtils.copyBytes(in, out, conf.getInt("io.file.buffer.size", 4096), false);
        }
    }

    /**
     * Downloads an HDFS file to the local filesystem by streaming its bytes.
     *
     * @param hdfsFilePath  source path on HDFS
     * @param localFilePath destination path on the local filesystem
     * @throws Exception on connection or I/O errors
     */
    public static void getToHdfsByIO(String hdfsFilePath, String localFilePath) throws Exception {
        // Same resource-ordering fix as putToHdfsByIO: streams close before fs,
        // and copyBytes is told not to close them itself.
        try (FileSystem fs = openFs();
             FSDataInputStream in = fs.open(new Path(hdfsFilePath));
             FileOutputStream out = new FileOutputStream(new File(localFilePath))) {
            IOUtils.copyBytes(in, out, conf.getInt("io.file.buffer.size", 4096), false);
        }
    }
}

 

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值