Experiment 2: Getting Familiar with Common HDFS Operations

(1) Output the content of a specified HDFS file to the terminal

hadoop fs -ls / # show files
read -p "Please input file you want to output: " fileName # input
if hadoop fs -test -e /$fileName # test whether the file exists
then
    hadoop fs -cat /$fileName # output file
else
    echo "The file doesn't exist, output failed." # output error msg
fi
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.Scanner;

public class test1 {
    // Reads the file at filePath and prints its content to the terminal line by line
    public static void write(FileSystem fs, Path filePath) throws IOException {
        try (FSDataInputStream inputStream = fs.open(filePath);
             BufferedReader bf = new BufferedReader(new InputStreamReader(inputStream))) {
            String line;
            while ((line = bf.readLine()) != null)
                System.out.println(line);
        }
        System.out.println("\nOutput end.");
    }
    public static void main(String[] args) {
        try {
            System.out.println("Please input file you want to output: ");
            Scanner in = new Scanner(System.in);
            String fileName = in.next();

            Configuration conf = new Configuration();
            conf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");

            FileSystem fs = FileSystem.get(conf);
            Path filePath = new Path(fileName);
            if (fs.exists(filePath)) {
                write(fs,filePath);
            } else {
                System.out.println("The file doesn't exist, output failed.");
            }
            in.close();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
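
Note: every Java program in this write-up obtains the file system with FileSystem.get(conf), so it relies on fs.defaultFS being set by a core-site.xml on the classpath (otherwise it falls back to the local file system, file:///). If that file is not available, the NameNode address can be set explicitly on the Configuration. A minimal sketch, assuming the pseudo-distributed address hdfs://localhost:9000 used in part (III); the class name HdfsConnect is just for illustration:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class HdfsConnect {
    // Builds a FileSystem handle without relying on core-site.xml being on the classpath
    public static FileSystem connect() throws IOException {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:9000"); // assumed NameNode address
        conf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
        return FileSystem.get(conf);
    }
}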

(2) Display the read/write permissions, size, creation time, path, and other information of a specified HDFS file;

hadoop fs -ls / # show files
read -p "Please input file you want to show details: " fileName # input
if hadoop fs -test -e /$fileName # test whether the file exists
then
    hadoop fs -ls -h /$fileName # output details
else
    echo "The file doesn't exist, output failed." # output error msg
fi
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.util.Scanner;

public class test2 {
    public static void main(String[] args) {
        try {
            System.out.println("Please input file you want to output: ");
            Scanner in = new Scanner(System.in);
            String fileName = in.next();

            Configuration conf = new Configuration();
            conf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");

            FileSystem fs = FileSystem.get(conf);
            Path filePath = new Path(fileName);
            if (fs.exists(filePath)) {
                System.out.println(fs.getFileStatus(filePath).toString());
            } else {
                System.out.println("The file doesn't exist, output failed.");
            }
            in.close();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
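
FileStatus#toString() above dumps every field on one line. If the individual attributes asked for in task (2) are wanted separately, they can be read straight off the FileStatus; note that HDFS records modification and access times rather than a true creation time. A minimal sketch (class and method names are just for illustration):

import java.text.SimpleDateFormat;
import java.util.Date;

import org.apache.hadoop.fs.FileStatus;

public class StatusPrinter {
    // Prints the attributes requested by task (2) for a single FileStatus
    public static void print(FileStatus s) {
        SimpleDateFormat fmt = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
        System.out.println("permission : " + s.getPermission());
        System.out.println("size       : " + s.getLen() + " bytes");
        System.out.println("modified   : " + fmt.format(new Date(s.getModificationTime())));
        System.out.println("path       : " + s.getPath());
    }
}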


(3) Given a directory in HDFS, output the read/write permissions, size, creation time, path, and other information of all files under it; if an entry is itself a directory, recursively output the information of all files under that directory;

read -p "Please input dir you want to show details: " dirPath
hadoop fs -ls -R $dirPath
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import java.io.IOException;
import java.util.Scanner;

public class test3 {
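    // Prints the status of every entry under filePath, recursing into subdirectories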
    public static void lsr(FileSystem fs, Path filePath) throws IOException {
        FileStatus[] fileInfo = fs.listStatus(filePath);
        for (FileStatus i : fileInfo) {
            System.out.println(i.toString());
            if (i.isDirectory())
                lsr(fs, i.getPath());
        }
    }

    public static void main(String[] args) {
        try {
            System.out.println("Please input file you want to output: ");
            Scanner in = new Scanner(System.in);
            String fileName = in.next();

            Configuration conf = new Configuration();
            conf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");

            FileSystem fs = FileSystem.get(conf);
            Path filePath = new Path(fileName);
            if (fs.exists(filePath)) {
                lsr(fs, filePath);
            } else {
                System.out.println("The file doesn't exist, output failed.");
            }
            in.close();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
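
For reference, FileSystem also ships a built-in recursive listing, listFiles(path, true). It only returns files (the directory entries themselves are skipped), which is why test3 recurses by hand; a minimal sketch, with the class name lsrFiles chosen just for illustration:

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class lsrFiles {
    // Lists every file (not directory) under dirPath, recursively
    public static void list(FileSystem fs, Path dirPath) throws IOException {
        RemoteIterator<LocatedFileStatus> it = fs.listFiles(dirPath, true);
        while (it.hasNext())
            System.out.println(it.next());
    }
}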

(4) Given the path of a file in HDFS, create and delete that file. If the directory containing the file does not exist, create the directory automatically;

read -p "Please input dir's path: " path

if ! hadoop fs -test -e "$path" # test whether the path exists
then
   hadoop fs -mkdir -p $path 
   echo "The path doesn't exist, created." 
   flag="True" # need to del
fi

read -p "Please input file's name: " fileName

hadoop fs -touchz $path/$fileName # create file

hadoop fs -rm -r $path/$fileName # del file

if [ "$flag" = "True" ]
then
   hadoop fs -rm -r $path # del dir
fi


import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.util.Scanner;

public class test4 {
    public static void main(String[] args) {
        try {
            System.out.println("Please input file's path: ");
            Scanner in = new Scanner(System.in);
            String fileName = in.next();

            Configuration conf = new Configuration();
            conf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");

            FileSystem fs = FileSystem.get(conf);
            Path filePath = new Path(fileName);

            if (!fs.exists(filePath)) {
                // create() also creates any missing parent directories of the file
                fs.create(filePath).close();
                System.out.println("The file has been successfully created.");
            } else System.out.println("The file already exists.");

            fs.delete(filePath, true);
            System.out.println("The file has been successfully deleted.");

            in.close();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
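
create() already takes care of missing parent directories, but the same requirement can be met explicitly with mkdirs(). A minimal sketch; the helper name ensureParent is just for illustration:

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class EnsureParent {
    // Creates the parent directory of filePath (and any missing ancestors) if needed
    public static void ensureParent(FileSystem fs, Path filePath) throws IOException {
        Path parent = filePath.getParent();
        if (parent != null && !fs.exists(parent))
            fs.mkdirs(parent);
    }
}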

(5) Given the path of an HDFS directory, create and delete that directory. When creating it, automatically create any missing parent directories; when deleting it, if the directory is not empty, let the user decide whether it should still be deleted;

read -p "Please input dir's path: " path

if ! hadoop fs -test -e "$path" # test whether the path exists
then
   hadoop fs -mkdir -p $path 
   echo "The path doesn't exist, created." 
fi

if [ "$(hadoop fs -count "$path" | awk '{print $1 + $2}')" -eq 1 ] # empty: -count reports one dir (itself) and zero files
then
    hadoop fs -rmdir "$path"
else 
    
    while read -p "The dir is not empty, do you still want to delete ? (y/n): " choice
    do
        if test $choice = "y";
        then 
            break
        elif test $choice = "n";
        then
            break
        else 
            echo "Input error! Please input 'y' or 'n'. "
        fi
    done # read user choice until it's legal

    if test $choice = "y";
    then 
        hadoop fs -rm -r $path
    fi            
fi


import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.util.Scanner;

public class test5 {
    public static void main(String[] args) {
        try {
            System.out.println("Please input file's path: ");
            Scanner in = new Scanner(System.in);
            String fileName = in.next();

            Configuration conf = new Configuration();
            conf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");

            FileSystem fs = FileSystem.get(conf);
            Path filePath = new Path(fileName);

            if (!fs.exists(filePath)) {
                fs.mkdirs(filePath); // mkdirs also creates any missing parent directories
                System.out.println("The dir has been successfully created.");
            } else System.out.println("The dir already exists.");

            // when the dir is not empty, the user decides whether it is still deleted
            boolean empty = fs.listStatus(filePath).length == 0;
            if (!empty)
                System.out.println("The dir is not empty, do you still want to delete it? (y/n): ");
            if (empty || in.next().equals("y")) {
                fs.delete(filePath, true);
                System.out.println("The dir has been successfully deleted.");
            }

            in.close();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}

(6) Append content to a specified file in HDFS; the user decides whether the content is added to the beginning or to the end of the original file;

read -p "Please input file's path: " path

if ! hadoop fs -test -e "$path" # test whether the file exists
then
    hadoop fs -touchz "$path" # create an empty file
    echo "The file doesn't exist, created an empty one."
fi

while read -p "Where do you want to insert content? (front/back): " choice
   do
      if test $choice = "front";
      then 
         break
      elif test $choice = "back";
      then
         break
      else 
         echo "Input error! Please input 'front' or 'back'. "
      fi
   done # read user choice until it's legal


if [ $choice == "front" ]
then
   hadoop fs -touchz tempFile
   echo "Please input content, end with ctrl + c"
   hadoop fs -appendToFile - tempFile # store user's input
   hadoop fs -appendToFile $path tempFile # 
   hadoop fs -rm $path # del ori file
   hadoop fs -mv tempFile $path
else
   echo "Please input content, end with ctrl + c"
   hadoop fs -appendToFile - $path
fi
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.Scanner;

public class test6 {

    // Appends the content of 'from' to 'to', then replaces 'from' with the merged file
    protected static void shear(FileSystem fs, Path from, Path to) throws IOException {
        try (FSDataOutputStream opt = fs.append(to);
             FSDataInputStream inputStream = fs.open(from);
             BufferedReader bf = new BufferedReader(new InputStreamReader(inputStream))) {
            String line;
            while ((line = bf.readLine()) != null)
                if (!line.isBlank()) opt.write((line + "\n").getBytes(StandardCharsets.UTF_8));
        }
        fs.delete(from, true);
        fs.rename(to, from);
    }

    public static void main(String[] args) {
        try {
            System.out.println("Please input file you want to insert content: ");
            Scanner in = new Scanner(System.in);
            String fileName = in.next();

            Configuration conf = new Configuration();
            conf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");

            FileSystem fs = FileSystem.get(conf);
            Path filePath = new Path(fileName);
            if (fs.exists(filePath)) {
                System.out.println("Where do you want to insert content? (front/back): ");

                String choice = in.next();
                while (!choice.equals("front") && !choice.equals("back")) {
                    System.out.println("Input error! Please input 'front' or 'back'. ");
                    choice = in.next();
                }

                System.out.println("Please input content, end with ctrl + d");
                if (choice.equals("front")) {
                    Path tmpPath = new Path("tempFile");
                    FSDataOutputStream opt = fs.create(tmpPath);
                    while (in.hasNext()) {
                        String line = in.nextLine();
                        if (!line.isBlank()) opt.writeChars(line + '\n');
                    }
                    opt.close();
                    //shear(fs, filePath, tmpPath);
                } else {
                    FSDataOutputStream opt = fs.append(filePath);
                    while (in.hasNext()) {
                        String line = in.nextLine();
                        if (!line.isBlank()) opt.writeChars(line + '\n');
                    }
                }
                System.out.println("Successfully inserted.");
            } else {
                System.out.println("The file doesn't exist, insert failed.");
            }
            in.close();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
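
Note: if fs.append() fails on a cluster with only a few DataNodes with an error like "Failed to replace a bad datanode on the existing pipeline", a common workaround is to relax the replace-datanode policy on the client Configuration. Both keys below are standard HDFS client properties; the values shown are the usual small-cluster settings, and the class name AppendConf is just for illustration:

import org.apache.hadoop.conf.Configuration;

public class AppendConf {
    // Builds a client Configuration on which append() works on a single-DataNode cluster
    public static Configuration create() {
        Configuration conf = new Configuration();
        conf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
        conf.set("dfs.client.block.write.replace-datanode-on-failure.enable", "true");
        return conf;
    }
}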

(7) Delete a specified file in HDFS;

read -p "Please input file's path you want to move from: " srcPath
read -p "Please input file's path you want to move to: " dstPath
hadoop fs -mv  $srcPath $dstPath
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.util.Scanner;

public class test7 {
    public static void main(String[] args) {
        try {
            System.out.println("Please input file's path: ");
            Scanner in = new Scanner(System.in);
            String fileName = in.next();

            Configuration conf = new Configuration();
            conf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");

            FileSystem fs = FileSystem.get(conf);
            Path filePath = new Path(fileName);

            if (!fs.exists(filePath)) {
                System.out.println("The path does’t existed.");
            } else {
                fs.delete(filePath, true);
                System.out.println("The dir has been successfully deleted.");
            }

            in.close();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}

(8) Move a file in HDFS from a source path to a destination path.

read -p "Please input file's path you want to move from: " sourcePath
read -p "Please input file's path you want to move to: " targetPath
hadoop fs -mv  $sourcePath $targetPath
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.util.Scanner;

public class test8 {
    public static void main(String[] args) {
        try {
            Scanner in = new Scanner(System.in);

            System.out.println("Please input src file's path: ");
            Path srcPath = new Path(in.next());
            System.out.println("Please input dst file's path: ");
            Path dstPath = new Path(in.next());

            Configuration conf = new Configuration();
            conf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
            FileSystem fs = FileSystem.get(conf);

            if (!fs.exists(srcPath)) {
                System.out.println("The src path doesn't exist.");
            } else if (fs.rename(srcPath, dstPath)) {
                System.out.println("The file has been successfully moved.");
            } else {
                System.out.println("Move failed.");
            }

            in.close();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}

(II) Implement a class "MyFSDataInputStream" that extends "org.apache.hadoop.fs.FSDataInputStream", with the following requirement: provide a method "readLine()" that reads a specified HDFS file line by line, returning null when the end of the file is reached and otherwise returning one line of the file's text.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.util.Scanner;

public class MyFSDataInputStream extends FSDataInputStream {

    public MyFSDataInputStream(InputStream in) {
        super(in);
    }

    // DataInputStream#readLine() is declared final and cannot be overridden here,
    // so the per-line reading is exposed as a static helper instead: it returns one
    // line of text, or null once the end of the file has been reached.
    public static String readLine(BufferedReader br) throws IOException {
        return br.readLine();
    }

    public static void main(String[] args) {
        try {
            System.out.println("Please input file you want to output: ");
            Scanner in = new Scanner(System.in);
            String fileName = in.next();

            Configuration conf = new Configuration();
            conf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");

            FileSystem fs = FileSystem.get(conf);
            Path filePath = new Path(fileName);
            if (fs.exists(filePath)) {
                try (MyFSDataInputStream fileIn = new MyFSDataInputStream(fs.open(filePath));
                     BufferedReader br = new BufferedReader(new InputStreamReader(fileIn))) {
                    String line;
                    while ((line = readLine(br)) != null) // null signals end of file
                        System.out.println(line);
                }
            } else {
                System.out.println("The file doesn't exist, output failed.");
            }
            in.close();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}

(III) Consult the Java documentation or other references, and use "java.net.URL" together with "org.apache.hadoop.fs.FsUrlStreamHandlerFactory" to print the text of a specified HDFS file to the terminal.

import java.io.IOException;
import java.io.InputStream;
import java.net.URL;
import java.util.Scanner;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsUrlStreamHandlerFactory;
import org.apache.hadoop.fs.Path;

import static org.apache.hadoop.io.IOUtils.*;

public class printFile {
    static {
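        // URL.setURLStreamHandlerFactory may only be called once per JVM,
        // which is why the factory is registered in a static initializer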
        URL.setURLStreamHandlerFactory(new FsUrlStreamHandlerFactory());
    }

    public static void cat(String filePath) throws IOException {
        try (InputStream in = new URL("hdfs", "localhost", 9000, filePath).openStream()) {
            copyBytes(in, System.out, 4096, false); // false: don't close System.out
        }
    }

    public static void main(String[] args) {

        try {
            System.out.println("Please input file's absolute path you want to output: ");
            Scanner in = new Scanner(System.in);
            String fileName = in.next(); // "/user/bernard/1.txt"

            Configuration conf = new Configuration();
            conf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");

            FileSystem fs = FileSystem.get(conf);
            Path filePath = new Path(fileName);
            if (fs.exists(filePath)) {
                cat(fileName);
            } else {
                System.out.println("The file doesn't exist, output failed.");
            }
            in.close();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
