Big Data Lab 2: Getting Familiar with Common HDFS Operations (Code)

2120011013 刘百胜

(I) Implement the following operations programmatically, and complete the same tasks using the shell commands provided by Hadoop.

(1) Upload an arbitrary text file to HDFS. If the specified file already exists in HDFS, let the user choose whether to append to the end of the original file or overwrite it.

package two;

import java.io.FileInputStream;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CopyFromLocalFile {

    /* Check whether a path exists on HDFS */
    public static boolean test(Configuration conf, String path) {
        try (FileSystem fs = FileSystem.get(conf)) {
            return fs.exists(new Path(path));
        } catch (IOException e) {
            e.printStackTrace();
            return false;
        }
    }

    /* Copy a local file to HDFS */
    public static void copyFromLocalFile(Configuration conf,
            String localFilePath, String remoteFilePath) {
        Path localPath = new Path(localFilePath);
        Path remotePath = new Path(remoteFilePath);
        try (FileSystem fs = FileSystem.get(conf)) {
            /* In fs.copyFromLocalFile, the first argument controls whether the
               source file is deleted, the second whether the target is overwritten */
            fs.copyFromLocalFile(false, true, localPath, remotePath);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /* Append the contents of a local file to a file on HDFS */
    public static void appendToFile(Configuration conf, String localFilePath,
            String remoteFilePath) {
        Path remotePath = new Path(remoteFilePath);
        try (FileSystem fs = FileSystem.get(conf);
                FileInputStream in = new FileInputStream(localFilePath)) {
            FSDataOutputStream out = fs.append(remotePath);
            byte[] data = new byte[1024];
            int read = -1;
            while ((read = in.read(data)) > 0) {
                out.write(data, 0, read);
            }
            out.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:9000");
        String localFilePath = "/usr/local/hadoop/lbs.txt"; // local path
        String remoteFilePath = "/user/hadoop/lbs.txt"; // HDFS path
        // String choice = "append"; // append to the end if the file exists
        String choice = "overwrite"; // overwrite if the file exists

        try {
            boolean fileExists = false;
            if (CopyFromLocalFile.test(conf, remoteFilePath)) {
                fileExists = true;
                System.out.println(remoteFilePath + " already exists.");
            } else {
                System.out.println(remoteFilePath + " does not exist.");
            }

            if (!fileExists) {
                CopyFromLocalFile.copyFromLocalFile(conf, localFilePath,
                        remoteFilePath);
                System.out.println(localFilePath + " uploaded to " + remoteFilePath);
            } else if (choice.equals("overwrite")) { // overwrite
                CopyFromLocalFile.copyFromLocalFile(conf, localFilePath,
                        remoteFilePath);
                System.out.println(localFilePath + " overwrote " + remoteFilePath);
            } else if (choice.equals("append")) { // append
                CopyFromLocalFile.appendToFile(conf, localFilePath,
                        remoteFilePath);
                System.out.println(localFilePath + " appended to " + remoteFilePath);
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
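
The same task with Hadoop's shell commands (a minimal sketch, reusing the local and HDFS paths from the code above):

if hdfs dfs -test -e /user/hadoop/lbs.txt; then
    # the file exists: choose one of appending or overwriting
    hdfs dfs -appendToFile /usr/local/hadoop/lbs.txt /user/hadoop/lbs.txt
    # hdfs dfs -copyFromLocal -f /usr/local/hadoop/lbs.txt /user/hadoop/lbs.txt
else
    hdfs dfs -copyFromLocal /usr/local/hadoop/lbs.txt /user/hadoop/lbs.txt
fi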

(2) Download a specified file from HDFS. If a local file with the same name already exists, automatically rename the downloaded file.

package two;

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CopyToLocal {

    /* Download a file from HDFS, renaming it if the local name is already taken */
    public static void copyToLocal(Configuration conf, String remoteFilePath,
            String localFilePath) {
        Path remotePath = new Path(remoteFilePath);
        try (FileSystem fs = FileSystem.get(conf)) {
            File f = new File(localFilePath);
            if (f.exists()) {
                System.out.println(localFilePath + " already exists.");
                int i = 0;
                while (true) {
                    f = new File(localFilePath + "_" + i);
                    if (!f.exists()) {
                        localFilePath = localFilePath + "_" + i;
                        break;
                    }
                    i++;
                }
                System.out.println("Renaming the download to: " + localFilePath);
            }
            // download the file to the local file system
            Path localPath = new Path(localFilePath);
            fs.copyToLocalFile(remotePath, localPath);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:9000");
        String localFilePath = "/usr/local/hadoop/no2.txt"; // local path
        String remoteFilePath = "/user/hadoop/lbs.txt"; // HDFS path

        try {
            CopyToLocal.copyToLocal(conf, remoteFilePath, localFilePath);
            System.out.println("Download complete");
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
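
The same task with shell commands (a sketch; the _0 suffix imitates the renaming scheme used in the code):

if hdfs dfs -test -e file:///usr/local/hadoop/no2.txt; then
    hdfs dfs -copyToLocal /user/hadoop/lbs.txt /usr/local/hadoop/no2.txt_0
else
    hdfs dfs -copyToLocal /user/hadoop/lbs.txt /usr/local/hadoop/no2.txt
fi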

(3) Print the contents of a specified HDFS file to the terminal.

package two;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class Cat {

    /* Print the contents of an HDFS file to the terminal */
    public static void cat(Configuration conf, String remoteFilePath) {
        Path remotePath = new Path(remoteFilePath);
        try (FileSystem fs = FileSystem.get(conf);
                FSDataInputStream in = fs.open(remotePath);
                BufferedReader d = new BufferedReader(new InputStreamReader(in))) {
            String line;
            while ((line = d.readLine()) != null) {
                System.out.println(line);
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:9000");
        String remoteFilePath = "/user/hadoop/lbs.txt"; // HDFS path

        try {
            System.out.println("Reading file: " + remoteFilePath);
            Cat.cat(conf, remoteFilePath);
            System.out.println("\nDone");
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
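
The shell equivalent is a single command:

hdfs dfs -cat /user/hadoop/lbs.txt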

(4) Display information about a specified HDFS file: read/write permission, size, creation time, path, and so on.

package two;

import java.io.IOException;
import java.text.SimpleDateFormat;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class List {

    /* Show the permission, size, modification time, and path of an HDFS file */
    public static void ls(Configuration conf, String remoteFilePath) {
        try (FileSystem fs = FileSystem.get(conf)) {
            Path remotePath = new Path(remoteFilePath);
            FileStatus[] fileStatuses = fs.listStatus(remotePath);
            for (FileStatus s : fileStatuses) {
                System.out.println("Path: " + s.getPath().toString());
                System.out.println("Permission: " + s.getPermission().toString());
                System.out.println("Size: " + s.getLen());
                /* getModificationTime returns a timestamp; format it as a date */
                long timeStamp = s.getModificationTime();
                SimpleDateFormat format = new SimpleDateFormat(
                        "yyyy-MM-dd HH:mm:ss");
                String date = format.format(timeStamp);
                System.out.println("Time: " + date);
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:9000");
        String remoteFilePath = "/user/hadoop/lbs.txt"; // HDFS path

        try {
            System.out.println("Reading file information: " + remoteFilePath);
            List.ls(conf, remoteFilePath);
            System.out.println("\nDone");
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
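
The shell equivalent; -ls prints the permission, size, modification time, and path, and -h makes the size human-readable:

hdfs dfs -ls -h /user/hadoop/lbs.txt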

(5) Given a directory in HDFS, output the read/write permission, size, creation time, path, and other information of every file under it; if an entry is itself a directory, recursively output the information of all files below it.

package two;

import java.io.IOException;
import java.text.SimpleDateFormat;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class ListDir {

    /* Recursively show information on every file under an HDFS directory */
    public static void lsDir(Configuration conf, String remoteDir) {
        try (FileSystem fs = FileSystem.get(conf)) {
            Path dirPath = new Path(remoteDir);
            /* the second argument of listFiles enables recursive listing */
            RemoteIterator<LocatedFileStatus> remoteIterator = fs.listFiles(
                    dirPath, true);
            while (remoteIterator.hasNext()) {
                FileStatus s = remoteIterator.next();
                System.out.println("Path: " + s.getPath().toString());
                System.out.println("Permission: " + s.getPermission().toString());
                System.out.println("Size: " + s.getLen());
                /* getModificationTime returns a timestamp; format it as a date */
                long timeStamp = s.getModificationTime();
                SimpleDateFormat format = new SimpleDateFormat(
                        "yyyy-MM-dd HH:mm:ss");
                String date = format.format(timeStamp);
                System.out.println("Time: " + date);
                System.out.println();
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:9000");
        String remoteDir = "/user/hadoop"; // HDFS path

        try {
            System.out.println("Information on all files under the directory (recursive): " + remoteDir);
            ListDir.lsDir(conf, remoteDir);
            System.out.println("Done");
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
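
The shell equivalent; -R makes the listing recursive:

hdfs dfs -ls -R -h /user/hadoop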

(6) Given the path of a file inside HDFS, create and delete that file. If the directory containing the file does not exist, create the directory automatically.

package two;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RemoveOrMake {

    /* Check whether a path exists on HDFS */
    public static boolean test(Configuration conf, String path) {
        try (FileSystem fs = FileSystem.get(conf)) {
            return fs.exists(new Path(path));
        } catch (IOException e) {
            e.printStackTrace();
            return false;
        }
    }

    /* Create a directory, including any missing parents */
    public static boolean mkdir(Configuration conf, String remoteDir) {
        try (FileSystem fs = FileSystem.get(conf)) {
            Path dirPath = new Path(remoteDir);
            return fs.mkdirs(dirPath);
        } catch (IOException e) {
            e.printStackTrace();
            return false;
        }
    }

    /* Create an empty file */
    public static void touchz(Configuration conf, String remoteFilePath) {
        Path remotePath = new Path(remoteFilePath);
        try (FileSystem fs = FileSystem.get(conf)) {
            FSDataOutputStream outputStream = fs.create(remotePath);
            outputStream.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /* Delete a file */
    public static boolean rm(Configuration conf, String remoteFilePath) {
        Path remotePath = new Path(remoteFilePath);
        try (FileSystem fs = FileSystem.get(conf)) {
            return fs.delete(remotePath, false);
        } catch (IOException e) {
            e.printStackTrace();
            return false;
        }
    }

    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:9000");
        String remoteFilePath = "/user/hadoop/input/no6.txt";
        String remoteDir = "/user/hadoop/input";

        try {
            if (RemoveOrMake.test(conf, remoteFilePath)) {
                RemoveOrMake.rm(conf, remoteFilePath); // delete the file
                System.out.println("Deleted file: " + remoteFilePath);
            } else {
                if (!RemoveOrMake.test(conf, remoteDir)) { // create the directory if it does not exist
                    RemoveOrMake.mkdir(conf, remoteDir);
                    System.out.println("Created directory: " + remoteDir);
                }
                RemoveOrMake.touchz(conf, remoteFilePath);
                System.out.println("Created file: " + remoteFilePath);
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
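
A shell sketch of the same logic, reusing the paths above:

# create the file, first creating the parent directory if needed
if hdfs dfs -test -d /user/hadoop/input; then
    hdfs dfs -touchz /user/hadoop/input/no6.txt
else
    hdfs dfs -mkdir -p /user/hadoop/input
    hdfs dfs -touchz /user/hadoop/input/no6.txt
fi
# or delete the file
hdfs dfs -rm /user/hadoop/input/no6.txt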

(7) Given the path of an HDFS directory, create and delete that directory. When creating, automatically create any missing parent directories; when deleting, let the user decide whether the directory should still be deleted when it is not empty.

package two;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import java.io.*;

public class HDFSApi7 {
    
    public static boolean test(Configuration conf, String path) throws IOException {
        FileSystem fs = FileSystem.get(conf);
        return fs.exists(new Path(path));
    }

    
    public static boolean isDirEmpty(Configuration conf, String remoteDir) throws IOException {
        FileSystem fs = FileSystem.get(conf);
        Path dirPath = new Path(remoteDir);
        RemoteIterator<LocatedFileStatus> remoteIterator = fs.listFiles(dirPath, true);
        return !remoteIterator.hasNext();
    }

    
    public static boolean mkdir(Configuration conf, String remoteDir) throws IOException {
        FileSystem fs = FileSystem.get(conf);
        Path dirPath = new Path(remoteDir);
        boolean result = fs.mkdirs(dirPath);
        fs.close();
        return result;
    }

    
    public static boolean rmDir(Configuration conf, String remoteDir) throws IOException {
        FileSystem fs = FileSystem.get(conf);
        Path dirPath = new Path(remoteDir);
        /* the second argument controls whether the deletion is recursive */
        boolean result = fs.delete(dirPath, true);
        fs.close();
        return result;
    }

  
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:9000");
        String remoteDir = "/user/xusheng/dir1/dir2";    // HDFS directory
        boolean forceDelete = false;  // whether to delete the directory even when it is not empty

        try {
            
            if ( !HDFSApi7.test(conf, remoteDir) ) {
                HDFSApi7.mkdir(conf, remoteDir); // create the directory
                System.out.println("Created directory: " + remoteDir);
            } else {
                if ( HDFSApi7.isDirEmpty(conf, remoteDir) || forceDelete ) {
                    HDFSApi7.rmDir(conf, remoteDir);
                    System.out.println("Deleted directory: " + remoteDir);
                } else { // the directory is not empty
                    System.out.println("Directory not empty; not deleting: " + remoteDir);
                }
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
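
The shell equivalents (a sketch; -rmdir only deletes an empty directory, while -rm -r deletes the directory regardless of its contents):

hdfs dfs -mkdir -p /user/xusheng/dir1/dir2
hdfs dfs -rmdir /user/xusheng/dir1/dir2
hdfs dfs -rm -r /user/xusheng/dir1/dir2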

(8) Append content to a specified HDFS file, letting the user choose whether the content goes at the beginning or at the end of the original file.

package two;

import java.io.FileInputStream;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class AppendToFile {

    /* Check whether a path exists on HDFS */
    public static boolean test(Configuration conf, String path) {
        try (FileSystem fs = FileSystem.get(conf)) {
            return fs.exists(new Path(path));
        } catch (IOException e) {
            e.printStackTrace();
            return false;
        }
    }

    /* Append a string to the end of a file on HDFS */
    public static void appendContentToFile(Configuration conf, String content,
            String remoteFilePath) {
        try (FileSystem fs = FileSystem.get(conf)) {
            Path remotePath = new Path(remoteFilePath);
            /* open an output stream whose writes go to the end of the file */
            FSDataOutputStream out = fs.append(remotePath);
            out.write(content.getBytes());
            out.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /* Append the contents of a local file to a file on HDFS */
    public static void appendToFile(Configuration conf, String localFilePath,
            String remoteFilePath) {
        Path remotePath = new Path(remoteFilePath);
        try (FileSystem fs = FileSystem.get(conf);
                FileInputStream in = new FileInputStream(localFilePath)) {
            FSDataOutputStream out = fs.append(remotePath);
            byte[] data = new byte[1024];
            int read = -1;
            while ((read = in.read(data)) > 0) {
                out.write(data, 0, read);
            }
            out.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /* Move a file out of HDFS to the local file system */
    public static void moveToLocalFile(Configuration conf,
            String remoteFilePath, String localFilePath) {
        try (FileSystem fs = FileSystem.get(conf)) {
            Path remotePath = new Path(remoteFilePath);
            Path localPath = new Path(localFilePath);
            fs.moveToLocalFile(remotePath, localPath);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /* Create an empty file */
    public static void touchz(Configuration conf, String remoteFilePath) {
        try (FileSystem fs = FileSystem.get(conf)) {
            Path remotePath = new Path(remoteFilePath);
            FSDataOutputStream outputStream = fs.create(remotePath);
            outputStream.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Main
     */
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:9000");
        String remoteFilePath = "/1.txt"; // HDFS file
        String content = "newly appended content\n";
        String choice = "after"; // append to the end of the file
        // String choice = "before"; // insert at the beginning of the file

        try {
            if (!AppendToFile.test(conf, remoteFilePath)) {
                System.out.println("File does not exist: " + remoteFilePath);
            } else {
                if (choice.equals("after")) { // append to the end
                    AppendToFile.appendContentToFile(conf, content,
                            remoteFilePath);
                    System.out.println("Appended content to the end of " + remoteFilePath);
                } else if (choice.equals("before")) { // insert at the beginning
                    String localTmpPath = "/user/hadoop/tmp.txt";
                    AppendToFile.moveToLocalFile(conf, remoteFilePath,
                            localTmpPath); // move the original to a local temporary file
                    AppendToFile.touchz(conf, remoteFilePath); // recreate the file, empty
                    AppendToFile.appendContentToFile(conf, content,
                            remoteFilePath); // write the new content first
                    AppendToFile.appendToFile(conf, localTmpPath,
                            remoteFilePath); // then append the original contents
                    System.out.println("Inserted content at the beginning of " + remoteFilePath);
                }
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
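
A shell sketch of both cases; local.txt is a hypothetical local file holding the content to add:

# append to the end
hdfs dfs -appendToFile local.txt /1.txt
# insert at the beginning: download, concatenate locally, then overwrite
hdfs dfs -get /1.txt ./old.txt
cat local.txt ./old.txt > ./new.txt
hdfs dfs -put -f ./new.txt /1.txt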

(9) Delete a specified file from HDFS.

package two;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import java.io.*;

public class HDFSApi9 {
   
    public static boolean rm(Configuration conf, String remoteFilePath) throws IOException {
        FileSystem fs = FileSystem.get(conf);
        Path remotePath = new Path(remoteFilePath);
        boolean result = fs.delete(remotePath, false);
        fs.close();
        return result;
    }

   
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:9000");
        String remoteFilePath = "/user/hadoop/input/lbs.txt";    // HDFS file

        try {
            if ( HDFSApi9.rm(conf, remoteFilePath) ) {
                System.out.println("Deleted file: " + remoteFilePath);
            } else {
                System.out.println("Operation failed (the file does not exist or deletion failed)");
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
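
The shell equivalent:

hdfs dfs -rm /user/hadoop/input/lbs.txt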

(11) Move a file in HDFS from a source path to a destination path.

package two;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class MoveFile {

    /* Move (rename) a file within HDFS */
    public static boolean mv(Configuration conf, String remoteFilePath,
            String remoteToFilePath) {
        try (FileSystem fs = FileSystem.get(conf)) {
            Path srcPath = new Path(remoteFilePath);
            Path dstPath = new Path(remoteToFilePath);
            return fs.rename(srcPath, dstPath);
        } catch (IOException e) {
            e.printStackTrace();
            return false;
        }
    }

    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:9000");
        String remoteFilePath = "hdfs:///user/hadoop/lbs.txt"; // source HDFS path
        String remoteToFilePath = "hdfs:///user/hadoop/input"; // destination HDFS path

        try {
            if (MoveFile.mv(conf, remoteFilePath, remoteToFilePath)) {
                System.out.println("Moved " + remoteFilePath + " to "
                        + remoteToFilePath);
            } else {
                System.out.println("Operation failed (the source does not exist or the move failed)");
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
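
The shell equivalent:

hdfs dfs -mv /user/hadoop/lbs.txt /user/hadoop/input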

(II) Implement a class "MyFSDataInputStream" that extends "org.apache.hadoop.fs.FSDataInputStream", with the following requirement: provide a method "readLine()" that reads the specified HDFS file line by line, returning null when the end of the file is reached and otherwise returning one line of text.


import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import java.io.*;

public class MyFSDataInputStream extends FSDataInputStream {
    public MyFSDataInputStream(InputStream in) {
        super(in);
    }

    /* Read one line, one character at a time; assumes a line fits in 1024 characters */
    public static String readline(BufferedReader br) throws IOException {
        char[] data = new char[1024];
        int read = -1;
        int off = 0;

        while ( (read = br.read(data, off, 1)) != -1 ) {
            if (data[off] == '\n') {
                off += 1;
                break;
            }
            off += 1;
        }

        if (off > 0) {
            /* return only the characters actually read, without the trailing newline */
            int len = (data[off - 1] == '\n') ? off - 1 : off;
            return String.valueOf(data, 0, len);
        } else {
            return null;
        }
    }

    
    public static void cat(Configuration conf, String remoteFilePath) throws IOException {
        FileSystem fs = FileSystem.get(conf);
        Path remotePath = new Path(remoteFilePath);
        FSDataInputStream in = fs.open(remotePath);
        BufferedReader br = new BufferedReader(new InputStreamReader(in));
        String line = null;
        while ( (line = MyFSDataInputStream.readline(br)) != null ) {
            System.out.println(line);
        }
        br.close();
        in.close();
        fs.close();
    }

  
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:9000");
        String remoteFilePath = "/user/hadoop/lbs.txt";    // HDFS path
        try {
            MyFSDataInputStream.cat(conf, remoteFilePath);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}

(III) Consult the Java documentation or other references, and use "java.net.URL" together with "org.apache.hadoop.fs.FsUrlStreamHandlerFactory" to print the text of a specified HDFS file to the terminal.



import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.IOUtils;
import java.io.*;
import java.net.URL;

public class HDFSApi11 {
    static{
        URL.setURLStreamHandlerFactory(new FsUrlStreamHandlerFactory());
    }

  
    public static void main(String[] args) throws Exception {
        String remoteFilePath = "hdfs://localhost:9000/user/hadoop/lbs.txt";    // HDFS file
        InputStream in = null;
        try {
            /* the factory registered above lets java.net.URL open hdfs:// URLs */
            in = new URL(remoteFilePath).openStream();
            IOUtils.copyBytes(in, System.out, 4096, false);
        } finally {
            IOUtils.closeStream(in);
        }
    }
}

The following is an alternative implementation of MyFSDataInputStream:

```java
import java.io.IOException;

import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class MyFSDataInputStream extends FSDataInputStream {
    private static final int DEFAULT_BUFFER_SIZE = 1024;
    private byte[] buffer;
    private int bufferPos;
    private int bufferSize;

    public MyFSDataInputStream(FileSystem fs, Path file) throws IOException {
        super(fs.open(file));
        buffer = new byte[DEFAULT_BUFFER_SIZE];
        bufferPos = 0;
        bufferSize = 0;
    }

    /* Read one line of text; returns "" for an empty line and null at end of file.
       Note: each byte is treated as one char, so this assumes single-byte (ASCII) text */
    public String readLine() throws IOException {
        String line = null;
        int b = -1;
        while ((b = read()) != -1) {
            char c = (char) b;
            if (c == '\n') {
                return (line == null) ? "" : line;
            }
            if (line == null) {
                line = "";
            }
            line += c;
        }
        return line; // null when EOF is reached with nothing read
    }

    /* Single-byte read served from an internal buffer */
    @Override
    public synchronized int read() throws IOException {
        if (bufferPos >= bufferSize) {
            bufferSize = super.read(buffer);
            bufferPos = 0;
        }
        if (bufferSize == -1) {
            return -1;
        }
        return buffer[bufferPos++] & 0xff; // mask so bytes >= 128 are not confused with EOF (-1)
    }
}
```

The following code uses MyFSDataInputStream to read a file from HDFS and print it to the terminal:

```java
import java.net.URL;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsUrlStreamHandlerFactory;
import org.apache.hadoop.fs.Path;

public class HDFSFileReader {
    public static void main(String[] args) throws Exception {
        String hdfsFile = "hdfs://localhost:9000/user/hadoop/test.txt";
        // register FsUrlStreamHandlerFactory so java.net.URL understands the hdfs:// scheme
        URL.setURLStreamHandlerFactory(new FsUrlStreamHandlerFactory());
        URL url = new URL(hdfsFile);
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(url.toURI(), conf);
        Path path = new Path(url.getPath());
        MyFSDataInputStream in = new MyFSDataInputStream(fs, path);
        String line = null;
        while ((line = in.readLine()) != null) {
            System.out.println(line);
        }
        in.close();
        fs.close();
    }
}
```

In the code above, we first register `FsUrlStreamHandlerFactory` as the URL stream handler factory through `URL.setURLStreamHandlerFactory()`, then obtain a `FileSystem` instance for the file with `FileSystem.get()`. We then read the file line by line through `MyFSDataInputStream.readLine()` and print each line to the terminal, and finally close all open streams and the `FileSystem` instance.