hadoop-1 java 操作文件

hdfs的安全模式

安全模式是HDFS所处的一种特殊状态,在这种状态下,文件系统只接受只读数据请求,而不接受删除、修改等请求
  • HDFS启动时,DataNode向NameNode注册汇报。汇报可用的block状态。
  • NameNode判断有99.9%的block满足最小副本数(1,与设置的副本数无关)就会退出安全模式
  • HDFS启动时,默认30秒处于安全期,过了安全期,才可以对集群进行操作
  • 集群启动后每3秒发送一次心跳
hdfs dfsadmin -safemode  
Usage: hdfs dfsadmin [-safemode enter | leave | get | wait]

HDFS与javaAPI开发

Windows下hadoop环境变量配置

1、下载 hadoop 并解压到路径不含中文和空格的目录下

2、然后在windows当中配置hadoop的环境变量
- HADOOP_HOME设置成对应的路径

3、然后将hadoop.dll文件拷贝到C:\Windows\System32

文件系统配置项的3种重载方式

   Configuration conf = new Configuration();
   conf.set("fs.defaultFS","hdfs://node01:8020");
   FileSystem fileSystem = FileSystem.get(conf);
	//第三个参数为用户
    Configuration conf = new Configuration();
    FileSystem fileSystem = FileSystem.get(new URI("hdfs://node01:8020"), conf, "hadoop");
  Configuration conf = new Configuration();
  FileSystem fileSystem = FileSystem.get(new URI("hdfs://node01:8020"), conf);

新建、重命名、删除、上传

 @Test
    public void mkdir() throws IOException {
        //配置项
        Configuration conf = new Configuration();
        //设置需要连接的HDFS集群
        conf.set("fs.defaultFS","hdfs://node01:8020");
        //获得文件系统
        FileSystem fileSystem = FileSystem.get(conf);
        //调用方法
        fileSystem.mkdirs(new Path("/0519/mkdir/rec.txt"));
        //释放资源
        fileSystem.close();
    }
    @Test
    public void mkdir2() throws IOException, URISyntaxException, InterruptedException {
        //配置项
        Configuration conf = new Configuration();
        //设置需要连接的HDFS集群
        //conf.set("fs.defaultFS","hdfs://node01:8020");
        //获得文件系统
        FileSystem fileSystem = FileSystem.get(new URI("hdfs://node01:8020"), conf, "hadoop");
        //调用方法
        fileSystem.mkdirs(new Path("/0519/mkdir2"));
        //释放资源
        fileSystem.close();
    }
    @Test
    public void uploadFile() throws IOException {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS","hdfs://node01:8020");
        FileSystem fileSystem = FileSystem.get(conf);
        fileSystem.copyFromLocalFile(new Path("file:///F:\\BigData\\data\\testData.txt"),
                new Path("hdfs://node01:8020/0519"));

        fileSystem.close();
    }
    @Test
    public void downloadFile() throws IOException{
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS","hdfs://node01:8020");
        FileSystem fileSystem = FileSystem.get(conf);
        fileSystem.copyToLocalFile(false,new Path("hdfs://node01:8020//0519")
                ,new Path("file:///F:\\BigData\\data"));
        fileSystem.close();
    }

    @Test
    public void deleteFile() throws IOException{
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS","hdfs://node01:8020");
        FileSystem fileSystem = FileSystem.get(conf);
        fileSystem.delete(new Path("hdfs://node01:8020/0519"),true);
        fileSystem.close();
    }

    @Test
    public void re_nameFile() throws IOException{
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS","hdfs://node01:8020");
        FileSystem fileSystem = FileSystem.get(conf);
        fileSystem.rename(new Path("hdfs://node01:8020//mkdir010603")
                ,new Path("hdfs://node01:8020//mkdir010602"));
        fileSystem.close();
    }

/*部分源码
	public class LocatedFileStatus extends FileStatus {
  	private BlockLocation[] locations;


	public class FileStatus implements Writable, Comparable {

 	private Path path;
 	private long length;
 	private boolean isdir;
	private short block_replication;
	private long blocksize;
	private long modification_time;
	private long access_time;
	private FsPermission permission;
	private String owner;
	private String group;
	private Path symlink;
 */   


	@Test
 
   public void checkFileInfo() throws IOException{
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS","hdfs://node01:8020");
        FileSystem fileSystem = FileSystem.get(conf);
        RemoteIterator<LocatedFileStatus> iterator = fileSystem.listFiles(new Path("/"), true);     
        while (iterator.hasNext()){
            LocatedFileStatus status = iterator.next();
            //file name
            System.out.println(status.getPath().getName());
            //length
            System.out.println(status.getLen());
            //permission
            System.out.println(status.getPermission());
            //group
            System.out.println(status.getGroup());
            //block infomation
            BlockLocation[] blockLocations =  status.getBlockLocations();
            for (BlockLocation blockLocation :
                    blockLocations) {
                // Datanode hostnames
                String[] hosts = blockLocation.getHosts();
                for (String host: hosts
                     ) {
                    System.out.println(host);
                }
            }
        }
        fileSystem.close();

    }

    @Test
    public void streamUploadFile() throws URISyntaxException, IOException {
        //从本地输出到HDFS,调用java的FileInputStream输入流。HDFS的FSDataOutputStream的输出流
        //get filesystem
        Configuration conf = new Configuration();
        FileSystem fileSystem = FileSystem.get(new URI("hdfs://node01:8020"), conf);
        //create input iostream
        FileInputStream fis = new FileInputStream(new File("F:\\BigData\\data\\testData.txt"));
        //get output iostream
        FSDataOutputStream fos = fileSystem.create(new Path("hdfs://node01:8020/0519/rec.txt"));
        //connect stream
        IOUtils.copy(fis,fos);
        //close resource
        IOUtils.closeQuietly(fos);
        IOUtils.closeQuietly(fis);
        fileSystem.close();
    }
    @Test
    public void streamDownloadFile() throws URISyntaxException, IOException {
        //从HDFS输出到本地,调用HDFS的FSDataInputStream出入流,调用java的FileOutputStream输出流
        //会覆盖本地文件内容
        Configuration conf = new Configuration();
        FileSystem fileSystem = FileSystem.get(new URI("hdfs://node01/8020"), conf);
        FSDataInputStream fis = fileSystem.open(new Path("hdfs://node01:8020/0519//rec.txt"));
        FileOutputStream fos = new FileOutputStream(new File("F:\\BigData\\data\\testData.txt"));
        IOUtils.copy(fis,fos);
        IOUtils.closeQuietly(fis);
        IOUtils.closeQuietly(fos);
        fileSystem.close();
    }
    @Test
    public void mergeFile() throws URISyntaxException, IOException, InterruptedException {
        Configuration conf = new Configuration();
        FileSystem fileSystem = FileSystem.get(new URI("hdfs://node01:8020"), conf, "hadoop");
        //获取HDFS输出流
        FSDataOutputStream fos = fileSystem.create(new Path("hdfs://node01:8020/mergedFile.xml"));
        //获得本地文件系统LocalFileSystem
        LocalFileSystem localFileSystem = FileSystem.getLocal(new Configuration(conf));
        //读取本地的文件
        FileStatus[] fileStatuses = localFileSystem.listStatus(new Path("F:\\BigData\\data"));
        //读取所有本地小文件,写入到hdfs的大文件里去
        for (FileStatus fileStatus :
                fileStatuses) {
            //获取每一个文件的路径
            Path path = fileStatus.getPath();
            System.out.println(path.getName());
            //读取本地小文件 使用fileSystem把path理解成HDFS路径
            //FSDataInputStream fis = fileSystem.open(path);
            FSDataInputStream fis = localFileSystem.open(path);
            IOUtils.copy(fis,fos);
            IOUtils.closeQuietly(fis);
        }
        IOUtils.closeQuietly(fos);
        localFileSystem.close();
        fileSystem.close();
    }

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值