I. Basic commands
1) Upload a file from the local server to an HDFS directory
hdfs dfs -mkdir -p /test/input
hdfs dfs -put /root/a.log /test/input
2) List a directory
hdfs dfs -ls /test
List a directory recursively: hdfs dfs -ls -R /test
3) Print the contents of a file:
hdfs dfs -cat <file>
4) Show the end of a file
hdfs dfs -tail <file>
5) Copy within HDFS
hdfs dfs -cp /test/input/install.log /test/input/a.log
6) Rename or move within HDFS
hdfs dfs -mv /test/input/a.log /test/a.log
7) Move a local file to HDFS (unlike -put, the local copy is removed)
hdfs dfs -moveFromLocal a.log /lwg
8) Download a file from HDFS to the local machine
hdfs dfs -get /lwg/a.log ./
9) Download multiple files from an HDFS directory and merge them into a single local file
hdfs dfs -getmerge /lwg/* ./merge.txt
10) Delete a file
hdfs dfs -rm /lwg/ghi.txt
11) Show the free space of the file system (add -h for human-readable sizes)
hdfs dfs -df /lwg
12) Show the size of a file:
hdfs dfs -du /lwg/a.log
II. HDFS file operations with the Java API
1. Maven dependencies
<repositories>
    <repository>
        <id>cloudera</id>
        <url>https://repository.cloudera.com/artifactory/cloudera-repos/</url>
    </repository>
</repositories>
<dependencies>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-client</artifactId>
        <version>2.6.0-mr1-cdh5.14.0</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-common</artifactId>
        <version>2.6.0-cdh5.14.0</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-hdfs</artifactId>
        <version>2.6.0-cdh5.14.0</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-mapreduce-client-core</artifactId>
        <version>2.6.0-cdh5.14.0</version>
    </dependency>
    <!-- https://mvnrepository.com/artifact/junit/junit -->
    <dependency>
        <groupId>junit</groupId>
        <artifactId>junit</artifactId>
        <version>4.11</version>
        <scope>test</scope>
    </dependency>
    <dependency>
        <groupId>org.testng</groupId>
        <artifactId>testng</artifactId>
        <version>RELEASE</version>
    </dependency>
</dependencies>
<build>
    <plugins>
        <plugin>
            <groupId>org.apache.maven.plugins</groupId>
            <artifactId>maven-compiler-plugin</artifactId>
            <version>3.0</version>
            <configuration>
                <source>1.8</source>
                <target>1.8</target>
                <encoding>UTF-8</encoding>
                <!-- <verbal>true</verbal> -->
            </configuration>
        </plugin>
        <plugin>
            <groupId>org.apache.maven.plugins</groupId>
            <artifactId>maven-shade-plugin</artifactId>
            <version>2.4.3</version>
            <executions>
                <execution>
                    <phase>package</phase>
                    <goals>
                        <goal>shade</goal>
                    </goals>
                    <configuration>
                        <minimizeJar>true</minimizeJar>
                    </configuration>
                </execution>
            </executions>
        </plugin>
    </plugins>
</build>
2. Example code
package com.hiekn;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.net.URI; // needed if the URI-based FileSystem.get/newInstance variants below are uncommented
/**
 * Read HDFS data through the FileSystem API
 */
public class ReadHdfs {
    public static void main(String[] args) throws Exception {
        // 1. Connect to the HDFS NameNode port to communicate and obtain the FileSystem
        //Configuration conf = new Configuration();
        //FileSystem fileSystem = FileSystem.get(new URI("hdfs://192.168.32.100:8020"), conf);
        // 1.2 FileSystem: option two
        //Configuration conf = new Configuration();
        //FileSystem fileSystem = FileSystem.newInstance(new URI("hdfs://192.168.32.100:8020"), conf);
        // 1.3 FileSystem: option three
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://192.168.32.100:8020");
        FileSystem fileSystem = FileSystem.newInstance(conf);
        System.out.println(fileSystem.toString());

        // 2. Recursively list every file in the file system
        // Exception: Permission denied: user=lwg, access=READ_EXECUTE, inode="/tmp/hadoop-yarn":root:supergroup:drwx------
        // Fix: hdfs dfs -chmod -R 755 /
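        // Another common workaround (illustrative, not from the original post): obtain the
        // FileSystem as the owning user instead of relaxing permissions, e.g.
        //   FileSystem.get(new URI("hdfs://192.168.32.100:8020"), conf, "root");
        // or start the client JVM with the HADOOP_USER_NAME environment variable set to root.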
        //listFile(fileSystem, new Path("/"));

        // 3. List files directly through the Hadoop API (second argument: whether to recurse)
        //listFileAPi(fileSystem, new Path("/"));

        // 4. Download a file to the local machine
        //downloadFiletoLocal(fileSystem, "D:\\install.log", new Path("/test/input/install.log"));

        // 5. Upload a local file to HDFS
        //uploadFileToHdfs(fileSystem, new Path("file:///D:\\install.log"), new Path("/hello/mydir/test"));

        // 6. Upload multiple local files to HDFS, merging them into one large file
        mergeLoadFile(fileSystem, "file:///E:\\bigdata\\新建文件夹", new Path("/test/bigfile87.xml"));
    }
    /**
     * 2. Recursively list every file in the file system
     */
    public static void listFile(FileSystem fileSystem, Path path) throws IOException {
        FileStatus[] fileStatuses = fileSystem.listStatus(path);
        for (FileStatus fileStatus : fileStatuses) {
            if (fileStatus.isDirectory()) {
                listFile(fileSystem, fileStatus.getPath());
            } else {
                Path path1 = fileStatus.getPath();
                System.out.println("File path: " + path1);
            }
        }
        // Do not close the FileSystem here: the method is called recursively, and closing it
        // in a nested call would break listStatus for the directories that remain to be visited.
    }
    /**
     * 3. List every file using the Hadoop listFiles API
     */
    public static void listFileAPi(FileSystem fileSystem, Path path) throws Exception {
        RemoteIterator<LocatedFileStatus> files = fileSystem.listFiles(path, true);
        while (files.hasNext()) {
            LocatedFileStatus next = files.next();
            System.out.println(next.getPath().toString());
        }
        fileSystem.close();
    }
    /**
     * 4. Download a file from HDFS to the local file system
     */
    private static void downloadFiletoLocal(FileSystem fileSystem, String path, Path hdfspath) throws IOException {
        FSDataInputStream open = fileSystem.open(hdfspath);
        FileOutputStream outputStream = new FileOutputStream(new File(path));
        IOUtils.copy(open, outputStream);
        IOUtils.closeQuietly(open);
        IOUtils.closeQuietly(outputStream);
        fileSystem.close();
    }
    /**
     * 5. Upload a local file to HDFS
     */
    private static void uploadFileToHdfs(FileSystem fileSystem, Path local, Path hdfs) throws IOException {
        fileSystem.copyFromLocalFile(local, hdfs);
        fileSystem.close();
    }
    /**
     * 6. Upload multiple local files to HDFS, merging them into one large file
     */
    private static void mergeLoadFile(FileSystem fileSystem, String localpath, Path hdfs) throws IOException {
        FSDataOutputStream outputStream = fileSystem.create(hdfs);
        // Get the local file system
        LocalFileSystem local = FileSystem.getLocal(new Configuration());
        FileStatus[] fileStatuses = local.listStatus(new Path(localpath));
        for (FileStatus fileStatus : fileStatuses) {
            FSDataInputStream inputStream = local.open(fileStatus.getPath());
            IOUtils.copy(inputStream, outputStream);
            IOUtils.closeQuietly(inputStream);
        }
        IOUtils.closeQuietly(outputStream);
        local.close();
        fileSystem.close();
    }
}
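The class above covers listing, downloading, uploading, and merging. The mkdir, rename, and delete commands from Section I have FileSystem equivalents as well; the sketch below is an illustrative addition (the class name ManageHdfs and the paths are assumed, not taken from the original code):

package com.hiekn;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/**
 * Companion sketch to ReadHdfs: directory creation, rename/move, and delete via the FileSystem API.
 */
public class ManageHdfs {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://192.168.32.100:8020");
        FileSystem fileSystem = FileSystem.newInstance(conf);

        // Equivalent of: hdfs dfs -mkdir -p /test/input
        fileSystem.mkdirs(new Path("/test/input"));

        // Equivalent of: hdfs dfs -mv /test/input/a.log /test/a.log
        fileSystem.rename(new Path("/test/input/a.log"), new Path("/test/a.log"));

        // Equivalent of: hdfs dfs -rm /lwg/ghi.txt (second argument: recursive delete)
        fileSystem.delete(new Path("/lwg/ghi.txt"), false);

        fileSystem.close();
    }
}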