Client Operations
Client Environment Setup
- Copy the compiled Hadoop package matching your operating system to a path that contains no Chinese characters
- Configure the HADOOP_HOME environment variable
- Configure the Path environment variable
- Create a Maven project named HdfsClientDemo
- Add the dependency coordinates below, plus logging
<dependencies>
    <dependency>
        <groupId>junit</groupId>
        <artifactId>junit</artifactId>
        <version>RELEASE</version>
    </dependency>
    <dependency>
        <groupId>org.apache.logging.log4j</groupId>
        <artifactId>log4j-core</artifactId>
        <version>2.8.2</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-common</artifactId>
        <version>2.7.2</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-client</artifactId>
        <version>2.7.2</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-hdfs</artifactId>
        <version>2.7.2</version>
    </dependency>
    <dependency>
        <groupId>jdk.tools</groupId>
        <artifactId>jdk.tools</artifactId>
        <version>1.8</version>
        <scope>system</scope>
        <!-- adjust this path to your local JDK installation -->
        <systemPath>C:/Program Files/Java/jdk1.8.0_181/lib/tools.jar</systemPath>
    </dependency>
</dependencies>
- If Eclipse/IDEA prints no logs, create a file named "log4j.properties" under the project's src/main/resources directory and fill it with:
log4j.rootLogger=INFO, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d %p [%c]- %m%n
log4j.appender.logfile=org.apache.log4j.FileAppender
log4j.appender.logfile.File=target/spring.log
log4j.appender.logfile.layout=org.apache.log4j.PatternLayout
log4j.appender.logfile.layout.ConversionPattern=%d %p [%c]- %m%n
Test Code
package review.客户端操作.客户端环境准备;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

public class Test {
    public static void main(String[] args) throws URISyntaxException, IOException, InterruptedException {
        // Get a FileSystem object
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(new URI("hdfs://node1:9000"), conf, "hdfs");
        /*
         * 1. Use the FileSystem class from the org.apache.hadoop.fs package
         * 2. The URI argument can be found in etc/hadoop/core-site.xml and differs between clusters
         * 3. "hdfs" is the user name
         */
        // Create a new directory
        fs.mkdirs(new Path("/客户端环境准备/测试文件"));
        // Release resources
        fs.close();
    }
}
After running the test, the new directory should be visible on HDFS, e.g. via hadoop fs -ls / or in the NameNode web UI.
API Operations
File Upload
package review.客户端操作.API操作;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

/*
 * File upload
 */
public class FileUpload {
    public static void main(String[] args) throws URISyntaxException, IOException, InterruptedException {
        // Get a FileSystem object
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(new URI("hdfs://node1:9000"), conf, "hdfs");
        // Upload
        fs.copyFromLocalFile(new Path("F:/Hello.txt"), new Path("/Test/Hello.txt"));
        // Release resources
        fs.close();
    }
}
Note: when a method takes two paths, the first is generally the source path and the second the destination path.
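Values set on the client's Configuration object override the defaults the cluster would otherwise apply to files this client creates; a common use is picking the replication factor per upload. A minimal sketch, assuming the cluster default is 3 (same imports and setup as the example above):

Configuration conf = new Configuration();
// Client-side setting: the uploaded file gets 2 replicas instead of the cluster default
conf.set("dfs.replication", "2");
FileSystem fs = FileSystem.get(new URI("hdfs://node1:9000"), conf, "hdfs");
fs.copyFromLocalFile(new Path("F:/Hello.txt"), new Path("/Test/Hello.txt"));
fs.close();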
File Download
package review.客户端操作.API操作;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

public class FileDownload {
    public static void main(String[] args) throws URISyntaxException, IOException, InterruptedException {
        // Get a FileSystem object
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(new URI("hdfs://node1:9000"), conf, "hdfs");
        // Download
        // Option 1:
        //fs.copyToLocalFile(new Path("/Test/Hello.txt"), new Path("F:/Hello01.txt"));
        // This variant leaves an extra .crc file next to the download; the .crc file
        // holds checksums used to verify the file's integrity
        // Option 2:
        fs.copyToLocalFile(false, new Path("/Test/Hello.txt"), new Path("F:/Hello01.txt"), true);
        // This variant produces no .crc file
        // Release resources
        fs.close();
    }
}
Note the difference between the two download variants. In the four-argument overload, the first parameter (delSrc) controls whether the HDFS source is deleted after copying, and the last (useRawLocalFileSystem) writes through the raw local file system, which is why no .crc checksum file appears. The .crc mechanism is covered in the data integrity chapter.
File Deletion
package review.客户端操作.API操作;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

public class FileDelete {
    public static void main(String[] args) throws URISyntaxException, IOException, InterruptedException {
        // 1. Get a FileSystem object
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(new URI("hdfs://node1:9000"), conf, "hdfs");
        // 2. Delete the file
        fs.delete(new Path("/Test/Hello.txt"), true);
        /*
         * The first argument is the path to delete.
         * The second argument says whether to delete recursively:
         *   for a file, either true or false works;
         *   for a directory, it must be true.
         */
        // 3. Release resources
        fs.close();
    }
}
Note the difference between deleting a file and deleting a directory.
File Rename
package review.客户端操作.API操作;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

public class FileChangeName {
    public static void main(String[] args) throws URISyntaxException, IOException, InterruptedException {
        // 1. Get a FileSystem object
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(new URI("hdfs://node1:9000"), conf, "hdfs");
        // 2. Rename the directory /Test to /Try
        fs.rename(new Path("/Test"), new Path("/Try"));
        // 3. Release resources
        fs.close();
    }
}
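rename works on files as well as directories, and since both arguments are full paths, the same call also moves a file. A minimal sketch with hypothetical paths (same setup as above):

// Rename a file in place
fs.rename(new Path("/Try/Hello.txt"), new Path("/Try/Hello02.txt"));
// Moving is just a rename to a path under a different parent (assumes /Backup exists)
fs.rename(new Path("/Try/Hello02.txt"), new Path("/Backup/Hello02.txt"));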
Viewing File Details
package review.客户端操作.API操作;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

public class FileView {
    public static void main(String[] args) throws URISyntaxException, IOException, InterruptedException {
        // 1. Get a FileSystem object
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(new URI("hdfs://node1:9000"), conf, "hdfs");
        // 2. View file details
        RemoteIterator<LocatedFileStatus> listFiles = fs.listFiles(new Path("/"), true);
        // Walk the iterator and print each file's name, permissions, length, and block info
        while (listFiles.hasNext()) {
            LocatedFileStatus fileStatus = listFiles.next();
            // File name
            System.out.println("File name: " + fileStatus.getPath().getName());
            // Permissions
            System.out.println("Permissions: " + fileStatus.getPermission());
            // Length
            System.out.println("Length: " + fileStatus.getLen());
            // Block locations
            BlockLocation[] blockLocations = fileStatus.getBlockLocations();
            for (BlockLocation blockLocation : blockLocations) {
                String[] hosts = blockLocation.getHosts(); // hosts storing the replicas
                for (String host : hosts) {
                    System.out.println("Host: " + host);
                }
            }
            System.out.println("------------------ divider ------------------");
        }
        // 3. Release resources
        fs.close();
    }
}
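Note that listFiles returns files only, descending into subdirectories when the second argument is true; directories themselves never appear in the iterator, which is why the next example uses listStatus instead.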
File vs. Directory Check
package review.客户端操作.API操作;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

public class FileJudge {
    public static void main(String[] args) throws URISyntaxException, IOException, InterruptedException {
        // 1. Get a FileSystem object
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(new URI("hdfs://node1:9000"), conf, "hdfs");
        // 2. Check each entry under the root directory
        FileStatus[] statuses = fs.listStatus(new Path("/"));
        for (FileStatus status : statuses) {
            if (status.isFile()) {
                System.out.println("f:" + status.getPath().getName());
            } else if (status.isDirectory()) {
                System.out.println("d:" + status.getPath().getName());
            }
        }
        // 3. Release resources
        fs.close();
    }
}
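If you only need to check one known path rather than listing a directory, FileSystem also exposes direct checks. A minimal sketch with a hypothetical path (same setup as above):

Path p = new Path("/Try/Hello.txt");
if (fs.exists(p)) { // guard first: getFileStatus throws FileNotFoundException for missing paths
    System.out.println(fs.getFileStatus(p).isFile() ? "file" : "directory");
}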
I/O Stream Operations
File Upload
package review.客户端操作.IO流操作;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

public class FileUpload {
    public static void main(String[] args) throws URISyntaxException, IOException, InterruptedException {
        // 1. Get a FileSystem object
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(new URI("hdfs://node1:9000"), conf, "hdfs");
        // 2. Open the input stream
        FileInputStream fis = new FileInputStream(new File("F:/Test/Hello.txt"));
        // 3. Open the output stream (the destination is on HDFS, so it comes from fs)
        FSDataOutputStream fos = fs.create(new Path("/Try/Hello.txt"));
        // 4. Copy the stream (use IOUtils from the org.apache.hadoop.io package)
        IOUtils.copyBytes(fis, fos, conf);
        // The Configuration argument supplies the copy buffer size (io.file.buffer.size)
        // 5. Release resources (two extra streams to close; close the output stream first, then the input stream)
        IOUtils.closeStream(fos);
        IOUtils.closeStream(fis);
        fs.close();
    }
}
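IOUtils also offers an overload of copyBytes that closes both streams once the copy completes, which shortens step 5. A minimal sketch reusing fis and fos from the example above:

// The boolean argument tells copyBytes to close both streams when the copy finishes,
// so the explicit closeStream calls are no longer needed
IOUtils.copyBytes(fis, fos, conf, true);
fs.close();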
File Download
package review.客户端操作.IO流操作;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

public class FileDownload {
    public static void main(String[] args) throws URISyntaxException, IOException, InterruptedException {
        // 1. Get a FileSystem object
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(new URI("hdfs://node1:9000"), conf, "hdfs");
        // 2. Open the input stream (the source is on HDFS, so it comes from fs)
        FSDataInputStream fis = fs.open(new Path("/Try/Hello.txt"));
        // 3. Open the output stream
        FileOutputStream fos = new FileOutputStream(new File("F:/Test/Hello02.txt"));
        // 4. Copy the stream
        IOUtils.copyBytes(fis, fos, conf);
        // 5. Release resources
        IOUtils.closeStream(fos);
        IOUtils.closeStream(fis);
        fs.close();
    }
}
Seek-Based File Reading
- Download the first block:
package review.客户端操作.IO流操作;

/*
 * When a file spans multiple blocks, download a single block by itself
 */

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

// Download only the first block
public class LocationRead1 {
    public static void main(String[] args) throws URISyntaxException, IOException, InterruptedException {
        // 1. Get a FileSystem object
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(new URI("hdfs://node1:9000"), conf, "hdfs");
        // 2. Open the input stream
        FSDataInputStream fis = fs.open(new Path("/B5.zip"));
        // 3. Open the output stream (only 128 MB will be copied)
        FileOutputStream fos = new FileOutputStream(new File("F:/Test/B5.zip.block1"));
        // 4. Copy the stream
        //IOUtils.copyBytes(fis, fos, conf); // this would download the entire file
        // Copy Block 0 by hand: stop after 128 MB and honor the actual number of bytes each read returns
        byte[] buf = new byte[1024];
        long remaining = 1024L * 1024 * 128;
        while (remaining > 0) {
            int len = fis.read(buf, 0, (int) Math.min(buf.length, remaining));
            if (len == -1) break; // reached end of file
            fos.write(buf, 0, len);
            remaining -= len;
        }
        // 5. Release resources
        IOUtils.closeStream(fos);
        IOUtils.closeStream(fis);
        fs.close();
    }
}
- Download the second block
package review.客户端操作.IO流操作;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

public class LocationRead2 {
    public static void main(String[] args) throws URISyntaxException, IOException, InterruptedException {
        // 1. Get a FileSystem object
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(new URI("hdfs://node1:9000"), conf, "hdfs");
        // 2. Open the input stream
        FSDataInputStream fis = fs.open(new Path("/B5.zip"));
        // 3. Seek to the read start (skip the first 128 MB block)
        fis.seek(1024L * 1024 * 128);
        // 4. Open the output stream
        FileOutputStream fos = new FileOutputStream(new File("F:/Test/B5.zip.Block2"));
        // 5. Copy the second block, honoring the actual number of bytes each read returns
        byte[] buf = new byte[1024];
        long remaining = 1024L * 1024 * 128;
        while (remaining > 0) {
            int len = fis.read(buf, 0, (int) Math.min(buf.length, remaining));
            if (len == -1) break; // reached end of file
            fos.write(buf, 0, len);
            remaining -= len;
        }
        // 6. Release resources
        IOUtils.closeStream(fos);
        IOUtils.closeStream(fis);
        fs.close();
    }
}
- Seek to and download the third block
package review.客户端操作.IO流操作;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

public class LocationRead3 {
    public static void main(String[] args) throws URISyntaxException, IOException, InterruptedException {
        // 1. Get a FileSystem object
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(new URI("hdfs://node1:9000"), conf, "hdfs");
        // 2. Open the input stream
        FSDataInputStream fis = fs.open(new Path("/B5.zip"));
        // 3. Seek to the read start (skip the first two 128 MB blocks)
        fis.seek(1024L * 1024 * 128 * 2);
        // 4. Open the output stream
        FileOutputStream fos = new FileOutputStream(new File("F:/Test/B5.zip.Block3"));
        // 5. Copy everything from the seek position to the end of the file
        IOUtils.copyBytes(fis, fos, conf);
        // 6. Release resources
        IOUtils.closeStream(fos);
        IOUtils.closeStream(fis);
        fs.close();
    }
}
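The 128 MB offsets above assume the Hadoop 2.x default block size (dfs.blocksize = 128 MB). If the cluster uses a different block size, adjust the seek offsets accordingly; hdfs fsck /B5.zip -files -blocks shows how the file is actually split into blocks.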
File Concatenation
- In the directory containing the downloaded block files, open cmd
- Enter
type <later block> >> <earlier block>
once for each later block, appending them onto the first block in order (see the concrete example below)
- Rename the concatenated file back to the original file's name and extension
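For the B5.zip example above, the commands would look like this (a sketch using the output file names from the code; Windows file names are case-insensitive):

type B5.zip.Block2 >> B5.zip.block1
type B5.zip.Block3 >> B5.zip.block1
ren B5.zip.block1 B5.zip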