Contents
I. HDFS Client Environment Setup
II. HDFS API Operations
III. HDFS I/O Stream Operations
I. HDFS Client Environment Setup
1. Configure the Hadoop environment variables
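The original does not spell this step out, so the following is only a sketch of a typical Windows setup (the install path is an assumption): point HADOOP_HOME at a local Hadoop 3.1.3 directory whose bin folder contains the Windows winutils binaries, and add that bin folder to Path, for example:
HADOOP_HOME=D:\hadoop-3.1.3
Path=%Path%;%HADOOP_HOME%\bin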
2. Create a Maven project
3. Add the dependency coordinates and the logging dependency
<dependencies>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-client</artifactId>
        <version>3.1.3</version>
    </dependency>
    <dependency>
        <groupId>junit</groupId>
        <artifactId>junit</artifactId>
        <version>4.12</version>
    </dependency>
    <dependency>
        <groupId>org.slf4j</groupId>
        <artifactId>slf4j-log4j12</artifactId>
        <version>1.7.30</version>
    </dependency>
</dependencies>
4. Create a log4j.properties file under src/main/resources
log4j.rootLogger=INFO, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d %p [%c] - %m%n
log4j.appender.logfile=org.apache.log4j.FileAppender
log4j.appender.logfile.File=target/spring.log
log4j.appender.logfile.layout=org.apache.log4j.PatternLayout
log4j.appender.logfile.layout.ConversionPattern=%d %p [%c] - %m%n
5. Create the HdfsClient class
package com.kgf.hdfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.Test;

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

/**
 * @author kgf
 * @date 2024-04-09 21:47
 */
public class HdfsClient {

    @Test
    public void testMkdirs() throws IOException, URISyntaxException, InterruptedException {
        // 1 Get the file system
        Configuration configuration = new Configuration();
        // FileSystem fs = FileSystem.get(new URI("hdfs://hadoop102:8020"), configuration);
        FileSystem fs = FileSystem.get(new URI("hdfs://hadoop102:8020"), configuration, "kgf");
        // 2 Create the directory
        fs.mkdirs(new Path("/xiyou/huaguoshan/"));
        // 3 Close the resource
        fs.close();
    }
}
Result after running:
6. We can also specify the runtime user name in another way
package com.kgf.hdfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.Test;

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

/**
 * @author kgf
 * @date 2024-04-09 21:47
 */
public class HdfsClient {

    @Test
    public void testMkdirs() throws IOException, URISyntaxException, InterruptedException {
        // 1 Get the file system
        Configuration configuration = new Configuration();
        // FileSystem fs = FileSystem.get(new URI("hdfs://hadoop102:8020"), configuration);
        // FileSystem fs = FileSystem.get(new URI("hdfs://hadoop102:8020"), configuration, "kgf");
        // Configure the client to talk to the cluster
        configuration.set("fs.defaultFS", "hdfs://hadoop102:8020");
        FileSystem fs = FileSystem.get(configuration);
        // 2 Create the directory
        fs.mkdirs(new Path("/kgf/huaguoshan2/"));
        // 3 Close the resource
        fs.close();
    }
}
When a client operates on HDFS, it does so with a user identity. If no user is passed in explicitly, the HDFS client API reads a JVM parameter to determine that identity: -DHADOOP_USER_NAME=kgf, where kgf is the user name (set it as a VM option in the run configuration).
Then the run result:
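For reference, here is a minimal sketch (my addition, not from the original post) of the two equivalent ways to supply the user name without passing it to FileSystem.get(); the code form assumes the property is set before the FileSystem is created:
// (a) As a VM option in the IDE run configuration:
//         -DHADOOP_USER_NAME=kgf
// (b) As a system property set in code before the FileSystem is obtained
//     (assumption: nothing has initialized Hadoop's user/group information yet):
System.setProperty("HADOOP_USER_NAME", "kgf");
Configuration configuration = new Configuration();
configuration.set("fs.defaultFS", "hdfs://hadoop102:8020");
FileSystem fs = FileSystem.get(configuration);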
II. HDFS API Operations
1. HDFS file upload (testing configuration parameter precedence)
@Test
public void testCopyFromLocalFile() throws IOException, InterruptedException, URISyntaxException {
    // 1 Get the file system
    Configuration configuration = new Configuration();
    configuration.set("dfs.replication", "2");
    FileSystem fs = FileSystem.get(new URI("hdfs://hadoop102:8020"), configuration, "kgf");
    // 2 Upload the file
    fs.copyFromLocalFile(new Path("D:\\test\\sunwukong.txt"), new Path("/xiyou/huaguoshan"));
    // 3 Close the resource
    fs.close();
}
Copy hdfs-site.xml into the project's src/main/resources directory (so it is on the classpath):
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <property>
        <name>dfs.replication</name>
        <value>1</value>
    </property>
</configuration>
Running the test shows that the uploaded file has a replication factor of 2: the value set in code overrides the value 1 from hdfs-site.xml on the classpath.
Parameter precedence, from highest to lowest:
(1) values set in the client code >
(2) user-defined configuration files on the classpath >
(3) the server's default configuration
A quick way to verify the effective replication factor in code is sketched below.
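This check is my addition (the original post verifies the result in the NameNode web UI); it assumes it is placed before fs.close() in testCopyFromLocalFile(), right after the upload:
// Read back the uploaded file's status and print its replication factor
FileStatus status = fs.getFileStatus(new Path("/xiyou/huaguoshan/sunwukong.txt"));
System.out.println("replication = " + status.getReplication()); // expected: 2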
2. HDFS file download
@Test
public void testCopyToLocalFile() throws IOException, InterruptedException, URISyntaxException {
    // 1 Get the file system
    Configuration configuration = new Configuration();
    FileSystem fs = FileSystem.get(new URI("hdfs://hadoop102:8020"), configuration, "kgf");
    // 2 Download the file
    // boolean delSrc                 whether to delete the source file
    // Path src                       the HDFS path of the file to download
    // Path dst                       the local path to download the file to
    // boolean useRawLocalFileSystem  whether to use the raw local file system;
    //                                true skips checksum verification (no .crc file is written)
    fs.copyToLocalFile(false, new Path("/xiyou/huaguoshan/sunwukong.txt"), new Path("d:/test/sunwukong2.txt"), true);
    // 3 Close the resource
    fs.close();
}
3. HDFS directory deletion
@Test
public void testDelete() throws IOException, InterruptedException, URISyntaxException {
    // 1 Get the file system
    Configuration configuration = new Configuration();
    FileSystem fs = FileSystem.get(new URI("hdfs://hadoop102:8020"), configuration, "kgf");
    // 2 Delete the directory (the second argument true enables recursive deletion)
    fs.delete(new Path("/xiyou"), true);
    // 3 Close the resource
    fs.close();
}
4. HDFS file rename
@Test
public void testRename() throws IOException, InterruptedException, URISyntaxException {
    // 1 Get the file system
    Configuration configuration = new Configuration();
    FileSystem fs = FileSystem.get(new URI("hdfs://hadoop102:8020"), configuration, "kgf");
    // 2 Rename the file
    fs.rename(new Path("/xiyou/huaguoshan/sunwukong.txt"), new Path("/xiyou/huaguoshan/meihouwang.txt"));
    // 3 Close the resource
    fs.close();
}
5. Viewing HDFS file details
@Test
public void testListFiles() throws IOException, InterruptedException, URISyntaxException {
    // 1 Get the file system
    Configuration configuration = new Configuration();
    FileSystem fs = FileSystem.get(new URI("hdfs://hadoop102:8020"), configuration, "kgf");
    // 2 Get the file details (true = recurse into subdirectories)
    RemoteIterator<LocatedFileStatus> listFiles = fs.listFiles(new Path("/"), true);
    while (listFiles.hasNext()) {
        LocatedFileStatus fileStatus = listFiles.next();
        System.out.println("========" + fileStatus.getPath() + "=========");
        System.out.println(fileStatus.getPermission());
        System.out.println(fileStatus.getOwner());
        System.out.println(fileStatus.getGroup());
        System.out.println(fileStatus.getLen());
        System.out.println(fileStatus.getModificationTime());
        System.out.println(fileStatus.getReplication());
        System.out.println(fileStatus.getBlockSize());
        System.out.println(fileStatus.getPath().getName());
        // Get the block location information
        BlockLocation[] blockLocations = fileStatus.getBlockLocations();
        System.out.println(Arrays.toString(blockLocations));
    }
    // 3 Close the resource
    fs.close();
}
Output (all files under the directory are listed recursively):
========hdfs://hadoop102:8020/sanguo/shuguo.txt=========
rw-r--r--
kgf
supergroup
14
1712584428337
3
134217728
shuguo.txt
[0,14,hadoop103,hadoop104,hadoop102]
========hdfs://hadoop102:8020/sanguo/weiguo.txt=========
rw-r--r--
kgf
supergroup
7
1712584320223
3
134217728
weiguo.txt
[0,7,hadoop103,hadoop104,hadoop102]
========hdfs://hadoop102:8020/sanguo/wuguo.txt=========
rw-r--r--
kgf
supergroup
6
1712584386463
3
134217728
wuguo.txt
[0,6,hadoop103,hadoop104,hadoop102]
========hdfs://hadoop102:8020/tmp/hadoop-yarn/staging/history/done/2024/04/07/000000/job_1712497116860_0001-1712497301940-kgf-word+count-1712497334178-1-1-SUCCEEDED-default-1712497309937.jhist=========
rwxrwx---
kgf
supergroup
29530
1712497334353
3
134217728
job_1712497116860_0001-1712497301940-kgf-word+count-1712497334178-1-1-SUCCEEDED-default-1712497309937.jhist
[0,29530,hadoop103,hadoop104,hadoop102]
========hdfs://hadoop102:8020/tmp/hadoop-yarn/staging/history/done/2024/04/07/000000/job_1712497116860_0001_conf.xml=========
rwxrwx---
kgf
supergroup
214632
1712497334420
3
134217728
job_1712497116860_0001_conf.xml
[0,214632,hadoop103,hadoop104,hadoop102]
========hdfs://hadoop102:8020/tmp/hadoop-yarn/staging/history/done/2024/04/07/000000/job_1712497461227_0001-1712497506972-kgf-word+count-1712497521263-1-1-SUCCEEDED-default-1712497511299.jhist=========
rwxrwx---
kgf
supergroup
22325
1712497521442
3
134217728
job_1712497461227_0001-1712497506972-kgf-word+count-1712497521263-1-1-SUCCEEDED-default-1712497511299.jhist
[0,22325,hadoop103,hadoop104,hadoop102]
========hdfs://hadoop102:8020/tmp/hadoop-yarn/staging/history/done/2024/04/07/000000/job_1712497461227_0001_conf.xml=========
rwxrwx---
kgf
supergroup
214633
1712497521512
3
134217728
job_1712497461227_0001_conf.xml
[0,214633,hadoop103,hadoop104,hadoop102]
========hdfs://hadoop102:8020/tmp/logs/kgf/logs-tfile/application_1712497116860_0001/hadoop102_33725=========
rw-r-----
kgf
kgf
34828
1712497341672
3
134217728
hadoop102_33725
[0,34828,hadoop103,hadoop104,hadoop102]
========hdfs://hadoop102:8020/tmp/logs/kgf/logs-tfile/application_1712497116860_0001/hadoop103_46062=========
rw-r-----
kgf
kgf
177956
1712497342092
3
134217728
hadoop103_46062
[0,177956,hadoop103,hadoop104,hadoop102]
========hdfs://hadoop102:8020/tmp/logs/kgf/logs-tfile/application_1712497461227_0001/hadoop103_41470=========
rw-r-----
kgf
kgf
98909
1712497529060
3
134217728
hadoop103_41470
[0,98909,hadoop103,hadoop104,hadoop102]
========hdfs://hadoop102:8020/tmp/logs/kgf/logs-tfile/application_1712497461227_0001/hadoop104_44943=========
rw-r-----
kgf
kgf
34364
1712497528837
3
134217728
hadoop104_44943
[0,34364,hadoop103,hadoop104,hadoop102]
========hdfs://hadoop102:8020/xiyou/huaguoshan/meihouwang.txt=========
rw-r--r--
kgf
supergroup
9
1712672340005
2
134217728
meihouwang.txt
[0,9,hadoop102,hadoop104]
Process finished with exit code 0
6. Distinguishing HDFS files from directories
@Test
public void testListStatus() throws IOException, InterruptedException, URISyntaxException {
    // 1 Get the configuration and the file system
    Configuration configuration = new Configuration();
    FileSystem fs = FileSystem.get(new URI("hdfs://hadoop102:8020"), configuration, "kgf");
    // 2 Determine whether each entry is a file or a directory
    FileStatus[] listStatus = fs.listStatus(new Path("/"));
    for (FileStatus fileStatus : listStatus) {
        // If it is a file
        if (fileStatus.isFile()) {
            System.out.println("f:" + fileStatus.getPath().getName());
        } else {
            System.out.println("d:" + fileStatus.getPath().getName());
        }
    }
    // 3 Close the resource
    fs.close();
}
Output:
d:sanguo
d:tmp
d:xiyou
Process finished with exit code 0
III. HDFS I/O Stream Operations
1. Upload the local banhua.txt file on the D drive to the HDFS root directory
/***
 * Upload the local banhua.txt file on the D drive to the HDFS root directory
 * @throws IOException
 * @throws InterruptedException
 * @throws URISyntaxException
 */
@Test
public void putFileToHDFS() throws IOException, InterruptedException, URISyntaxException {
    // 1 Get the file system
    Configuration configuration = new Configuration();
    FileSystem fs = FileSystem.get(new URI("hdfs://hadoop102:8020"), configuration, "kgf");
    // 2 Create the input stream from the local file
    FileInputStream fis = new FileInputStream(new File("D:\\test\\banhua.txt"));
    // 3 Get the output stream to HDFS
    FSDataOutputStream fos = fs.create(new Path("/banhua2.txt"));
    // 4 Copy the stream
    IOUtils.copyBytes(fis, fos, configuration);
    // 5 Close the resources
    IOUtils.closeStream(fos);
    IOUtils.closeStream(fis);
    fs.close();
    System.out.println("over!");
}
2. Download banhua.txt from HDFS to the local D drive
/***
 * Download banhua.txt from HDFS to the local D drive
 * @throws IOException
 * @throws InterruptedException
 * @throws URISyntaxException
 */
@Test
public void getFileFromHDFS() throws IOException, InterruptedException, URISyntaxException {
    // 1 Get the file system
    Configuration configuration = new Configuration();
    FileSystem fs = FileSystem.get(new URI("hdfs://hadoop102:8020"), configuration, "kgf");
    // 2 Get the input stream from HDFS
    FSDataInputStream fis = fs.open(new Path("/banhua2.txt"));
    // 3 Create the output stream to the local file
    FileOutputStream fos = new FileOutputStream(new File("D:\\test\\banhua3.txt"));
    // 4 Copy the stream
    IOUtils.copyBytes(fis, fos, configuration);
    // 5 Close the resources
    IOUtils.closeStream(fos);
    IOUtils.closeStream(fis);
    fs.close();
    System.out.println("over");
}
3. Read a large file on HDFS block by block, e.g. /hadoop-3.1.3.tar.gz in the root directory
Download the first block, block0 (1024 * 1024 * 128 = 134217728 bytes = 128 MB):
/**
 * Download the first block
 * @throws IOException
 * @throws InterruptedException
 * @throws URISyntaxException
 */
@Test
public void readFileSeek1() throws IOException, InterruptedException, URISyntaxException {
    // 1 Get the file system
    Configuration configuration = new Configuration();
    FileSystem fs = FileSystem.get(new URI("hdfs://hadoop102:8020"), configuration, "kgf");
    // 2 Get the input stream from HDFS
    FSDataInputStream fis = fs.open(new Path("/hadoop-3.1.3.tar.gz"));
    // 3 Create the output stream to the local file
    FileOutputStream fos = new FileOutputStream(new File("D:\\test\\hadoop-3.1.3.tar.gz.part1"));
    // 4 Copy exactly the first block: 1024 * 128 iterations * 1024 bytes = 134217728 bytes = 128 MB
    byte[] buf = new byte[1024];
    for (int i = 0; i < 1024 * 128; i++) {
        // readFully fills the whole buffer; a plain read() may return fewer bytes
        fis.readFully(buf);
        fos.write(buf);
    }
    // 5 Close the resources
    IOUtils.closeStream(fis);
    IOUtils.closeStream(fos);
    fs.close();
}
The remaining two blocks can be downloaded the same way after seeking past the data already copied; a sketch for the second block follows.
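The second-block method below is my sketch rather than code from the original post; it assumes the same cluster, user, and local paths as above. For the last (partial) block one would seek to 2 * 128 MB and then use IOUtils.copyBytes to copy until end of file.
/**
 * Sketch: download the second 128 MB block of /hadoop-3.1.3.tar.gz
 */
@Test
public void readFileSeek2() throws IOException, InterruptedException, URISyntaxException {
    // 1 Get the file system
    Configuration configuration = new Configuration();
    FileSystem fs = FileSystem.get(new URI("hdfs://hadoop102:8020"), configuration, "kgf");
    // 2 Open the file and seek past the first block (134217728 bytes)
    FSDataInputStream fis = fs.open(new Path("/hadoop-3.1.3.tar.gz"));
    fis.seek(1024 * 1024 * 128);
    // 3 Create the output stream for the second part
    FileOutputStream fos = new FileOutputStream(new File("D:\\test\\hadoop-3.1.3.tar.gz.part2"));
    // 4 Copy exactly 128 MB, 1 KB at a time
    byte[] buf = new byte[1024];
    for (int i = 0; i < 1024 * 128; i++) {
        fis.readFully(buf);
        fos.write(buf);
    }
    // 5 Close the resources
    IOUtils.closeStream(fis);
    IOUtils.closeStream(fos);
    fs.close();
}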
Merging the files:
In a Windows command window, change into the D:\test directory and run the following commands to concatenate the parts:
type hadoop-3.1.3.tar.gz.part2 >> hadoop-3.1.3.tar.gz.part1
type hadoop-3.1.3.tar.gz.part3 >> hadoop-3.1.3.tar.gz.part1
After merging, rename hadoop-3.1.3.tar.gz.part1 to hadoop-3.1.3.tar.gz. Extracting it shows that the tar archive is complete.