Big Data from Beginner to Practitioner - Big Data Systems and Applications: HDFS Hands-On Lab
Ding-dong! This is 小啊呜's collection of course study notes. The palest ink beats the best memory, and today is another day of steady progress. Let's keep leveling up together!
I. About This Exercise
1. Overview
Hadoop is a distributed system infrastructure developed under the Apache Foundation. It lets you write distributed programs without understanding the low-level details of distribution, meeting the need for fault-tolerant, highly concurrent, high-speed computation and storage of very large data sets on clusters of inexpensive commodity machines. Hadoop handles very large files (up to the petabyte scale), detects and reacts quickly to hardware failures, supports streaming data access, and guarantees high fault tolerance on top of a simplified consistency model. For these reasons it is deployed at scale and used very widely in distributed systems.
Objectives
Understand the role HDFS plays in the Hadoop architecture;
Become familiar with the Java APIs commonly used to operate on HDFS (a minimal warm-up sketch follows below).
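All seven stages below follow the same basic pattern: build a Configuration, obtain a FileSystem handle, operate on Path objects, and close the handle when done. Here is a minimal sketch of that pattern; the class name HdfsQuickStart and the hdfs://localhost:9000 address are illustrative assumptions only - in the EduCoder environment the default configuration already points at the right NameNode, so the URI argument can simply be dropped.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HdfsQuickStart {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical NameNode address; omit it if core-site.xml already sets fs.defaultFS
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:9000"), conf);
        System.out.println("Home directory: " + fs.getHomeDirectory());
        System.out.println("Exists /user/hadoop? " + fs.exists(new Path("/user/hadoop")));
        fs.close();
    }
}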
2. All Tasks
II. Walkthrough of the Stages
1. Stage 1: HDFS Java API Programming — Reading and Writing Files
import java.io.*;
import java.sql.Date;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class hdfs {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();          // Configuration passes settings between Hadoop's modules
        FileSystem fs = FileSystem.get(conf);              // obtain the file system handle
        Path file = new Path("/user/hadoop/myfile");       // the file to create
        FSDataOutputStream outStream = fs.create(file);    // open an output stream
        outStream.writeUTF("https://www.educoder.net");    // any string can be written here
        outStream.close();                                 // remember to close the output stream
        FSDataInputStream inStream = fs.open(file);        // open an input stream
        String data = inStream.readUTF();                  // read the file content back
        System.out.println(data);                          // print what was written
        inStream.close();
        fs.close();
    }
}
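A note on the write: writeUTF()/readUTF() use Java's length-prefixed modified UTF-8 format, so the pair round-trips cleanly, but the stored file carries two extra length bytes at the front and looks slightly odd when viewed as plain text (for example with hdfs dfs -cat). If a plain-text file is wanted instead, a small sketch of the alternative, reusing the outStream, fs, and file variables from the example above:

// Write plain UTF-8 bytes instead of the length-prefixed writeUTF format
outStream.write("https://www.educoder.net".getBytes("UTF-8"));
outStream.close();
// Read it back as text with a character stream
BufferedReader reader = new BufferedReader(new InputStreamReader(fs.open(file)));
System.out.println(reader.readLine());
reader.close();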
2. Stage 2: HDFS Java API Programming — File Upload
import java.io.*;
import java.sql.Date;
import java.util.Scanner;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class hdfs {
    /**
     * Check whether a path exists on HDFS
     */
    public static boolean test(Configuration conf, String path) throws IOException {
        /*****start*****/
        // Write the existence check here
        try (FileSystem fs = FileSystem.get(conf)) {
            return fs.exists(new Path(path));
        } catch (IOException e) {
            e.printStackTrace();
            return false;
        }
        /*****end*****/
    }

    /**
     * Copy a local file to the given HDFS path,
     * overwriting the destination if it already exists
     */
    public static void copyFromLocalFile(Configuration conf, String localFilePath, String remoteFilePath) throws IOException {
        /*****start*****/
        // Write the upload (copy to HDFS) code here
        Path localPath = new Path(localFilePath);
        Path remotePath = new Path(remoteFilePath);
        try (FileSystem fs = FileSystem.get(conf)) {
            // second argument (overwrite = true) replaces the destination if it exists
            fs.copyFromLocalFile(false, true, localPath, remotePath);
        } catch (IOException e) {
            e.printStackTrace();
        }
        /*****end*****/
    }

    /**
     * Append the content of a local file to an HDFS file
     */
    public static void appendToFile(Configuration conf, String localFilePath, String remoteFilePath) throws IOException {
        /*****start*****/
        // Write the append code here
        Path remotePath = new Path(remoteFilePath);
        try (FileSystem fs = FileSystem.get(conf);
             FileInputStream in = new FileInputStream(localFilePath)) {
            FSDataOutputStream out = fs.append(remotePath);
            byte[] data = new byte[1024];
            int read = -1;
            while ((read = in.read(data)) > 0) {
                out.write(data, 0, read);
            }
            out.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
        /*****end*****/
    }

    /**
     * Main entry point
     */
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        createHDFSFile(conf);
        String localFilePath = "./file/text.txt";          // local path
        String remoteFilePath = "/user/hadoop/text.txt";   // HDFS path
        String choice = "";
        try {
            /* Check whether the remote file already exists */
            boolean fileExists = false;
            if (hdfs.test(conf, remoteFilePath)) {
                fileExists = true;
                System.out.println(remoteFilePath + " already exists.");
                choice = "append";      // the file exists, so append to its end
            } else {
                System.out.println(remoteFilePath + " does not exist.");
                choice = "overwrite";   // otherwise overwrite
            }
            /*****start*****/
            // If the file does not exist, upload it; if choice is "overwrite", overwrite; if "append", append
            if (!fileExists) {          // file does not exist: upload
                hdfs.copyFromLocalFile(conf, localFilePath, remoteFilePath);
                System.out.println(localFilePath + " uploaded to " + remoteFilePath);
            } else if (choice.equals("overwrite")) {        // overwrite
                hdfs.copyFromLocalFile(conf, localFilePath, remoteFilePath);
                System.out.println(localFilePath + " overwrote " + remoteFilePath);
            } else if (choice.equals("append")) {           // append
                hdfs.appendToFile(conf, localFilePath, remoteFilePath);
                System.out.println(localFilePath + " appended to " + remoteFilePath);
            }
            /*****end*****/
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    // Create the HDFS test file used by this stage
    public static void createHDFSFile(Configuration conf) throws IOException {
        FileSystem fs = FileSystem.get(conf);               // obtain the file system handle
        Path file = new Path("/user/hadoop/text.txt");      // the file to create
        FSDataOutputStream outStream = fs.create(file);     // open an output stream
        outStream.writeUTF("hello");
        outStream.close();
        fs.close();
    }
}
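A practical caveat about appendToFile(): on a pseudo-distributed cluster with a single DataNode, fs.append() can fail with a "Failed to replace a bad datanode on the existing pipeline" error, because the client tries to recruit a replacement DataNode it can never find. The EduCoder environment is already set up for this exercise, but if you reproduce it elsewhere, a commonly used client-side workaround is sketched below; the property names are standard HDFS client settings, applied here on the assumption of a single-node cluster.

Configuration conf = new Configuration();
conf.setBoolean("dfs.support.append", true);    // older releases need this to allow append at all
// Do not insist on replacing a "bad" DataNode that a single-node cluster can never provide
conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");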
3. Stage 3: HDFS Java API Programming — File Download
import java.io.*;
import java.sql.Date;
import java.util.Scanner;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class hdfs {
    /**
     * Download a file from HDFS to the local file system.
     * If the local path already exists, rename the download automatically.
     */
    public static void copyToLocal(Configuration conf, String remoteFilePath, String localFilePath) throws IOException {
        FileSystem fs = FileSystem.get(conf);
        Path remotePath = new Path(remoteFilePath);
        File f = new File(localFilePath);
        /*****start*****/
        /* Check whether the local file exists; if it does, rename the download by appending _0, _1, ... */
        if (f.exists()) {
            System.out.println(localFilePath + " already exists.");
            Integer i = 0;
            while (true) {
                f = new File(localFilePath + "_" + i.toString());
                if (!f.exists()) {
                    localFilePath = localFilePath + "_" + i.toString();
                    break;
                }
                i++;    // try the next suffix; without this the loop never terminates
            }
            System.out.println("Renaming the download to: " + localFilePath);
        }
        /*****end*****/
        /*****start*****/
        // Download the file to the local path
        Path localPath = new Path(localFilePath);
        fs.copyToLocalFile(remotePath, localPath);
        /*****end*****/
        fs.close();
    }

    /**
     * Main entry point
     */
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        createHDFSFile(conf);
        String localFilePath = "/tmp/output/text.txt";      // local path
        String remoteFilePath = "/user/hadoop/text.txt";    // HDFS path
        try {
            // download the file to the local path
            hdfs.copyToLocal(conf, remoteFilePath, localFilePath);
            System.out.println("Download finished");
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    // Create the HDFS test file used by this stage
    public static void createHDFSFile(Configuration conf) throws IOException {
        FileSystem fs = FileSystem.get(conf);               // obtain the file system handle
        Path file = new Path("/user/hadoop/text.txt");      // the file to create
        FSDataOutputStream outStream = fs.create(file);     // open an output stream
        outStream.writeUTF("hello hadoop HDFS www.educoder.net");
        outStream.close();
        fs.close();
    }
}
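When the destination is on the local disk, copyToLocalFile() goes through Hadoop's checksumming local file system and leaves a hidden .text.txt.crc checksum file next to the download. If that side file is unwanted, FileSystem also offers an overload that writes through the raw local file system; a sketch using the same variables as copyToLocal() above:

// delSrc = false: keep the HDFS copy; useRawLocalFileSystem = true: skip the .crc sidecar file
fs.copyToLocalFile(false, remotePath, localPath, true);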
4. Stage 4: HDFS Java API Programming — Reading Data with a Character Stream
import java.io.*;
import java.sql.Date;
import java.util.Scanner;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class hdfs {
    /**
     * Read the content of an HDFS file with a character stream
     */
    public static void cat(Configuration conf, String remoteFilePath) throws IOException {
        /*****start*****/
        // 1. Read the data from the HDFS file
        Path remotePath = new Path(remoteFilePath);
        FileSystem fs = FileSystem.get(conf);
        FSDataInputStream in = fs.open(remotePath);
        BufferedReader d = new BufferedReader(new InputStreamReader(in));
        String line = null;
        StringBuffer buffer = new StringBuffer();
        while ((line = d.readLine()) != null) {
            buffer.append(line);
        }
        String res = buffer.toString();
        d.close();
        in.close();
        // 2. Write the data that was read to /tmp/output/text.txt (hint: FileWriter)
        FileWriter f1 = new FileWriter("/tmp/output/text.txt");
        f1.write(res);
        f1.close();
        /*****end*****/
    }

    /**
     * Main entry point
     */
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        createHDFSFile(conf);
        String remoteFilePath = "/user/hadoop/text.txt";    // HDFS path
        try {
            System.out.println("Reading file: " + remoteFilePath);
            hdfs.cat(conf, remoteFilePath);
            System.out.println("\nDone reading");
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    // Create the HDFS test file used by this stage
    public static void createHDFSFile(Configuration conf) throws IOException {
        FileSystem fs = FileSystem.get(conf);               // obtain the file system handle
        Path file = new Path("/user/hadoop/text.txt");      // the file to create
        FSDataOutputStream outStream = fs.create(file);     // open an output stream
        outStream.writeUTF("hello hadoop HDFS step4 www.educoder.net");
        outStream.close();
        fs.close();
    }
}
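Two practical notes on cat(): FileWriter fails with an IOException if /tmp/output does not exist yet, and because readLine() strips line terminators, multiple lines would be concatenated without separators (harmless here, since the test file is a single line). For a straight byte-for-byte copy, Hadoop's IOUtils removes the manual loop entirely; a sketch of such a variant, where catBytes is a hypothetical helper name and the copied bytes include the two length bytes that writeUTF() prepends:

// A byte-for-byte variant of cat() using Hadoop's IOUtils
public static void catBytes(Configuration conf, String remoteFilePath) throws IOException {
    new File("/tmp/output").mkdirs();    // make sure the local output directory exists first
    FileSystem fs = FileSystem.get(conf);
    FSDataInputStream in = fs.open(new Path(remoteFilePath));
    FileOutputStream out = new FileOutputStream("/tmp/output/text.txt");
    org.apache.hadoop.io.IOUtils.copyBytes(in, out, 4096, true);    // true: close both streams when done
    fs.close();
}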
5. Stage 5: HDFS Java API Programming — Deleting a File
import java.io.*;
import java.sql.Date;
import java.util.Scanner;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class hdfs {
    /**
     * Delete a file on HDFS
     */
    public static boolean rm(Configuration conf, String remoteFilePath) throws IOException {
        /*****start*****/
        // Write the file-deletion code here
        FileSystem fs = FileSystem.get(conf);
        Path remotePath = new Path(remoteFilePath);
        // recursive = false, since a single file is being deleted
        boolean result = fs.delete(remotePath, false);
        fs.close();
        return result;    // return the actual outcome so the caller can report failures
        /*****end*****/
    }

    /**
     * Main entry point
     */
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        String remoteFilePath = "/user/hadoop/text.txt";    // HDFS file
        try {
            if (rm(conf, remoteFilePath)) {
                System.out.println("File deleted: " + remoteFilePath);
            } else {
                System.out.println("Operation failed (file missing or delete failed)");
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
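fs.delete(path, false) simply returns false when the path does not exist, so forwarding that return value (as rm() does above) is what makes the "file missing or delete failed" branch reachable. If the two cases should be reported separately, an existence check can be added first; a minimal sketch, where rmChecked is a hypothetical variant and not part of the exercise:

public static boolean rmChecked(Configuration conf, String remoteFilePath) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    Path p = new Path(remoteFilePath);
    if (!fs.exists(p)) {                 // distinguish "never existed" from "delete failed"
        System.out.println(remoteFilePath + " does not exist");
        fs.close();
        return false;
    }
    boolean ok = fs.delete(p, false);
    fs.close();
    return ok;
}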
6. Stage 6: HDFS Java API Programming — Deleting a Directory
import java.io.*;
import java.sql.Date;
import java.util.Scanner;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;

public class hdfs {
    /**
     * Check whether a directory is empty
     * true: empty, false: not empty
     */
    public static boolean isDirEmpty(Configuration conf, String remoteDir) throws IOException {
        FileSystem fs = FileSystem.get(conf);
        Path dirPath = new Path(remoteDir);
        // recursively iterate over the files under the directory
        RemoteIterator<LocatedFileStatus> remoteIterator = fs.listFiles(dirPath, true);
        return !remoteIterator.hasNext();
    }

    /**
     * Delete a directory
     */
    public static boolean rmDir(Configuration conf, String remoteDir, boolean recursive) throws IOException {
        FileSystem fs = FileSystem.get(conf);
        Path dirPath = new Path(remoteDir);
        /* the second argument controls whether everything inside is deleted recursively */
        boolean result = fs.delete(dirPath, recursive);
        fs.close();
        return result;
    }

    /**
     * Main entry point
     */
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        String remoteDir = "/user/hadoop/dir/";     // HDFS directory
        String remoteDir1 = "/user/hadoop/tmp/";    // HDFS directory
        Boolean forceDelete = false;                // force deletion even if not empty?
        try {
            if (!isDirEmpty(conf, remoteDir) && !forceDelete) {
                System.out.println("Directory is not empty, not deleting");
            } else {
                if (rmDir(conf, remoteDir, forceDelete)) {
                    System.out.println("Directory deleted: " + remoteDir);
                } else {
                    System.out.println("Operation failed");
                }
            }
            if (!isDirEmpty(conf, remoteDir1) && !forceDelete) {
                System.out.println("Directory is not empty, not deleting");
            } else {
                if (rmDir(conf, remoteDir1, forceDelete)) {
                    System.out.println("Directory deleted: " + remoteDir1);
                } else {
                    System.out.println("Operation failed");
                }
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
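A caveat about isDirEmpty(): listFiles(dirPath, true) enumerates only files, so a directory that contains nothing but empty subdirectories is still reported as "empty", and the subsequent non-recursive delete will then typically fail with an IOException instead of being skipped with the friendly message. If direct children of any kind should count as content, listStatus() is the stricter check; a sketch, where isDirEmptyStrict is a hypothetical variant:

public static boolean isDirEmptyStrict(Configuration conf, String remoteDir) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    // listStatus returns both files and subdirectories that are direct children of the path
    return fs.listStatus(new Path(remoteDir)).length == 0;
}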
7. Stage 7: HDFS Java API Programming — A Custom Data Input Stream
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import java.io.*;

public class MyFSDataInputStream extends FSDataInputStream {
    public MyFSDataInputStream(InputStream in) {
        super(in);
    }

    /**
     * Read one line: read a single character at a time, stop at "\n",
     * and return the accumulated line (assumes lines shorter than 1024 characters).
     */
    public static String readline(BufferedReader br) throws IOException {
        char[] data = new char[1024];
        int read = -1;
        int off = 0;    // br resumes where the previous call stopped, so off restarts at 0 on every call
        while ((read = br.read(data, off, 1)) != -1) {
            if (data[off] == '\n') {
                off += 1;
                return String.valueOf(data, 0, off);    // return the line including the newline
            }
            off += 1;
        }
        // end of stream: return whatever is left, or null if nothing was read
        return off > 0 ? String.valueOf(data, 0, off) : null;
    }

    /**
     * Read the content of an HDFS file line by line and write it to a local file
     */
    public static void cat(Configuration conf, String remoteFilePath) throws IOException {
        FileSystem fs = FileSystem.get(conf);
        Path remotePath = new Path(remoteFilePath);
        FSDataInputStream in = fs.open(remotePath);
        BufferedReader br = new BufferedReader(new InputStreamReader(in));
        FileWriter f = new FileWriter("/tmp/output/text.txt");
        String line = null;
        while ((line = MyFSDataInputStream.readline(br)) != null) {
            f.write(line);
        }
        f.close();
        br.close();
        in.close();
        fs.close();
    }

    /**
     * Main entry point
     */
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        String remoteFilePath = "/user/hadoop/text.txt";    // HDFS path
        try {
            MyFSDataInputStream.cat(conf, remoteFilePath);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
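The hand-rolled readline() above exists to practice character-by-character reading; in everyday code BufferedReader.readLine() does the same job, with the difference that it strips the line terminator. A sketch of the copy loop in cat() rewritten on top of it (re-adding the newline when writing is an assumption about the desired output format):

String l;
while ((l = br.readLine()) != null) {
    f.write(l);
    f.write("\n");    // readLine() strips the terminator, so restore it when writing
}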
Ending!
More study notes from other courses will follow soon!
That's it for now, see you!
Note:
Life lies in diligence; nothing sought, nothing gained.