(I) Implement the following operations in a program, and accomplish the same tasks with the Shell commands provided by Hadoop:
(1) Upload an arbitrary text file to HDFS. If the specified file already exists in HDFS, let the user decide whether to append to the end of the original file or overwrite it.
Start Hadoop:
Create two files for the experiment:
Command to upload a local file to HDFS:
hadoop fs -put text.txt
If the file already exists, command to append to the end of the file:
hadoop fs -appendToFile local.txt text.txt
If the file already exists, command to overwrite it:
hadoop fs -copyFromLocal -f local.txt text.txt
Code:
package cn.edu.zucc.hdfs;

import java.io.FileInputStream;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CopyFromLocalFile {
    /** Check whether a path exists */
    public static boolean test(Configuration conf, String path) {
        try (FileSystem fs = FileSystem.get(conf)) {
            return fs.exists(new Path(path));
        } catch (IOException e) {
            e.printStackTrace();
            return false;
        }
    }

    /** Copy a file to the specified path; if the path already exists, overwrite it */
    public static void copyFromLocalFile(Configuration conf,
            String localFilePath, String remoteFilePath) {
        Path localPath = new Path(localFilePath);
        Path remotePath = new Path(remoteFilePath);
        try (FileSystem fs = FileSystem.get(conf)) {
            /* The first argument of fs.copyFromLocalFile controls whether the source file
               is deleted; the second controls whether the destination is overwritten */
            fs.copyFromLocalFile(false, true, localPath, remotePath);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /** Append the content of a local file to a remote file */
    public static void appendToFile(Configuration conf, String localFilePath,
            String remoteFilePath) {
        Path remotePath = new Path(remoteFilePath);
        try (FileSystem fs = FileSystem.get(conf);
                FileInputStream in = new FileInputStream(localFilePath)) {
            FSDataOutputStream out = fs.append(remotePath);
            byte[] data = new byte[1024];
            int read = -1;
            while ((read = in.read(data)) > 0) {
                out.write(data, 0, read);
            }
            out.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /** Main method */
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:9000");
        String localFilePath = "/usr/local/hadoop/text.txt";    // local path
        String remoteFilePath = "/user/tiny/text.txt";          // HDFS path
        // String choice = "append";      // append to the end if the file exists
        String choice = "overwrite";      // overwrite if the file exists
        try {
            /* Check whether the file already exists */
            boolean fileExists = false;
            if (CopyFromLocalFile.test(conf, remoteFilePath)) {
                fileExists = true;
                System.out.println(remoteFilePath + " already exists.");
            } else {
                System.out.println(remoteFilePath + " does not exist.");
            }
            /* Handle the three cases */
            if (!fileExists) {    // the file does not exist: upload it
                CopyFromLocalFile.copyFromLocalFile(conf, localFilePath, remoteFilePath);
                System.out.println(localFilePath + " has been uploaded to " + remoteFilePath);
            } else if (choice.equals("overwrite")) {    // overwrite
                CopyFromLocalFile.copyFromLocalFile(conf, localFilePath, remoteFilePath);
                System.out.println(localFilePath + " has overwritten " + remoteFilePath);
            } else if (choice.equals("append")) {    // append
                CopyFromLocalFile.appendToFile(conf, localFilePath, remoteFilePath);
                System.out.println(localFilePath + " has been appended to " + remoteFilePath);
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
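The task statement asks for the append/overwrite decision to come from the user, while the sample above hard-codes it in the choice variable. A minimal sketch of reading that decision from standard input, assuming java.util.Scanner; the class name ChoicePrompt and the prompt text are illustrative, not part of the original code:

import java.util.Scanner;

// Hypothetical helper: asks the user whether to append or overwrite.
public class ChoicePrompt {
    /** Returns "append" or "overwrite"; any other input defaults to "overwrite". */
    public static String askChoice() {
        Scanner scanner = new Scanner(System.in);
        System.out.print("File already exists. Append or overwrite? (append/overwrite): ");
        String answer = scanner.nextLine().trim().toLowerCase();
        return answer.equals("append") ? "append" : "overwrite";
    }
}

In main, the line String choice = "overwrite"; could then be replaced with String choice = ChoicePrompt.askChoice(); once the remote file is known to exist.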
(2) Download a specified file from HDFS; if a local file has the same name as the file to be downloaded, automatically rename the downloaded file.
Shell command:
if $(hadoop fs -test -e file:///usr/local/hadoop/text.txt);
then $(hadoop fs -copyToLocal text.txt ./text2.txt);
else $(hadoop fs -copyToLocal text.txt ./text.txt);
fi
Code:
package cn.edu.zucc.hdfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import java.io.*;

public class CopyToLocal {
    /** Download a file; if the local path already exists, rename the downloaded file automatically */
    public static void copyToLocal(Configuration conf, String remoteFilePath,
            String localFilePath) {
        Path remotePath = new Path(remoteFilePath);
        try (FileSystem fs = FileSystem.get(conf)) {
            File f = new File(localFilePath);
            /* If the file name already exists, rename automatically (append _0, _1, ... to the name) */
            if (f.exists()) {
                System.out.println(localFilePath + " already exists.");
                Integer i = Integer.valueOf(0);
                while (true) {
                    f = new File(localFilePath + "_" + i.toString());
                    if (!f.exists()) {
                        localFilePath = localFilePath + "_" + i.toString();
                        break;
                    } else {
                        i++;
                        continue;
                    }
                }
                System.out.println("The file will be renamed to: " + localFilePath);
            }
            // Download the file to the local file system
            Path localPath = new Path(localFilePath);
            fs.copyToLocalFile(remotePath, localPath);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /** Main method */
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:9000");
        String localFilePath = "/usr/local/hadoop/text.txt";    // local path
        String remoteFilePath = "/user/tiny/text.txt";          // HDFS path
        try {
            CopyToLocal.copyToLocal(conf, remoteFilePath, localFilePath);
            System.out.println("Download completed");
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
(3) Print the content of a specified HDFS file to the terminal.
Shell command:
hadoop fs -cat text.txt
Code:
package cn.edu.zucc.hdfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import java.io.*;

public class Cat {
    /** Read the content of a file and print it */
    public static void cat(Configuration conf, String remoteFilePath) {
        Path remotePath = new Path(remoteFilePath);
        try (FileSystem fs = FileSystem.get(conf);
                FSDataInputStream in = fs.open(remotePath);
                BufferedReader d = new BufferedReader(new InputStreamReader(in))) {
            String line;
            while ((line = d.readLine()) != null) {
                System.out.println(line);
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /** Main method */
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:9000");
        String remoteFilePath = "/user/tiny/input/text.txt";    // HDFS path
        try {
            System.out.println("Reading file: " + remoteFilePath);
            Cat.cat(conf, remoteFilePath);
            System.out.println("\nRead completed");
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
(4) Display the read/write permissions, size, creation time, path, and other information of a specified HDFS file.
Shell command:
hadoop fs -ls -h text.txt
Code:
package cn.edu.zucc.hdfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import java.io.*;
import java.text.SimpleDateFormat;

public class List {
    /** Display information about the specified file */
    public static void ls(Configuration conf, String remoteFilePath) {
        try (FileSystem fs = FileSystem.get(conf)) {
            Path remotePath = new Path(remoteFilePath);
            FileStatus[] fileStatuses = fs.listStatus(remotePath);
            for (FileStatus s : fileStatuses) {
                System.out.println("Path: " + s.getPath().toString());
                System.out.println("Permissions: " + s.getPermission().toString());
                System.out.println("Size: " + s.getLen());
                /* The modification time is a timestamp; convert it to a date/time string */
                long timeStamp = s.getModificationTime();
                SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
                String date = format.format(timeStamp);
                System.out.println("Time: " + date);
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /** Main method */
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:9000");
        String remoteFilePath = "/user/tiny/text.txt";    // HDFS path
        try {
            System.out.println("Reading file information: " + remoteFilePath);
            List.ls(conf, remoteFilePath);
            System.out.println("\nRead completed");
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
(5) Given a directory in HDFS, print the read/write permissions, size, creation time, path, and other information of all files in that directory; if a file is itself a directory, recursively print the information of all files under it.
Shell command: hadoop fs -ls -R -h /user/tiny
Code:
package cn.edu.zucc.hdfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import java.io.*;
import java.text.SimpleDateFormat;

public class ListDir {
    /** Display information about all files under the specified directory (recursively) */
    public static void lsDir(Configuration conf, String remoteDir) {
        try (FileSystem fs = FileSystem.get(conf)) {
            Path dirPath = new Path(remoteDir);
            /* Recursively obtain all files under the directory */
            RemoteIterator<LocatedFileStatus> remoteIterator = fs.listFiles(dirPath, true);
            /* Print the information of each file */
            while (remoteIterator.hasNext()) {
                FileStatus s = remoteIterator.next();
                System.out.println("Path: " + s.getPath().toString());
                System.out.println("Permissions: " + s.getPermission().toString());
                System.out.println("Size: " + s.getLen());
                /* The modification time is a timestamp; convert it to a date/time string */
                Long timeStamp = s.getModificationTime();
                SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
                String date = format.format(timeStamp);
                System.out.println("Time: " + date);
                System.out.println();
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /** Main method */
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:9000");
        String remoteDir = "/user/tiny";    // HDFS path
        try {
            System.out.println("Reading information about all files under the directory (recursively): " + remoteDir);
            ListDir.lsDir(conf, remoteDir);
            System.out.println("Read completed");
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
(6) Given the path of a file in HDFS, create and delete that file. If the directory containing the file does not exist, create the directory automatically.
Shell command:
if $(hadoop fs -test -d dir1/dir2);
then $(hadoop fs -touchz dir1/dir2/filename);
else $(hadoop fs -mkdir -p dir1/dir2 && hdfs dfs -touchz dir1/dir2/filename);
fi
Code:
package cn.edu.zucc.hdfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import java.io.*;

public class RemoveOrMake {
    /** Check whether a path exists */
    public static boolean test(Configuration conf, String path) {
        try (FileSystem fs = FileSystem.get(conf)) {
            return fs.exists(new Path(path));
        } catch (IOException e) {
            e.printStackTrace();
            return false;
        }
    }

    /** Create a directory */
    public static boolean mkdir(Configuration conf, String remoteDir) {
        try (FileSystem fs = FileSystem.get(conf)) {
            Path dirPath = new Path(remoteDir);
            return fs.mkdirs(dirPath);
        } catch (IOException e) {
            e.printStackTrace();
            return false;
        }
    }

    /** Create a file */
    public static void touchz(Configuration conf, String remoteFilePath) {
        Path remotePath = new Path(remoteFilePath);
        try (FileSystem fs = FileSystem.get(conf)) {
            FSDataOutputStream outputStream = fs.create(remotePath);
            outputStream.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /** Delete a file */
    public static boolean rm(Configuration conf, String remoteFilePath) {
        Path remotePath = new Path(remoteFilePath);
        try (FileSystem fs = FileSystem.get(conf)) {
            return fs.delete(remotePath, false);
        } catch (IOException e) {
            e.printStackTrace();
            return false;
        }
    }

    /** Main method */
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:9000");
        String remoteFilePath = "/user/tiny/input/text.txt";    // HDFS path
        String remoteDir = "/user/tiny/input";                  // directory containing the HDFS path
        try {
            /* If the path exists, delete the file; otherwise create it */
            if (RemoveOrMake.test(conf, remoteFilePath)) {
                RemoveOrMake.rm(conf, remoteFilePath);    // delete
                System.out.println("Deleted file: " + remoteFilePath);
            } else {
                if (!RemoveOrMake.test(conf, remoteDir)) {    // if the directory does not exist, create it
                    RemoveOrMake.mkdir(conf, remoteDir);
                    System.out.println("Created directory: " + remoteDir);
                }
                RemoveOrMake.touchz(conf, remoteFilePath);
                System.out.println("Created file: " + remoteFilePath);
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
(7) Given the path of a directory in HDFS, create and delete that directory. When creating the directory, if its parent directories do not exist, create them automatically; when deleting, let the user decide whether the directory should still be deleted when it is not empty.
Shell commands:
Create the directory (missing parent directories are created automatically):
hadoop fs -mkdir -p dir1/dir2
Delete the directory only if it is empty:
hadoop fs -rmdir dir1/dir2
Delete the directory even if it is not empty:
hadoop fs -rm -r dir1/dir2
Code:
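A minimal sketch of the directory operations for this task, following the same structure and Configuration setup as the other examples; the class name MakeOrRemoveDir, the example directory path, and the forceDelete flag (the user's choice of whether a non-empty directory should still be deleted) are illustrative assumptions:

package cn.edu.zucc.hdfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import java.io.*;

public class MakeOrRemoveDir {
    /** Check whether a path exists */
    public static boolean test(Configuration conf, String path) {
        try (FileSystem fs = FileSystem.get(conf)) {
            return fs.exists(new Path(path));
        } catch (IOException e) {
            e.printStackTrace();
            return false;
        }
    }

    /** Create a directory; missing parent directories are created automatically */
    public static boolean mkdir(Configuration conf, String remoteDir) {
        try (FileSystem fs = FileSystem.get(conf)) {
            return fs.mkdirs(new Path(remoteDir));
        } catch (IOException e) {
            e.printStackTrace();
            return false;
        }
    }

    /** Check whether a directory is empty */
    public static boolean isDirEmpty(Configuration conf, String remoteDir) {
        try (FileSystem fs = FileSystem.get(conf)) {
            return fs.listStatus(new Path(remoteDir)).length == 0;
        } catch (IOException e) {
            e.printStackTrace();
            return false;
        }
    }

    /** Delete a directory; if recursive is true, non-empty directories are deleted as well */
    public static boolean rmDir(Configuration conf, String remoteDir, boolean recursive) {
        try (FileSystem fs = FileSystem.get(conf)) {
            return fs.delete(new Path(remoteDir), recursive);
        } catch (IOException e) {
            e.printStackTrace();
            return false;
        }
    }

    /** Main method */
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:9000");
        String remoteDir = "/user/tiny/dir1/dir2";    // HDFS directory (illustrative)
        boolean forceDelete = true;                    // user choice: delete even when not empty
        try {
            if (!MakeOrRemoveDir.test(conf, remoteDir)) {
                MakeOrRemoveDir.mkdir(conf, remoteDir);
                System.out.println("Created directory: " + remoteDir);
            } else if (MakeOrRemoveDir.isDirEmpty(conf, remoteDir) || forceDelete) {
                MakeOrRemoveDir.rmDir(conf, remoteDir, true);
                System.out.println("Deleted directory: " + remoteDir);
            } else {
                System.out.println("Directory is not empty and was not deleted: " + remoteDir);
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}

FileSystem.mkdirs creates any missing parent directories, and FileSystem.delete(path, true) removes a directory even when it is not empty, which together cover the two requirements of this task.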
(8) Append content to a specified HDFS file; the user decides whether the content is appended to the beginning or the end of the original file.
Shell commands:
Append to the end of the file:
hadoop fs -appendToFile local.txt text.txt
Append to the beginning of the file:
hadoop fs -get text.txt
cat text.txt >> local.txt
hadoop fs -copyFromLocal -f local.txt text.txt
Code:
package cn.edu.zucc.hdfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import java.io.*;

public class AppendToFile {
    /** Check whether a path exists */
    public static boolean test(Configuration conf, String path) {
        try (FileSystem fs = FileSystem.get(conf)) {
            return fs.exists(new Path(path));
        } catch (IOException e) {
            e.printStackTrace();
            return false;
        }
    }

    /** Append a string to a file */
    public static void appendContentToFile(Configuration conf, String content,
            String remoteFilePath) {
        try (FileSystem fs = FileSystem.get(conf)) {
            Path remotePath = new Path(remoteFilePath);
            /* Open an output stream whose writes are appended to the end of the file */
            FSDataOutputStream out = fs.append(remotePath);
            out.write(content.getBytes());
            out.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /** Append the content of a local file to a remote file */
    public static void appendToFile(Configuration conf, String localFilePath,
            String remoteFilePath) {
        Path remotePath = new Path(remoteFilePath);
        try (FileSystem fs = FileSystem.get(conf);
                FileInputStream in = new FileInputStream(localFilePath)) {
            FSDataOutputStream out = fs.append(remotePath);
            byte[] data = new byte[1024];
            int read = -1;
            while ((read = in.read(data)) > 0) {
                out.write(data, 0, read);
            }
            out.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /** Move a file to the local file system; the source file is deleted after the move */
    public static void moveToLocalFile(Configuration conf,
            String remoteFilePath, String localFilePath) {
        try (FileSystem fs = FileSystem.get(conf)) {
            Path remotePath = new Path(remoteFilePath);
            Path localPath = new Path(localFilePath);
            fs.moveToLocalFile(remotePath, localPath);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /** Create a file */
    public static void touchz(Configuration conf, String remoteFilePath) {
        try (FileSystem fs = FileSystem.get(conf)) {
            Path remotePath = new Path(remoteFilePath);
            FSDataOutputStream outputStream = fs.create(remotePath);
            outputStream.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /** Main method */
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:9000");
        String remoteFilePath = "/user/tiny/text.txt";    // HDFS file
        String content = "Newly appended content\n";
        String choice = "after";       // append to the end of the file
        // String choice = "before";   // append to the beginning of the file
        try {
            /* Check whether the file exists */
            if (!AppendToFile.test(conf, remoteFilePath)) {
                System.out.println("File does not exist: " + remoteFilePath);
            } else {
                if (choice.equals("after")) {    // append at the end
                    AppendToFile.appendContentToFile(conf, content, remoteFilePath);
                    System.out.println("Appended content to the end of " + remoteFilePath);
                } else if (choice.equals("before")) {    // append at the beginning
                    /* There is no API that does this directly, so first move the file to the
                       local file system, create a new HDFS file, and append the pieces in order */
                    String localTmpPath = "/user/hadoop/tmp.txt";
                    AppendToFile.moveToLocalFile(conf, remoteFilePath, localTmpPath);    // move to local
                    AppendToFile.touchz(conf, remoteFilePath);                           // create a new file
                    AppendToFile.appendContentToFile(conf, content, remoteFilePath);     // write the new content first
                    AppendToFile.appendToFile(conf, localTmpPath, remoteFilePath);       // then write the original content
                    System.out.println("Appended content to the beginning of " + remoteFilePath);
                }
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
(9) Delete a specified file from HDFS.
Shell command: hadoop fs -rm test
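Only the Shell command is given for this task; a minimal sketch of the corresponding code, reusing the FileSystem.delete call and the Configuration setup from the examples above (the class name DeleteFile and the example path are illustrative assumptions):

package cn.edu.zucc.hdfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import java.io.*;

public class DeleteFile {
    /** Delete a single file (the second argument of fs.delete disables recursive deletion) */
    public static boolean rm(Configuration conf, String remoteFilePath) {
        try (FileSystem fs = FileSystem.get(conf)) {
            return fs.delete(new Path(remoteFilePath), false);
        } catch (IOException e) {
            e.printStackTrace();
            return false;
        }
    }

    /** Main method */
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:9000");
        String remoteFilePath = "/user/tiny/text.txt";    // HDFS path (illustrative)
        try {
            if (DeleteFile.rm(conf, remoteFilePath)) {
                System.out.println("Deleted file: " + remoteFilePath);
            } else {
                System.out.println("Delete failed (the file may not exist): " + remoteFilePath);
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}

The second argument of fs.delete is false so that only a single file is removed, never a whole directory tree.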
(10) Move a file in HDFS from a source path to a destination path.
Shell command: hadoop fs -mv text.txt input
Code:
package cn.edu.zucc.hdfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import java.io.*;

public class MoveFile {
    /** Move (rename) a file within HDFS */
    public static boolean mv(Configuration conf, String remoteFilePath,
            String remoteToFilePath) {
        try (FileSystem fs = FileSystem.get(conf)) {
            Path srcPath = new Path(remoteFilePath);
            Path dstPath = new Path(remoteToFilePath);
            return fs.rename(srcPath, dstPath);
        } catch (IOException e) {
            e.printStackTrace();
            return false;
        }
    }

    /** Main method */
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:9000");
        String remoteFilePath = "hdfs:///user/tiny/text.txt";    // source HDFS path
        String remoteToFilePath = "hdfs:///user/tiny/input";     // destination HDFS path
        try {
            if (MoveFile.mv(conf, remoteFilePath, remoteToFilePath)) {
                System.out.println("Moved file " + remoteFilePath + " to " + remoteToFilePath);
            } else {
                System.out.println("Operation failed (the source file does not exist or the move failed)");
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
(II) Write a class MyFSDataInputStream that extends org.apache.hadoop.fs.FSDataInputStream, with the following requirement: implement a method readLine() that reads a specified HDFS file line by line, returning null when the end of the file is reached and otherwise returning one line of text from the file.
Code:
package cn.edu.zucc.hdfs;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;

public class MyFSDataInputStream extends FSDataInputStream {
    public MyFSDataInputStream(InputStream in) {
        super(in);
    }

    /** Read one line of the specified HDFS file; returns null at end of file */
    public static String readline(Configuration conf, String remoteFilePath) {
        try (FileSystem fs = FileSystem.get(conf)) {
            Path remotePath = new Path(remoteFilePath);
            FSDataInputStream in = fs.open(remotePath);
            BufferedReader d = new BufferedReader(new InputStreamReader(in));
            String line = null;
            if ((line = d.readLine()) != null) {
                d.close();
                in.close();
                return line;
            }
            return null;
        } catch (IOException e) {
            e.printStackTrace();
            return null;
        }
    }

    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("fs.default.name", "hdfs://localhost:9000");
        String remoteFilePath = "/user/tiny/text.txt";    // HDFS path
        System.out.println("Reading file: " + remoteFilePath);
        System.out.println(MyFSDataInputStream.readline(conf, remoteFilePath));
        System.out.println("\nRead completed");
    }
}
(III) Consult the Java documentation or other materials, and use java.net.URL together with org.apache.hadoop.fs.FsUrlStreamHandlerFactory to print the text of a specified HDFS file to the terminal.
Code:
package cn.edu.zucc.hdfs;

import java.io.IOException;
import java.io.InputStream;
import java.net.URL;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.IOUtils;

public class FsUrl {
    static {
        // Register the handler factory so that java.net.URL understands the hdfs:// scheme
        URL.setURLStreamHandlerFactory(new FsUrlStreamHandlerFactory());
    }

    /** Print the content of an HDFS file to the terminal via java.net.URL */
    public static void cat(String remoteFilePath) {
        try (InputStream in = new URL("hdfs", "localhost", 9000, remoteFilePath)
                .openStream()) {
            IOUtils.copyBytes(in, System.out, 4096, false);
            IOUtils.closeStream(in);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /** Main method */
    public static void main(String[] args) {
        String remoteFilePath = "/user/tiny/text.txt";    // HDFS path
        try {
            System.out.println("Reading file: " + remoteFilePath);
            FsUrl.cat(remoteFilePath);
            System.out.println("\nRead completed");
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}