Experiment platform
OS: Ubuntu 18.04.4 LTS
Hadoop version: 2.7.7
JDK version: 1.8.0_241
Java IDE: Eclipse
(I) Implement each of the following operations in a program, and accomplish the same task with the Shell commands provided by Hadoop:
(1) Upload any text file to HDFS; if the specified file already exists in HDFS, let the user choose whether to append to the end of the existing file or to overwrite it;
Shell commands:
First change into the Hadoop home directory (cd /usr/local/hadoop) and start the Hadoop services (start-all.sh), then create two text files to experiment with:
echo "hello world" > local.txt
echo "hello hadoop" > text.txt
Check whether the file already exists in HDFS (an exit status of 0 means it exists):
hadoop fs -test -e text.txt
echo $?
Create a working directory for the experiment (by default the HDFS working directory is /user/<current user>; here /shiyansan is used):
hadoop fs -mkdir -p /shiyansan
Upload the local file to HDFS:
hadoop fs -put text.txt /shiyansan
if $(hadoop fs -test -e /shiyansan/text.txt);
then $(hadoop fs -appendToFile local.txt /shiyansan/text.txt); # append to the end of the file
else $(hadoop fs -copyFromLocal -f local.txt /shiyansan/text.txt); # overwrite the existing file
fi
Program:
package shiyan3;
import java.io.FileInputStream;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import java.util.Properties;
public class CopyFromLocalFile {
/**
* Check whether a path exists
*/
public static boolean test(Configuration conf, String path) {
try (FileSystem fs = FileSystem.get(conf)) {
return fs.exists(new Path(path));
} catch (IOException e) {
e.printStackTrace();
return false;
}
}
/**
* Copy a file to the specified path; if the path already exists, overwrite it
*/
public static void copyFromLocalFile(Configuration conf,
String localFilePath, String remoteFilePath) {
Path localPath = new Path(localFilePath);
Path remotePath = new Path(remoteFilePath);
try (FileSystem fs = FileSystem.get(conf)) {
/* In fs.copyFromLocalFile, the first boolean says whether to delete the source file, the second whether to overwrite the destination */
fs.copyFromLocalFile(false, true, localPath, remotePath);
} catch (IOException e) {
e.printStackTrace();
}
}
/**
* Append the contents of a local file to an HDFS file
*/
public static void appendToFile(Configuration conf, String localFilePath,
String remoteFilePath) {
Path remotePath = new Path(remoteFilePath);
try (FileSystem fs = FileSystem.get(conf);
FileInputStream in = new FileInputStream(localFilePath);) {
FSDataOutputStream out = fs.append(remotePath);
byte[] data = new byte[1024];
int read = -1;
while ((read = in.read(data)) > 0) {
out.write(data, 0, read);
}
out.close();
} catch (IOException e) {
e.printStackTrace();
}
}
/**
* Main
*/
public static void main(String[] args) {
Properties properties = System.getProperties();
properties.setProperty("HADOOP_USER_NAME", "root");
Configuration conf = new Configuration();
conf.set("fs.defaultFS", "hdfs://localhost:9000");
String localFilePath = "/usr/local/hadoop/text.txt"; // local path
String remoteFilePath = "/shiyansan/text.txt"; // HDFS path
// String choice = "append"; // append if the file exists
String choice = "overwrite"; // overwrite if the file exists
try {
/* Check whether the file exists */
boolean fileExists = false;
if (CopyFromLocalFile.test(conf, remoteFilePath)) {
fileExists = true;
System.out.println(remoteFilePath + " already exists.");
} else {
System.out.println(remoteFilePath + " does not exist.");
}
/* Perform the chosen action */
if (!fileExists) { // the file does not exist, so upload it
CopyFromLocalFile.copyFromLocalFile(conf, localFilePath,
remoteFilePath);
System.out.println(localFilePath + " has been uploaded to " + remoteFilePath);
} else if (choice.equals("overwrite")) { // overwrite
CopyFromLocalFile.copyFromLocalFile(conf, localFilePath,
remoteFilePath);
System.out.println(localFilePath + " has overwritten " + remoteFilePath);
} else if (choice.equals("append")) { // append
CopyFromLocalFile.appendToFile(conf, localFilePath,
remoteFilePath);
System.out.println(localFilePath + " has been appended to " + remoteFilePath);
}
} catch (Exception e) {
e.printStackTrace();
}
}
}
(2) Download a specified file from HDFS; if a local file with the same name already exists, automatically rename the downloaded file;
Shell command implementation (the file:// prefix makes -test check the local path; if the local name is already taken, download under a new name):
if $(hadoop fs -test -e file:///usr/local/hadoop/text.txt);
then $(hadoop fs -copyToLocal /shiyansan/text.txt ./text2.txt);
else $(hadoop fs -copyToLocal /shiyansan/text.txt ./text.txt);
fi
Program:
package shiyan3;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import java.io.*;
import java.util.Properties;
public class CopyToLocal {
/**
* Download a file to the local filesystem; if the local path already exists, rename the downloaded file automatically
*/
public static void copyToLocal(Configuration conf, String remoteFilePath,
String localFilePath) {
Path remotePath = new Path(remoteFilePath);
try (FileSystem fs = FileSystem.get(conf)) {
File f = new File(localFilePath);
/* If the local name is taken, rename automatically (append _0, _1, ... to the name) */
if (f.exists()) {
System.out.println(localFilePath + " already exists.");
int i = 0;
while (true) {
f = new File(localFilePath + "_" + i);
if (!f.exists()) {
localFilePath = localFilePath + "_" + i;
break;
}
i++;
}
System.out.println("It will be renamed to: " + localFilePath);
}
}
// download the file to the local filesystem
Path localPath = new Path(localFilePath);
fs.copyToLocalFile(remotePath, localPath);
} catch (IOException e) {
e.printStackTrace();
}
}
/**
* Main
*/
public static void main(String[] args) {
Properties properties = System.getProperties();
properties.setProperty("HADOOP_USER_NAME", "root");
Configuration conf = new Configuration();
conf.set("fs.defaultFS", "hdfs://localhost:9000");
String localFilePath = "/usr/local/hadoop/text.txt"; // local path
String remoteFilePath = "/shiyansan/text.txt"; // HDFS path
try {
CopyToLocal.copyToLocal(conf, remoteFilePath, localFilePath);
System.out.println("下载完成");
} catch (Exception e) {
e.printStackTrace();
}
}
}
(3) Print the contents of a specified HDFS file to the terminal;
Shell command implementation:
hadoop fs -cat /shiyansan/text.txt
Program:
package shiyan3;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import java.io.*;
import java.util.Properties;
public class Cat {
/**
* Read the file contents
*/
public static void cat(Configuration conf, String remoteFilePath) {
Path remotePath = new Path(remoteFilePath);
try (FileSystem fs = FileSystem.get(conf);
FSDataInputStream in = fs.open(remotePath);
BufferedReader d = new BufferedReader(new InputStreamReader(in));) {
String line;
while ((line = d.readLine()) != null) {
System.out.println(line);
}
} catch (IOException e) {
e.printStackTrace();
}
}
/**
* Main
*/
public static void main(String[] args) {
Properties properties = System.getProperties();
properties.setProperty("HADOOP_USER_NAME", "root");
Configuration conf = new Configuration();
conf.set("fs.defaultFS", "hdfs://localhost:9000");
String remoteFilePath = "/shiyansan/text.txt"; // HDFS path
try {
System.out.println("Reading file: " + remoteFilePath);
Cat.cat(conf, remoteFilePath);
System.out.println("\nRead complete");
} catch (Exception e) {
e.printStackTrace();
}
}
}
(4) Display the read/write permissions, size, creation time, path, and other information of a specified HDFS file;
Shell command implementation:
hadoop fs -ls -h /shiyansan/text.txt
Program:
package shiyan3;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import java.io.*;
import java.text.SimpleDateFormat;
public class List {
/**
* Display information about the specified file
*/
public static void ls(Configuration conf, String remoteFilePath) {
try (FileSystem fs = FileSystem.get(conf)) {
Path remotePath = new Path(remoteFilePath);
FileStatus[] fileStatuses = fs.listStatus(remotePath);
for (FileStatus s : fileStatuses) {
System.out.println("路径: " + s.getPath().toString());
System.out.println("权限: " + s.getPermission().toString());
System.out.println("大小: " + s.getLen());
/* 返回的是时间戳,转化为时间日期格式 */
long timeStamp = s.getModificationTime();
SimpleDateFormat format = new SimpleDateFormat(
"yyyy-MM-dd HH:mm:ss");
String date = format.format(timeStamp);
System.out.println("时间: " + date);
}
} catch (IOException e) {
e.printStackTrace();
}
}
/**
* Main
*/
public static void main(String[] args) {
Configuration conf = new Configuration();
conf.set("fs.defaultFS", "hdfs://localhost:9000");
String remoteFilePath = "/shiyansan/text.txt"; // HDFS path
try {
System.out.println("Reading file info: " + remoteFilePath);
List.ls(conf, remoteFilePath);
System.out.println("\nRead complete");
} catch (Exception e) {
e.printStackTrace();
}
}
}
(5) Given a directory in HDFS, output the read/write permissions, size, creation time, path, and other information of every file under it; if an entry is itself a directory, recursively output the information of all files under it;
Shell command implementation:
hadoop fs -ls -R -h /shiyansan
Program:
package shiyan3;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import java.io.*;
import java.text.SimpleDateFormat;
public class ListDir {
/**
* Recursively display information about all files under the specified directory
*/
public static void lsDir(Configuration conf, String remoteDir) {
try (FileSystem fs = FileSystem.get(conf)) {
Path dirPath = new Path(remoteDir);
/* Recursively list every file under the directory; with recursive=true,
listFiles returns files only, not the directories themselves */
RemoteIterator<LocatedFileStatus> remoteIterator = fs.listFiles(
dirPath, true);
/* print information about each file */
while (remoteIterator.hasNext()) {
FileStatus s = remoteIterator.next();
System.out.println("路径: " + s.getPath().toString());
System.out.println("权限: " + s.getPermission().toString());
System.out.println("大小: " + s.getLen());
/* 返回的是时间戳,转化为时间日期格式 */
Long timeStamp = s.getModificationTime();
SimpleDateFormat format = new SimpleDateFormat(
"yyyy-MM-dd HH:mm:ss");
String date = format.format(timeStamp);
System.out.println("时间: " + date);
System.out.println();
}
} catch (IOException e) {
e.printStackTrace();
}
}
/**
* Main
*/
public static void main(String[] args) {
Configuration conf = new Configuration();
conf.set("fs.defaultFS", "hdfs://localhost:9000");
String remoteDir = "/shiyansan"; // HDFS directory path
try {
System.out.println("Recursively reading the information of all files under: " + remoteDir);
ListDir.lsDir(conf, remoteDir);
System.out.println("Read complete");
} catch (Exception e) {
e.printStackTrace();
}
}
}
(6) Given the path of a file in HDFS, create and delete that file. If the directory containing the file does not exist, create the directory automatically;
Shell command implementation:
if $(hadoop fs -test -d /shiyansan/test1);
then $(hadoop fs -touchz /shiyansan/test1/text1.txt);
else $(hadoop fs -mkdir -p /shiyansan/test1 && hadoop fs -touchz /shiyansan/test1/text1.txt);
fi
Delete the file:
hadoop fs -rm /shiyansan/test1/text1.txt
Program:
package shiyan3;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import java.io.*;
import java.util.Properties;
public class RemoveOrMake {
/**
* Check whether a path exists
*/
public static boolean test(Configuration conf, String path) {
try (FileSystem fs = FileSystem.get(conf)) {
return fs.exists(new Path(path));
} catch (IOException e) {
e.printStackTrace();
return false;
}
}
/**
* Create a directory
*/
public static boolean mkdir(Configuration conf, String remoteDir) {
try (FileSystem fs = FileSystem.get(conf)) {
Path dirPath = new Path(remoteDir);
return fs.mkdirs(dirPath);
} catch (IOException e) {
e.printStackTrace();
return false;
}
}
/**
* Create a file
*/
public static void touchz(Configuration conf, String remoteFilePath) {
Path remotePath = new Path(remoteFilePath);
try (FileSystem fs = FileSystem.get(conf)) {
FSDataOutputStream outputStream = fs.create(remotePath);
outputStream.close();
} catch (IOException e) {
e.printStackTrace();
}
}
/**
* Delete a file
*/
public static boolean rm(Configuration conf, String remoteFilePath) {
Path remotePath = new Path(remoteFilePath);
try (FileSystem fs = FileSystem.get(conf)) {
return fs.delete(remotePath, false);
} catch (IOException e) {
e.printStackTrace();
return false;
}
}
/**
* Main
*/
public static void main(String[] args) {
Properties properties = System.getProperties();
properties.setProperty("HADOOP_USER_NAME", "root");
Configuration conf = new Configuration();
conf.set("fs.defaultFS", "hdfs://localhost:9000");
String remoteFilePath = "/shiyansan/test1/text1.txt"; // HDFS file path
String remoteDir = "/shiyansan/test1"; // directory containing the file
try {
/* If the path exists, delete it; otherwise create it */
if (RemoveOrMake.test(conf, remoteFilePath)) {
RemoveOrMake.rm(conf, remoteFilePath); // delete
System.out.println("Deleted file: " + remoteFilePath);
} else {
if (!RemoveOrMake.test(conf, remoteDir)) { // create the directory if it does not exist
RemoveOrMake.mkdir(conf, remoteDir);
System.out.println("Created directory: " + remoteDir);
}
RemoveOrMake.touchz(conf, remoteFilePath);
System.out.println("Created file: " + remoteFilePath);
}
} catch (Exception e) {
e.printStackTrace();
}
}
}
(7) Given the path of a directory in HDFS, create and delete that directory. When creating, if the parent directories do not exist, create them automatically; when deleting, let the user decide whether the directory should still be deleted when it is not empty;
Shell command implementation:
Create the directory (missing parent directories are created automatically):
hdfs dfs -mkdir -p /shiyansan/test2
Delete the directory only if it is empty (rmdir refuses a non-empty directory):
hdfs dfs -rmdir /shiyansan/test2
If the user chooses to delete the directory even when it is not empty:
hdfs dfs -rm -r /shiyansan/test2
Program:
package shiyan3;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import java.io.*;
import java.util.*;
public class Seven {
/**
* Create the directory if it does not exist; otherwise ask the user whether to delete it
* @param conf Hadoop configuration
* @param remoteFilePath target directory path
*/
private static void test7(Configuration conf, String remoteFilePath){
try (FileSystem fs = FileSystem.get(conf)) {
Path remotePath = new Path(remoteFilePath);
// if the directory does not exist, create it (including any missing parents)
if (!fs.exists(remotePath)){
System.out.println("Can't find this path, the path will be created automatically");
fs.mkdirs(remotePath);
return;
}
System.out.println("Do you want to delete this dir? ( y / n )");
Scanner scanner = new Scanner(System.in);
if (scanner.next().equals("y")){
FileStatus[] entries = fs.listStatus(remotePath);
if (entries.length != 0){
System.out.println("There are files in this directory; are you sure you want to delete them all? ( y / n )");
if (!scanner.next().equals("y")){
return; // the directory is not empty and the user chose to keep it
}
}
if (fs.delete(remotePath, true)){
System.out.println("Delete successful");
}
}
} catch (IOException e) {
e.printStackTrace();
}
}
public static void main(String[] args) {
Properties properties = System.getProperties();
properties.setProperty("HADOOP_USER_NAME", "root");
Configuration conf = new Configuration();
conf.set("fs.defaultFS", "hdfs://localhost:9000");
String remoteFilePath = "/shiyansan"; // HDFS directory path
try {
Seven.test7(conf, remoteFilePath);
} catch (Exception e) {
e.printStackTrace();
}
}
}
(8) Append content to a specified file in HDFS, letting the user choose whether the content is appended at the beginning or at the end of the file;
Shell command implementation:
Append to the end of the file:
hadoop fs -appendToFile local.txt /shiyansan/text.txt
Append to the beginning of the file
(there is no direct command for this; one approach is to download the file, splice the content locally, then upload it again with overwrite):
hadoop fs -get /shiyansan/text.txt
cat text.txt >> local.txt
hadoop fs -copyFromLocal -f local.txt /shiyansan/text.txt
Program:
package shiyan3;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import java.io.*;
import java.util.Properties;
public class AppendToFile {
/**
* Check whether a path exists
*/
public static boolean test(Configuration conf, String path) {
try (FileSystem fs = FileSystem.get(conf)) {
return fs.exists(new Path(path));
} catch (IOException e) {
e.printStackTrace();
return false;
}
}
/**
* Append a text string to a file
*/
public static void appendContentToFile(Configuration conf, String content,
String remoteFilePath) {
try (FileSystem fs = FileSystem.get(conf)) {
Path remotePath = new Path(remoteFilePath);
/* open an output stream whose writes are appended to the end of the file */
FSDataOutputStream out = fs.append(remotePath);
out.write(content.getBytes());
out.close();
} catch (IOException e) {
e.printStackTrace();
}
}
/**
* Append the contents of a local file
*/
public static void appendToFile(Configuration conf, String localFilePath,
String remoteFilePath) {
Path remotePath = new Path(remoteFilePath);
try (FileSystem fs = FileSystem.get(conf);
FileInputStream in = new FileInputStream(localFilePath);) {
FSDataOutputStream out = fs.append(remotePath);
byte[] data = new byte[1024];
int read = -1;
while ((read = in.read(data)) > 0) {
out.write(data, 0, read);
}
out.close();
} catch (IOException e) {
e.printStackTrace();
}
}
/**
* Move a file to the local filesystem; the source file is deleted after the move
*/
public static void moveToLocalFile(Configuration conf,
String remoteFilePath, String localFilePath) {
try (FileSystem fs = FileSystem.get(conf)) {
Path remotePath = new Path(remoteFilePath);
Path localPath = new Path(localFilePath);
fs.moveToLocalFile(remotePath, localPath);
} catch (IOException e) {
e.printStackTrace();
}
}
/**
* Create a file
*/
public static void touchz(Configuration conf, String remoteFilePath) {
try (FileSystem fs = FileSystem.get(conf)) {
Path remotePath = new Path(remoteFilePath);
FSDataOutputStream outputStream = fs.create(remotePath);
outputStream.close();
} catch (IOException e) {
e.printStackTrace();
}
}
/**
* Main
*/
public static void main(String[] args) {
Properties properties = System.getProperties();
properties.setProperty("HADOOP_USER_NAME", "root");
Configuration conf = new Configuration();
conf.set("fs.defaultFS", "hdfs://localhost:9000");
String remoteFilePath = "/shiyansan/text.txt"; // HDFS file
String content = "newly appended content\n";
String choice = "after"; // append to the end of the file
//String choice = "before"; // append to the beginning of the file
try {
/* Check whether the file exists */
if (!AppendToFile.test(conf, remoteFilePath)) {
System.out.println("File does not exist: " + remoteFilePath);
} else {
if (choice.equals("after")) { // append at the end
AppendToFile.appendContentToFile(conf, content,
remoteFilePath);
System.out.println("Appended content to the end of " + remoteFilePath);
} else if (choice.equals("before")) { // append at the beginning
/* No API does this directly, so move the file to the local filesystem,
recreate an empty HDFS file, then append the pieces in order */
String localTmpPath = "/user/hadoop/tmp.txt";
AppendToFile.moveToLocalFile(conf, remoteFilePath,
localTmpPath); // move to local
AppendToFile.touchz(conf, remoteFilePath); // create a new empty file
AppendToFile.appendContentToFile(conf, content,
remoteFilePath); // write the new content first
AppendToFile.appendToFile(conf, localTmpPath,
remoteFilePath); // then write back the original content
System.out.println("Appended content to the beginning of: " + remoteFilePath);
}
}
} catch (Exception e) {
e.printStackTrace();
}
}
}
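A note on fs.append(): on a pseudo-distributed cluster with a single DataNode, append calls can fail with a "Failed to replace a bad datanode on the existing pipeline" error. A commonly used workaround is to relax the HDFS client's datanode-replacement policy before appending, as in the sketch below; the two extra conf.set calls are an assumption about such single-node setups, not part of the assignment code:
Configuration conf = new Configuration();
conf.set("fs.defaultFS", "hdfs://localhost:9000");
// Assumption: a pseudo-distributed cluster with only one DataNode.
// Without these settings, fs.append() may abort while looking for a
// replacement DataNode for the write pipeline.
conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
conf.set("dfs.client.block.write.replace-datanode-on-failure.enable", "true");
The same settings also apply to the appendToFile method used in task (1).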
(9) Delete a specified file in HDFS;
Shell command implementation:
hadoop fs -rm /shiyansan/test1/text1.txt
Program:
package shiyan3;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import java.io.*;
import java.util.Properties;
public class Delete {
private static void deleteFile(Configuration conf, String remoteFilePath){
try (FileSystem fs = FileSystem.get(conf)) {
Path remotePath = new Path(remoteFilePath);
// non-recursive delete: the target here is a file, not a directory
if(fs.delete(remotePath, false)){
System.out.println("Delete success");
}else {
System.out.println("Delete failed");
}
} catch (IOException e) {
e.printStackTrace();
}
}
public static void main(String[] args) {
Properties properties = System.getProperties();
properties.setProperty("HADOOP_USER_NAME", "root");
Configuration conf = new Configuration();
conf.set("fs.defaultFS", "hdfs://localhost:9000");
String remoteFilePath = "/shiyansan/test1/text1.txt"; // HDFS path
try {
System.out.println("Deleting file: " + remoteFilePath);
Delete.deleteFile(conf, remoteFilePath);
System.out.println("\nDone");
} catch (Exception e) {
e.printStackTrace();
}
}
}
(10) Move a file in HDFS from a source path to a destination path.
Shell command implementation:
hdfs dfs -mv /shiyansan/text.txt /shiyansan/test1/text.txt
Program:
package shiyan3;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import java.io.*;
import java.util.Properties;
public class MoveFile {
/**
* Move a file
*/
public static boolean mv(Configuration conf, String remoteFilePath,
String remoteToFilePath) {
try (FileSystem fs = FileSystem.get(conf)) {
Path srcPath = new Path(remoteFilePath);
Path dstPath = new Path(remoteToFilePath);
return fs.rename(srcPath, dstPath);
} catch (IOException e) {
e.printStackTrace();
return false;
}
}
/**
* Main
*/
public static void main(String[] args) {
Properties properties = System.getProperties();
properties.setProperty("HADOOP_USER_NAME", "root");
Configuration conf = new Configuration();
conf.set("fs.defaultFS", "hdfs://localhost:9000");
String remoteFilePath = "/shiyansan/text.txt"; // source HDFS path
String remoteToFilePath = "/shiyansan/test1"; // destination HDFS path
try {
if (MoveFile.mv(conf, remoteFilePath, remoteToFilePath)) {
System.out.println("将文件 " + remoteFilePath + " 移动到 "
+ remoteToFilePath);
} else {
System.out.println("操作失败(源文件不存在或移动失败)");
}
} catch (Exception e) {
e.printStackTrace();
}
}
}
(II) Write a class "MyFSDataInputStream" that extends "org.apache.hadoop.fs.FSDataInputStream", with the following requirement: implement a method "readLine()" that reads a specified HDFS file line by line; it returns null when the end of the file is reached, and otherwise returns one line of text.
package shiyan3;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.util.Properties;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
public class MyFSDataInputStream extends FSDataInputStream{
/**
* Wrap an existing input stream
*/
public MyFSDataInputStream(InputStream in){
super(in);
}
public static String readLine(Configuration conf,String remoteFilePath){
try (FileSystem fs = FileSystem.get(conf);
FSDataInputStream in = fs.open(new Path(remoteFilePath));
BufferedReader d = new BufferedReader(new InputStreamReader(in))) {
// BufferedReader.readLine() already returns null at end of file
return d.readLine();
} catch (IOException e) {
e.printStackTrace();
return null;
}
}
public static void main(String[] args) {
Properties properties = System.getProperties();
properties.setProperty("HADOOP_USER_NAME", "root");
Configuration conf=new Configuration();
conf.set("fs.default.name","hdfs://localhost:9000");
String remoteFilePath="/shiyansan/text.txt";
System.out.println("读取文件:"+remoteFilePath);
System.out.println(MyFSDataInputStream.readline(conf, remoteFilePath));
System.out.println("\n读取完成");
}
}
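The static readLine() above returns only the first line of the file. A minimal instance-level sketch of the readLine() described by the assignment, assuming '\n'-terminated lines and the platform default charset, could look like this (the class name MyFSDataInputStream2 is hypothetical, chosen only to avoid clashing with the class above):
package shiyan3;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.fs.FSDataInputStream;
public class MyFSDataInputStream2 extends FSDataInputStream {
public MyFSDataInputStream2(InputStream in) {
super(in);
}
/**
* Read one line of text; returns null once the end of the file is reached
*/
public String readLine() throws IOException {
ByteArrayOutputStream buf = new ByteArrayOutputStream();
int b;
while ((b = read()) != -1) {
if (b == '\n') { // end of the current line
return buf.toString();
}
buf.write(b);
}
// end of file: return a trailing unterminated line if there is one
return buf.size() > 0 ? buf.toString() : null;
}
}
Wrapping the stream returned by fs.open(new Path(...)) in this class allows readLine() to be called repeatedly until it returns null.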
(III) Consult the Java documentation or other resources, and use "java.net.URL" together with "org.apache.hadoop.fs.FsUrlStreamHandlerFactory" to print the text of a specified HDFS file to the terminal.
package shiyan3;
import java.io.IOException;
import java.io.InputStream;
import java.net.URL;
import java.util.Properties;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.IOUtils;
public class FsUrl {
static {
// URL.setURLStreamHandlerFactory may be called at most once per JVM
URL.setURLStreamHandlerFactory(new FsUrlStreamHandlerFactory());
}
/**
* Print the file contents to standard output
*/
public static void cat(String remoteFilePath){
try(InputStream in=new URL("hdfs","localhost",9000,remoteFilePath).openStream()){
IOUtils.copyBytes(in, System.out, 4096, false);
}catch (IOException e) {
e.printStackTrace();
}
}
public static void main(String[] args) {
Properties properties = System.getProperties();
properties.setProperty("HADOOP_USER_NAME", "root");
String remoteFilePath="/shiyansan/text.txt";
try{
System.out.println("去读文件:"+remoteFilePath);
FsUrl.cat(remoteFilePath);
System.out.println("\n 读取完成");
}catch(Exception e){
e.printStackTrace();
}
}
}