编写 Java 程序,实现满足如下要求的八个程序:
1. 迭代显示 HDFS 中的所有目录和文件
package hdfs;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
public class hdfs {

    /**
     * Recursively prints every entry under {@code dir}: directories are
     * marked {@code <dir>}, files are printed with their length in bytes.
     *
     * <p>The original used {@code fs.listFiles(path, true)}, whose
     * RemoteIterator yields only FILES — directories were never shown,
     * which contradicts the stated requirement ("all directories and
     * files"). listStatus + recursion visits both.
     */
    private static void listAll(FileSystem fs, Path dir) throws IOException {
        for (FileStatus status : fs.listStatus(dir)) {
            System.out.println(status.getPath() + " "
                    + (status.isDirectory() ? "<dir>" : status.getLen()));
            if (status.isDirectory()) {
                listAll(fs, status.getPath());
            }
        }
    }

    public static void main(String[] args) {
        // Identity used for the HDFS connection.
        System.setProperty("HADOOP_USER_NAME", "Teacher");
        System.setProperty("HADOOP_USER_PASSWORD", "123456");
        // Point the client at the NameNode.
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://192.168.2.100:9000");
        FileSystem fs = null;
        try {
            fs = FileSystem.get(conf);
            listAll(fs, new Path("/"));
        } catch (IOException e) {
            e.printStackTrace();
        } finally {
            // Guard: fs is still null if FileSystem.get itself threw —
            // the original would have raised an NPE here.
            if (fs != null) {
                try {
                    fs.close();
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
        }
    }
}
2. 判断 HDFS 上 /input 目录是否存在,如果不存在则创建
package hdfs;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus ;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator ;
public class hdfs {

    /**
     * Ensures the HDFS directory {@code /input} exists, creating it
     * (including any missing parents) when absent.
     *
     * <p>BUG FIX: the original called {@code fs.create(new Path("/input"),
     * true)}, which creates (and overwrites!) a FILE named /input and then
     * wrote bytes into it — the requirement is an existence check followed
     * by directory creation, i.e. {@code exists} + {@code mkdirs}.
     */
    public static void main(String[] args) {
        // Identity used for the HDFS connection.
        System.setProperty("HADOOP_USER_NAME", "Teacher");
        System.setProperty("HADOOP_USER_PASSWORD", "123456");
        // Point the client at the NameNode.
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://192.168.2.100:9000");
        FileSystem fs = null;
        try {
            fs = FileSystem.get(conf);
            Path dir = new Path("/input");
            if (!fs.exists(dir)) {
                // mkdirs also creates missing parent directories.
                fs.mkdirs(dir);
            }
        } catch (IOException e) {
            e.printStackTrace();
        } finally {
            // Guard against fs == null when FileSystem.get failed.
            if (fs != null) {
                try {
                    fs.close();
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
        }
    }
}
3. 将字符串 "hello world" 输出到 HDFS 上 /input/hello.txt 文件中
package hdfs;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus ;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator ;
public class hdfs {

    /**
     * Writes the string "hello world" to the HDFS file /input/hello.txt,
     * overwriting any existing file.
     *
     * <p>Fixes vs. the original: the requirement asks for "hello world"
     * but the code wrote "hello World"; bytes are now encoded as UTF-8
     * explicitly (bare {@code getBytes()} uses the platform default
     * charset); the stream is closed with null guards so a failed
     * {@code FileSystem.get} no longer triggers an NPE in finally.
     */
    public static void main(String[] args) {
        // Identity used for the HDFS connection.
        System.setProperty("HADOOP_USER_NAME", "Teacher");
        System.setProperty("HADOOP_USER_PASSWORD", "123456");
        // Point the client at the NameNode.
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://192.168.2.100:9000");
        FileSystem fs = null;
        FSDataOutputStream out = null;
        try {
            fs = FileSystem.get(conf);
            // create(path, true) overwrites an existing hello.txt.
            out = fs.create(new Path("/input/hello.txt"), true);
            out.write("hello world".getBytes("UTF-8"));
        } catch (IOException e) {
            e.printStackTrace();
        } finally {
            try {
                if (out != null) {
                    out.close();
                }
                if (fs != null) {
                    fs.close();
                }
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }
}
4. 打印 HDFS 上 /input/hello.txt 中的内容
package hdfs;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus ;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator ;
public class hdfs {

    /**
     * Reads the HDFS file /input/hello.txt and prints its content to
     * standard output.
     *
     * <p>BUG FIX: the original finally block called {@code out.close()}
     * while {@code out} was always null — the resulting
     * NullPointerException is unchecked and escaped the
     * {@code catch (IOException)}, crashing main after the read. The
     * opened input stream {@code in} was the one that needed closing.
     * Also: {@code print} is used instead of {@code println} so a file
     * larger than the 1024-byte buffer is not broken up by spurious
     * newlines, and the bytes are decoded as UTF-8 explicitly.
     */
    public static void main(String[] args) {
        // Identity used for the HDFS connection.
        System.setProperty("HADOOP_USER_NAME", "Teacher");
        System.setProperty("HADOOP_USER_PASSWORD", "123456");
        // Point the client at the NameNode.
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://192.168.2.100:9000");
        FileSystem fs = null;
        FSDataInputStream in = null;
        try {
            fs = FileSystem.get(conf);
            in = fs.open(new Path("/input/hello.txt"));
            byte[] buffer = new byte[1024];
            int len;
            while ((len = in.read(buffer)) != -1) {
                // print, not println: chunk boundaries are not line
                // boundaries. (Note: a multi-byte UTF-8 char could still
                // straddle a chunk; fine for this ASCII exercise.)
                System.out.print(new String(buffer, 0, len, "UTF-8"));
            }
            System.out.println();
        } catch (IOException e) {
            e.printStackTrace();
        } finally {
            try {
                if (in != null) {
                    in.close();
                }
                if (fs != null) {
                    fs.close();
                }
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }
}
5. 将 HDFS 上 /input/hello.txt 文件下载到本地
package hdfs;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus ;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator ;
public class hdfs {

    /**
     * Downloads the HDFS file /input/hello.txt to the local directory
     * D:/test.
     *
     * <p>BUG FIX: the original used {@code moveToLocalFile}, which DELETES
     * the source from HDFS — a "download" must leave the remote file in
     * place, so {@code copyToLocalFile} is correct. The finally block
     * also NPE'd on the never-assigned {@code out}; unused stream locals
     * are removed and {@code fs} is null-guarded before close.
     */
    public static void main(String[] args) {
        // Identity used for the HDFS connection.
        System.setProperty("HADOOP_USER_NAME", "Teacher");
        System.setProperty("HADOOP_USER_PASSWORD", "123456");
        // Point the client at the NameNode.
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://192.168.2.100:9000");
        FileSystem fs = null;
        try {
            fs = FileSystem.get(conf);
            // Copy (do not move) the remote file to the local path.
            fs.copyToLocalFile(new Path("/input/hello.txt"), new Path("D:/test"));
        } catch (IOException e) {
            e.printStackTrace();
        } finally {
            if (fs != null) {
                try {
                    fs.close();
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
        }
    }
}
6. 删除 HDFS 上 /input/hello.txt 文件
package hdfs;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus ;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator ;
public class hdfs {

    /**
     * Deletes the HDFS file /input/hello.txt.
     *
     * <p>Fixes vs. the original: the finally block called
     * {@code out.close()} on a never-assigned (null) stream, throwing an
     * unchecked NullPointerException after the delete; the unused stream
     * locals and dead {@code listStatus("/hello")} call are removed, and
     * {@code fs} is null-guarded before close.
     */
    public static void main(String[] args) {
        // Identity used for the HDFS connection.
        System.setProperty("HADOOP_USER_NAME", "Teacher");
        System.setProperty("HADOOP_USER_PASSWORD", "123456");
        // Point the client at the NameNode.
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://192.168.2.100:9000");
        FileSystem fs = null;
        try {
            fs = FileSystem.get(conf);
            // delete(path, true): recursive flag is harmless for a file;
            // returns false (no exception) if the path does not exist.
            boolean deleted = fs.delete(new Path("/input/hello.txt"), true);
            System.out.println("deleted: " + deleted);
        } catch (IOException e) {
            e.printStackTrace();
        } finally {
            if (fs != null) {
                try {
                    fs.close();
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
        }
    }
}
7. 从本地将 rxzxedu.log 复制到 HDFS 上 /input 目录下
package hdfs;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus ;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator ;
public class hdfs {

    /**
     * Copies a local file into the HDFS directory /input, leaving the
     * local original in place.
     *
     * <p>NOTE(review): the exercise text names the local file
     * rxzxedu.log, but the code uploads D:/test/cptest.txt — confirm
     * which path is intended. Fixes vs. the original: the finally block
     * NPE'd on the never-assigned {@code out}; unused stream locals and
     * dead commented-out code are removed; {@code fs} is null-guarded.
     */
    public static void main(String[] args) {
        // Identity used for the HDFS connection.
        System.setProperty("HADOOP_USER_NAME", "Teacher");
        System.setProperty("HADOOP_USER_PASSWORD", "123456");
        // Point the client at the NameNode.
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://192.168.2.100:9000");
        FileSystem fs = null;
        try {
            fs = FileSystem.get(conf);
            // copyFromLocalFile keeps the local source file.
            fs.copyFromLocalFile(new Path("D:/test/cptest.txt"), new Path("/input"));
        } catch (IOException e) {
            e.printStackTrace();
        } finally {
            if (fs != null) {
                try {
                    fs.close();
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
        }
    }
}
8. 从本地将 rxzxedu.log 剪切到 HDFS 上 /input/tmp 目录下
package hdfs;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus ;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator ;
public class hdfs {

    /**
     * Moves (cut-and-paste) a local file into HDFS: the local source is
     * DELETED after a successful upload — this is what distinguishes
     * {@code moveFromLocalFile} from {@code copyFromLocalFile}.
     *
     * <p>NOTE(review): the exercise text says the destination is
     * /input/tmp and the source is rxzxedu.log, but the code uploads
     * D:/test/mvtest.txt to /input — confirm the intended paths. Fixes
     * vs. the original: the finally block NPE'd on the never-assigned
     * {@code out}; unused stream locals and dead commented-out code are
     * removed; {@code fs} is null-guarded before close.
     */
    public static void main(String[] args) {
        // Identity used for the HDFS connection.
        System.setProperty("HADOOP_USER_NAME", "Teacher");
        System.setProperty("HADOOP_USER_PASSWORD", "123456");
        // Point the client at the NameNode.
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://192.168.2.100:9000");
        FileSystem fs = null;
        try {
            fs = FileSystem.get(conf);
            // moveFromLocalFile removes the local source on success.
            fs.moveFromLocalFile(new Path("D:/test/mvtest.txt"), new Path("/input"));
        } catch (IOException e) {
            e.printStackTrace();
        } finally {
            if (fs != null) {
                try {
                    fs.close();
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
        }
    }
}
来自 “ ITPUB博客 ” ,链接:http://blog.itpub.net/31537584/viewspace-2285657/,如需转载,请注明出处,否则将追究法律责任。
转载于:http://blog.itpub.net/31537584/viewspace-2285657/