环境:
hadoop 1.1.2
方法一、使用java.net.URL访问
eg.
import java.io.InputStream;
import java.net.URL;
import org.apache.hadoop.fs.FsUrlStreamHandlerFactory;
import org.apache.hadoop.io.IOUtils;
public class App1 {
    private static final String HDFS_PATH = "hdfs://vm1:9000/user/root/calvin/install.log";

    /**
     * Reads a file from HDFS via the plain {@link java.net.URL} API and
     * streams its contents to standard output.
     */
    public static void main(String[] args) throws Exception {
        // Register Hadoop's handler factory so URL can resolve hdfs:// schemes.
        // NOTE: setURLStreamHandlerFactory may be invoked at most once per JVM.
        URL.setURLStreamHandlerFactory(new FsUrlStreamHandlerFactory());
        InputStream hdfsStream = new URL(HDFS_PATH).openStream();
        // close=true tells copyBytes to close the input stream when finished.
        IOUtils.copyBytes(hdfsStream, System.out, 1024, true);
    }
}
方法二、使用hadoop.fs.FileSystem访问
eg.
import java.io.FileInputStream;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
public class App2 {
    private static final String HDFS_PATH = "hdfs://vm1:9000";
    private static final String DIR_PATH = "/d1";
    private static final String FILE_PATH = "/d1/f1";

    /**
     * Demonstrates basic HDFS operations through {@code FileSystem}:
     * create a directory, upload a local file, print it back to stdout,
     * then delete it.
     */
    public static void main(String[] args) throws Exception {
        // Obtain a FileSystem handle for the cluster at HDFS_PATH.
        final FileSystem fileSystem = FileSystem.get(new URI(HDFS_PATH), new Configuration());
        try {
            // Create the target directory (succeeds even if it already exists).
            fileSystem.mkdirs(new Path(DIR_PATH));

            // Upload: copy a local file into HDFS.
            // NOTE(review): hard-coded Windows path is for demo purposes only.
            FSDataOutputStream out = fileSystem.create(new Path(FILE_PATH));
            final FileInputStream in = new FileInputStream("c:/vcredist_x86.log");
            // close=true makes copyBytes close both streams when done.
            IOUtils.copyBytes(in, out, 1024, true);

            // Download: open the HDFS file and stream it to stdout.
            FSDataInputStream fin = fileSystem.open(new Path(FILE_PATH));
            IOUtils.copyBytes(fin, System.out, 1024, true);

            // Delete the uploaded file (recursive flag is a no-op for a file).
            fileSystem.delete(new Path(FILE_PATH), true);
        } finally {
            // Fix: the original never closed the FileSystem handle; release it
            // even when one of the operations above throws.
            fileSystem.close();
        }
    }
}