Hadoop 版本2.7.0
使用 DFSClient 实现 Hadoop 上传文件功能,采用输入输出流实现。
另外说明:代码运行环境中需要配置 /etc/hosts
添加
10.11.12.45 master
否则无法解析 hdfs://master:9000,会报错!
package com.feng.test.hdfs;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.InetSocketAddress;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSClient;
public class UploadFile {

    /** Copy buffer size for the stream-to-stream transfer. */
    private static final int BUFFER_SIZE = 4096;

    /**
     * Uploads a local file to HDFS through a raw {@link DFSClient}, using plain
     * input/output streams.
     *
     * <p>Usage: {@code UploadFile [namenodeHost] [namenodePort] [localSrc] [hdfsDest]}.
     * Any omitted argument falls back to the original hard-coded default, so
     * running with no arguments behaves exactly as before.
     *
     * <p>NOTE: the machine running this must be able to resolve the NameNode
     * hostname (e.g. an /etc/hosts entry {@code 10.11.12.45 master}); otherwise
     * the {@code hdfs://master:9000} URI cannot be resolved.
     *
     * @param args optional overrides: NameNode host, NameNode port,
     *             local source path, HDFS destination path
     */
    public static void main(String[] args) {
        String hostname = args.length > 0 ? args[0] : "master";
        int port = args.length > 1 ? Integer.parseInt(args[1]) : 9000;
        String fsrc = args.length > 2 ? args[2]
                : "/Users/Feng/Development/hadoopworkspace/TestHadoop/files/83.txt";
        String destFile = args.length > 3 ? args[3] : "/feng/83.txt";

        File srcFile = new File(fsrc);
        if (!srcFile.exists()) {
            // The original code printed "success end!" even when the source was
            // missing (nothing was uploaded); fail loudly instead.
            System.err.println("source file not found: " + fsrc);
            System.exit(1);
        }

        Configuration config = new Configuration();
        // Legacy client identity (user,group list); kept for parity with the
        // original setup.
        config.set("hadoop.job.ugi", "feng,111111");
        // "fs.defaultFS" supersedes the deprecated "fs.default.name" key.
        // Built from the same host/port as the socket address below so the
        // two can never disagree.
        config.set("fs.defaultFS", "hdfs://" + hostname + ":" + port);
        config.set("dfs.replication", "1");
        config.set("mapred.job.tracker", hostname + ":9001");

        // try-with-resources guarantees the client and both streams are closed
        // (in reverse order) even if the copy fails part-way through.
        try (DFSClient client = new DFSClient(new InetSocketAddress(hostname, port), config);
             OutputStream out = client.create(destFile, true /* overwrite */);
             InputStream in = new FileInputStream(srcFile)) {
            copy(in, out);
            System.out.println("success end!");
        } catch (IOException e) {
            e.printStackTrace();
            // Non-zero exit so scripts can detect the failed upload.
            System.exit(1);
        }
    }

    /**
     * Copies every byte from {@code in} to {@code out} and flushes the sink.
     * Neither stream is closed here; the caller owns both.
     *
     * @throws IOException if reading or writing fails
     */
    private static void copy(InputStream in, OutputStream out) throws IOException {
        byte[] buff = new byte[BUFFER_SIZE];
        int readCount;
        while ((readCount = in.read(buff)) != -1) {
            out.write(buff, 0, readCount);
        }
        out.flush();
    }
}