Import the jar packages that match your Hadoop distribution.
You can browse them here: https://repository.cloudera.com/artifactory/cloudera-repos/
Navigate the repository according to your Hadoop dependency's coordinates; for example, my Hadoop artifacts sit under the org/apache/... directory, so just drill down level by level until you find them.
<dependencies>
    <dependency>
        <groupId>org.apache.logging.log4j</groupId>
        <artifactId>log4j-core</artifactId>
        <version>2.8.2</version>
    </dependency>
    <!-- Hadoop client dependency -->
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-client</artifactId>
        <version>2.6.0-cdh5.15.0</version>
    </dependency>
</dependencies>
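Note that CDH builds such as 2.6.0-cdh5.15.0 are not published to Maven Central, so you will most likely also need to declare the Cloudera repository mentioned above in your pom.xml. A minimal sketch (the repository id is arbitrary):
<repositories>
    <repository>
        <!-- Cloudera repository hosting the CDH Hadoop artifacts -->
        <id>cloudera</id>
        <url>https://repository.cloudera.com/artifactory/cloudera-repos/</url>
    </repository>
</repositories>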
There are a lot of transitive jar dependencies, so the download will probably take around five minutes.
Code
Adjust the values mentioned in the inline comments (the HDFS URI, user name, and file paths) to match your own environment.
package com.jbit.hadoop;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.Progressable;

import java.io.*;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Arrays;
public class Test {
    public static Configuration configuration;
    public static FileSystem fs;

    static {
        try {
            configuration = new Configuration();
            // 1st argument: the HDFS URI
            // 2nd argument: client-side configuration
            // 3rd argument: the user name to connect as
            fs = FileSystem.get(new URI("hdfs://192.168.1.100:9000"), configuration, "root");
        } catch (IOException e) {
            e.printStackTrace();
        } catch (InterruptedException e) {
            e.printStackTrace();
        } catch (URISyntaxException e) {
            e.printStackTrace();
        }
    }
    public static void main(String[] args) throws IOException {
        //createMkdir();
        //createFile();
        //copyFile();
        check();
        //updateFile();
        //listFiles();
        //listFileRecursive();
        //getFileBlock();
        //delete();
    }
    /**
     * Create a directory
     */
    public static void createMkdir() throws IOException {
        // create the directory /g
        fs.mkdirs(new Path("/g"));
    }

    /**
     * Create a file
     * @throws IOException
     */
    public static void createFile() throws IOException {
        // create an empty file and close the stream so the file is finalized
        fs.create(new Path("/aa/a.txt")).close();
    }
    /**
     * Move or copy a local file into HDFS
     * @throws IOException
     */
    public static void copyFile() throws IOException {
        // move a local file into HDFS (the local file is removed)
        //fs.moveFromLocalFile(new Path("D:\\a.txt"), new Path("/g"));
        // copy a local file into HDFS (the local file is kept)
        //fs.copyFromLocalFile(new Path("D:\\a.txt"), new Path("/g"));
        // copy a large file with a progress indicator
        // open the local source file
        InputStream in = new BufferedInputStream(new FileInputStream(new File("D:\\2345Soft.zip")));
        // create the target file in HDFS; progress() is called as data is written
        FSDataOutputStream fsDataOutputStream = fs.create(new Path("/gggggg"), new Progressable() {
            public void progress() {
                System.out.print("#");
            }
        });
        // copy the bytes and close both streams when done
        IOUtils.copyBytes(in, fsDataOutputStream, 4096, true);
    }
    /**
     * List all files under a directory
     */
    public static void listFiles() throws IOException {
        FileStatus[] statuses = fs.listStatus(new Path("/"));
        for (FileStatus file : statuses) {
            String isDir = file.isDirectory() ? "directory" : "file";
            // permissions
            String permission = file.getPermission().toString();
            // replication factor
            short replication = file.getReplication();
            long length = file.getLen();
            String path = file.getPath().toString();
            System.out.println(isDir + "\t" + permission + "\t" + replication + "\t" + length + "\t" + path);
        }
    }
    /**
     * Another way to list a directory: recursively
     * @throws IOException
     */
    public static void listFileRecursive() throws IOException {
        RemoteIterator<LocatedFileStatus> files = fs.listFiles(new Path("/test"), true);
        while (files.hasNext()) {
            LocatedFileStatus file = files.next();
            String isDir = file.isDirectory() ? "directory" : "file";
            String permission = file.getPermission().toString();
            short replication = file.getReplication();
            long length = file.getLen();
            String path = file.getPath().toString();
            System.out.println(isDir + "\t" + permission
                    + "\t" + replication + "\t" + length
                    + "\t" + path
            );
        }
    }
    /**
     * Show the block locations of a file
     * @throws IOException
     */
    public static void getFileBlock() throws IOException {
        FileStatus fileStatus = fs.getFileStatus(new Path("/test/test2.txt"));
        BlockLocation[] blocks = fs.getFileBlockLocations(fileStatus, 0, fileStatus.getLen());
        for (BlockLocation block : blocks) {
            for (String name : block.getNames()) {
                // Arrays.toString is needed; otherwise the hosts array prints as an object reference
                System.out.println(name + " : " + block.getOffset() + " : " + block.getLength() + " : " + Arrays.toString(block.getHosts()));
            }
        }
    }
    /**
     * Print the contents of a file
     * @throws IOException
     */
    public static void check() throws IOException {
        FSDataInputStream open = fs.open(new Path("/g/b.txt"));
        IOUtils.copyBytes(open, System.out, 1024);
    }

    /**
     * Rename a file
     * @throws IOException
     */
    public static void updateFile() throws IOException {
        boolean rename = fs.rename(new Path("/g/a.txt"), new Path("/g/b.txt"));
        System.out.println(rename);
    }

    /**
     * Delete a file, or a directory (recursively)
     * @throws IOException
     */
    public static void delete() throws IOException {
        boolean result = fs.delete(new Path("/b"), true);
        System.out.println(result);
    }
}
If you run into the following error:
File /a1 could only be replicated to 0 nodes instead of minReplication (=1).
There are 1 datanode(s) running and 1 node(s) are excluded in this operation.
1. Edit the slaves configuration and set it to the machine's external IP address (if HDFS is already running, stop it first).
Check your own IP with ifconfig.
Edit the file:
vim /hadoop-2.6.0-cdh5.15.1/etc/hadoop/slaves
2. Delete /home/hadoop/app/tmp:
rm -rf /home/hadoop/app/tmp
This is the data directory path configured in hdfs-site.xml (see the example configuration after these steps).
3. Re-initialize (format) the NameNode:
./hdfs namenode -format
4. Restart HDFS:
./start-dfs.sh
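For reference, a minimal sketch of what that hdfs-site.xml might look like, assuming the data directories live under /home/hadoop/app/tmp as in step 2 (the property names are standard Hadoop ones; the paths and replication value are only an example and must match your own setup):
<configuration>
    <property>
        <name>dfs.replication</name>
        <value>1</value>
    </property>
    <!-- NameNode metadata directory; this is what gets removed in step 2 -->
    <property>
        <name>dfs.namenode.name.dir</name>
        <value>/home/hadoop/app/tmp/dfs/name</value>
    </property>
    <!-- DataNode block storage directory -->
    <property>
        <name>dfs.datanode.data.dir</name>
        <value>/home/hadoop/app/tmp/dfs/data</value>
    </property>
</configuration>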