HDFS的安全模式
安全模式是HDFS所处的一种特殊状态,在这种状态下,文件系统只接受只读数据请求,而不接受删除、修改等请求
- HDFS启动时,DataNode向NameNode注册汇报。汇报可用的block状态。
- NameNode判断有99.9%的block满足最小副本数(1,与设置的副本数无关)就会退出安全模式
- HDFS启动时,默认30秒处于安全期,过了安全期,才可以对集群进行操作
- 集群启动后每3秒发送一次心跳
hdfs dfsadmin -safemode
Usage: hdfs dfsadmin [-safemode enter | leave | get | wait]
HDFS与Java API开发
Windows下hadoop环境变量配置
1、下载 hadoop 并解压到没有中文、没有空格的目录下
2、然后在windows当中配置hadoop的环境变量
- HADOOP_HOME设置成对应的路径
3、然后将hadoop.dll文件拷贝到C:\Windows\System32
25
文件系统配置项的3种重载方式
Configuration conf = new Configuration();
conf.set("fs.defaultFS","hdfs://node01:8020");
FileSystem fileSystem = FileSystem.get(conf);
//第三个参数为用户
Configuration conf = new Configuration();
FileSystem fileSystem = FileSystem.get(new URI("hdfs://node01:8020"), conf, "hadoop");
Configuration conf = new Configuration();
FileSystem fileSystem = FileSystem.get(new URI("hdfs://node01:8020"), conf);
新建,重命名、删除,上传
@Test
public void mkdir() throws IOException {
//配置项
Configuration conf = new Configuration();
//设置需要连接的HDFS集群
conf.set("fs.defaultFS","hdfs://node01:8020");
//获得文件系统
FileSystem fileSystem = FileSystem.get(conf);
//调用方法
fileSystem.mkdirs(new Path("/0519/mkdir/rec.txt"));
//释放资源
fileSystem.close();
}
@Test
public void mkdir2() throws IOException, URISyntaxException, InterruptedException {
//配置项
Configuration conf = new Configuration();
//设置需要连接的HDFS集群
//conf.set("fs.defaultFS","hdfs://node01:8020");
//获得文件系统
FileSystem fileSystem = FileSystem.get(new URI("hdfs://node01:8020"), conf, "hadoop");
//调用方法
fileSystem.mkdirs(new Path("/0519/mkdir2"));
//释放资源
fileSystem.close();
}
@Test
public void uploadFile() throws IOException {
Configuration conf = new Configuration();
conf.set("fs.defaultFS","hdfs://node01:8020");
FileSystem fileSystem = FileSystem.get(conf);
fileSystem.copyFromLocalFile(new Path("file:///F:\\BigData\\data\\testData.txt"),
new Path("hdfs://node01:8020/0519"));
fileSystem.close();
}
@Test
public void downloadFile() throws IOException{
Configuration conf = new Configuration();
conf.set("fs.defaultFS","hdfs://node01:8020");
FileSystem fileSystem = FileSystem.get(conf);
fileSystem.copyToLocalFile(false,new Path("hdfs://node01:8020//0519")
,new Path("file:///F:\\BigData\\data"));
fileSystem.close();
}
@Test
public void deleteFile() throws IOException{
Configuration conf = new Configuration();
conf.set("fs.defaultFS","hdfs://node01:8020");
FileSystem fileSystem = FileSystem.get(conf);
fileSystem.delete(new Path("hdfs://node01:8020/0519"),true);
fileSystem.close();
}
@Test
public void re_nameFile() throws IOException{
Configuration conf = new Configuration();
conf.set("fs.defaultFS","hdfs://node01:8020");
FileSystem fileSystem = FileSystem.get(conf);
fileSystem.rename(new Path("hdfs://node01:8020//mkdir010603")
,new Path("hdfs://node01:8020//mkdir010602"));
fileSystem.close();
}
/*部分源码
public class LocatedFileStatus extends FileStatus {
private BlockLocation[] locations;
public class FileStatus implements Writable, Comparable {
private Path path;
private long length;
private boolean isdir;
private short block_replication;
private long blocksize;
private long modification_time;
private long access_time;
private FsPermission permission;
private String owner;
private String group;
private Path symlink;
*/
@Test
public void checkFileInfo() throws IOException{
Configuration conf = new Configuration();
conf.set("fs.defaultFS","hdfs://node01:8020");
FileSystem fileSystem = FileSystem.get(conf);
RemoteIterator<LocatedFileStatus> iterator = fileSystem.listFiles(new Path("/"), true);
while (iterator.hasNext()){
LocatedFileStatus status = iterator.next();
//file name
System.out.println(status.getPath().getName());
//length
System.out.println(status.getLen());
//permission
System.out.println(status.getPermission());
//group
System.out.println(status.getGroup());
//block infomation
BlockLocation[] blockLocations = status.getBlockLocations();
for (BlockLocation blockLocation :
blockLocations) {
// Datanode hostnames
String[] hosts = blockLocation.getHosts();
for (String host: hosts
) {
System.out.println(host);
}
}
}
fileSystem.close();
}
@Test
public void streamUploadFile() throws URISyntaxException, IOException {
//从本地输出到HDFS,调用java的FileInputStream输入流。HDFS的FSDataOutputStream的输出流
//get filesystem
Configuration conf = new Configuration();
FileSystem fileSystem = FileSystem.get(new URI("hdfs://node01:8020"), conf);
//create input iostream
FileInputStream fis = new FileInputStream(new File("F:\\BigData\\data\\testData.txt"));
//get output iostream
FSDataOutputStream fos = fileSystem.create(new Path("hdfs://node01:8020/0519/rec.txt"));
//connect stream
IOUtils.copy(fis,fos);
//close resource
IOUtils.closeQuietly(fos);
IOUtils.closeQuietly(fis);
fileSystem.close();
}
@Test
public void streamDownloadFile() throws URISyntaxException, IOException {
//从HDFS输出到本地,调用HDFS的FSDataInputStream出入流,调用java的FileOutputStream输出流
//会覆盖本地文件内容
Configuration conf = new Configuration();
FileSystem fileSystem = FileSystem.get(new URI("hdfs://node01/8020"), conf);
FSDataInputStream fis = fileSystem.open(new Path("hdfs://node01:8020/0519//rec.txt"));
FileOutputStream fos = new FileOutputStream(new File("F:\\BigData\\data\\testData.txt"));
IOUtils.copy(fis,fos);
IOUtils.closeQuietly(fis);
IOUtils.closeQuietly(fos);
fileSystem.close();
}
@Test
public void mergeFile() throws URISyntaxException, IOException, InterruptedException {
Configuration conf = new Configuration();
FileSystem fileSystem = FileSystem.get(new URI("hdfs://node01:8020"), conf, "hadoop");
//获取HDFS输出流
FSDataOutputStream fos = fileSystem.create(new Path("hdfs://node01:8020/mergedFile.xml"));
//获得本地文件系统LocalFileSystem
LocalFileSystem localFileSystem = FileSystem.getLocal(new Configuration(conf));
//读取本地的文件
FileStatus[] fileStatuses = localFileSystem.listStatus(new Path("F:\\BigData\\data"));
//读取所有本地小文件,写入到hdfs的大文件里去
for (FileStatus fileStatus :
fileStatuses) {
//获取每一个文件的路径
Path path = fileStatus.getPath();
System.out.println(path.getName());
//读取本地小文件 使用fileSystem把path理解成HDFS路径
//FSDataInputStream fis = fileSystem.open(path);
FSDataInputStream fis = localFileSystem.open(path);
IOUtils.copy(fis,fos);
IOUtils.closeQuietly(fis);
}
IOUtils.closeQuietly(fos);
localFileSystem.close();
fileSystem.close();
}