package com.bigdata.util
import org.apache.hadoop.conf._
import org.apache.hadoop.fs._
import org.apache.log4j._
import scala.collection.mutable.ArrayBuffer
/**
 * Utility for talking to an HA-enabled HDFS cluster.
 *
 * The embedded configuration targets nameservice `nameservice1`, backed by
 * two NameNodes (`namenode51` on bigdata1, `namenode56` on bigdata2) with
 * client-side automatic failover. NOTE(review): these endpoints are
 * hard-coded here rather than loaded from `spark_config.properties` —
 * confirm whether they should come from `prop` instead.
 */
class HDFSUtil {
  def log: Logger = Logger.getLogger(this.getClass)

  // Loaded for callers/configuration; not read by the code in this class.
  val prop = new PropertiesUtil("spark_config.properties")

  val conf = new Configuration()
  // Default filesystem URI: the HA nameservice, not a single NameNode host.
  conf.set("fs.defaultFS", "hdfs://nameservice1")
  // Declare the logical nameservice name.
  conf.set("dfs.nameservices", "nameservice1")
  // The two NameNodes participating in the HA pair.
  conf.set("dfs.ha.namenodes.nameservice1", "namenode51,namenode56")
  // RPC endpoint of namenode51.
  conf.set("dfs.namenode.rpc-address.nameservice1.namenode51", "bigdata1:8020")
  // RPC endpoint of namenode56.
  conf.set("dfs.namenode.rpc-address.nameservice1.namenode56", "bigdata2:8020")
  // Client-side failover: pick whichever NameNode is currently active.
  conf.set("dfs.client.failover.proxy.provider.nameservice1", "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider")

  // Shared FileSystem handle for this instance (created eagerly at construction).
  val fs = FileSystem.get(conf)

  /**
   * Creates (or overwrites) a file on HDFS and writes `content` to it.
   *
   * The output stream is closed in a `finally` block so it is not leaked
   * if `write`/`flush` throws (the original code leaked it on failure).
   *
   * @param fileName absolute or working-directory-relative HDFS path
   * @param content  text to write; encoded with the JVM default charset,
   *                 matching the original `String.getBytes()` behavior
   */
  def createFile(fileName: String, content: String): Unit = {
    val file = new Path(fileName)
    val output = fs.create(file)
    try {
      output.write(content.getBytes())
      output.flush()
    } finally {
      // Always release the HDFS lease/stream, even when the write fails.
      output.close()
    }
    log.info("create success,file:" + fileName)
  }
}