Hadoop和Zookeeper集群通用配置
NameNode节点进程:
namenode1(master:192.168.3.200): NameNode,DFSZKFailoverController,ResourceManager
namenode2(slave:192.168.3.201): NameNode,DFSZKFailoverController,ResourceManager
DataNode节点进程:
datanode1(node1:192.168.3.202): DataNode,NodeManager,JournalNode,QuorumPeerMain
datanode2(node2:192.168.3.203): DataNode,NodeManager,JournalNode,QuorumPeerMain
datanode3(node3:192.168.3.204): DataNode,NodeManager,JournalNode,QuorumPeerMain
配置域名解析
# vim /etc/hosts
192.168.3.200 master
192.168.3.201 slave
192.168.3.202 node1
192.168.3.203 node2
192.168.3.204 node3
创建Hadoop和Zookeeper文件目录
mkdir -pv /opt/hadoop/{zookeeper,tmp}
第一步:配置Zookeeper
配置文件(/usr/local/zookeeper/conf)
cp zoo_sample.cfg zoo.cfg
# vim zoo.cfg
编辑zoo.cfg配置文件
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
dataDir=/opt/hadoop/zookeeper
##
dataLogDir=/opt/hadoop/zookeeper
# the port at which the clients will connect
clientPort=2181
server.200=master:2888:3888
server.201=slave:2888:3888
server.202=node1:2888:3888
server.203=node2:2888:3888
server.204=node3:2888:3888
**********************************
在每台主机上的dataDir目录下创建myid文件,和zoo.cfg中server.id相对应。
master: echo 200 > /opt/hadoop/zookeeper/myid
slave: echo 201 > /opt/hadoop/zookeeper/myid
node1: echo 202 > /opt/hadoop/zookeeper/myid
node2: echo 203 > /opt/hadoop/zookeeper/myid
node3: echo 204 > /opt/hadoop/zookeeper/myid
**********************************
第二步:配置Hadoop
配置文件(/usr/local/hadoop/etc/hadoop)
# vim slaves
node1
node2
node3
# vim hadoop-env.sh
export JAVA_HOME=/usr/java/jdk1.8.0_91
1.# vim core-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<!-- 指定hdfs的nameservice为nameservice1 -->
<property>
<name>fs.defaultFS</name>
<value>hdfs://nameservice1</value>
</property>
<!-- 指定hadoop临时目录 -->
<!--会被dfs.namenode.name.dir目录覆盖-->
<property>
<name>hadoop.tmp.dir</name>
<value>/opt/hadoop/tmp</value>
</property>
<!-- 指定zookeeper地址 -->
<property>
<name>ha.zookeeper.quorum</name>
<value>node1:2181,node2:2181,node3:2181</value>
</property>
</configuration>
2.# vim mapred-site.xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
</configuration>
3. # vim hdfs-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<!--指定hdfs的nameservice为nameservice1,需要和core-site.xml中的保持一致 -->
<property>
<name>dfs.nameservices</name>
<value>nameservice1</value>
</property>
<!-- nameservice1下面有两个NameNode,分别是namenode1,namenode2 -->
<property>
<name>dfs.ha.namenodes.nameservice1</name>
<value>namenode1,namenode2</value>
</property>
<!-- namenode1的RPC通信地址 -->
<property>
<name>dfs.namenode.rpc-address.nameservice1.namenode1</name>
<value>master:9000</value>
</property>
<!-- namenode1的http通信地址 -->
<property>
<name>dfs.namenode.http-address.nameservice1.namenode1</name>
<value>master:50070</value>
</property>
<!-- namenode2的RPC通信地址 -->
<property>
<name>dfs.namenode.rpc-address.nameservice1.namenode2</name>
<value>slave:9000</value>
</property>
<!-- namenode2的http通信地址 -->
<property>
<name>dfs.namenode.http-address.nameservice1.namenode2</name>
<value>slave:50070</value>
</property>
<!-- 指定NameNode的元数据在JournalNode上的存放位置 -->
<property>
<name>dfs.namenode.shared.edits.dir</name>
<value>qjournal://node1:8485;node2:8485;node3:8485/nameservice1</value>
</property>
<!-- 指定JournalNode在本地磁盘存放数据的位置 -->
<property>
<name>dfs.journalnode.edits.dir</name>
<value>/opt/hadoop/journal</value>
</property>
<!-- 开启NameNode失败自动切换 -->
<property>
<name>dfs.ha.automatic-failover.enabled</name>
<value>true</value>
</property>
<!-- 配置失败自动切换实现方式 -->
<property>
<name>dfs.client.failover.proxy.provider.nameservice1</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<!-- 配置隔离机制方法,多个机制用换行分割,即每个机制占用一行-->
<property>
<name>dfs.ha.fencing.methods</name>
<value>
sshfence
shell(/bin/true)
</value>
</property>
<!-- 使用sshfence隔离机制时需要ssh免登陆 -->
<property>
<name>dfs.ha.fencing.ssh.private-key-files</name>
<value>/root/.ssh/id_rsa</value>
</property>
<!-- 配置sshfence隔离机制超时时间 -->
<property>
<name>dfs.ha.fencing.ssh.connect-timeout</name>
<value>30000</value>
</property>
<!-- 配置Hadoop允许打开的最大文件数 -->
<property>
<name>dfs.datanode.max.transfer.threads</name>
<value>409600</value>
</property>
<!-- 指定数据冗余份数 -->
<property>
<name>dfs.replication</name>
<value>3</value>
</property>
<!-- 指定datanode数据存储地址 -->
<property>
<name>dfs.datanode.data.dir</name>
<value>/opt/hadoop/data</value>
</property>
<!-- 指定namenode名称空间存储地址 -->
<property>
<name>dfs.namenode.name.dir</name>
<value>/opt/hadoop/name</value>
</property>
</configuration>
4. # vim yarn-site.xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<!-- 开启RM高可用 -->
<property>
<name>yarn.resourcemanager.ha.enabled</name>
<value>true</value>
</property>
<!--开启故障自动切换-->
<property>
<name>yarn.resourcemanager.ha.automatic-failover.enabled</name>
<value>true</value>
</property>
<!--开启自动恢复功能-->
<property>
<name>yarn.resourcemanager.recovery.enabled</name>
<value>true</value>
</property>
<!--resouceManager失联后重新连接时间-->
<property>
<name>yarn.resourcemanager.connect.retry-interval.ms</name>
<value>2000</value>
</property>
<!-- 配置ResourceManager-->
<!-- 指定RM的cluster id -->
<property>
<name>yarn.resourcemanager.cluster-id</name>
<value>cluster1</value>
</property>
<!-- 指定RM的名字 -->
<property>
<name>yarn.resourcemanager.ha.rm-ids</name>
<value>rm1,rm2</value>
</property>
<!-- 分别指定RM的地址 -->
<property>
<name>yarn.resourcemanager.hostname.rm1</name>
<value>master</value>
</property>
<property>
<name>yarn.resourcemanager.hostname.rm2</name>
<value>slave</value>
</property>
<!-- 指定zk集群地址 -->
<property>
<name>yarn.resourcemanager.zk-address</name>
<value>node1:2181,node2:2181,node3:2181</value>
</property>
<!--配置rm1-->
<property>
<name>yarn.resourcemanager.webapp.address.rm1</name>
<value>master:8088</value>
</property>
<!--配置rm2-->
<property>
<name>yarn.resourcemanager.webapp.address.rm2</name>
<value>slave:8088</value>
</property>
<!-- 配置NodeManager-->
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
<value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
<!-- 配置日志-->
<property>
<name>yarn.log-aggregation-enable</name>
<value>true</value>
</property>
</configuration>
第三步:启动Zookeeper集群:
只在DataNode节点上启动(node1,node2,node3)
zkServer.sh start
//查看启动状态
zkServer.sh status
JMX enabled by default
Using config:/usr/local/zookeeper/bin/../conf/zoo.cfg
Mode:follower
第四步:启动journalnode
只在DataNode节点上启动(node1,node2,node3)。
注意:只需启动一次,之后在NameNode上启动start-dfs.sh 会自动启动。
hadoop-daemon.sh start journalnode
第五步:格式化HDFS:(在master上执行)
hdfs namenode -format
将master上的tmp目录拷贝到Slave节点(两个namenode必须同步)
scp -r /opt/hadoop/tmp slave:/opt/hadoop/
第六步:格式化ZKFC(在master上执行)
hdfs zkfc -formatZK
第七步:启动HDFS(在master上执行)
start-dfs.sh
第八步:启动Yarn(在master上执行)
start-yarn.sh
第九步:单独启动:Slave
yarn-daemon.sh start resourcemanager
NameNode由standby转化为active
hdfs haadmin -transitionToActive --forcemanual namenode1