1. Prerequisite: a working four-node non-HA cluster. The ZooKeeper instance on the fourth node can be run in observer mode (see the sketch below).
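For reference, a minimal sketch of the observer setup, assuming ZooKeeper 3.4.x and that the observer is master with server id 4 (both are assumptions; the HDFS quorum configured below lists only slave1-slave3 as voters, so master is the natural candidate). On the observer node, add to zoo.cfg:
peerType=observer
and in the zoo.cfg of every node, tag that server's entry accordingly:
server.4=master:2888:3888:observer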
2. On top of that, modify core-site.xml to:
<configuration>
<!-- Original (non-HA) cluster settings -->
<!-- HA is enabled here, so fs.default.name must be replaced with fs.defaultFS, pointing at the nameservice -->
<property>
<name>fs.defaultFS</name>
<value>hdfs://ns</value>
</property>
<!-- You may also change the path where the HA data is stored; create the directory yourself -->
<property>
<name>hadoop.tmp.dir</name>
<value>/usr/hadoop/hadoop-2.7.3/hdfs/tmp</value>
</property>
<property>
<name>io.file.buffer.size</name>
<value>131072</value>
</property>
<property>
<name>fs.checkpoint.period</name>
<value>60</value>
</property>
<property>
<name>fs.checkpoint.size</name>
<value>67108864</value>
</property>
<!-- End of the original settings (modified above); the property below is newly added for HA -->
<property>
<name>ha.zookeeper.quorum</name>
<value>slave1:2181,slave2:2181,slave3:2181</value>
</property>
</configuration>
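A quick sanity check that needs no running daemons: ask Hadoop which default filesystem it resolves from the local configuration.
hdfs getconf -confKey fs.defaultFS
This should print hdfs://ns.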
3. Modify hdfs-site.xml to:
<configuration>
<!-- Original (non-HA) settings -->
<property>
<name>dfs.replication</name>
<value>2</value>
</property>
<property>
<name>dfs.namenode.name.dir</name>
<value>file:/usr/hadoop/hadoop-2.7.3/hdfs/name</value>
<final>true</final>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>file:/usr/hadoop/hadoop-2.7.3/hdfs/data</value>
<final>true</final>
</property>
<property>
<name>dfs.webhdfs.enabled</name>
<value>true</value>
</property>
<!-- Carried over from the non-HA config; in HA mode the per-NameNode http-addresses below take precedence -->
<property>
<name>dfs.namenode.http-address</name>
<value>master:50070</value>
</property>
<property>
<name>dfs.permissions.enabled</name>
<value>false</value>
</property>
<!-- End of the original settings -->
<!-- Additional HA settings -->
<property>
<name>dfs.nameservices</name>
<value>ns</value>
</property>
<property>
<name>dfs.ha.namenodes.ns</name>
<value>nn1,nn2</value>
</property>
<property>
<name>dfs.namenode.rpc-address.ns.nn1</name>
<value>master:9000</value>
</property>
<property>
<name>dfs.namenode.rpc-address.ns.nn2</name>
<value>slave1:9000</value>
</property>
<property>
<name>dfs.namenode.http-address.ns.nn1</name>
<value>master:50070</value>
</property>
<property>
<name>dfs.namenode.http-address.ns.nn2</name>
<value>slave1:50070</value>
</property>
<property>
<name>dfs.namenode.shared.edits.dir</name>
<value>qjournal://master:8485;slave1:8485/ns</value>
</property>
<property>
<name>dfs.journalnode.edits.dir</name>
<value>/usr/hadoop/hadoop-2.7.3/journaldata</value>
</property>
<property>
<name>dfs.ha.automatic-failover.enabled</name>
<value>true</value>
</property>
<property>
<name>dfs.client.failover.proxy.provider.ns</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<property>
<name>dfs.ha.fencing.methods</name>
<value>sshfence</value>
</property>
<!-- Point this at whichever key you actually use (RSA or DSA) -->
<property>
<name>dfs.ha.fencing.ssh.private-key-files</name>
<value>/root/.ssh/id_rsa</value>
</property>
<!-- End of the HA settings -->
</configuration>
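sshfence works by SSH-ing into the other NameNode host as root with the key configured above, so passwordless root SSH between master and slave1 with exactly that key is assumed to be in place. A quick check from master (and the mirror image from slave1):
ssh -i /root/.ssh/id_rsa root@slave1 hostname
If it prints the hostname without asking for a password, fencing can log in.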
4. Modify yarn-site.xml to:
<configuration>
<!-- Compared with the non-HA cluster, everything except the reducer shuffle settings changes -->
<!-- Enable ResourceManager HA -->
<property>
<name>yarn.resourcemanager.ha.enabled</name>
<value>true</value>
</property>
<!-- Same idea as the basic cluster settings, except that two ResourceManagers must be configured -->
<!-- Cluster id for YARN HA -->
<property>
<name>yarn.resourcemanager.cluster-id</name>
<value>jyarn</value>
</property>
<!-- Logical ids of the two ResourceManagers -->
<property>
<name>yarn.resourcemanager.ha.rm-ids</name>
<value>rm1,rm2</value>
</property>
<!-- Hosts for rm1 and rm2 -->
<property>
<name>yarn.resourcemanager.hostname.rm1</name>
<value>slave2</value>
</property>
<property>
<name>yarn.resourcemanager.hostname.rm2</name>
<value>slave3</value>
</property>
<!-- ZooKeeper quorum address -->
<property>
<name>yarn.resourcemanager.zk-address</name>
<value>master:2181,slave1:2181,slave2:2181,slave3:2181</value>
</property>
<!-- ZooKeeper path under which the RM state is stored -->
<property>
<name>yarn.resourcemanager.zk-state-store.parent-path</name>
<value>/rmstore</value>
</property>
<!-- How reducers fetch data (shuffle service) -->
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
<value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
<!-- ResourceManager ports -->
<property>
<name>yarn.resourcemanager.address.rm1</name>
<value>slave2:18040</value>
</property>
<property>
<name>yarn.resourcemanager.scheduler.address.rm1</name>
<value>slave2:18030</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.address.rm1</name>
<value>slave2:18088</value>
</property>
<property>
<name>yarn.resourcemanager.resource-tracker.address.rm1</name>
<value>slave2:18025</value>
</property>
<property>
<name>yarn.resourcemanager.admin.address.rm1</name>
<value>slave2:18141</value>
</property>
<property>
<name>yarn.resourcemanager.address.rm2</name>
<value>slave3:18040</value>
</property>
<property>
<name>yarn.resourcemanager.scheduler.address.rm2</name>
<value>slave3:18030</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.address.rm2</name>
<value>slave3:18088</value>
</property>
<property>
<name>yarn.resourcemanager.resource-tracker.address.rm2</name>
<value>slave3:18025</value>
</property>
<property>
<name>yarn.resourcemanager.admin.address.rm2</name>
<value>slave3:18141</value>
</property>
<!-- Enable automatic failover -->
<property>
<name>yarn.resourcemanager.ha.automatic-failover.enabled</name>
<value>true</value>
</property>
<!-- Enable ResourceManager restart (state recovery) -->
<property>
<name>yarn.resourcemanager.recovery.enabled</name>
<value>true</value>
</property>
<!-- Store the ResourceManager state in ZooKeeper -->
<property>
<name>yarn.resourcemanager.store.class</name>
<value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
</property>
<!-- Enable NodeManager restart (state recovery) -->
<property>
<name>yarn.nodemanager.recovery.enabled</name>
<value>true</value>
</property>
<!-- The settings below are optional -->
<!-- Fixed NodeManager IPC port -->
<property>
<name>yarn.nodemanager.address</name>
<value>0.0.0.0:45454</value>
</property>
<!-- Enable log aggregation for the YARN cluster -->
<property>
<name>yarn.log-aggregation-enable</name>
<value>true</value>
</property>
<!-- Maximum retention time for aggregated logs -->
<property>
<name>yarn.log-aggregation.retain-seconds</name>
<!-- 1 day -->
<value>86400</value>
</property>
</configuration>
Distribute the configuration (all three files to all three other nodes), for example:
scp -r core-site.xml root@slave1:/usr/hadoop/hadoop-2.7.3/etc/hadoop/
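A short loop saves repetition; this is only a sketch and assumes the three files are in the current directory and the Hadoop directory layout is identical on every node:
for h in slave1 slave2 slave3; do
  scp core-site.xml hdfs-site.xml yarn-site.xml root@$h:/usr/hadoop/hadoop-2.7.3/etc/hadoop/
done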
5. Run yum -y install psmisc, at least on the two NameNode hosts (master and slave1); it provides fuser, which sshfence needs, so that automatic failover works correctly.
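A quick way to confirm the dependency is present on both NameNode hosts:
which fuser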
6. Start ZooKeeper.
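Assuming the ZooKeeper scripts are on the PATH (the install path is an assumption), run on each ZooKeeper node:
zkServer.sh start
zkServer.sh status
status should report one leader and the rest followers, with the node from step 1 reporting Mode: observer if you configured it that way.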
7. Start the JournalNode processes and initialize the NameNodes.
On the two JournalNode nodes, master and slave1, start the journalnode process:
hadoop-daemon.sh start journalnode
On master, format the filesystem:
hadoop namenode -format
On master, start the namenode process:
hadoop-daemon.sh start namenode
On slave1, sync the NameNode metadata from master:
hadoop namenode -bootstrapStandby
On master or slave1, format the ZKFC state in ZooKeeper:
hdfs zkfc -formatZK
On master and slave1, stop the journalnode process:
hadoop-daemon.sh stop journalnode
On master and slave1, stop the namenode process:
hadoop-daemon.sh stop namenode
8. Start the services.
On master, run start-dfs.sh
On slave2, run start-yarn.sh
On slave3, run yarn-daemon.sh start resourcemanager
On master, run mr-jobhistory-daemon.sh start historyserver
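Before moving on to the web checks, running jps on each node gives a quick overview (exact DataNode and NodeManager placement depends on your slaves file, so this is only a rough expectation): master and slave1 should show NameNode, JournalNode and DFSZKFailoverController (plus JobHistoryServer on master), while slave2 and slave3 each show a ResourceManager.
jps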
9. In a browser, open port 50070 on master and on slave1 (http://xxx.xxx.xxx.xxx:50070); the pages should show one NameNode as active and the other as standby.
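The same check can be made from the command line, using the NameNode ids defined in hdfs-site.xml:
hdfs haadmin -getServiceState nn1
hdfs haadmin -getServiceState nn2
One should report active and the other standby.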
10. In a browser, open port 18088 on slave2 and on slave3 (the ResourceManager web UIs).
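Likewise for the ResourceManagers, using the rm ids from yarn-site.xml:
yarn rmadmin -getServiceState rm1
yarn rmadmin -getServiceState rm2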
11. In a browser, open port 19888 on master (the JobHistory server web UI).
12. On the node whose page in step 9 showed active, run
hadoop-daemon.sh stop namenode
Refresh that node's page on port 50070: it is no longer reachable. Then open port 50070 on the node that was standby in step 9: its state has changed from standby to active.
On the node where the namenode process was just stopped, run
hadoop-daemon.sh start namenode
Once the namenode process is back, its page on port 50070 is reachable again and shows the state standby, so failover works.
13. If you access Hive through a Hive client, make sure the NameNode on the node where the Hive client runs is the active one.