一、[Hadoop-HA-HDFS-HA搭建]:HDFS-HA集群配置
### --- HDFS-HA集群配置地址
https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/HDFSHighAvailabilityWithQJM.html
二、环境准备
### --- 环境准备流程
~~~ 修改IP
~~~ 修改主机名及主机名和IP地址的映射
~~~ 关闭防火墙
~~~ ssh免密登录
~~~ 安装JDK,配置环境变量等
三、集群规划
linux121 | linux122 | linux123 |
NameNode | NameNode | |
JournalNode | JournalNode | JournalNode |
DataNode | DataNode | DataNode |
ZK | ZK | ZK |
ResourceManager | | |
NodeManager | NodeManager | NodeManager |
四、启动Zookeeper集群
### --- 启动zookeeper集群
[root@linux121 bin]# zk.sh start
### --- 查看状态
[root@linux121 bin]# zk.sh status
Using config: /opt/yanqi/servers/zookeeper-3.4.14/bin/../conf/zoo.cfg
Mode: follower
Using config: /opt/yanqi/servers/zookeeper-3.4.14/bin/../conf/zoo.cfg
Mode: follower
Using config: /opt/yanqi/servers/zookeeper-3.4.14/bin/../conf/zoo.cfg
Mode: leader
### --- zookeeper进程
[root@linux121 bin]# jps
9561 QuorumPeerMain
五、配置HDFS-HA集群
### --- 停止原先HDFS集群
[root@linux121 ~]# stop-dfs.sh
### --- 在所有节点,/opt/yanqi/servers目录下创建一个ha文件夹
[root@linux121 ~]# mkdir /opt/yanqi/servers/ha
### --- 将/opt/yanqi/servers/目录下的 hadoop-2.9.2复制到ha目录下
[root@linux121 servers]# pwd
/opt/yanqi/servers
[root@linux121 servers]# cp -r hadoop-2.9.2 ha
### --- 删除原集群data目录
[root@linux121 ~]# rm -rf /opt/yanqi/servers/ha/hadoop-2.9.2/data
### --- 配置hdfs-site.xml
[root@linux121 ~]# vim /opt/yanqi/servers/ha/hadoop-2.9.2/etc/hadoop/hdfs-site.xml
<configuration>
<property>
<name>dfs.nameservices</name>
<value>yanqicluster</value>
</property>
<property>
<name>dfs.ha.namenodes.yanqicluster</name>
<value>nn1,nn2</value>
</property>
<property>
<name>dfs.namenode.rpc-address.yanqicluster.nn1</name>
<value>linux121:9000</value>
</property>
<property>
<name>dfs.namenode.rpc-address.yanqicluster.nn2</name>
<value>linux122:9000</value>
</property>
<property>
<name>dfs.namenode.http-address.yanqicluster.nn1</name>
<value>linux121:50070</value>
</property>
<property>
<name>dfs.namenode.http-address.yanqicluster.nn2</name>
<value>linux122:50070</value>
</property>
<property>
<name>dfs.namenode.shared.edits.dir</name>
<value>qjournal://linux121:8485;linux122:8485;linux123:8485/yanqicluster</value>
</property>
<property>
<name>dfs.client.failover.proxy.provider.yanqicluster</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<property>
<name>dfs.ha.fencing.methods</name>
<value>sshfence</value>
</property>
<property>
<name>dfs.ha.fencing.ssh.private-key-files</name>
<value>/root/.ssh/id_rsa</value>
</property>
<property>
<name>dfs.journalnode.edits.dir</name>
<value>/opt/journalnode</value>
</property>
<property>
<name>dfs.ha.automatic-failover.enabled</name>
<value>true</value>
</property>
</configuration>
### --- 配置core-site.xml
[root@linux121 ~]# vim /opt/yanqi/servers/ha/hadoop-2.9.2/etc/hadoop/core-site.xml
<property>
<name>fs.defaultFS</name>
<value>hdfs://yanqicluster</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>/opt/yanqi/servers/ha/hadoop-2.9.2/data/tmp</value>
</property>
<property>
<name>ha.zookeeper.quorum</name>
<value>linux121:2181,linux122:2181,linux123:2181</value>
</property>
### --- 拷贝配置好的hadoop环境到其他节点
[root@linux121 ~]# rsync-script /opt/yanqi/servers/ha/hadoop-2.9.2/