1. Install a ZooKeeper cluster
For detailed installation steps, see the earlier post: https://blog.csdn.net/liyyzz33/article/details/88689594
2. Install a Hadoop cluster
For detailed installation steps, see the earlier post: https://blog.csdn.net/liyyzz33/article/details/88397249
Starting from the clusters installed above, we only need to adjust the configuration.
3. Modify the Hadoop cluster configuration
Edit core-site.xml
vi core-site.xml
<configuration>
<!-- Point fs.defaultFS at the HA nameservice (myha01, defined in hdfs-site.xml) so clients can fail over between NameNodes -->
<property>
<name>fs.defaultFS</name>
<value>hdfs://myha01</value>
</property>
<!-- Hadoop temporary directory -->
<property>
<name>hadoop.tmp.dir</name>
<value>/data/hadoop/hddata/</value>
</property>
<property>
<name>hadoop.proxyuser.root.hosts</name>
<value>*</value>
</property>
<property>
<name>hadoop.proxyuser.root.groups</name>
<value>*</value>
</property>
<!-- ZooKeeper quorum addresses -->
<property>
<name>ha.zookeeper.quorum</name>
<value>node1:2181,node2:2181,node3:2181</value>
</property>
<!-- Timeout for Hadoop's ZooKeeper sessions -->
<property>
<name>ha.zookeeper.session-timeout.ms</name>
<value>1000</value>
<description>Session timeout in milliseconds</description>
</property>
</configuration>
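Once the file is saved, a quick sanity check is to ask Hadoop for the effective values (this assumes the Hadoop bin directory is on your PATH; otherwise call the scripts by their full path):
hdfs getconf -confKey fs.defaultFS
hdfs getconf -confKey ha.zookeeper.quorum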
Edit hdfs-site.xml
vi hdfs-site.xml
<configuration>
<property>
<name>dfs.replication</name>
<value>2</value>
</property>
<property>
<name>dfs.nameservices</name>
<value>myha01</value>
</property>
<!-- myha01 has two NameNodes: nn1 and nn2 -->
<property>
<name>dfs.ha.namenodes.myha01</name>
<value>nn1,nn2</value>
</property>
<!-- RPC address of nn1 -->
<property>
<name>dfs.namenode.rpc-address.myha01.nn1</name>
<value>node1:9000</value>
</property>
<!-- HTTP address of nn1 -->
<property>
<name>dfs.namenode.http-address.myha01.nn1</name>
<value>node1:50070</value>
</property>
<!-- RPC address of nn2 -->
<property>
<name>dfs.namenode.rpc-address.myha01.nn2</name>
<value>node2:9000</value>
</property>
<!-- HTTP address of nn2 -->
<property>
<name>dfs.namenode.http-address.myha01.nn2</name>
<value>node2:50070</value>
</property>
<!-- Shared storage location for the NameNode edit log, i.e. the JournalNode list.
The URL format is qjournal://host1:port1;host2:port2;host3:port3/journalId.
The journalId is conventionally the nameservice name; the default port is 8485. -->
<property>
<name>dfs.namenode.shared.edits.dir</name>
<value>qjournal://node1:8485;node2:8485;node3:8485/myha01</value>
</property>
<!-- Local directory where each JournalNode stores its data -->
<property>
<name>dfs.journalnode.edits.dir</name>
<value>/data/hadoop/data/journaldata</value>
</property>
<!-- Enable automatic NameNode failover -->
<property>
<name>dfs.ha.automatic-failover.enabled</name>
<value>true</value>
</property>
<!-- Failover proxy provider: how clients locate the active NameNode -->
<property>
<name>dfs.client.failover.proxy.provider.myha01</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<!-- Fencing methods; when listing more than one, separate them with newlines, one method per line -->
<property>
<name>dfs.ha.fencing.methods</name>
<value>
sshfence
shell(/bin/true)
</value>
</property>
<!-- sshfence requires passwordless SSH between the NameNodes -->
<property>
<name>dfs.ha.fencing.ssh.private-key-files</name>
<value>/home/hadoop/.ssh/id_rsa</value>
</property>
<!-- Timeout for the sshfence SSH connection -->
<property>
<name>dfs.ha.fencing.ssh.connect-timeout</name>
<value>30000</value>
</property>
<property>
<name>ha.failover-controller.cli-check.rpc-timeout.ms</name>
<value>60000</value>
</property>
</configuration>
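Note that sshfence only works if the two NameNode hosts can SSH to each other without a password, using the private key configured above (/home/hadoop/.ssh/id_rsa belongs to a hadoop user; point it at /root/.ssh/id_rsa instead if your daemons run as root, as the prompts later in this post suggest). A minimal sketch, assuming a hadoop user on both nodes:
# Run as the hadoop user on node1 and on node2
ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa    # skip if a key pair already exists
ssh-copy-id hadoop@node1
ssh-copy-id hadoop@node2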
Edit mapred-site.xml
vi mapred-site.xml
<configuration>
<!-- Run MapReduce on YARN -->
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<property>
<name>mapreduce.jobhistory.address</name>
<value>node1:10020</value>
</property>
<!-- Web address of the job history server -->
<property>
<name>mapreduce.jobhistory.webapp.address</name>
<value>node1:19888</value>
</property>
</configuration>
Edit yarn-site.xml
vi yarn-site.xml
<configuration>
<!-- Enable ResourceManager HA -->
<property>
<name>yarn.resourcemanager.ha.enabled</name>
<value>true</value>
</property>
<!-- Cluster id for the RM pair -->
<property>
<name>yarn.resourcemanager.cluster-id</name>
<value>yrc</value>
</property>
<!-- Logical ids of the two RMs -->
<property>
<name>yarn.resourcemanager.ha.rm-ids</name>
<value>rm1,rm2</value>
</property>
<!-- Hostname of each RM -->
<property>
<name>yarn.resourcemanager.hostname.rm1</name>
<value>node2</value>
</property>
<property>
<name>yarn.resourcemanager.hostname.rm2</name>
<value>node3</value>
</property>
<!-- ZooKeeper cluster addresses -->
<property>
<name>yarn.resourcemanager.zk-address</name>
<value>node1:2181,node2:2181,node3:2181</value>
</property>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.log-aggregation-enable</name>
<value>true</value>
</property>
<property>
<name>yarn.log-aggregation.retain-seconds</name>
<value>86400</value>
</property>
<!-- Enable RM state recovery -->
<property>
<name>yarn.resourcemanager.recovery.enabled</name>
<value>true</value>
</property>
<!-- Store ResourceManager state in the ZooKeeper cluster -->
<property>
<name>yarn.resourcemanager.store.class</name>
<value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
</property>
</configuration>
4. Redistribute the Hadoop installation to the other cluster nodes
scp -r /data/hadoop/hadoop-2.7.7 root@node2:/data/hadoop/
scp -r /data/hadoop/hadoop-2.7.7 root@node3:/data/hadoop/
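With more nodes, a small loop keeps the copies consistent (same source path as above):
for h in node2 node3; do
  scp -r /data/hadoop/hadoop-2.7.7 root@${h}:/data/hadoop/
done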
Initializing the Hadoop HA cluster
1. Start ZooKeeper
node1
[root@node1]# /data/hadoop/zookeeper/bin/zkServer.sh start
[root@node1]# jps
2674 Jps
2647 QuorumPeerMain
[root@node1 bin]# ./zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /data/hadoop/zookeeper/bin/../conf/zoo.cfg
Mode: follower
node2
[root@node2]# /data/hadoop/zookeeper/bin/zkServer.sh start
[root@node2]# jps
2674 Jps
2647 QuorumPeerMain
[root@node2 bin]# ./zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /data/hadoop/zookeeper/bin/../conf/zoo.cfg
Mode: follower
node3
[root@node3]# /data/hadoop/zookeeper/bin/zkServer.sh start
[root@node3]# jps
2674 Jps
2647 QuorumPeerMain
[root@node3 bin]# ./zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /data/hadoop/zookeeper/bin/../conf/zoo.cfg
Mode: leader
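Rather than logging in to each node, you can also poll the whole ensemble with ZooKeeper's stat four-letter command (assuming nc is installed; on newer ZooKeeper releases stat must be whitelisted via 4lw.commands.whitelist):
for h in node1 node2 node3; do
  echo -n "${h}: "
  echo stat | nc ${h} 2181 | grep Mode
done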
2. Start the JournalNode process on each of the configured JournalNode nodes
Per the earlier plan, mine run on node1, node2 and node3; the start commands are as follows
cd /data/hadoop/hadoop-2.7.7/sbin/
./hadoop-daemon.sh start journalnode
[root@node1 bin]# jps
2739 JournalNode
2788 Jps
2647 QuorumPeerMain
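The JournalNodes listen on port 8485, the port referenced by dfs.namenode.shared.edits.dir above, so a quick port check on each node confirms they are reachable:
ss -lnt | grep 8485    # or: netstat -lnt | grep 8485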
3. Format the NameNode (run on node1 only)
hdfs namenode -format
4. Copy the metadata generated on node1 to the other NameNode (node2)
scp -r /data/hadoop/hddata root@node2:/data/hadoop/
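Alternatively, Hadoop can do this copy for you: start the freshly formatted NameNode on node1 first, then bootstrap node2 from it (a sketch; the JournalNodes from step 2 must still be running):
# On node1
./hadoop-daemon.sh start namenode
# On node2: pull the formatted metadata over RPC instead of using scp
hdfs namenode -bootstrapStandby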
5. Format ZKFC
Important: this must be run on a NameNode node only
hdfs zkfc -formatZK
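If the format succeeds, ZooKeeper gains a znode for the nameservice; you can verify this from any node (the /hadoop-ha path is where ZKFC keeps its state by default):
/data/hadoop/zookeeper/bin/zkCli.sh -server node1:2181 ls /hadoop-ha
# Expected output includes: [myha01]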
Starting the cluster
1. Start HDFS
cd /data/hadoop/hadoop-2.7.7/sbin/
./start-dfs.sh
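Once start-dfs.sh finishes, jps on node1 should show roughly the following daemons (DataNode appears only if node1 is also listed as a worker in the base install):
jps
# Expect: NameNode, JournalNode, DFSZKFailoverController, QuorumPeerMain (and possibly DataNode)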
2. Start YARN
Run this on either of the two ResourceManager nodes (node2 or node3)
cd /data/hadoop/hadoop-2.7.7/sbin/
./start-yarn.sh
If the standby ResourceManager does not come up, start it manually on node2 or node3
./yarn-daemon.sh start resourcemanager
3. Start the MapReduce job history server (on node1, where mapred-site.xml places it)
./mr-jobhistory-daemon.sh start historyserver
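With HDFS, YARN and the history server up, a small example job makes a handy smoke test (this assumes the stock examples jar that ships with the 2.7.7 release):
cd /data/hadoop/hadoop-2.7.7
bin/hadoop jar share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.7.jar pi 2 10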
4. Check the status of each HA master
HDFS
[root@node1]# hdfs haadmin -getServiceState nn1
standby
[root@node1]# hdfs haadmin -getServiceState nn2
active
YARN
[root@node1]# yarn rmadmin -getServiceState rm1
standby
[root@node1]# yarn rmadmin -getServiceState rm2
active
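To exercise automatic failover, kill the active NameNode (nn2 on node2, per the output above) and confirm the standby takes over; <pid> is a placeholder for whatever jps reports:
# On node2, the current active NameNode
jps | grep NameNode
kill -9 <pid>
# A few seconds later nn1 should report active
hdfs haadmin -getServiceState nn1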
5. Check via the web UI
HDFS:
http://node1:50070 (nn1)
http://node2:50070 (nn2)
YARN:
http://node2:8088 (rm1) / http://node3:8088 (rm2); the standby node automatically redirects to the active one
MapReduce job history server web UI: http://node1:19888