I. HA cluster plan (5 nodes)
server1 namenode resourcemanager zkfc
server2 namenode resourcemanager zkfc
server3 datanode nodemanager zookeeper journalnode
server4 datanode nodemanager zookeeper journalnode
server5 datanode nodemanager zookeeper journalnode
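All five hostnames must resolve on every node, e.g. via /etc/hosts. A minimal sketch; the IPs below are placeholders, substitute your own network:
192.168.1.101 server1
192.168.1.102 server2
192.168.1.103 server3
192.168.1.104 server4
192.168.1.105 server5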
II. Setting up the ZooKeeper cluster
1. Configure zoo.cfg
dataDir=/opt/modules/zookeeper-3.4.5/zkdata
server.1=server3:2888:3888
server.2=server4:2888:3888
server.3=server5:2888:3888
2. Create the zkdata directory, and inside it create a myid file whose content is this server's ZooKeeper ID, for example:
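A minimal sketch, assuming the dataDir above; the ID must match the server.N entry for that host in zoo.cfg:
$ mkdir -p /opt/modules/zookeeper-3.4.5/zkdata
$ echo 1 > /opt/modules/zookeeper-3.4.5/zkdata/myid ## on server3; use 2 on server4 and 3 on server5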
3. Start zkServer on all three nodes
$ bin/zkServer.sh start ## start the service
$ bin/zkServer.sh status ## check status
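With all three nodes up, status should report Mode: leader on exactly one node and Mode: follower on the other two; anything else usually means myid and the server.N entries in zoo.cfg disagree.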
III. Hadoop cluster configuration
1. hadoop-env.sh
export JAVA_HOME=/opt/modules/jdk1.8.0_151
export HADOOP_PID_DIR=/opt/modules/hadoop-2.7.3/pid
2. mapred-env.sh
export JAVA_HOME=/opt/modules/jdk1.8.0_151
3. yarn-env.sh
export JAVA_HOME=/opt/modules/jdk1.8.0_151
export YARN_PID_DIR=/opt/modules/hadoop-2.7.3/pid
4. core-site.xml
<configuration>
<!-- the default filesystem: HDFS nameservice ns1 -->
<property>
<name>fs.defaultFS</name>
<value>hdfs://ns1</value>
</property>
<!-- base directory for Hadoop temporary and data files -->
<property>
<name>hadoop.tmp.dir</name>
<value>/opt/modules/hadoop-2.7.3/data</value>
</property>
<!-- ZooKeeper quorum used for NameNode HA (ZKFC) -->
<property>
<name>ha.zookeeper.quorum</name>
<value>server3:2181,server4:2181,server5:2181</value>
</property>
</configuration>
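Once core-site.xml is in place, a quick sanity check (hdfs getconf prints the effective configuration):
$ bin/hdfs getconf -confKey fs.defaultFS ## should print hdfs://ns1
$ bin/hdfs getconf -confKey ha.zookeeper.quorum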
5. hdfs-site.xml
<configuration>
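<!-- number of replicas per block -->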
<property>
<name>dfs.replication</name>
<value>3</value>
</property>
<!-- the HDFS nameservice is ns1; must match core-site.xml -->
<property>
<name>dfs.nameservices</name>
<value>ns1</value>
</property>
<!-- ns1 has two NameNodes, nn1 and nn2 -->
<property>
<name>dfs.ha.namenodes.ns1</name>
<value>nn1,nn2</value>
</property>
<!-- NameNode RPC and web addresses, host:port -->
<!-- nn1 RPC address -->
<property>
<name>dfs.namenode.rpc-address.ns1.nn1</name>
<value>server1:9000</value>
</property>
<!-- nn1 HTTP (web UI) address -->
<property>
<name>dfs.namenode.http-address.ns1.nn1</name>
<value>server1:50070</value>
</property>
<!-- nn2 RPC address -->
<property>
<name>dfs.namenode.rpc-address.ns1.nn2</name>
<value>server2:9000</value>
</property>
<!-- nn2 HTTP (web UI) address -->
<property>
<name>dfs.namenode.http-address.ns1.nn2</name>
<value>server2:50070</value>
</property>
<!-- where the NameNodes write their shared edit log: the JournalNode quorum -->
<property>
<name>dfs.namenode.shared.edits.dir</name>
<value>qjournal://server3:8485;server4:8485;server5:8485/ns1</value>
</property>
<!-- local directory where each JournalNode stores its data -->
<property>
<name>dfs.journalnode.edits.dir</name>
<value>/opt/modules/hadoop-2.7.3/data/journal</value>
</property>
<!-- disable HDFS user/group permission checking -->
<property>
<name>dfs.permissions.enabled</name>
<value>false</value>
</property>
<!-- Fencing methods; list one method per line.
sshfence: if the active NameNode fails and the standby becomes active while the old active is still running, the old active's process is killed over SSH.
shell(/bin/true): if the active NameNode and its ZKFC die together, nothing notifies ZooKeeper; when the standby must take over, it runs this shell command, and a return of true lets the failover proceed.
-->
<property>
<name>dfs.ha.fencing.methods</name>
<value>
sshfence
shell(/bin/true)
</value>
</property>
<!-- sshfence requires passwordless SSH between the NameNode hosts -->
<property>
<name>dfs.ha.fencing.ssh.private-key-files</name>
<value>/home/[whoami]/.ssh/id_rsa</value>
</property>
<!-- sshfence SSH connect timeout, in milliseconds -->
<property>
<name>dfs.ha.fencing.ssh.connect-timeout</name>
<value>30000</value>
</property>
<!-- enable automatic failover -->
<property>
<name>dfs.ha.automatic-failover.enabled</name>
<value>true</value>
</property>
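<!-- class HDFS clients use to determine which NameNode is currently active -->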
<property>
<name>dfs.client.failover.proxy.provider.ns1</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
</configuration>
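sshfence only works if each NameNode host can SSH to the other without a password. A minimal setup sketch, run as the HDFS user on both server1 and server2:
$ ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
$ ssh-copy-id server1
$ ssh-copy-id server2
$ ssh server2 hostname ## must succeed with no password prompt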
6. mapred-site.xml
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
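<!-- JobHistory server RPC address -->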
<property>
<name>mapreduce.jobhistory.address</name>
<value>server1:10020</value>
</property>
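<!-- JobHistory web UI address -->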
<property>
<name>mapreduce.jobhistory.webapp.address</name>
<value>server1:19888</value>
</property>
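<!-- allow job submission from non-Linux clients, e.g. a Windows IDE -->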
<property>
<name>mapreduce.app-submission.cross-platform</name>
<value>true</value>
</property>
</configuration>
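Note: Hadoop 2.7.3 ships this file only as mapred-site.xml.template; if mapred-site.xml does not exist yet, create it from the template first:
$ cp etc/hadoop/mapred-site.xml.template etc/hadoop/mapred-site.xml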
7. yarn-site.xml
<configuration>
<!-- auxiliary service loaded by the NodeManagers: the MapReduce shuffle server -->
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<!-- enable ResourceManager HA -->
<property>
<name>yarn.resourcemanager.ha.enabled</name>
<value>true</value>
</property>
<property>
<name>yarn.resourcemanager.cluster-id</name>
<value>rmcluster</value>
</property>
<property>
<name>yarn.resourcemanager.ha.rm-ids</name>
<value>rm1,rm2</value>
</property>
<property>
<name>yarn.resourcemanager.hostname.rm1</name>
<value>server1</value>
</property>
<property>
<name>yarn.resourcemanager.hostname.rm2</name>
<value>server2</value>
</property>
<!-- address of the ZooKeeper ensemble -->
<property>
<name>yarn.resourcemanager.zk-address</name>
<value>server3:2181,server4:2181,server5:2181</value>
</property>
<!-- enable automatic recovery of ResourceManager state -->
<property>
<name>yarn.resourcemanager.recovery.enabled</name>
<value>true</value>
</property>
<!-- store ResourceManager state in the ZooKeeper ensemble -->
<property>
<name>yarn.resourcemanager.store.class</name>
<value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
</property>
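<!-- aggregate container logs into HDFS when an application finishes -->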
<property>
<name>yarn.log-aggregation-enable</name>
<value>true</value>
</property>
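<!-- how long to keep aggregated logs, in seconds; 106800 s is roughly 30 hours -->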
<property>
<name>yarn.log-aggregation.retain-seconds</name>
<value>106800</value>
</property>
</configuration>
8. slaves
server3
server4
server5
9. Distribute the configuration files to the other machines
scp core-site.xml hdfs-site.xml yarn-site.xml mapred-site.xml hadoop-env.sh mapred-env.sh yarn-env.sh slaves server2:/opt/modules/hadoop-2.7.3/etc/hadoop
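The line above covers server2 only; the same files must reach server3, server4, and server5 as well. A simple loop, assuming it is run from /opt/modules/hadoop-2.7.3/etc/hadoop on server1:
$ for host in server2 server3 server4 server5; do
>   scp core-site.xml hdfs-site.xml yarn-site.xml mapred-site.xml \
>       hadoop-env.sh mapred-env.sh yarn-env.sh slaves \
>       $host:/opt/modules/hadoop-2.7.3/etc/hadoop/
> done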
IV. Starting the Hadoop cluster
## Note: perform the steps below strictly in this order
1. If present, remove the data (tmp) and logs directories left over from a previous run (on each node; the journal directory lives under data/):
$ rm -rf data/
$ rm -rf logs/
2. Start ZooKeeper (on server3, server4, server5):
$ bin/zkServer.sh start
3. Start the JournalNodes (on server3, server4, server5):
$ sbin/hadoop-daemon.sh start journalnode
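Before moving on, jps on server3-5 should list both QuorumPeerMain (ZooKeeper) and JournalNode; the format step below writes to the JournalNode quorum and fails if it is not up:
$ jps ## expect QuorumPeerMain and JournalNode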
4. Format HDFS (on server1), then start the freshly formatted NameNode so the standby can copy its metadata in the next step:
$ bin/hdfs namenode -format ## format the namesystem
$ sbin/hadoop-daemon.sh start namenode
5. Sync nn1's metadata to nn2 (on server2):
$ bin/hdfs namenode -bootstrapStandby ## copy nn1's metadata
6. Format the ZKFC state in ZooKeeper (on server1):
$ bin/hdfs zkfc -formatZK
7. Start HDFS (on server1):
$ sbin/start-dfs.sh
8. Start YARN; start-yarn.sh only starts the ResourceManager on the node it runs on, so the second ResourceManager is started by hand:
$ sbin/start-yarn.sh (on server1)
$ sbin/yarn-daemon.sh start resourcemanager (on server2)
9. Start the job history server (on server1):
$ sbin/mr-jobhistory-daemon.sh start historyserver
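If everything came up cleanly, jps should show roughly the following on each node (matching the plan in section I):
server1: NameNode, DFSZKFailoverController, ResourceManager, JobHistoryServer
server2: NameNode, DFSZKFailoverController, ResourceManager
server3/4/5: DataNode, NodeManager, JournalNode, QuorumPeerMain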
10. Check the web UIs:
http://server1:50070/ ## active
http://server2:50070/ ## standby
11. Manually switch NameNode state:
$ bin/hdfs haadmin -transitionToActive nn1 ## make nn1 active
$ bin/hdfs haadmin -transitionToStandby nn1 ## make nn1 standby
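Note: because dfs.ha.automatic-failover.enabled is true above, haadmin refuses these manual transitions unless you add --forcemanual (use with care; it bypasses the ZKFCs):
$ bin/hdfs haadmin -transitionToActive --forcemanual nn1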
12. Check service states:
$ bin/hdfs haadmin -getServiceState nn1 ## state of nn1
$ bin/hdfs haadmin -getServiceState nn2 ## state of nn2
$ bin/yarn rmadmin -getServiceState rm1 ## state of rm1
$ bin/yarn rmadmin -getServiceState rm2 ## state of rm2
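To verify automatic failover end to end, kill the active NameNode and watch the standby take over (a quick sanity test, assuming nn1 is currently active):
$ jps ## on server1, note the NameNode pid
$ kill -9 <NameNode pid>
$ bin/hdfs haadmin -getServiceState nn2 ## should report active within a few seconds
$ sbin/hadoop-daemon.sh start namenode ## restart the killed NameNode; it rejoins as standby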