High-Availability Cluster Setup

Configuration Files

Set JAVA_HOME in the three environment scripts:
[root@linux02 hadoop]# vi ./hadoop-env.sh
export JAVA_HOME=/opt/soft/jdk180
[root@linux02 hadoop]# vi ./yarn-env.sh
export JAVA_HOME=/opt/soft/jdk180
[root@linux02 hadoop]# vi ./mapred-env.sh
export JAVA_HOME=/opt/soft/jdk180
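If you'd rather script these edits than open vi three times, a sed loop does the same job (a sketch; it rewrites any existing export JAVA_HOME line, so adjust the JDK path to match your install):

for f in hadoop-env.sh yarn-env.sh mapred-env.sh; do
  # pin JAVA_HOME to the local JDK in each env script
  sed -i 's|^.*export JAVA_HOME=.*$|export JAVA_HOME=/opt/soft/jdk180|' ./$f
done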


[root@linux02 hadoop]# vi ./core-site.xml
<configuration>
  <!-- Set the HDFS nameservice to mycluster -->
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://mycluster/</value>
  </property>

  <!-- Hadoop working directory -->
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/opt/soft/hadoop260/hadooptmp/</value>
  </property>

  <!-- ZooKeeper quorum addresses -->
  <property>
    <name>ha.zookeeper.quorum</name>
    <value>linux03:2181,linux04:2181,linux05:2181</value>
  </property>

  <!-- Proxy-user settings so other components can connect to this HDFS cluster later -->
  <property>
    <name>hadoop.proxyuser.bigdata.hosts</name>
    <value>*</value>
  </property>

  <property>
    <name>hadoop.proxyuser.bigdata.groups</name>
    <value>*</value>
  </property>
</configuration>
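As a quick sanity check once this file is in place, hdfs getconf (part of the stock Hadoop CLI) prints the effective value of a configuration key, so the following should echo the nameservice URI back:

[root@linux02 hadoop]# hdfs getconf -confKey fs.defaultFS
hdfs://mycluster/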


--------------------------------------------------
[root@linux02 hadoop]# vi ./hdfs-site.xml
<configuration>
  <!-- Name the HDFS nameservice mycluster; must match core-site.xml -->
  <property>
    <name>dfs.nameservices</name>
    <value>mycluster</value>
  </property>

  <!-- The mycluster nameservice has two NameNodes, nn1 and nn2 -->
  <property>
    <name>dfs.ha.namenodes.mycluster</name>
    <value>nn1,nn2</value>
  </property>

  <!-- RPC address of nn1 -->
  <property>
    <name>dfs.namenode.rpc-address.mycluster.nn1</name>
    <value>linux02:9000</value>
  </property>

  <!-- HTTP address of nn1 -->
  <property>
    <name>dfs.namenode.http-address.mycluster.nn1</name>
    <value>linux02:50070</value>
  </property>

  <!-- RPC address of nn2 -->
  <property>
    <name>dfs.namenode.rpc-address.mycluster.nn2</name>
    <value>linux03:9000</value>
  </property>

  <!-- HTTP address of nn2 -->
  <property>
    <name>dfs.namenode.http-address.mycluster.nn2</name>
    <value>linux03:50070</value>
  </property>

  <!-- Where each JournalNode stores its data on local disk -->
  <property>
    <name>dfs.journalnode.edits.dir</name>
    <value>/opt/soft/hadoop260/journaldata</value>
  </property>

  <!-- JournalNode servers that hold the NameNode edits -->
  <property>
    <name>dfs.namenode.shared.edits.dir</name>
    <value>qjournal://linux02:8485;linux03:8485;linux04:8485/mycluster</value>
  </property>

  <!-- Enable automatic NameNode failover -->
  <property>
    <name>dfs.ha.automatic-failover.enabled</name>
    <value>true</value>
  </property>

  <!-- Proxy provider clients use to locate the active NameNode -->
  <property>
    <name>dfs.client.failover.proxy.provider.mycluster</name>
    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
  </property>

  <!-- Fencing methods, tried in order -->
  <property>
    <name>dfs.ha.fencing.methods</name>
    <value>
      sshfence
      shell(/bin/true)
    </value>
  </property>

  <!-- sshfence needs passwordless SSH; point it at the private key -->
  <property>
    <name>dfs.ha.fencing.ssh.private-key-files</name>
    <value>/root/.ssh/id_rsa</value>
  </property>

  <!-- sshfence connection timeout (ms) -->
  <property>
    <name>dfs.ha.fencing.ssh.connect-timeout</name>
    <value>30000</value>
  </property>

  <property>
    <name>dfs.webhdfs.enabled</name>
    <value>true</value>
  </property>
</configuration>
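sshfence can only do its job if root on each NameNode host can SSH to the other without a password. If that is not already set up, the usual recipe is the following, run on both linux02 and linux03 (a sketch, assuming root SSH logins are permitted):

ssh-keygen -t rsa            # accept the defaults; writes /root/.ssh/id_rsa
ssh-copy-id root@linux02
ssh-copy-id root@linux03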

-----------------------------------------
[root@linux02 hadoop]# cp mapred-site.xml.template mapred-site.xml
[root@linux02 hadoop]# vi ./mapred-site.xml

<configuration>
  <!-- Schedule MapReduce jobs through YARN -->
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>

  <!-- MapReduce JobHistory server address and port -->
  <property>
    <name>mapreduce.jobhistory.address</name>
    <value>linux05:10020</value>
  </property>

  <!-- MapReduce JobHistory server web UI address -->
  <property>
    <name>mapreduce.jobhistory.webapp.address</name>
    <value>linux05:19888</value>
  </property>
</configuration>
 
 
 
----------------------------------------------
[root@linux02 hadoop]# vi ./yarn-site.xml
<configuration>
  <!-- Enable ResourceManager HA -->
  <property>
    <name>yarn.resourcemanager.ha.enabled</name>
    <value>true</value>
  </property>

  <!-- Cluster id for the ResourceManager pair: yrc -->
  <property>
    <name>yarn.resourcemanager.cluster-id</name>
    <value>yrc</value>
  </property>

  <!-- Logical ids of the two ResourceManagers -->
  <property>
    <name>yarn.resourcemanager.ha.rm-ids</name>
    <value>rm1,rm2</value>
  </property>

  <!-- Host for rm1 -->
  <property>
    <name>yarn.resourcemanager.hostname.rm1</name>
    <value>linux02</value>
  </property>

  <!-- Host for rm2 -->
  <property>
    <name>yarn.resourcemanager.hostname.rm2</name>
    <value>linux03</value>
  </property>

  <!-- ZooKeeper addresses the ResourceManagers coordinate through -->
  <property>
    <name>yarn.resourcemanager.zk-address</name>
    <value>linux03:2181,linux04:2181,linux05:2181</value>
  </property>

  <!-- Auxiliary service required to run MapReduce jobs -->
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>

  <!-- Enable log aggregation for the YARN cluster -->
  <property>
    <name>yarn.log-aggregation-enable</name>
    <value>true</value>
  </property>

  <!-- How long aggregated logs are kept (seconds) -->
  <property>
    <name>yarn.log-aggregation.retain-seconds</name>
    <value>86400</value>
  </property>

  <!-- Enable ResourceManager state recovery -->
  <property>
    <name>yarn.resourcemanager.recovery.enabled</name>
    <value>true</value>
  </property>

  <!-- Store ResourceManager state in the ZooKeeper cluster -->
  <property>
    <name>yarn.resourcemanager.store.class</name>
    <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
  </property>
</configuration>

-----------------------------------------
[root@linux02 hadoop]# vi ./slaves 

linux02
linux03
linux04
linux05

----------------------------------
[root@linux02 hadoop]# xsync ./hadoop260/
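xsync is not part of Hadoop; it is a homegrown distribution helper (it comes from the script link at the end of this post). If you don't have it, a minimal stand-in built on rsync could look like this sketch (hostnames assumed to match this cluster; the target path mirrors the source path):

#!/bin/bash
# xsync (sketch): replicate a file or directory to the same absolute path on the other nodes
src=$(realpath "$1")                   # absolute path, trailing slash stripped
for host in linux03 linux04 linux05; do
  rsync -av "$src" root@"$host":"$(dirname "$src")/"
done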
-----------------
[root@linux02 hadoop]# vi /etc/profile
[root@linux03 hadoop]# vi /etc/profile
[root@linux04 hadoop]# vi /etc/profile
[root@linux05 hadoop]# vi /etc/profile

export HADOOP_HOME=/opt/soft/hadoop260
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export YARN_HOME=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
export HADOOP_OPTS="-Djava.library.path=$HADOOP_HOME/lib"
export PATH=$PATH:$HADOOP_HOME/sbin:$HADOOP_HOME/bin
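After editing /etc/profile on every node, reload it in the current shell and make sure the Hadoop binaries resolve; hadoop version is a convenient smoke test and should report the 2.6 build:

[root@linux02 hadoop]# source /etc/profile
[root@linux02 hadoop]# hadoop version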

[root@linux02 hadoop]# xsync /etc/profile

Starting the Cluster Services

First-time startup

1. Start the ZooKeeper cluster first
[root@linux02 hadoop]# zkop status
--------linux03 zookeeper status -----------
JMX enabled by default
Using config: /opt/soft/zookeeper345/bin/../conf/zoo.cfg
Mode: follower
--------linux04 zookeeper status -----------
JMX enabled by default
Using config: /opt/soft/zookeeper345/bin/../conf/zoo.cfg
Mode: leader
--------linux05 zookeeper status -----------
JMX enabled by default
Using config: /opt/soft/zookeeper345/bin/../conf/zoo.cfg
Mode: follower
[root@linux02 hadoop]# 
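zkop, like xsync, is one of the helper scripts linked at the end of the post. A rough equivalent (a sketch; the zkServer.sh path is taken from the output above) is:

#!/bin/bash
# zkop (sketch): run a zkServer.sh subcommand (start|stop|status) on every ZooKeeper node
for host in linux03 linux04 linux05; do
  echo "--------$host zookeeper $1 -----------"
  ssh root@"$host" "/opt/soft/zookeeper345/bin/zkServer.sh $1"
done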
2. Start the JournalNodes
[root@linux02 hadoop]# hadoop-daemon.sh start journalnode
starting journalnode, logging to /opt/soft/hadoop260/logs/hadoop-root-journalnode-linux02.out
[root@linux02 hadoop]# ssh linux03 "source /etc/profile; hadoop-daemon.sh start journalnode"
starting journalnode, logging to /opt/soft/hadoop260/logs/hadoop-root-journalnode-linux03.out
[root@linux02 hadoop]# ssh linux04 "source /etc/profile; hadoop-daemon.sh start journalnode"
starting journalnode, logging to /opt/soft/hadoop260/logs/hadoop-root-journalnode-linux04.out
[root@linux02 hadoop]# jqop jps
-------------linux02 command output-----------
jps
9489 Jps
9048 JournalNode
-------------linux03 command output-----------
jps
12081 Jps
1764 QuorumPeerMain
12006 JournalNode
-------------linux04 command output-----------
jps
2595 JournalNode
1752 QuorumPeerMain
2654 Jps
-------------linux05 command output-----------
jps
2440 Jps
1758 QuorumPeerMain
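jqop, used above, is another helper from the same collection: it runs a command on every node and labels the output. A minimal sketch of the idea:

#!/bin/bash
# jqop (sketch): echo and run the given command on each cluster node over SSH
for host in linux02 linux03 linux04 linux05; do
  echo "-------------$host command output-----------"
  echo "$*"
  ssh root@"$host" "source /etc/profile; $*"
done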
3. Format the NameNode
[root@linux02 soft]# hadoop namenode -format
Copy the freshly formatted hadooptmp directory from linux02 over to linux03:
[root@linux02 hadoop260]# scp -r ./hadooptmp/ root@linux03:/opt/soft/hadoop260/
VERSION                                                                                       100%  207    43.3KB/s   00:00    
seen_txid                                                                                     100%    2     0.4KB/s   00:00    
fsimage_0000000000000000000.md5                                                               100%   62    23.9KB/s   00:00    
fsimage_0000000000000000000                                                                   100%  321   120.3KB/s   00:00 
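Copying hadooptmp with scp works, but HDFS also ships a command for exactly this step: once nn1 has been started, you can instead run hdfs namenode -bootstrapStandby on linux03, which pulls the current fsimage from nn1 over HTTP. Either way, nn2 ends up with a copy of nn1's metadata; use one approach or the other, not both.

[root@linux03 ~]# hdfs namenode -bootstrapStandby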

4. Initialize the HA state in ZooKeeper
[root@linux02 hadoop260]# hdfs zkfc -formatZK
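formatZK creates a /hadoop-ha/mycluster znode that the ZKFCs later use for active/standby election. You can confirm it from any ZooKeeper node (zkCli.sh accepts a command directly; path assumed from the zkop output above), and the listing should contain mycluster:

[root@linux03 ~]# /opt/soft/zookeeper345/bin/zkCli.sh -server linux03:2181 ls /hadoop-ha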
5. Start HDFS
[root@linux02 hadoop260]# start-dfs.sh
21/11/04 23:24:40 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
Starting namenodes on [linux02 linux03]
linux03: starting namenode, logging to /opt/soft/hadoop260/logs/hadoop-root-namenode-linux03.out
linux02: starting namenode, logging to /opt/soft/hadoop260/logs/hadoop-root-namenode-linux02.out
linux05: starting datanode, logging to /opt/soft/hadoop260/logs/hadoop-root-datanode-linux05.out
linux04: starting datanode, logging to /opt/soft/hadoop260/logs/hadoop-root-datanode-linux04.out
linux02: starting datanode, logging to /opt/soft/hadoop260/logs/hadoop-root-datanode-linux02.out
linux03: starting datanode, logging to /opt/soft/hadoop260/logs/hadoop-root-datanode-linux03.out
21/11/04 23:25:02 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
Starting ZK Failover Controllers on NN hosts [linux02 linux03]
linux02: starting zkfc, logging to /opt/soft/hadoop260/logs/hadoop-root-zkfc-linux02.out
linux03: starting zkfc, logging to /opt/soft/hadoop260/logs/hadoop-root-zkfc-linux03.out
[root@linux02 hadoop260]# jqop jps
-------------linux02 command output-----------
jps
10852 DFSZKFailoverController
9048 JournalNode
10970 Jps
10507 NameNode
10603 DataNode
-------------linux03 command output-----------
jps
1764 QuorumPeerMain
12006 JournalNode
13319 Jps
13082 DataNode
13019 NameNode
13211 DFSZKFailoverController
-------------linux04 command output-----------
jps
2688 DataNode
2595 JournalNode
1752 QuorumPeerMain
2776 Jps
-------------linux05 command output-----------
jps
2562 Jps
2473 DataNode
1758 QuorumPeerMain
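At this point one NameNode should be active and the other standby. Instead of checking the web UIs on port 50070, you can ask the cluster directly; one of the two should print active and the other standby:

[root@linux02 hadoop260]# hdfs haadmin -getServiceState nn1
[root@linux02 hadoop260]# hdfs haadmin -getServiceState nn2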
6. Start YARN
[root@linux02 hadoop260]# start-yarn.sh
starting yarn daemons
starting resourcemanager, logging to /opt/soft/hadoop260/logs/yarn-root-resourcemanager-linux02.out
linux05: starting nodemanager, logging to /opt/soft/hadoop260/logs/yarn-root-nodemanager-linux05.out
linux04: starting nodemanager, logging to /opt/soft/hadoop260/logs/yarn-root-nodemanager-linux04.out
linux03: starting nodemanager, logging to /opt/soft/hadoop260/logs/yarn-root-nodemanager-linux03.out
linux02: starting nodemanager, logging to /opt/soft/hadoop260/logs/yarn-root-nodemanager-linux02.out
[root@linux02 hadoop260]# jqop jps
-------------linux02 command output-----------
jps
10852 DFSZKFailoverController
11252 NodeManager
9048 JournalNode
11160 ResourceManager
11321 Jps
10507 NameNode
10603 DataNode
-------------linux03 command output-----------
jps
13634 Jps
1764 QuorumPeerMain
12006 JournalNode
13082 DataNode
13019 NameNode
13211 DFSZKFailoverController
13501 NodeManager
-------------linux04 command output-----------
jps
2688 DataNode
2595 JournalNode
2933 Jps
1752 QuorumPeerMain
2809 NodeManager
-------------linux05 command output-----------
jps
2720 Jps
2596 NodeManager
2473 DataNode
1758 QuorumPeerMain
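Notice that linux03 is not running a ResourceManager yet: in Hadoop 2.x, start-yarn.sh only launches the ResourceManager on the node where it is executed. The standby RM must be started by hand, which is what produces the ResourceManager (pid 13861) visible on linux03 in the next listing; the original command was not captured here, but it would be something like:

[root@linux02 hadoop260]# ssh linux03 "source /etc/profile; yarn-daemon.sh start resourcemanager"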
[root@linux02 hadoop260]# jqop jps
-------------linux02 command output-----------
jps
10852 DFSZKFailoverController
11252 NodeManager
9048 JournalNode
11160 ResourceManager
10507 NameNode
10603 DataNode
11837 Jps
-------------linux03 command output-----------
jps
13953 Jps
1764 QuorumPeerMain
13861 ResourceManager
12006 JournalNode
13082 DataNode
13019 NameNode
13211 DFSZKFailoverController
13501 NodeManager
-------------linux04 command output-----------
jps
2688 DataNode
2960 Jps
2595 JournalNode
1752 QuorumPeerMain
2809 NodeManager
-------------linux05 command output-----------
jps
2596 NodeManager
2473 DataNode
2747 Jps
1758 QuorumPeerMain
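With both ResourceManagers up, yarn rmadmin (part of the stock YARN CLI) reports which one holds the active role:

[root@linux02 hadoop260]# yarn rmadmin -getServiceState rm1
[root@linux02 hadoop260]# yarn rmadmin -getServiceState rm2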
7. Start the MapReduce JobHistory server on linux05
[root@linux02 hadoop260]# ssh linux05 "source /etc/profile; mr-jobhistory-daemon.sh start historyserver"
starting historyserver, logging to /opt/soft/hadoop260/logs/mapred-root-historyserver-linux05.out
[root@linux02 hadoop260]# jqop jps
-------------linux02 command output-----------
jps
12177 Jps
10852 DFSZKFailoverController
11252 NodeManager
9048 JournalNode
11160 ResourceManager
10507 NameNode
10603 DataNode
-------------linux03 command output-----------
jps
1764 QuorumPeerMain
13861 ResourceManager
12006 JournalNode
13082 DataNode
13019 NameNode
13211 DFSZKFailoverController
13501 NodeManager
14287 Jps
-------------linux04 command output-----------
jps
2688 DataNode
2595 JournalNode
1752 QuorumPeerMain
2809 NodeManager
2988 Jps
-------------linux05 command output-----------
jps
2596 NodeManager
2871 Jps
2473 DataNode
2794 JobHistoryServer
1758 QuorumPeerMain

Viewing the Logs

[root@linux05 ~]# cd /opt/soft/hadoop260/logs
[root@linux05 logs]# vi ./mapred-root-historyserver-linux05.log
[root@linux05 logs]# cat mapred-root-historyserver-linux05.log
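Finally, it is worth proving that automatic failover actually works. A common smoke test is to kill the active NameNode and watch the standby take over (a sketch; substitute the NameNode pid from your own jps output, e.g. 10507 on linux02 above):

[root@linux02 ~]# kill -9 10507                        # kill the active NameNode
[root@linux02 ~]# hdfs haadmin -getServiceState nn2    # should report active within a few seconds
[root@linux02 ~]# hadoop-daemon.sh start namenode      # bring nn1 back; it rejoins as standby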

The helper scripts used above (xsync, zkop, jqop) are available here: https://blog.csdn.net/liuyongsheng666/article/details/121122238?spm=1001.2014.3001.5501
