Hadoop集群版 高可用

(此处原文插有一张集群架构示意图,抓取时图片未能保留)
所需脚本网址
jqop https://blog.csdn.net/qq_51903852/article/details/121120429
xsync https://blog.csdn.net/qq_51903852/article/details/121141166
zkop https://blog.csdn.net/qq_51903852/article/details/121120516

[root@mihaoyu152 install]# tar -zxvf hadoop-2.6.0-cdh5.14.2.tar.gz -C ../soft

[root@mihaoyu152 soft]# mv hadoop-2.6.0-cdh5.14.2/ hadoop260

[root@mihaoyu152 hadoop]# vi ./hadoop-env.sh 
25 export JAVA_HOME=/opt/soft/jdk180

[root@mihaoyu152 hadoop]# vi ./yarn-env.sh 
23 export JAVA_HOME=/opt/soft/jdk180

[root@mihaoyu152 hadoop]# vi ./mapred-env.sh
16 export JAVA_HOME=/opt/soft/jdk180

[root@mihaoyu152 hadoop]# vi ./core-site.xml 
<configuration>
  <!-- 指定HDFS的nameservice为mycluster -->
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://mycluster/</value>
  </property>

  <!-- 指定hadoop工作目录 -->
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/opt/soft/hadoop260/hadooptmp/</value>
  </property>

  <!-- 指定zookeeper集群访问地址 -->
  <property>
    <name>ha.zookeeper.quorum</name>
    <value>mihaoyu153:2181,mihaoyu154:2181,mihaoyu155:2181</value>
  </property>

  <!-- 配置代理用户,以便以后其他组件能够连接HDFS集群 -->
  <property>
    <name>hadoop.proxyuser.bigdata.hosts</name>
    <value>*</value>
  </property>

  <property>
    <name>hadoop.proxyuser.bigdata.groups</name>
    <value>*</value>
  </property>
</configuration>

[root@mihaoyu152 hadoop]# vi ./hdfs-site.xml 
<configuration>
  <!-- 指定副本数 -->
  <property>
    <name>dfs.replication</name>
    <value>3</value>
  </property>

  <!-- 指定HDFS的nameservices为mycluster,需要跟core-site.xml中保持一致 -->
  <property>
    <name>dfs.nameservices</name>
    <value>mycluster</value>
  </property>

  <!-- 设置mycluster集群有两个namenode,分别为nn1,nn2 -->
  <property>
    <name>dfs.ha.namenodes.mycluster</name>
    <value>nn1,nn2</value>
  </property>

  <!-- 配置nn1的RPC通信地址 -->
  <property>
    <name>dfs.namenode.rpc-address.mycluster.nn1</name>
    <value>mihaoyu152:9000</value>
  </property>

  <!-- 配置nn1的http通信地址 -->
  <property>
    <name>dfs.namenode.http-address.mycluster.nn1</name>
    <value>mihaoyu152:50070</value>
  </property>

  <!-- 配置nn2的RPC通信地址 -->
  <property>
    <name>dfs.namenode.rpc-address.mycluster.nn2</name>
    <value>mihaoyu153:9000</value>
  </property>

  <!-- 配置nn2的http通信地址 -->
  <property>
    <name>dfs.namenode.http-address.mycluster.nn2</name>
    <value>mihaoyu153:50070</value>
  </property>

  <!-- 指定JournalNode 在本地磁盘存放数据的位置 -->
  <property>
    <name>dfs.journalnode.edits.dir</name>
    <value>/opt/soft/hadoop260/journaldata</value>
  </property>

  <!-- 指定NameNode的edits元数据在journalNode上的服务器 -->
  <property>
    <name>dfs.namenode.shared.edits.dir</name>
    <value>qjournal://mihaoyu152:8485;mihaoyu153:8485;mihaoyu154:8485/mycluster</value>
  </property>

  <!-- 开启NameNode 自动切换 -->
  <property>
    <name>dfs.ha.automatic-failover.enabled</name>
    <value>true</value>
  </property>

  <!-- 配置namenode失败自动切换的实现方式 -->
  <property>
    <name>dfs.client.failover.proxy.provider.mycluster</name>
    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
  </property>

  <!-- 配置隔离机制方法 -->
  <property>
    <name>dfs.ha.fencing.methods</name>
    <value>
      sshfence
      shell(/bin/true)
    </value>
  </property>

  <!-- 使用sshfence隔离机制时需要ssh免密登录 -->
  <property>
    <name>dfs.ha.fencing.ssh.private-key-files</name>
    <value>/root/.ssh/id_rsa</value>
  </property>

  <!-- 配置sshfence隔离机制超时时间 -->
  <property>
    <name>dfs.ha.fencing.ssh.connect-timeout</name>
    <value>30000</value>
  </property>

  <property>
    <name>dfs.webhdfs.enabled</name>
    <value>true</value>
  </property>
</configuration>

[root@mihaoyu152 hadoop]# cp mapred-site.xml.template mapred-site.xml

[root@mihaoyu152 hadoop]# vi ./mapred-site.xml
<configuration>
  <!-- 指定mapreduce运算时资源调度框架为 yarn 模式 -->
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>

  <!-- 配置mapreduce历史服务器地址 端口号 -->
  <property>
    <name>mapreduce.jobhistory.address</name>
    <value>mihaoyu155:10020</value>
  </property>

  <!-- 配置mapreduce历史服务器WEB访问地址 -->
  <property>
    <name>mapreduce.jobhistory.webapp.address</name>
    <value>mihaoyu155:19888</value>
  </property>
</configuration>

[root@mihaoyu152 hadoop]# vi ./yarn-site.xml
<configuration>
  <!-- 开启高可用 -->
  <property>
    <name>yarn.resourcemanager.ha.enabled</name>
    <value>true</value>
  </property>

  <!-- 指定ResourceManager的标识:yrc -->
  <property>
    <name>yarn.resourcemanager.cluster-id</name>
    <value>yrc</value>
  </property>

  <!-- 指定RM的名字 -->
  <property>
    <name>yarn.resourcemanager.ha.rm-ids</name>
    <value>rm1,rm2</value>
  </property>

  <!-- 指定rm1服务器 -->
  <property>
    <name>yarn.resourcemanager.hostname.rm1</name>
    <value>mihaoyu152</value>
  </property>

  <!-- 指定rm2服务器 -->
  <property>
    <name>yarn.resourcemanager.hostname.rm2</name>
    <value>mihaoyu153</value>
  </property>

  <!-- 指定rm使用的zookeeper集群地址 -->
  <property>
    <name>yarn.resourcemanager.zk-address</name>
    <value>mihaoyu153:2181,mihaoyu154:2181,mihaoyu155:2181</value>
  </property>

  <!-- 运行mapreduce任务时需要使用的服务 -->
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>

  <!-- 开启yarn集群的日志聚合功能 -->
  <property>
    <name>yarn.log-aggregation-enable</name>
    <value>true</value>
  </property>

  <!-- 设置日志保存时间 -->
  <property>
    <name>yarn.log-aggregation.retain-seconds</name>
    <value>86400</value>
  </property>

  <!-- 启动rm自动恢复功能 -->
  <property>
    <name>yarn.resourcemanager.recovery.enabled</name>
    <value>true</value>
  </property>

  <!-- 指定rm状态信息存储在zookeeper集群上 -->
  <property>
    <name>yarn.resourcemanager.store.class</name>
    <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
  </property>
</configuration>

[root@mihaoyu152 hadoop]# vi ./slaves
mihaoyu152
mihaoyu153
mihaoyu154
mihaoyu155

[root@mihaoyu152 hadoop]# vi /etc/profile
#hadoop
export HADOOP_HOME=/opt/soft/hadoop260
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export YARN_HOME=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
export HADOOP_OPTS="-Djava.library.path=$HADOOP_HOME/lib"
export PATH=$PATH:$HADOOP_HOME/sbin:$HADOOP_HOME/bin

[root@mihaoyu152 soft]# source /etc/profile

[root@mihaoyu152 soft]# xsync hadoop260

[root@mihaoyu152 soft]# xsync /etc/profile

第一次启动集群服务

1、先将zookeeper启动
[root@mihaoyu152 soft]# zkop start
--------------- mihaoyu153 zookeeper start ---------------
JMX enabled by default
Using config: /opt/soft/zookeeper345/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED
--------------- mihaoyu154 zookeeper start ---------------
JMX enabled by default
Using config: /opt/soft/zookeeper345/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED
--------------- mihaoyu155 zookeeper start ---------------
JMX enabled by default
Using config: /opt/soft/zookeeper345/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED

[root@mihaoyu152 soft]# zkop status
--------------- mihaoyu153 zookeeper status ---------------
JMX enabled by default
Using config: /opt/soft/zookeeper345/bin/../conf/zoo.cfg
Mode: follower
--------------- mihaoyu154 zookeeper status ---------------
JMX enabled by default
Using config: /opt/soft/zookeeper345/bin/../conf/zoo.cfg
Mode: leader
--------------- mihaoyu155 zookeeper status ---------------
JMX enabled by default
Using config: /opt/soft/zookeeper345/bin/../conf/zoo.cfg
Mode: follower

2、启动journalnode
[root@mihaoyu152 soft]# hadoop-daemon.sh start journalnode
starting journalnode, logging to /opt/soft/hadoop260/logs/hadoop-root-journalnode-mihaoyu152.out

[root@mihaoyu152 soft]# ssh mihaoyu153 "source /etc/profile; hadoop-daemon.sh start journalnode"
starting journalnode, logging to /opt/soft/hadoop260/logs/hadoop-root-journalnode-mihaoyu153.out
[root@mihaoyu152 soft]# ssh mihaoyu154 "source /etc/profile; hadoop-daemon.sh start journalnode"
starting journalnode, logging to /opt/soft/hadoop260/logs/hadoop-root-journalnode-mihaoyu154.out
[root@mihaoyu152 soft]# jqop jps
--------------- mihaoyu152 指令信息 ---------------
jps
5986 Jps
5894 JournalNode
--------------- mihaoyu153 指令信息 ---------------
jps
4676 QuorumPeerMain
4807 Jps
4748 JournalNode
--------------- mihaoyu154 指令信息 ---------------
jps
3570 JournalNode
3493 QuorumPeerMain
3629 Jps
--------------- mihaoyu155 指令信息 ---------------
jps
3378 Jps
3318 QuorumPeerMain

3、格式化namenode(在mihaoyu152上执行格式化,随后将元数据同步到mihaoyu153)
[root@mihaoyu152 soft]# hadoop namenode -format

[root@mihaoyu152 soft]# cd ./hadoop260/
将mihaoyu152格式化后的hadooptmp文件同步到mihaoyu153
[root@mihaoyu152 hadoop260]# scp -r ./hadooptmp/ root@mihaoyu153:/opt/soft/hadoop260/
VERSION                                                         100%  205   235.0KB/s   00:00    
seen_txid                                                       100%    2     2.4KB/s   00:00    
fsimage_0000000000000000000.md5                                 100%   62   106.3KB/s   00:00    
fsimage_0000000000000000000                                     100%  320   565.5KB/s   00:00    

4、初始化zookeeper
[root@mihaoyu152 hadoop260]# hdfs zkfc -formatZK

[root@mihaoyu153 hadoop260]# zkCli.sh 

5、启动HDFS
[root@mihaoyu152 hadoop260]# start-dfs.sh 
21/11/04 21:40:35 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
Starting namenodes on [mihaoyu152 mihaoyu153]
mihaoyu153: starting namenode, logging to /opt/soft/hadoop260/logs/hadoop-root-namenode-mihaoyu153.out
mihaoyu152: starting namenode, logging to /opt/soft/hadoop260/logs/hadoop-root-namenode-mihaoyu152.out
mihaoyu153: starting datanode, logging to /opt/soft/hadoop260/logs/hadoop-root-datanode-mihaoyu153.out
mihaoyu155: starting datanode, logging to /opt/soft/hadoop260/logs/hadoop-root-datanode-mihaoyu155.out
mihaoyu154: starting datanode, logging to /opt/soft/hadoop260/logs/hadoop-root-datanode-mihaoyu154.out
mihaoyu152: starting datanode, logging to /opt/soft/hadoop260/logs/hadoop-root-datanode-mihaoyu152.out
Starting journal nodes [mihaoyu152 mihaoyu153 mihaoyu154]
mihaoyu153: journalnode running as process 4748. Stop it first.
mihaoyu154: journalnode running as process 3570. Stop it first.
mihaoyu152: journalnode running as process 5894. Stop it first.
21/11/04 21:40:47 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
Starting ZK Failover Controllers on NN hosts [mihaoyu152 mihaoyu153]
mihaoyu153: starting zkfc, logging to /opt/soft/hadoop260/logs/hadoop-root-zkfc-mihaoyu153.out
mihaoyu152: starting zkfc, logging to /opt/soft/hadoop260/logs/hadoop-root-zkfc-mihaoyu152.out

[root@mihaoyu152 hadoop260]# jqop jps
--------------- mihaoyu152 指令信息 ---------------
jps
6581 DFSZKFailoverController
5894 JournalNode
6312 DataNode
6185 NameNode
6666 Jps
--------------- mihaoyu153 指令信息 ---------------
jps
4883 NameNode
4676 QuorumPeerMain
5147 Jps
4748 JournalNode
4972 DataNode
5085 DFSZKFailoverController
--------------- mihaoyu154 指令信息 ---------------
jps
3570 JournalNode
3493 QuorumPeerMain
3767 Jps
3659 DataNode
--------------- mihaoyu155 指令信息 ---------------
jps
3318 QuorumPeerMain
3495 Jps
3407 DataNode

6、启动yarn
[root@mihaoyu152 hadoop260]# start-yarn.sh
starting yarn daemons
starting resourcemanager, logging to /opt/soft/hadoop260/logs/yarn-root-resourcemanager-mihaoyu152.out
mihaoyu154: starting nodemanager, logging to /opt/soft/hadoop260/logs/yarn-root-nodemanager-mihaoyu154.out
mihaoyu153: starting nodemanager, logging to /opt/soft/hadoop260/logs/yarn-root-nodemanager-mihaoyu153.out
mihaoyu155: starting nodemanager, logging to /opt/soft/hadoop260/logs/yarn-root-nodemanager-mihaoyu155.out
mihaoyu152: starting nodemanager, logging to /opt/soft/hadoop260/logs/yarn-root-nodemanager-mihaoyu152.out
[root@mihaoyu152 hadoop260]# jqop jps
--------------- mihaoyu152 指令信息 ---------------
jps
7139 Jps
6581 DFSZKFailoverController
6821 NodeManager
5894 JournalNode
6312 DataNode
6185 NameNode
6719 ResourceManager
--------------- mihaoyu153 指令信息 ---------------
jps
4883 NameNode
4676 QuorumPeerMain
5188 NodeManager
4748 JournalNode
4972 DataNode
5085 DFSZKFailoverController
5310 Jps
--------------- mihaoyu154 指令信息 ---------------
jps
3570 JournalNode
3922 Jps
3493 QuorumPeerMain
3800 NodeManager
3659 DataNode
--------------- mihaoyu155 指令信息 ---------------
jps
3650 Jps
3318 QuorumPeerMain
3528 NodeManager
3407 DataNode

[root@mihaoyu152 hadoop260]# ssh mihaoyu153 "source /etc/profile; yarn-daemon.sh start resourcemanager"
starting resourcemanager, logging to /opt/soft/hadoop260/logs/yarn-root-resourcemanager-mihaoyu153.out

[root@mihaoyu152 hadoop260]# jqop jps
--------------- mihaoyu152 指令信息 ---------------
jps
6581 DFSZKFailoverController
6821 NodeManager
5894 JournalNode
6312 DataNode
6185 NameNode
7179 Jps
6719 ResourceManager
--------------- mihaoyu153 指令信息 ---------------
jps
4883 NameNode
4676 QuorumPeerMain
5188 NodeManager
5352 ResourceManager
5418 Jps
4748 JournalNode
4972 DataNode
5085 DFSZKFailoverController
--------------- mihaoyu154 指令信息 ---------------
jps
3570 JournalNode
3493 QuorumPeerMain
3800 NodeManager
3659 DataNode
3948 Jps
--------------- mihaoyu155 指令信息 ---------------
jps
3318 QuorumPeerMain
3528 NodeManager
3676 Jps
3407 DataNode

7、启动historyserver
[root@mihaoyu152 hadoop260]# ssh mihaoyu155 "source /etc/profile; mr-jobhistory-daemon.sh start historyserver"
starting historyserver, logging to /opt/soft/hadoop260/logs/mapred-root-historyserver-mihaoyu155.out
[root@mihaoyu152 hadoop260]# jqop jps
--------------- mihaoyu152 指令信息 ---------------
jps
7219 Jps
6581 DFSZKFailoverController
6821 NodeManager
5894 JournalNode
6312 DataNode
6185 NameNode
6719 ResourceManager
--------------- mihaoyu153 指令信息 ---------------
jps
5458 Jps
4883 NameNode
4676 QuorumPeerMain
5188 NodeManager
5352 ResourceManager
4748 JournalNode
4972 DataNode
5085 DFSZKFailoverController
--------------- mihaoyu154 指令信息 ---------------
jps
3570 JournalNode
3972 Jps
3493 QuorumPeerMain
3800 NodeManager
3659 DataNode
--------------- mihaoyu155 指令信息 ---------------
jps
3797 Jps
3318 QuorumPeerMain
3528 NodeManager
3720 JobHistoryServer
3407 DataNode
  • 2
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 打赏
    打赏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

honconM

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值