5. Hadoop High Availability Deployment

(1) Download the installation package and extract it to the target directory.

(2) Edit the configuration files and add the required variables.

(3) Distribute to the other nodes and configure environment variables.

Using the dataxc user as an example, a sample script is shown below:

#!/bin/bash
# ins-hadoop.sh

# Hadoop nodes
nd_list=(n101 n102 n103)
# ZooKeeper connection string
zk_connect='n101:2181,n102:2181,n103:2181'
# JournalNode (shared edits) addresses
jn_dir='n101:8485;n102:8485;n103:8485'

# Extract Hadoop into the installation directory
cd /home/dataxc/sw && tar -zxvf hadoop-3.3.1.tar.gz -C /home/dataxc/opt

# Set the Java path and timezone for Hadoop
sed -i 's!# export JAVA_HOME=!export JAVA_HOME=/home/dataxc/opt/jdk1.8.0_301!' /home/dataxc/opt/hadoop-3.3.1/etc/hadoop/hadoop-env.sh
echo 'export HADOOP_OPTS="$HADOOP_OPTS -Duser.timezone=Asia/Shanghai"' | tee -a /home/dataxc/opt/hadoop-3.3.1/etc/hadoop/{hadoop-env.sh,yarn-env.sh} > /dev/null

# Generate core-site.xml
echo -e '<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
' > /home/dataxc/opt/hadoop-3.3.1/etc/hadoop/core-site.xml
echo -e "\
<configuration>
<!-- Default file system name -->
<property>
	<name>fs.defaultFS</name>
	<value>hdfs://hd-c1/</value>
</property>
<!-- Temporary file directory -->
<property>
	<name>hadoop.tmp.dir</name>
	<value>/data/dfs/tmp</value>
</property>\n
<!-- ZooKeeper quorum -->
<property>
	<name>ha.zookeeper.quorum</name>
	<value>$zk_connect</value>
</property>\n
<!-- Proxy user authorization, used for Hive custom authentication; adjust as needed -->
<property>
	<name>hadoop.proxyuser.dataxc.hosts</name>
	<value>*</value>
</property>
<property>
	<name>hadoop.proxyuser.dataxc.groups</name>
	<value>dataxc,adm,root</value>
</property>
</configuration>" >> /home/dataxc/opt/hadoop-3.3.1/etc/hadoop/core-site.xml

# Generate hdfs-site.xml
echo -e '<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
' > /home/dataxc/opt/hadoop-3.3.1/etc/hadoop/hdfs-site.xml
echo -e "\
<configuration>
<!-- Nameservice ID; more than one may be listed -->
<property>
	<name>dfs.nameservices</name>
	<value>hd-c1</value>
</property>
<property>
	<name>dfs.ha.namenodes.hd-c1</name>
	<value>nn1,nn2</value>
</property>\n
<!-- NameNode RPC/HTTP addresses; note they reference the nameservice name -->
<property>
	<name>dfs.namenode.rpc-address.hd-c1.nn1</name>
	<value>n101:9000</value>
</property>
<property>
	<name>dfs.namenode.http-address.hd-c1.nn1</name>
	<value>n101:50070</value>
</property>
<property>
	<name>dfs.namenode.servicerpc-address.hd-c1.nn1</name>
	<value>n101:53310</value>
</property>\n
<property>
	<name>dfs.namenode.rpc-address.hd-c1.nn2</name>
	<value>n102:9000</value>
</property>
<property>
	<name>dfs.namenode.http-address.hd-c1.nn2</name>
	<value>n102:50070</value>
</property>
<property>
	<name>dfs.namenode.servicerpc-address.hd-c1.nn2</name>
	<value>n102:53310</value>
</property>\n
<!-- Shared edits directory -->
<property>
	<name>dfs.namenode.shared.edits.dir</name>
	<value>qjournal://$jn_dir/hd-c1</value>
</property>
<!-- NameNode metadata directory -->
<property>
	<name>dfs.namenode.name.dir</name>
	<value>/data/dfs/hd-c1</value>
	<final>true</final>
</property>
<!-- JournalNode edits directory -->
<property>
	<name>dfs.journalnode.edits.dir</name>
	<value>/data/dfs/journal</value>
</property>\n
<!-- DataNode data directory -->
<property>
	<name>dfs.datanode.data.dir</name>
	<value>/data/dfs/data</value>
</property>\n
<!-- Automatic failover on NameNode failure -->
<property>
	<name>dfs.ha.automatic-failover.enabled</name>
	<value>true</value>
</property>
<property>
	<name>dfs.client.failover.proxy.provider.hd-c1</name>
	<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>\n
<!-- Fencing method and SSH private key -->
<property>
	<name>dfs.ha.fencing.methods</name>
	<value>sshfence</value>
</property>
<property>
	<name>dfs.ha.fencing.ssh.private-key-files</name>
	<value>/home/dataxc/.ssh/id_rsa</value>
</property>\n
<!-- Replication factor -->
<property>
	<name>dfs.replication</name>
	<value>3</value>
</property>\n
<!-- Enable HDFS permission checks; adjust as needed -->
<property>
	<name>dfs.permissions.enabled</name>
	<value>false</value>
</property>\n
<!-- Quorum journal write timeout; adjust as needed -->
<property>
	<name>dfs.qjournal.write-txns.timeout.ms</name>
	<value>60000</value>
</property>
</configuration>" >> /home/dataxc/opt/hadoop-3.3.1/etc/hadoop/hdfs-site.xml

# Generate yarn-site.xml
echo -e '<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
' > /home/dataxc/opt/hadoop-3.3.1/etc/hadoop/yarn-site.xml
echo -e "\
<configuration>
<!-- Enable ResourceManager HA -->
<property>
	<name>yarn.resourcemanager.ha.enabled</name>
	<value>true</value>
</property>
<property>
	<name>yarn.resourcemanager.cluster-id</name>
	<value>rm_ha_id</value>
</property>\n
<!-- ResourceManager ID list -->
<property>
	<name>yarn.resourcemanager.ha.rm-ids</name>
	<value>rm1,rm2</value>
</property>
<property>
	<name>yarn.resourcemanager.hostname.rm1</name>
	<value>n101</value>
</property>
<property>
	<name>yarn.resourcemanager.hostname.rm2</name>
	<value>n102</value>
</property>\n
<!-- Enable recovery, persisting application state in the ZKRMStateStore -->
<property>
	<name>yarn.resourcemanager.recovery.enabled</name>
	<value>true</value>
</property>
<property>
	<name>yarn.resourcemanager.store.class</name>
	<value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
</property>
<!-- ZooKeeper address -->
<property>
	<name>yarn.resourcemanager.zk-address</name>
	<value>$zk_connect</value>
</property>\n
<!-- Hadoop 3 defaults to mapreduce.shuffle; use mapreduce_shuffle for compatibility across versions -->
<property>
	<name>yarn.nodemanager.aux-services</name>
	<value>mapreduce_shuffle</value>
</property>\n
<!-- Disable physical memory checks; adjust as needed -->
<property>
	<name>yarn.nodemanager.pmem-check-enabled</name>
	<value>false</value>
</property>
<!-- Disable virtual memory checks; adjust as needed -->
<property>
	<name>yarn.nodemanager.vmem-check-enabled</name>
	<value>false</value>
</property>\n
<!-- Maximum node memory available to YARN; default 8192 MB, -1 means no limit -->
<property>
	<name>yarn.nodemanager.resource.memory-mb</name>
	<value>-1</value>
</property>
<!-- Auto-detect available physical memory; make sure resource.memory-mb is set to -1 first -->
<property>
	<name>yarn.nodemanager.resource.detect-hardware-capabilities</name>
	<value>true</value>
</property>
<!-- Minimum memory per container / memory increment (rounding) factor -->
<property>
	<name>yarn.scheduler.minimum-allocation-mb</name>
	<value>128</value>
</property>
<!-- Maximum memory per container -->
<property>
	<name>yarn.scheduler.maximum-allocation-mb</name>
	<value>51200</value>
</property>
</configuration>" >> /home/dataxc/opt/hadoop-3.3.1/etc/hadoop/yarn-site.xml

# Generate mapred-site.xml
echo -e '<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
' > /home/dataxc/opt/hadoop-3.3.1/etc/hadoop/mapred-site.xml
echo -e "\
<configuration>
<!-- Run MapReduce on YARN -->
<property>
	<name>mapreduce.framework.name</name>
	<value>yarn</value>
</property>
<!-- Enable out-of-band heartbeats so idle resources are assigned tasks sooner -->
<property>
	<name>mapreduce.tasktracker.outofband.heartbeat</name>
	<value>true</value>
</property>
<!-- Environment variables -->
<property>
	<name>yarn.app.mapreduce.am.env</name>
	<value>HADOOP_MAPRED_HOME=\${HADOOP_HOME}</value>
</property>
<property>
	<name>mapreduce.map.env</name>
	<value>HADOOP_MAPRED_HOME=\${HADOOP_HOME}</value>
</property>
<property>
	<name>mapreduce.reduce.env</name>
	<value>HADOOP_MAPRED_HOME=\${HADOOP_HOME}</value>
</property>
</configuration>" >> /home/dataxc/opt/hadoop-3.3.1/etc/hadoop/mapred-site.xml

# Add the required user variables to the start/stop scripts
sed -i '1a export HDFS_ZKFC_USER=dataxc\
export HDFS_JOURNALNODE_USER=dataxc\
export HDFS_SECONDARYNAMENODE_USER=dataxc\
export HDFS_NAMENODE_USER=dataxc\
export HDFS_DATANODE_SECURE_USER=hdfs\
export HDFS_DATANODE_USER=dataxc' /home/dataxc/opt/hadoop-3.3.1/sbin/{start-dfs.sh,stop-dfs.sh}

sed -i '1a export YARN_PROXYSERVER_USER=dataxc\
export YARN_NODEMANAGER_USER=dataxc\
export HADOOP_SECURE_DN_USER=yarn\
export YARN_RESOURCEMANAGER_USER=dataxc' /home/dataxc/opt/hadoop-3.3.1/sbin/{start-yarn.sh,stop-yarn.sh}

# Add the cluster nodes to the workers file
echo -e "n101\nn102\nn103" > /home/dataxc/opt/hadoop-3.3.1/etc/hadoop/workers

# Distribute to the other nodes
for node in ${nd_list[*]:1}
	do
		scp -r /home/dataxc/opt/hadoop-3.3.1 dataxc@$node:/home/dataxc/opt
	done
:<<!
# Add environment variables
for node in ${nd_list[*]}
	do
		ssh dataxc@$node 'sed -i -e "/export JAVA_HOME=/a export HADOOP_HOME=/home/dataxc/opt/hadoop-3.3.1" \
		-e "/^export PATH=/ s/$/\:\$HADOOP_HOME\/bin\:\$HADOOP_HOME\/sbin/" /home/dataxc/.bashrc;
		source /home/dataxc/.bashrc'
	done
!
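
# For reference, after the (commented-out) block above runs, /home/dataxc/.bashrc should
# end up with lines roughly like the following (a sketch only; the exact PATH contents
# depend on what the file already exports, e.g. JAVA_HOME and an existing PATH line):
#   export JAVA_HOME=/home/dataxc/opt/jdk1.8.0_301
#   export HADOOP_HOME=/home/dataxc/opt/hadoop-3.3.1
#   export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin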

:<<!
# Start the JournalNodes (make sure the ZooKeeper cluster is already running)
/home/dataxc/opt/hadoop-3.3.1/bin/hdfs --workers --daemon start journalnode
# First start only: format the NameNode
/home/dataxc/opt/hadoop-3.3.1/bin/hdfs namenode -format
# Start the NameNode
/home/dataxc/opt/hadoop-3.3.1/bin/hdfs --daemon start namenode
# Sync metadata to the standby NameNode
ssh dataxc@n102 /home/dataxc/opt/hadoop-3.3.1/bin/hdfs namenode -bootstrapStandby
# First start only: format the ZKFC znode in ZooKeeper
/home/dataxc/opt/hadoop-3.3.1/bin/hdfs zkfc -formatZK
# Start HDFS
/home/dataxc/opt/hadoop-3.3.1/sbin/start-dfs.sh
# Start YARN
/home/dataxc/opt/hadoop-3.3.1/sbin/start-yarn.sh
!
#end
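
Once HDFS and YARN are up, the HA state can be checked with the standard admin commands. A minimal sketch follows; nn1/nn2 and rm1/rm2 are the NameNode and ResourceManager IDs defined in the configuration above, and the file name chk-hadoop-ha.sh is only a placeholder:

#!/bin/bash
# chk-hadoop-ha.sh

# Which NameNode is active and which is standby
/home/dataxc/opt/hadoop-3.3.1/bin/hdfs haadmin -getServiceState nn1
/home/dataxc/opt/hadoop-3.3.1/bin/hdfs haadmin -getServiceState nn2

# ResourceManager HA state
/home/dataxc/opt/hadoop-3.3.1/bin/yarn rmadmin -getServiceState rm1
/home/dataxc/opt/hadoop-3.3.1/bin/yarn rmadmin -getServiceState rm2

# Overall HDFS health report
/home/dataxc/opt/hadoop-3.3.1/bin/hdfs dfsadmin -report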
