#!/usr/bin/env bash
### Hadoop fully-distributed deployment script (3 nodes).
### Before running, place the JDK and Hadoop tarballs in /opt/software.
### jtar/htar are the JDK and Hadoop tarball file names; jdir/hdir are the
### directory names produced by extraction. Adjust them to your environment.
### Passwordless SSH must be configured beforehand: hadoop1 must be able to
### log in to hadoop1, hadoop2 and hadoop3 without a password.
# readonly: these names are never reassigned later in the script.
readonly jtar=jdk-8u202-linux-x64.tar.gz
readonly htar=hadoop-3.3.0.tar.gz
readonly jdir=jdk1.8.0_202
readonly hdir=hadoop-3.3.0
# When redeploying, stop the running services first and remove the old data
# directory and the previous installation directories.
echo "stop-all.sh and remove all data---"
stop-all.sh
rm -rf /opt/tmp
# ${var:?} aborts if the variable is unset/empty, preventing an accidental
# 'rm -rf /opt/software/' that would wipe the whole software directory;
# '--' stops option parsing so an odd value cannot be read as a flag.
rm -rf -- "/opt/software/${jdir:?}"
rm -rf -- "/opt/software/${hdir:?}"
# Install the JDK and Hadoop by extracting the tarballs in /opt/software.
echo "--install jdk and hadoop--"
# Abort if the directory is missing so tar does not run in the wrong place.
cd /opt/software || exit 1
tar zxf "$jtar"
tar zxf "$htar"
# Configure environment variables in /etc/profile.
echo "modify /etc/profile---"
# Delete every line containing "export" from line 70 to the end of
# /etc/profile, so exports appended by a previous run of this script are
# removed before being re-appended below.
# NOTE(review): the magic line number 70 assumes the stock /etc/profile has
# no wanted "export" lines past line 70 — verify on the target distribution.
sed -i '70,$ {/export/d}' /etc/profile
echo "export JAVA_HOME=/opt/software/$jdir">>/etc/profile
echo "export HADOOP_HOME=/opt/software/$hdir">>/etc/profile
# Single quotes keep $PATH/$JAVA_HOME/$HADOOP_HOME literal here, so they are
# expanded when /etc/profile is sourced, not when this line is written.
echo 'export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin'>>/etc/profile
# Load the new variables into the current shell; JAVA_HOME/HADOOP_HOME are
# used by the remaining steps of this script.
source /etc/profile
# Edit the Hadoop configuration files in place.
echo "modify hadoop-env.sh,core-site.xml---"
cd $HADOOP_HOME/etc/hadoop/
# core-site.xml: append after the <configuration> line — default FS on
# hadoop1:9000, data dir /opt/tmp, HDFS web UI static user root, and root
# proxy-user wildcards (needed e.g. for services impersonating users).
sed -i "/<configuration>/a <property>\n<name>fs.defaultFS</name>\n<value>hdfs://hadoop1:9000</value>\n</property>\n<property>\n<name>hadoop.tmp.dir</name>\n<value>/opt/tmp</value>\n</property><property><name>hadoop.http.staticuser.user</name><value>root</value></property>\n<property><name>hadoop.proxyuser.root.hosts</name><value>*</value></property><property><name>hadoop.proxyuser.root.groups</name><value>*</value></property>" core-site.xml
# hdfs-site.xml: replication factor 3 and permission checking disabled.
sed -i "/<configuration>/a <property>\n<name>dfs.replication</name>\n<value>3</value>\n</property>\n<property>\n<name>dfs.permissions</name>\n<value>false</value>\n</property>" hdfs-site.xml
# mapred-site.xml: run MapReduce on YARN. ${HADOOP_HOME} is inside double
# quotes, so it is expanded NOW by this script (set via 'source /etc/profile'
# above) and the resolved path is written into the XML.
sed -i "/<configuration>/a <property>\n<name>mapreduce.framework.name</name>\n<value>yarn</value>\n</property><property><name>yarn.app.mapreduce.am.env</name><value>HADOOP_MAPRED_HOME=${HADOOP_HOME}</value></property><property><name>mapreduce.map.env</name><value>HADOOP_MAPRED_HOME=${HADOOP_HOME}</value></property><property><name>mapreduce.reduce.env</name><value>HADOOP_MAPRED_HOME=${HADOOP_HOME}</value></property>" mapred-site.xml
# yarn-site.xml: ResourceManager on hadoop1, shuffle aux service enabled.
sed -i "/<configuration>/a <property>\n<name>yarn.resourcemanager.hostname</name>\n<value>hadoop1</value>\n</property>\n<property>\n<name>yarn.nodemanager.aux-services</name>\n<value>mapreduce_shuffle</value>\n</property>" yarn-site.xml
# hadoop-env.sh: insert after line 1 — JAVA_HOME plus the *_USER=root
# variables Hadoop 3 requires when the daemons are started as root.
sed -i "1a export JAVA_HOME=/opt/software/$jdir\nexport HDFS_NAMENODE_USER=root\nexport HDFS_DATANODE_USER=root\nexport HDFS_SECONDARYNAMENODE_USER=root\nexport YARN_RESOURCEMANAGER_USER=root\nexport YARN_NODEMANAGER_USER=root" hadoop-env.sh
# hadoop1 acts as both master and worker; hadoop2/hadoop3 are workers only.
printf '%s\n' hadoop1 hadoop2 hadoop3 > workers
# Distribute the installation to the worker nodes over SSH/SCP.
# Remove stale Hadoop data directories on the workers.
ssh hadoop2 rm -rf /opt/tmp
ssh hadoop3 rm -rf /opt/tmp
# $JAVA_HOME/$HADOOP_HOME are expanded LOCALLY (set by 'source /etc/profile'
# above), so the same absolute paths are created on the workers.
ssh hadoop2 mkdir -p $JAVA_HOME
ssh hadoop3 mkdir -p $JAVA_HOME
ssh hadoop2 mkdir -p $HADOOP_HOME
ssh hadoop3 mkdir -p $HADOOP_HOME
ssh hadoop3 mkdir -p /opt/software
# Copy the extracted JDK and Hadoop trees into the parent of the same path
# on each worker; stdout is discarded to keep the log readable.
scp -r $JAVA_HOME hadoop2:$JAVA_HOME/.. >/dev/null
scp -r $JAVA_HOME hadoop3:$JAVA_HOME/.. >/dev/null
scp -r $HADOOP_HOME hadoop2:$HADOOP_HOME/.. >/dev/null
scp -r $HADOOP_HOME hadoop3:$HADOOP_HOME/.. >/dev/null
# Sync hostname resolution and the environment variables set earlier.
scp /etc/hosts hadoop2:/etc
scp /etc/hosts hadoop3:/etc
scp /etc/profile hadoop2:/etc
scp /etc/profile hadoop3:/etc
# NOTE(review): 'ssh host source /etc/profile' only affects that one remote
# shell, which exits immediately — it has no lasting effect on the worker.
# Login shells on the workers will pick up the copied /etc/profile anyway.
ssh hadoop2 source /etc/profile
ssh hadoop3 source /etc/profile
# Format the HDFS NameNode metadata, then bring the whole cluster up.
printf '%s\n' "format hadoop----"
hdfs namenode -format
printf '%s\n' "start hadoop----"
start-all.sh
# hadoop完全分布式部署脚本(3节点)
# 最新推荐文章于 2025-05-22 10:56:32 发布