Shell script: automatically install Hadoop on CentOS

1. Before installing, place the four configuration files below in /opt/download/hadoopinstall/ and edit their parameters to match your environment.
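A quick, hedged way to confirm the expected layout before running the installer (the file names are the four listed below; the tarball path comes from the script in step 2):

ls /opt/download/hadoopinstall/
# expected: core-site.xml  hdfs-site.xml  mapred-site.xml  yarn-site.xml
ls /opt/download/hadoop-3.1.3.tar.gz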

File 1: core-site.xml

<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>
	<!-- NameNode address: the URI external clients use to access Hadoop -->
	<property>
		<name>fs.defaultFS</name>
		<value>hdfs://192.168.75.245:9820</value>
	</property>
	<!-- Base directory where NameNode/DataNode data files are stored -->
	<property>
		<name>hadoop.tmp.dir</name>
		<value>/opt/software/hadoop313/data</value>
	</property>
	<!-- Static user for HDFS web UI logins: root (lets proxied services, e.g. Hive, access Hadoop as root) -->
	<property>
		<name>hadoop.http.staticuser.user</name>
		<value>root</value>
	</property>
	<!-- Hosts from which the root superuser may act as a proxy -->
	<property>
		<name>hadoop.proxyuser.root.hosts</name>
		<value>*</value>
	</property>
	<!-- Groups whose members the root superuser may impersonate -->
	<property>
		<name>hadoop.proxyuser.root.groups</name>
		<value>*</value>
	</property>
	<!-- Users the root superuser may impersonate -->
	<property>
		<name>hadoop.proxyuser.root.users</name>
		<value>*</value>
	</property>
</configuration>
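With the proxy-user properties above, root may impersonate other users. A hedged way to exercise this once HDFS is up (assuming WebHDFS is reachable on the NameNode web port set in hdfs-site.xml; someuser is a hypothetical account):

curl "http://192.168.75.245:9870/webhdfs/v1/?op=LISTSTATUS&user.name=root&doas=someuser"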

File 2: hdfs-site.xml

<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>
	<!-- NameNode web UI address -->
	<property>
		<name>dfs.namenode.http-address</name>
		<value>192.168.75.245:9870</value>
	</property>
	<!-- Secondary NameNode web UI address -->
	<property>
		<name>dfs.namenode.secondary.http-address</name>
		<value>192.168.75.245:9868</value>
	</property>
	<!-- HDFS replication factor (1 copy, since this is a single node) -->
	<property>
		<name>dfs.replication</name>
		<value>1</value>
	</property>
</configuration>
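Once the script in step 2 has run and the environment is loaded, the effective values can be read back; a minimal check:

hdfs getconf -confKey dfs.replication
hdfs getconf -confKey dfs.namenode.http-address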

File 3: mapred-site.xml

<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>
	<!-- Run MapReduce on YARN (the default is local) -->
	<property>
		<name>mapreduce.framework.name</name>
		<value>yarn</value>
	</property>
	<!-- JobHistory server address -->
	<property>
		<name>mapreduce.jobhistory.address</name>
		<value>192.168.75.245:10020</value>
	</property>
	<!-- JobHistory server web UI address -->
	<property>
		<name>mapreduce.jobhistory.webapp.address</name>
		<value>192.168.75.245:19888</value>
	</property>
	<!-- The block below works around YARN failing to find the MapReduce classes; our machine never hit that error, so it is untested and left commented out -->

	<!--
	<property>
  		<name>yarn.app.mapreduce.am.env</name>
  		<value>HADOOP_MAPRED_HOME=${full path of your hadoop distribution directory}</value>
	</property>
	<property>
  		<name>mapreduce.map.env</name>
  		<value>HADOOP_MAPRED_HOME=${full path of your hadoop distribution directory}</value>
	</property>
	<property>
  		<name>mapreduce.reduce.env</name>
  		<value>HADOOP_MAPRED_HOME=${full path of your hadoop distribution directory}</value>
	</property>
	-->
</configuration>
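For reference, the usual symptom the commented-out block fixes is a job failing with "Error: Could not find or load main class org.apache.hadoop.mapreduce.v2.app.MRAppMaster". A hedged first diagnostic (assuming the hadoop binary is on PATH) is to print the classpath those env properties need to cover:

hadoop classpath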

File 4: yarn-site.xml

<?xml version="1.0"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->
<configuration>
<!-- Site specific YARN configuration properties -->
	<!-- Auxiliary service required for the MapReduce shuffle -->
	<property>
	    <name>yarn.nodemanager.aux-services</name>
	    <value>mapreduce_shuffle</value>
	</property>
	<!-- ResourceManager hostname -->
	<property>
	    <name>yarn.resourcemanager.hostname</name>
	    <value>singlefangliang</value>
	</property>
	<!-- Environment variables containers inherit -->
	<property>
	    <name>yarn.nodemanager.env-whitelist</name>
	    <value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_MAPRED_HOME</value>
	</property>
	<!-- Memory (MB) the MapReduce ApplicationMaster requests initially -->
	<property>
       	 <name>yarn.app.mapreduce.am.resource.mb</name>
       	 <value>1024</value>
  	 </property>
	<!-- Minimum memory (MB) YARN may allocate to a container -->
	<property>
	    <name>yarn.scheduler.minimum-allocation-mb</name>
	    <value>1024</value>
	</property>
	<!-- Maximum memory (MB) YARN may allocate to a container -->
	<property>
	    <name>yarn.scheduler.maximum-allocation-mb</name>
	    <value>3072</value>
	</property>
	<!-- Physical memory (MB) this NodeManager may manage -->
	<property>
	    <name>yarn.nodemanager.resource.memory-mb</name>
	    <value>3072</value>
	</property>
	<!-- Disable YARN's physical memory limit check; JDK 8 on CentOS reports an inflated virtual memory footprint -->
	<property>
	    <name>yarn.nodemanager.pmem-check-enabled</name>
	    <value>false</value>
	</property>
	<!-- Disable YARN's virtual memory limit check -->
	<property>
	    <name>yarn.nodemanager.vmem-check-enabled</name>
	    <value>false</value>
	</property>
	<!-- Enable log aggregation -->
	<property>
	    <name>yarn.log-aggregation-enable</name>
	    <value>true</value>
	</property>
	<!-- Log aggregation server URL (the JobHistory server) -->
	<property>
	    <name>yarn.log.server.url</name>
	    <value>http://192.168.75.245:19888/jobhistory/logs</value>
	</property>
	<!-- Retain aggregated logs for 7 days -->
	<property>
	    <name>yarn.log-aggregation.retain-seconds</name>
	    <value>604800</value>
	</property>
</configuration>
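The four memory values above have to be mutually consistent: the AM request (1024) must fall between the scheduler minimum (1024) and maximum (3072), and the maximum cannot exceed the NodeManager total (3072). A hedged sanity check that reads the values back, assuming xmllint is installed:

for k in yarn.app.mapreduce.am.resource.mb yarn.scheduler.minimum-allocation-mb yarn.scheduler.maximum-allocation-mb yarn.nodemanager.resource.memory-mb
do
    # print the <value> of the property whose <name> matches $k
    xmllint --xpath "string(//property[name='$k']/value)" yarn-site.xml; echo
done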

2. Run ./hadoopthirdinstall.sh
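A minimal invocation (assuming the script sits in the current directory):

chmod +x hadoopthirdinstall.sh
./hadoopthirdinstall.sh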

#!/bin/bash

#hadoop installation
#prerequisite: hadoop-3.1.3.tar.gz must already be in /opt/download
#the install prompts for Enter a few times (ssh-keygen)
#the four site.xml files must be placed in /opt/download/hadoopinstall beforehand, edited to match your setup
#if the passwordless-ssh check fails after installation, delete the hadoop313/data directory and re-run this script

#1. check whether an install already exists: remove and reinstall if so, install fresh otherwise
ENV_DIR='/etc/profile.d/'
SOFTWARE_DIR='/opt/software/'
DOWNLOAD_DIR='/opt/download/'
#entries are ';'-separated with spaces encoded as '-', so addEnv can split on whitespace safely;
#addEnv's final sed turns the '-' placeholders back into spaces
#note: HADOOP_CONF_DIR is exported twice; the last value ($HADOOP_HOME/etc/hadoop) wins
HADOOP_ENV='#hadoop;export-HADOOP_HOME=/opt/software/hadoop313;export-PATH=$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$PATH;export-HDFS_NAMENODE_USER=root;export-HDFS_DATANODE_USER=root;export-HDFS_SECONDARYNAMENODE_USER=root;export-YARN_RESOURCEMANAGER_USER=root;export-YARN_NODEMANAGER_USER=root;export-HADOOP_COMMON_HOME=$HADOOP_HOME;export-HADOOP_HDFS_HOME=$HADOOP_HOME;export-HADOOP_MAPRED_HOME=$HADOOP_HOME;export-HADOOP_YARN_HOME=$HADOOP_HOME;export-HADOOP_INSTALL=$HADOOP_HOME;export-HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native;export-HADOOP_CONF_DIR=$HADOOP_HOME;export-HADOOP_LIBEXEC_DIR=$HADOOP_HOME/libexec;export-JAVA_LIBRARY_PATH=$HADOOP_HOME/lib/native:$JAVA_LIBRARY_PATH;export-HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop'


#remove the hadoop block from myenv.sh: the '#...' marker line matching $1 plus the $2 lines after it
#(GNU sed: -rin is -r plus -i with an 'n' backup suffix, leaving a myenv.shn backup)
function removeIfExists(){
        if [ $# -lt 2 ]
        then
                echo 'too few arguments'
                return 1
        fi
        sed -rin "/^#.*$1/,+$2d" $ENV_DIR'myenv.sh'
}

#append the hadoop environment entries to myenv.sh, one per line
function addEnv(){
	DIR="$1"
        DIR=${DIR//;/ }
        for item in $DIR
        do
                sed -in '$a'$item $ENV_DIR'myenv.sh'
        done
        #turn the '-' placeholders back into spaces; note this rewrites every '-' in the file
        sed -in 's/-/ /g' $ENV_DIR'myenv.sh'
        echo 'hadoop configuration appended to myenv.sh'
}

RST=`ls $SOFTWARE_DIR|grep hadoop`
if [[ -z $RST ]]
then
        echo 'hadoop is not yet installed on this machine'
        tar -zxf $DOWNLOAD_DIR'hadoop-3.1.3.tar.gz' -C $SOFTWARE_DIR
        mv /opt/software/hadoop* /opt/software/hadoop313
        echo 'installation finished'
else
        echo 'hadoop was previously installed on this machine'
        rm -rf ${SOFTWARE_DIR}hadoop*
        tar -zxf $DOWNLOAD_DIR'hadoop-3.1.3.tar.gz' -C $SOFTWARE_DIR
        mv /opt/software/hadoop* /opt/software/hadoop313
        echo 'old hadoop removed, fresh install succeeded'
fi


RST=`ls $ENV_DIR|grep myenv.sh`
if [[ -z $RST ]]
then
	#create the file with one blank line so sed's $a has a line to append after
	echo > $ENV_DIR'myenv.sh'
	echo 'myenv.sh did not exist; created it'
else
	echo 'myenv.sh already exists on this machine'
	removeIfExists hadoop 17
	echo 'old hadoop entries removed from myenv.sh'
fi

addEnv $HADOOP_ENV

#disable host-key checking globally so ssh never prompts (convenient on a lab box, but it weakens security)
sed -in '$a''StrictHostKeyChecking no' /etc/ssh/ssh_config
sed -in '$a''UserKnownHostsFile /dev/null' /etc/ssh/ssh_config
echo '/etc/ssh/ssh_config updated'

RST=`ls -la /root|grep '\.ssh'`
if [ -n "$RST" ]
then
	echo '.ssh already exists on this machine'
	rm -rf /root/.ssh
	echo 'old .ssh removed'
else
	echo 'no .ssh directory on this machine'
fi
ssh-keygen -t rsa
cat /root/.ssh/id_rsa.pub>>/root/.ssh/authorized_keys
echo 'passwordless ssh configured'
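#a hedged, non-interactive alternative to the ssh-keygen call above, so the
#script needs no Enter presses (untested here; standard ssh-keygen flags):
#	mkdir -p /root/.ssh && ssh-keygen -t rsa -P '' -f /root/.ssh/id_rsa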

cp /opt/download/hadoopinstall/core-site.xml /opt/software/hadoop313/etc/hadoop/core-site.xml
cp /opt/download/hadoopinstall/hdfs-site.xml /opt/software/hadoop313/etc/hadoop/hdfs-site.xml
cp /opt/download/hadoopinstall/yarn-site.xml /opt/software/hadoop313/etc/hadoop/yarn-site.xml
cp /opt/download/hadoopinstall/mapred-site.xml /opt/software/hadoop313/etc/hadoop/mapred-site.xml
echo 'your four site.xml files have been copied into hadoop'


#load the new environment into this shell so the hdfs command resolves
source /etc/profile

cd /opt/software/hadoop313/bin
./hdfs namenode -format
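
After the format succeeds, a minimal sketch for starting the daemons and verifying the install (assuming the environment from myenv.sh is active; the *_USER exports above allow running as root):

start-dfs.sh
start-yarn.sh
mapred --daemon start historyserver
jps
# expect NameNode, DataNode, SecondaryNameNode, ResourceManager, NodeManager and JobHistoryServer,
# then browse http://192.168.75.245:9870 (HDFS) and the ResourceManager UI (port 8088 by default)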