One-click deployment script for a native Hadoop / Spark cluster

#!/bin/bash
#############################################################
#################### Usage notes ############################
# 1. Prepare the servers' base environment before using this script.
# 2. Manually create the following directory on every Hadoop node: /data/hdfs/tmp
# 3. Edit the configuration parameters below.
# 4. After the script finishes, the NameNode must be formatted manually.
# 5. Format command: hdfs namenode -format
#############################################################
# Parameter definitions
# Master node hostname
master_dns='slave-3'
# Hostname of slave node 1; also used as the Secondary NameNode (2NN) address
slave_1='slave-4'
# All slave nodes
slaves=(slave-4 slave-5)
# Java installation path
java_home='/opt/java/jdk1.8.0_144'
# Hadoop version
hadoop_version='hadoop-2.8.1'
# Hadoop data storage path
hadoop_data_path='/data/hdfs/tmp'
# Path to the Hadoop install package
hadoop_install_package='/opt/package/'$hadoop_version'.tar.gz'
# HDFS replication factor
dfs_replication='2'
# Spark version
spark_version='spark-3.0.2-bin-hadoop2.7'
# Path to the Spark install package
spark_install_package='/opt/package/'$spark_version'.tgz'
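# Optional sanity check (an addition to the original script): make sure both
# install packages exist before doing anything, rather than failing midway.
for pkg in "$hadoop_install_package" "$spark_install_package"; do
  [ -f "$pkg" ] || { echo "missing install package: $pkg"; exit 1; }
done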
###########################################
############## Install Hadoop #############
###########################################
echo $(date) 'info: starting installation of the big-data base environment...'
echo $(date) 'info: checking the Java version...'
java -version
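# Optional guard (an addition): stop here if no JDK is on the PATH, since
# everything below depends on it.
command -v java >/dev/null 2>&1 || { echo 'java not found, aborting'; exit 1; }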
echo $(date) 'info: creating the Hadoop data directory...'
mkdir -p "$hadoop_data_path"
echo $(date) 'info: extracting the Hadoop package...'
tar -zxvf $hadoop_install_package -C /opt
# Configure Hadoop environment variables
echo $(date) 'info: configuring Hadoop environment variables...'
echo 'export HADOOP_HOME=/opt/'$hadoop_version>>/etc/profile
echo 'export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin'>>/etc/profile
source /etc/profile
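# Note: sourcing /etc/profile affects only this script's own shell. Interactive
# sessions need a fresh login (or a manual 'source /etc/profile') to pick up
# HADOOP_HOME and the new PATH.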
# Point hadoop-env.sh at the local JDK
echo $(date) 'info: configuring Hadoop...'
echo 'export JAVA_HOME='$java_home>>/opt/$hadoop_version/etc/hadoop/hadoop-env.sh
# core-site.xml: set the HDFS master address (fs.defaultFS)
sed -i '19a\\t</property>' /opt/$hadoop_version/etc/hadoop/core-site.xml
sed -i '19a\\t<name>fs.defaultFS</name>' /opt/$hadoop_version/etc/hadoop/core-site.xml
sed -i '19a\\t<value>hdfs://'$master_dns':9000</value>' /opt/$hadoop_version/etc/hadoop/core-site.xml
sed -i '19a\\t<property>' /opt/$hadoop_version/etc/hadoop/core-site.xml
# core-site.xml: set the directory for files Hadoop generates at runtime
sed -i '19a\\t</property>' /opt/$hadoop_version/etc/hadoop/core-site.xml
sed -i '19a\\t<name>hadoop.tmp.dir</name>' /opt/$hadoop_version/etc/hadoop/core-site.xml
sed -i '19a\\t<value>'$hadoop_data_path'</value>' /opt/$hadoop_version/etc/hadoop/core-site.xml
sed -i '19a\\t<property>' /opt/$hadoop_version/etc/hadoop/core-site.xml
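# The eight appends above all target line 19, inside <configuration>, so each
# group comes out bottom-up. With the sample settings, core-site.xml gains:
#   <property>
#     <value>/data/hdfs/tmp</value>
#     <name>hadoop.tmp.dir</name>
#   </property>
#   <property>
#     <value>hdfs://slave-3:9000</value>
#     <name>fs.defaultFS</name>
#   </property>
# Hadoop's Configuration parser matches child tags by name, so the
# value-before-name order is harmless.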
# hdfs-site.xml: set the HDFS replication factor
sed -i '19a\\t</property>' /opt/$hadoop_version/etc/hadoop/hdfs-site.xml
sed -i '19a\\t<name>dfs.replication</name>' /opt/$hadoop_version/etc/hadoop/hdfs-site.xml
sed -i '19a\\t<value>'$dfs_replication'</value>' /opt/$hadoop_version/etc/hadoop/hdfs-site.xml
sed -i '19a\\t<property>' /opt/$hadoop_version/etc/hadoop/hdfs-site.xml
# hdfs-site.xml: set the Secondary NameNode (2NN) address
sed -i '19a\\t</property>' /opt/$hadoop_version/etc/hadoop/hdfs-site.xml
sed -i '19a\\t<name>dfs.namenode.secondary.http-address</name>' /opt/$hadoop_version/etc/hadoop/hdfs-site.xml
sed -i '19a\\t<value>'$slave_1':9001</value>' /opt/$hadoop_version/etc/hadoop/hdfs-site.xml
sed -i '19a\\t<property>' /opt/$hadoop_version/etc/hadoop/hdfs-site.xml
# Configure mapred-site.xml: run MapReduce jobs on YARN
cp /opt/$hadoop_version/etc/hadoop/mapred-site.xml.template /opt/$hadoop_version/etc/hadoop/mapred-site.xml
sed -i '19a\\t</property>' /opt/$hadoop_version/etc/hadoop/mapred-site.xml
sed -i '19a\\t<name>mapreduce.framework.name</name>' /opt/$hadoop_version/etc/hadoop/mapred-site.xml
sed -i '19a\\t<value>yarn</value>' /opt/$hadoop_version/etc/hadoop/mapred-site.xml
sed -i '19a\\t<property>' /opt/$hadoop_version/etc/hadoop/mapred-site.xml
# yarn-site.xml: set the ResourceManager address
sed -i '15a\\t</property>' /opt/$hadoop_version/etc/hadoop/yarn-site.xml
sed -i '15a\\t<name>yarn.resourcemanager.hostname</name>' /opt/$hadoop_version/etc/hadoop/yarn-site.xml
sed -i '15a\\t<value>'$master_dns'</value>' /opt/$hadoop_version/etc/hadoop/yarn-site.xml
sed -i '15a\\t<property>' /opt/$hadoop_version/etc/hadoop/yarn-site.xml
# Enable the auxiliary shuffle service so MapReduce jobs can run on YARN
sed -i '15a\\t</property>' /opt/$hadoop_version/etc/hadoop/yarn-site.xml
sed -i '15a\\t<name>yarn.nodemanager.aux-services</name>' /opt/$hadoop_version/etc/hadoop/yarn-site.xml
sed -i '15a\\t<value>mapreduce_shuffle</value>' /opt/$hadoop_version/etc/hadoop/yarn-site.xml
sed -i '15a\\t<property>' /opt/$hadoop_version/etc/hadoop/yarn-site.xml
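# Note: the hard-coded insertion points (line 19 for core/hdfs/mapred-site.xml,
# line 15 for yarn-site.xml) land inside the <configuration> element of the
# stock hadoop-2.8.1 templates. With any other Hadoop release, verify where
# <configuration> sits before running this script.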
# Configure the slaves file (drop the default 'localhost' line first)
sed -i '1d' /opt/$hadoop_version/etc/hadoop/slaves
for slave in "${slaves[@]}"; do
  echo $slave>>/opt/$hadoop_version/etc/hadoop/slaves
done
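# With the sample settings above, the slaves file now reads:
#   slave-4
#   slave-5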
# Distribute the configured Hadoop to the remaining nodes
if [[ ${#slaves[@]} -ge 1 ]]; then
  for slave in "${slaves[@]}"; do
    scp -r /opt/$hadoop_version/ $slave:/opt/
    scp /etc/profile $slave:/etc/
  done
fi
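# The scp calls above assume passwordless SSH from this node to every slave.
# If that is not set up yet, something like the following (run once, manually)
# does it; the key path is the OpenSSH default:
#   ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa
#   ssh-copy-id slave-4
#   ssh-copy-id slave-5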
# Reload environment variables
source /etc/profile
echo $(date) 'info: Hadoop configuration finished!'
###########################################
############## Install Spark ##############
###########################################
echo $(date) 'info: starting the Spark installation...'
tar -zxvf $spark_install_package -C /opt
echo $(date) 'info: configuring Spark...'
# Configure Spark environment variables
echo 'export SPARK_HOME=/opt/'$spark_version>>/etc/profile
echo 'export PATH=$PATH:$SPARK_HOME/bin:$SPARK_HOME/sbin'>>/etc/profile
# Configure spark-env.sh
cp /opt/$spark_version/conf/spark-env.sh.template /opt/$spark_version/conf/spark-env.sh
echo 'export HADOOP_CONF_DIR=/opt/'$hadoop_version'/etc/hadoop'>>/opt/$spark_version/conf/spark-env.sh
echo 'export SPARK_MASTER_PORT=7077'>>/opt/$spark_version/conf/spark-env.sh
# Spark 2.0+ standalone reads SPARK_MASTER_HOST (SPARK_MASTER_IP is deprecated)
echo 'export SPARK_MASTER_HOST='$master_dns>>/opt/$spark_version/conf/spark-env.sh
echo 'export JAVA_HOME='$java_home>>/opt/$spark_version/conf/spark-env.sh
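# Optional worker sizing (not set by the original script; the values below are
# only examples, both are standard spark-env.sh variables):
# echo 'export SPARK_WORKER_MEMORY=4g'>>/opt/$spark_version/conf/spark-env.sh
# echo 'export SPARK_WORKER_CORES=2'>>/opt/$spark_version/conf/spark-env.sh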
# Configure the slaves file (drop the template's trailing 'localhost' line)
cp /opt/$spark_version/conf/slaves.template /opt/$spark_version/conf/slaves
sed -i '$d' /opt/$spark_version/conf/slaves
for slave in "${slaves[@]}"; do
  echo $slave>>/opt/$spark_version/conf/slaves
done
# Distribute Spark to the remaining nodes
if [[ ${#slaves[@]} -ge 1 ]]; then
  for slave in "${slaves[@]}"; do
    scp -r /opt/$spark_version/ $slave:/opt/
    scp /etc/profile $slave:/etc/
  done
fi
echo $(date) 'info: Spark configuration finished!'

Note: I have tested this script myself; its main purpose is to simplify the cluster setup process. The settings it writes are a basic configuration, so adjust the parameters at the top if you have other requirements.
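For reference, a minimal smoke test after the script completes might look like the following, assuming the sample hostnames above and the standard layout of the spark-3.0.2-bin-hadoop2.7 package (the examples jar name below is the usual one for that release; verify it against your copy):

hdfs namenode -format
start-dfs.sh
start-yarn.sh
/opt/spark-3.0.2-bin-hadoop2.7/sbin/start-all.sh
jps
spark-submit --master spark://slave-3:7077 \
  --class org.apache.spark.examples.SparkPi \
  /opt/spark-3.0.2-bin-hadoop2.7/examples/jars/spark-examples_2.12-3.0.2.jar 10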
