One-click install script for JDK, Hadoop, Spark, MySQL, Hive, Sqoop, HBase, ZooKeeper and Kafka

#! /bin/bash

#Grant execute permission to the script first
#chmod 777 setup.sh

echo 'full system install beginning...'

#global vars: set a flag to true to run the corresponding install step
ipAddr=false
jdk=false
mysql=false
hadoop=false
hive=false
sqoop=false
zookeeper=false
hbase=false
spark=false
kafka=false
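
#(Sketch, not in the original script) Several sections below substitute "$1" as the host IP
#(e.g. hdfs://$1:9000 and the Kafka listener), so a guard like this could fail fast when no IP is passed:
#if [ -z "$1" ]; then echo 'usage: ./setup.sh <host-ip>'; exit 1; fi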

#ipAddr setting
#Set the VM hostname: hostnamectl set-hostname ycy
#sed -i '2a 192.168.126.200 ycy' /etc/hosts
if [ "$ipAddr" = true ];then
echo 'setting ipaddr...'
sed -i 's/dhcp/static/' /etc/sysconfig/network-scripts/ifcfg-ens33
echo 'IPADDR="192.168.126.200"' >> /etc/sysconfig/network-scripts/ifcfg-ens33
echo 'NETMASK="255.255.255.0"' >> /etc/sysconfig/network-scripts/ifcfg-ens33
echo 'GATEWAY="192.168.126.2"' >> /etc/sysconfig/network-scripts/ifcfg-ens33
echo 'DNS1="114.114.114.114"' >> /etc/sysconfig/network-scripts/ifcfg-ens33
echo 'DNS2="8.8.8.8"' >> /etc/sysconfig/network-scripts/ifcfg-ens33
systemctl restart network
#ping www.baidu.com
#Disable the firewall
#systemctl stop firewalld
#systemctl disable firewalld
fi

#Passwordless SSH login
#ssh-keygen -t rsa -P ''
#press Enter through the prompts
#cd .ssh
#cat id_rsa.pub >> authorized_keys
#chmod 600 authorized_keys
#vi /etc/hosts and append a line at the end: <ip-address> <hostname>
#Check that passwordless login works: ssh <hostname>, then exit to close the remote session
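#(Sketch) A non-interactive equivalent of the manual steps above, assuming a single-node setup
#where the machine logs in to itself:
#ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
#cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
#chmod 600 ~/.ssh/authorized_keys
#ssh localhost 'exit'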

#setup jdk 1.8.111
if [ "$jdk" = true ];then
echo 'setup jdk 1.8'
cd /opt/
mkdir soft
tar -zxf /opt/jdk-8u111-linux-x64.tar.gz
mv /opt/jdk1.8.0_111 /opt/soft/jdk180
echo 'export JAVA_HOME=/opt/soft/jdk180' >> /etc/profile
echo 'export CLASSPATH=.:$JAVA_HOME/jre/lib/rt.jar:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar' >> /etc/profile
echo 'export PATH=$PATH:$JAVA_HOME/bin' >> /etc/profile
#Remove the bundled OpenJDK packages first
#rpm -qa | grep java
#rpm -e --nodeps java-1.7.0-openjdk-1.7.0.111-2.6.7.8.el7.x86_64
#rpm -e --nodeps java-1.8.0-openjdk-1.8.0.102-4.b14.el7.x86_64
#rpm -e --nodeps java-1.8.0-openjdk-headless-1.8.0.102-4.b14.el7.x86_64
#rpm -e --nodeps java-1.7.0-openjdk-headless-1.7.0.111-2.6.7.8.el7.x86_64
#./setup.sh
#source /etc/profile
#java -version
fi
#After this section run "source /etc/profile" manually; the bundled OpenJDK must be removed before the new JDK takes effect
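#(Sketch, not part of the original script) The OpenJDK removal can also be scripted, assuming the
#packages follow the usual "java-1.x.0-openjdk" naming shown above:
#rpm -qa | grep -E '^java-1\.[78]\.0-openjdk' | xargs -r rpm -e --nodeps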

#setup mysql 5.7
if [ "$mysql" = true ];then
echo 'setup mysql 5.7'
cd /opt/
wget http://repo.mysql.com/mysql-community-release-el7-5.noarch.rpm
rpm -ivh mysql-community-release-el7-5.noarch.rpm
yum install mysql-server -y
chown -R root:root /var/lib/mysql
chown root /var/lib/mysql/
service mysqld restart
mysql -h localhost -P3306 -uroot mysql --default-character-set=utf8 -e "use mysql;update user set password=password('ok') where user='root';GRANT ALL PRIVILEGES ON *.* TO root@'%' IDENTIFIED BY 'ok';flush privileges"
sed -i '22a\character-set-server=utf8' /etc/my.cnf
service mysqld restart
fi
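
#(Sketch) Quick check of the MySQL install, assuming the root password 'ok' set above:
#mysql -uroot -pok -e 'select version(); show variables like "character_set_server";'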

#setup hadoop 2.6
if [ "$hadoop" = true ];then
echo 'setup hadoop cdh 5.14.2 - hadoop 2.6'
cd /opt
tar -zxf /opt/hadoop-2.6.0-cdh5.14.2.tar.gz
mv /opt/hadoop-2.6.0-cdh5.14.2 /opt/soft/hadoop260
sed -i 's!${JAVA_HOME}!/opt/soft/jdk180!' /opt/soft/hadoop260/etc/hadoop/hadoop-env.sh
sed -i "19a <property><name>fs.defaultFS</name><value>hdfs://$1:9000</value></property>" /opt/soft/hadoop260/etc/hadoop/core-site.xml
sed -i '20a <property><name>hadoop.tmp.dir</name><value>/opt/soft/hadoop260/data</value></property>' /opt/soft/hadoop260/etc/hadoop/core-site.xml
sed -i '21a <property><name>hadoop.proxyuser.root.groups</name><value>*</value></property>' /opt/soft/hadoop260/etc/hadoop/core-site.xml
sed -i '22a <property><name>hadoop.proxyuser.root.users</name><value>*</value></property>' /opt/soft/hadoop260/etc/hadoop/core-site.xml
sed -i '19a <property><name>dfs.replication</name><value>1</value></property>' /opt/soft/hadoop260/etc/hadoop/hdfs-site.xml
sed -i '20a <property><name>dfs.permissions</name><value>false</value></property>' /opt/soft/hadoop260/etc/hadoop/hdfs-site.xml
cp /opt/soft/hadoop260/etc/hadoop/mapred-site.xml.template /opt/soft/hadoop260/etc/hadoop/mapred-site.xml
sed -i '19a <property><name>mapreduce.framework.name</name><value>yarn</value></property>' /opt/soft/hadoop260/etc/hadoop/mapred-site.xml
sed -i '15a <property><name>yarn.resourcemanager.hostname</name><value>localhost</value></property>' /opt/soft/hadoop260/etc/hadoop/yarn-site.xml
sed -i '16a <property><name>yarn.nodemanager.aux-services</name><value>mapreduce_shuffle</value></property>' /opt/soft/hadoop260/etc/hadoop/yarn-site.xml

echo '#hadoop' >> /etc/profile
echo 'export HADOOP_HOME=/opt/soft/hadoop260' >> /etc/profile
echo 'export HADOOP_MAPRED_HOME=$HADOOP_HOME' >> /etc/profile
echo 'export HADOOP_COMMON_HOME=$HADOOP_HOME' >> /etc/profile
echo 'export HADOOP_HDFS_HOME=$HADOOP_HOME' >> /etc/profile
echo 'export YARN_HOME=$HADOOP_HOME' >> /etc/profile
echo 'export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native' >> /etc/profile
echo 'export PATH=$PATH:$HADOOP_HOME/sbin:$HADOOP_HOME/bin' >> /etc/profile
echo 'export HADOOP_INSTALL=$HADOOP_HOME' >> /etc/profile
fi
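
#(Sketch of the first run, not in the original script) After "source /etc/profile", format HDFS once
#before starting the daemons:
#hdfs namenode -format
#start-dfs.sh
#start-yarn.sh
#jps    #expect NameNode, DataNode, SecondaryNameNode, ResourceManager and NodeManager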

#setup hive110
if [ "$hive" = true ];then
echo 'setup hive110'
cd /opt/
tar -zxf hive-1.1.0-cdh5.14.2.tar.gz
mv /opt/hive-1.1.0-cdh5.14.2 /opt/soft/hive110
cp /opt/mysql-connector-java-5.1.38.jar /opt/soft/hive110/lib
#cp /opt/soft/hive110/conf/hive-env.sh.template /opt/soft/hive110/conf/hive-env.sh
touch /opt/soft/hive110/conf/hive-site.xml
echo '<?xml version="1.0" encoding="UTF-8" standalone="no"?>' >> /opt/soft/hive110/conf/hive-site.xml
echo '<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>' >> /opt/soft/hive110/conf/hive-site.xml
echo '<configuration>' >> /opt/soft/hive110/conf/hive-site.xml
echo '<property><name>hive.metastore.warehouse.dir</name><value>/opt/soft/hive110</value></property>' >> /opt/soft/hive110/conf/hive-site.xml
echo '<property><name>hive.metastore.local</name><value>true</value></property>' >> /opt/soft/hive110/conf/hive-site.xml
echo "<property><name>javax.jdo.option.ConnectionURL</name><value>jdbc:mysql://$1/hive?createDatabaseIfNotExist=true</value></property>" >> /opt/soft/hive110/conf/hive-site.xml
echo '<property><name>javax.jdo.option.ConnectionDriverName</name><value>com.mysql.jdbc.Driver</value></property>' >> /opt/soft/hive110/conf/hive-site.xml
echo '<property><name>javax.jdo.option.ConnectionUserName</name><value>root</value></property>' >> /opt/soft/hive110/conf/hive-site.xml
echo '<property><name>javax.jdo.option.ConnectionPassword</name><value>ok</value></property>' >> /opt/soft/hive110/conf/hive-site.xml
echo '<property><name>hive.server2.authentication</name><value>NONE</value></property>' >> /opt/soft/hive110/conf/hive-site.xml
echo '<property><name>hive.server2.thrift.client.user</name><value>root</value></property>' >> /opt/soft/hive110/conf/hive-site.xml
echo '<property><name>hive.server2.thrift.client.password</name><value>ok</value></property>' >> /opt/soft/hive110/conf/hive-site.xml
echo '</configuration>' >> /opt/soft/hive110/conf/hive-site.xml

echo '#hive' >> /etc/profile
echo 'export HIVE_HOME=/opt/soft/hive110' >> /etc/profile
echo 'export PATH=$PATH:$HIVE_HOME/bin' >> /etc/profile
#./setup.sh 192.168.181.150
#source /etc/profile
#Initialize the metastore before the first Hive start:
#schematool -dbType mysql -initSchema
#If initialization fails, drop the hive database in MySQL first and initialize again
#Start the metastore service: hive --service metastore, then start the CLI: hive
#Start hiveserver2 for remote (third-party) connections: hiveserver2; connect with: beeline -u jdbc:hive2://192.168.181.150:10000
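#(Sketch) Running both Hive services in the background instead; the log paths are assumptions:
#nohup hive --service metastore > /opt/soft/hive110/metastore.log 2>&1 &
#nohup hive --service hiveserver2 > /opt/soft/hive110/hiveserver2.log 2>&1 &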
fi

#setup sqoop
if [ "$sqoop" = true ];then
echo 'setup sqoop146'
cd /opt/
tar -zxf sqoop-1.4.6-cdh5.14.2.tar.gz
mv /opt/sqoop-1.4.6-cdh5.14.2 /opt/soft/sqoop146
cp /opt/soft/sqoop146/conf/sqoop-env-template.sh /opt/soft/sqoop146/conf/sqoop-env.sh
echo 'export HADOOP_COMMON_HOME=/opt/soft/hadoop260' >> /opt/soft/sqoop146/conf/sqoop-env.sh
echo 'export HADOOP_MAPRED_HOME=/opt/soft/hadoop260' >> /opt/soft/sqoop146/conf/sqoop-env.sh
echo 'export HIVE_HOME=/opt/soft/hive110' >> /opt/soft/sqoop146/conf/sqoop-env.sh
echo 'export HBASE_HOME=/opt/soft/hbase120' >> /opt/soft/sqoop146/conf/sqoop-env.sh
echo 'export ZOOCFGDIR=/opt/soft/zk345/conf' >> /opt/soft/sqoop146/conf/sqoop-env.sh
cp /opt/mysql-connector-java-5.1.38.jar /opt/soft/sqoop146/lib
cp /opt/java-json.jar /opt/soft/sqoop146/lib
cp /opt/soft/hive110/lib/hive-common-1.1.0-cdh5.14.2.jar /opt/soft/sqoop146/lib
cp /opt/soft/hive110/lib/hive-jdbc-1.1.0-cdh5.14.2-standalone.jar /opt/soft/sqoop146/lib
echo '#sqoop' >> /etc/profile
echo 'export SQOOP_HOME=/opt/soft/sqoop146' >> /etc/profile
echo 'export PATH=$PATH:$SQOOP_HOME/bin' >> /etc/profile
#./setup.sh
#source /etc/profile
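#(Sketch) Verify the Sqoop-to-MySQL link, assuming the root/ok credentials configured earlier:
#sqoop list-databases --connect jdbc:mysql://localhost:3306 --username root --password ok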
fi

#setup zookeeper (sed reminder: 'Nc' replaces line N, 'Na\' appends after it, 'Ni\' inserts before it)
if [ "$zookeeper" = true ];then
echo 'setup zookeeper345'
cd /opt/
tar -zxf zookeeper-3.4.5-cdh5.14.2.tar.gz
mv /opt/zookeeper-3.4.5-cdh5.14.2 /opt/soft/zk345
cp /opt/soft/zk345/conf/zoo_sample.cfg /opt/soft/zk345/conf/zoo.cfg
mkdir -p /opt/soft/zk345/datas
sed -i '12c dataDir=/opt/soft/zk345/datas' /opt/soft/zk345/conf/zoo.cfg
echo "server.0=$1:2287:3387" >> /opt/soft/zk345/conf/zoo.cfg
echo '#zookeeper' >> /etc/profile
echo 'export ZOOKEEPER_HOME=/opt/soft/zk345' >> /etc/profile
echo 'export PATH=$PATH:$ZOOKEEPER_HOME/bin' >> /etc/profile
#./setup.sh 192.168.181.150
#source /etc/profile
#zkServer.sh start
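#(Sketch, not in the original script) The server.0 entry above expects a matching myid file in dataDir,
#which becomes mandatory once more quorum members are added:
#echo 0 > /opt/soft/zk345/datas/myid
#zkServer.sh status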
fi

#setup hbase
if [ "$hbase" = true ];then
echo 'setup hbase120'
cd /opt/
tar -zxf hbase-1.2.0-cdh5.14.2.tar.gz
mv /opt/hbase-1.2.0-cdh5.14.2 /opt/soft/hbase120
echo 'export JAVA_HOME=/opt/soft/jdk180' >> /opt/soft/hbase120/conf/hbase-env.sh
echo 'export HBASE_MANAGES_ZK=false' >> /opt/soft/hbase120/conf/hbase-env.sh
sed -i '23a <property><name>hbase.zookeeper.property.clientPort</name><value>2181</value></property>' /opt/soft/hbase120/conf/hbase-site.xml
sed -i '23a <property><name>hbase.zookeeper.property.dataDir</name><value>/opt/soft/zk345/datas</value></property>' /opt/soft/hbase120/conf/hbase-site.xml
sed -i '23a <property><name>hbase.cluster.distributed</name><value>true</value></property>' /opt/soft/hbase120/conf/hbase-site.xml
sed -i "23a <property><name>hbase.rootdir</name><value>hdfs://$1:9000/hbase</value></property>" /opt/soft/hbase120/conf/hbase-site.xml

echo '#hbase' >> /etc/profile
echo 'export HBASE_HOME=/opt/soft/hbase120' >> /etc/profile
echo 'export PATH=$PATH:$HBASE_HOME/bin' >> /etc/profile
#./setup.sh 192.168.181.150
#source /etc/profile
#start-hbase.sh
#hbase shell
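#(Sketch) A smoke test inside the hbase shell; the table and column family names are made up:
#  create 'test','cf'
#  put 'test','row1','cf:a','value1'
#  scan 'test'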
fi

#setup spark
if [ "$spark" = true ];then
echo 'setup spark234'
cd /opt/
tar -zxf spark-2.3.4-bin-hadoop2.6.tgz
mv spark-2.3.4-bin-hadoop2.6 /opt/soft/spark234
cp /opt/soft/spark234/conf/slaves.template /opt/soft/spark234/conf/slaves
#echo 'localhost' >> /opt/soft/spark234/conf/slaves
cp /opt/soft/spark234/conf/spark-env.sh.template /opt/soft/spark234/conf/spark-env.sh
echo "export SPARK_MASTER_HOST=$1" >> /opt/soft/spark234/conf/spark-env.sh
echo 'export SPARK_MASTER_PORT=7077' >> /opt/soft/spark234/conf/spark-env.sh
echo 'export SPARK_WORKER_CORES=2' >> /opt/soft/spark234/conf/spark-env.sh
echo 'export SPARK_WORKER_MEMORY=3g' >> /opt/soft/spark234/conf/spark-env.sh
echo 'export SPARK_MASTER_WEBUI_PORT=8888' >> /opt/soft/spark234/conf/spark-env.sh
echo 'export JAVA_HOME=/opt/soft/jdk180' >> /opt/soft/spark234/sbin/spark-config.sh
#Start the standalone cluster from the sbin directory: ./start-all.sh
#Start the Spark shell from the bin directory: ./spark-shell
#Then open the web UI in a browser: http://192.168.181.150:4040
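#(Sketch) Submit the bundled SparkPi example against the standalone master configured above;
#the examples jar path is assumed from the stock 2.3.4 layout:
#/opt/soft/spark234/bin/spark-submit --master spark://192.168.181.150:7077 --class org.apache.spark.examples.SparkPi /opt/soft/spark234/examples/jars/spark-examples_2.11-2.3.4.jar 100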
fi

#setup kafka
if [ "$kafka" = true ];then
echo 'setup kafka'
cd /opt/
tar -zxf kafka_2.11-2.0.0.tgz
mv kafka_2.11-2.0.0 /opt/soft/kafka211
sed -i "31c listeners=PLAINTEXT://$1:9092" /opt/soft/kafka211/config/server.properties
sed -i '60c log.dirs=/opt/soft/kafka211/kafka-logs' /opt/soft/kafka211/config/server.properties
sed -i "123c zookeeper.connect=$1:2181" /opt/soft/kafka211/config/server.properties
echo '#kafka' >> /etc/profile
echo 'export KAFKA_HOME=/opt/soft/kafka211' >> /etc/profile
echo 'export PATH=$PATH:$KAFKA_HOME/bin' >> /etc/profile
echo 'setup kafka complete'
#./setup.sh 192.168.126.200
#source /etc/profile
#First cd into the kafka bin directory: cd /opt/soft/kafka211/bin
#Foreground start: kafka-server-start.sh /opt/soft/kafka211/config/server.properties
#Background start: nohup kafka-server-start.sh /opt/soft/kafka211/config/server.properties > kafka.log 2>&1 &
#Stop the foreground broker with Ctrl+C
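#(Sketch) Topic smoke test once the broker is up; the topic name "test" is only an example:
#kafka-topics.sh --create --zookeeper 192.168.126.200:2181 --replication-factor 1 --partitions 1 --topic test
#kafka-console-producer.sh --broker-list 192.168.126.200:9092 --topic test
#kafka-console-consumer.sh --bootstrap-server 192.168.126.200:9092 --topic test --from-beginning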
fi

#Shutting down the services
#Stop HBase first
#stop-hbase.sh
#zkServer.sh stop
#Leave Hive with "exit"; stop the metastore and hiveserver2 services with Ctrl+C
#stop-all.sh
#shutdown now    #power off the machine
