#!/bin/bash
# Full-stack big-data installer (JDK, MySQL, Hadoop, Hive, Sqoop, ZooKeeper,
# HBase, Spark, Kafka) for a single CentOS 7 node.
# Grant execute permission first:  chmod 777 setup.sh
# Usage: ./setup.sh <host-ip>   ($1 is baked into the generated configs)
echo 'full system install beginning...'
# Global feature flags: flip a flag to true to install that component.
ipAddr=false
jdk=false
mysql=false
hadoop=false
hive=false
sqoop=false
zookeeper=false
hbase=false
spark=false
kafka=false
# ---- static IP configuration ---------------------------------------------
# Set the VM hostname first:  hostnamectl set-hostname ycy
# Add a hosts entry:          sed -i '2a 192.168.126.200 ycy' /etc/hosts
if [ "$ipAddr" = true ]; then
  echo 'setting ipaddr...'
  # Switch ifcfg-ens33 from DHCP to a static address, then append the
  # static-network settings (single quotes keep the inner double quotes
  # literal so they land in the ifcfg file).
  sed -i 's/dhcp/static/' /etc/sysconfig/network-scripts/ifcfg-ens33
  echo 'IPADDR="192.168.126.200"' >> /etc/sysconfig/network-scripts/ifcfg-ens33
  echo 'NETMASK="255.255.255.0"' >> /etc/sysconfig/network-scripts/ifcfg-ens33
  echo 'GATEWAY="192.168.126.2"' >> /etc/sysconfig/network-scripts/ifcfg-ens33
  echo 'DNS1="114.114.114.114"' >> /etc/sysconfig/network-scripts/ifcfg-ens33
  echo 'DNS2="8.8.8.8"' >> /etc/sysconfig/network-scripts/ifcfg-ens33
  systemctl restart network
  # Verify connectivity: ping www.baidu.com
  # Disable the firewall if required:
  #   systemctl stop firewalld
  #   systemctl disable firewalld
fi
# ---- passwordless SSH login ----------------------------------------------
# ssh-keygen -t rsa -P ''
# (press Enter at every prompt)
# cd ~/.ssh
# cat id_rsa.pub >> authorized_keys
# chmod 600 authorized_keys
# vi /etc/hosts  -> append on the last line: <ip-address> <vm-hostname>
# Verify it works: ssh <vm-hostname>   (type 'exit' to leave the session)
# ---- JDK 1.8.0_111 -------------------------------------------------------
if [ "$jdk" = true ]; then
  echo 'setup jdk 1.8'
  cd /opt/ || exit 1
  mkdir -p soft
  tar -zxf /opt/jdk-8u111-linux-x64.tar.gz
  mv /opt/jdk1.8.0_111 /opt/soft/jdk180
  # Single quotes are essential here: $JAVA_HOME / $PATH must be written
  # literally into /etc/profile and expanded at login, not now.
  echo 'export JAVA_HOME=/opt/soft/jdk180' >> /etc/profile
  echo 'export CLASSPATH=.:$JAVA_HOME/jre/lib/rt.jar:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar' >> /etc/profile
  echo 'export PATH=$PATH:$JAVA_HOME/bin' >> /etc/profile
  # Remove the bundled OpenJDK packages before activating this JDK:
  #   rpm -qa | grep java
  #   rpm -e --nodeps java-1.7.0-openjdk-1.7.0.111-2.6.7.8.el7.x86_64
  #   rpm -e --nodeps java-1.8.0-openjdk-1.8.0.102-4.b14.el7.x86_64
  #   rpm -e --nodeps java-1.8.0-openjdk-headless-1.8.0.102-4.b14.el7.x86_64
  #   rpm -e --nodeps java-1.7.0-openjdk-headless-1.7.0.111-2.6.7.8.el7.x86_64
  # Afterwards: ./setup.sh ; source /etc/profile ; java -version
fi
# After installation run 'source /etc/profile' manually; the bundled OpenJDK
# must be removed before this JDK takes effect.
# ---- MySQL (community el7-5 repo) ----------------------------------------
if [ "$mysql" = true ]; then
  echo 'setup mysql 5.7'
  cd /opt/ || exit 1
  wget http://repo.mysql.com/mysql-community-release-el7-5.noarch.rpm
  rpm -ivh mysql-community-release-el7-5.noarch.rpm
  yum install mysql-server -y
  chown -R root:root /var/lib/mysql
  chown root /var/lib/mysql/
  service mysqld restart
  # Set the root password to 'ok' and allow remote root connections.
  # Outer double quotes let the SQL keep its single-quoted literals intact.
  mysql -h localhost -P3306 -uroot mysql --default-character-set=utf8 \
    -e "use mysql;update user set password=password('ok') where user='root';GRANT ALL PRIVILEGES ON *.* TO root@'%' IDENTIFIED BY 'ok';flush privileges"
  # Force UTF-8 as the server character set (appended after line 22 of my.cnf).
  sed -i '22a\character-set-server=utf8' /etc/my.cnf
  service mysqld restart
fi
# ---- Hadoop 2.6.0 (CDH 5.14.2) -------------------------------------------
# $1 = host/IP used for fs.defaultFS (e.g. ./setup.sh 192.168.181.150)
if [ "$hadoop" = true ]; then
  echo 'setup hadoop cdh 5.14.2 - hadoop 2.6'
  cd /opt || exit 1
  tar -zxf /opt/hadoop-2.6.0-cdh5.14.2.tar.gz
  mv /opt/hadoop-2.6.0-cdh5.14.2 /opt/soft/hadoop260
  # Pin JAVA_HOME in hadoop-env.sh. '|' delimiter avoids escaping the
  # slashes in the replacement path (the original 's/.../...' form was a
  # sed syntax error).
  sed -i 's|${JAVA_HOME}|/opt/soft/jdk180|' /opt/soft/hadoop260/etc/hadoop/hadoop-env.sh
  # core-site.xml — properties inserted inside <configuration> (after line 19).
  # Double quotes on the first one so $1 expands.
  sed -i "19a <property><name>fs.defaultFS</name><value>hdfs://$1:9000</value></property>" /opt/soft/hadoop260/etc/hadoop/core-site.xml
  sed -i '20a <property><name>hadoop.tmp.dir</name><value>/opt/soft/hadoop260/data</value></property>' /opt/soft/hadoop260/etc/hadoop/core-site.xml
  # NOTE(review): the proxyuser values were garbled in the original source;
  # '*' (allow all) is the conventional single-node setting — confirm.
  sed -i '21a <property><name>hadoop.proxyuser.root.groups</name><value>*</value></property>' /opt/soft/hadoop260/etc/hadoop/core-site.xml
  sed -i '22a <property><name>hadoop.proxyuser.root.users</name><value>*</value></property>' /opt/soft/hadoop260/etc/hadoop/core-site.xml
  # hdfs-site.xml — single-node: replication factor 1, permissions disabled.
  sed -i '19a <property><name>dfs.replication</name><value>1</value></property>' /opt/soft/hadoop260/etc/hadoop/hdfs-site.xml
  sed -i '20a <property><name>dfs.permissions</name><value>false</value></property>' /opt/soft/hadoop260/etc/hadoop/hdfs-site.xml
  # mapred-site.xml — run MapReduce on YARN.
  cp /opt/soft/hadoop260/etc/hadoop/mapred-site.xml.template /opt/soft/hadoop260/etc/hadoop/mapred-site.xml
  sed -i '19a <property><name>mapreduce.framework.name</name><value>yarn</value></property>' /opt/soft/hadoop260/etc/hadoop/mapred-site.xml
  # yarn-site.xml.
  # NOTE(review): original fused name/value read "yarn.resourcemanager.
  # localhostlocalhost"; reconstructed as the standard hostname property —
  # verify against the cluster.
  sed -i '15a <property><name>yarn.resourcemanager.hostname</name><value>localhost</value></property>' /opt/soft/hadoop260/etc/hadoop/yarn-site.xml
  sed -i '16a <property><name>yarn.nodemanager.aux-services</name><value>mapreduce_shuffle</value></property>' /opt/soft/hadoop260/etc/hadoop/yarn-site.xml
  # Environment variables for login shells (expanded at login, hence
  # single quotes).
  echo '#hadoop' >> /etc/profile
  echo 'export HADOOP_HOME=/opt/soft/hadoop260' >> /etc/profile
  echo 'export HADOOP_MAPRED_HOME=$HADOOP_HOME' >> /etc/profile
  echo 'export HADOOP_COMMON_HOME=$HADOOP_HOME' >> /etc/profile
  echo 'export HADOOP_HDFS_HOME=$HADOOP_HOME' >> /etc/profile
  echo 'export YARN_HOME=$HADOOP_HOME' >> /etc/profile
  echo 'export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native' >> /etc/profile
  echo 'export PATH=$PATH:$HADOOP_HOME/sbin:$HADOOP_HOME/bin' >> /etc/profile
  echo 'export HADOOP_INSTALL=$HADOOP_HOME' >> /etc/profile
fi
# ---- Hive 1.1.0 (CDH 5.14.2) ---------------------------------------------
# $1 = MySQL host for the metastore JDBC URL.
if [ "$hive" = true ]; then
  echo 'setup hive110'
  cd /opt/ || exit 1
  tar -zxf hive-1.1.0-cdh5.14.2.tar.gz
  mv /opt/hive-1.1.0-cdh5.14.2 /opt/soft/hive110
  # JDBC driver for the MySQL-backed metastore.
  cp /opt/mysql-connector-java-5.1.38.jar /opt/soft/hive110/lib
  # cp /opt/soft/hive110/conf/hive-env.sh.template /opt/soft/hive110/conf/hive-env.sh
  # Generate hive-site.xml. NOTE(review): the XML tags were stripped in the
  # original source; each fused "namevalue" line is reconstructed as a full
  # <property> element — verify names/values before production use.
  touch /opt/soft/hive110/conf/hive-site.xml
  {
    echo '<?xml version="1.0" encoding="UTF-8" standalone="no"?>'
    echo '<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>'
    echo '<configuration>'
    echo '<property><name>hive.metastore.warehouse.dir</name><value>/opt/soft/hive110</value></property>'
    echo '<property><name>hive.metastore.local</name><value>true</value></property>'
    echo "<property><name>javax.jdo.option.ConnectionURL</name><value>jdbc:mysql://$1/hive?createDatabaseIfNotExist=true</value></property>"
    echo '<property><name>javax.jdo.option.ConnectionDriverName</name><value>com.mysql.jdbc.Driver</value></property>'
    echo '<property><name>javax.jdo.option.ConnectionUserName</name><value>root</value></property>'
    echo '<property><name>javax.jdo.option.ConnectionPassword</name><value>ok</value></property>'
    echo '<property><name>hive.server2.authentication</name><value>NONE</value></property>'
    echo '<property><name>hive.server2.thrift.client.user</name><value>root</value></property>'
    echo '<property><name>hive.server2.thrift.client.password</name><value>ok</value></property>'
    echo '</configuration>'
  } >> /opt/soft/hive110/conf/hive-site.xml
  echo '#hive' >> /etc/profile
  echo 'export HIVE_HOME=/opt/soft/hive110' >> /etc/profile
  echo 'export PATH=$PATH:$HIVE_HOME/bin' >> /etc/profile
  # Usage: ./setup.sh 192.168.181.150 ; source /etc/profile
  # First-run metastore initialization:
  #   schematool -dbType mysql -initSchema
  # If initialization fails, drop the 'hive' database in MySQL and re-run it.
  # Start the metastore service:  hive --service metastore ; hive
  # Start hiveserver2 for remote (third-party) clients:
  #   hiveserver2
  #   beeline -u jdbc:hive2://192.168.181.150:10000
fi
# ---- Sqoop 1.4.6 (CDH 5.14.2) --------------------------------------------
if [ "$sqoop" = true ]; then
  echo 'setup sqoop146'
  cd /opt/ || exit 1
  tar -zxf sqoop-1.4.6-cdh5.14.2.tar.gz
  mv /opt/sqoop-1.4.6-cdh5.14.2 /opt/soft/sqoop146
  cp /opt/soft/sqoop146/conf/sqoop-env-template.sh /opt/soft/sqoop146/conf/sqoop-env.sh
  # Point Sqoop at the locally installed Hadoop/Hive/HBase/ZooKeeper.
  {
    echo 'export HADOOP_COMMON_HOME=/opt/soft/hadoop260'
    echo 'export HADOOP_MAPRED_HOME=/opt/soft/hadoop260'
    echo 'export HIVE_HOME=/opt/soft/hive110'
    echo 'export HBASE_HOME=/opt/soft/hbase120'
    echo 'export ZOOCFGDIR=/opt/soft/zk345/conf'
  } >> /opt/soft/sqoop146/conf/sqoop-env.sh
  # Extra jars Sqoop needs on its classpath.
  cp /opt/mysql-connector-java-5.1.38.jar /opt/soft/sqoop146/lib
  cp /opt/java-json.jar /opt/soft/sqoop146/lib
  cp /opt/soft/hive110/lib/hive-common-1.1.0-cdh5.14.2.jar /opt/soft/sqoop146/lib
  cp /opt/soft/hive110/lib/hive-jdbc-1.1.0-cdh5.14.2-standalone.jar /opt/soft/sqoop146/lib
  echo '#sqoop' >> /etc/profile
  echo 'export SQOOP_HOME=/opt/soft/sqoop146' >> /etc/profile
  echo 'export PATH=$PATH:$SQOOP_HOME/bin' >> /etc/profile
  # Usage: ./setup.sh ; source /etc/profile
fi
# ---- ZooKeeper 3.4.5 (CDH 5.14.2) ----------------------------------------
# sed reminder: 'Nc' replaces line N; 'Na\' appends after it; 'Ni\' inserts
# before it.
# $1 = this server's host/IP for the ensemble entry.
if [ "$zookeeper" = true ]; then
  echo 'setup zookeeper345'
  cd /opt/ || exit 1
  tar -zxf zookeeper-3.4.5-cdh5.14.2.tar.gz
  mv /opt/zookeeper-3.4.5-cdh5.14.2 /opt/soft/zk345
  cp /opt/soft/zk345/conf/zoo_sample.cfg /opt/soft/zk345/conf/zoo.cfg
  mkdir -p /opt/soft/zk345/datas
  # Line 12 of zoo.cfg holds the dataDir setting; replace it wholesale.
  sed -i '12c dataDir=/opt/soft/zk345/datas' /opt/soft/zk345/conf/zoo.cfg
  # Double quotes so $1 expands into the ensemble entry.
  echo "server.0=$1:2287:3387" >> /opt/soft/zk345/conf/zoo.cfg
  echo '#zookeeper' >> /etc/profile
  echo 'export ZOOKEEPER_HOME=/opt/soft/zk345' >> /etc/profile
  echo 'export PATH=$PATH:$ZOOKEEPER_HOME/bin' >> /etc/profile
  # Usage: ./setup.sh 192.168.181.150 ; source /etc/profile ; zkServer.sh start
fi
# ---- HBase 1.2.0 (CDH 5.14.2) --------------------------------------------
# $1 = HDFS namenode host for hbase.rootdir.
if [ "$hbase" = true ]; then
  echo 'setup hbase120'
  cd /opt/ || exit 1
  tar -zxf hbase-1.2.0-cdh5.14.2.tar.gz
  mv /opt/hbase-1.2.0-cdh5.14.2 /opt/soft/hbase120
  echo 'export JAVA_HOME=/opt/soft/jdk180' >> /opt/soft/hbase120/conf/hbase-env.sh
  # Use the external ZooKeeper instead of the one bundled with HBase.
  echo 'export HBASE_MANAGES_ZK=false' >> /opt/soft/hbase120/conf/hbase-env.sh
  # hbase-site.xml: insert each property after line 23 (inside
  # <configuration>); later inserts end up above earlier ones.
  # NOTE(review): XML markup was garbled in the original source;
  # reconstructed as full <property> elements — verify names/values.
  sed -i '23a <property><name>hbase.zookeeper.property.clientPort</name><value>2181</value></property>' /opt/soft/hbase120/conf/hbase-site.xml
  sed -i '23a <property><name>hbase.zookeeper.property.dataDir</name><value>/opt/soft/zk345/datas</value></property>' /opt/soft/hbase120/conf/hbase-site.xml
  sed -i '23a <property><name>hbase.cluster.distributed</name><value>true</value></property>' /opt/soft/hbase120/conf/hbase-site.xml
  sed -i "23a <property><name>hbase.rootdir</name><value>hdfs://$1:9000/hbase</value></property>" /opt/soft/hbase120/conf/hbase-site.xml
  echo '#hbase' >> /etc/profile
  echo 'export HBASE_HOME=/opt/soft/hbase120' >> /etc/profile
  echo 'export PATH=$PATH:$HBASE_HOME/bin' >> /etc/profile
  # Usage: ./setup.sh 192.168.181.150 ; source /etc/profile
  # start-hbase.sh ; hbase shell
fi
# ---- Spark 2.3.4 (hadoop2.6 build) ---------------------------------------
# $1 = Spark master host.
if [ "$spark" = true ]; then
  echo 'setup spark234'
  cd /opt/ || exit 1
  tar -zxf spark-2.3.4-bin-hadoop2.6.tgz
  mv spark-2.3.4-bin-hadoop2.6 /opt/soft/spark234
  cp /opt/soft/spark234/conf/slaves.template /opt/soft/spark234/conf/slaves
  # echo 'localhost' >> /opt/soft/spark234/conf/slaves
  cp /opt/soft/spark234/conf/spark-env.sh.template /opt/soft/spark234/conf/spark-env.sh
  # Double quotes here: the master host ($1) must be baked into
  # spark-env.sh now (the original single quotes wrote a literal '$1').
  echo "export SPARK_MASTER_HOST=$1" >> /opt/soft/spark234/conf/spark-env.sh
  echo 'export SPARK_MASTER_PORT=7077' >> /opt/soft/spark234/conf/spark-env.sh
  echo 'export SPARK_WORKER_CORES=2' >> /opt/soft/spark234/conf/spark-env.sh
  echo 'export SPARK_WORKER_MEMORY=3g' >> /opt/soft/spark234/conf/spark-env.sh
  echo 'export SPARK_MASTER_WEBUI_PORT=8888' >> /opt/soft/spark234/conf/spark-env.sh
  echo 'export JAVA_HOME=/opt/soft/jdk180' >> /opt/soft/spark234/sbin/spark-config.sh
  # Start the cluster:  /opt/soft/spark234/sbin/start-all.sh
  # Start a shell:      /opt/soft/spark234/bin/spark-shell
  # Web UI:             http://192.168.181.150:4040
fi
# ---- Kafka 2.0.0 (Scala 2.11) --------------------------------------------
# $1 = advertised listener / ZooKeeper host.
if [ "$kafka" = true ]; then
  echo 'setup kafka'
  cd /opt/ || exit 1
  tar -zxf kafka_2.11-2.0.0.tgz
  mv kafka_2.11-2.0.0 /opt/soft/kafka211
  # server.properties: listener address (line 31), log directory (line 60),
  # ZooKeeper connect string (line 123). Double quotes where $1 must expand.
  sed -i "31c listeners=PLAINTEXT://$1:9092" /opt/soft/kafka211/config/server.properties
  sed -i '60c log.dirs=/opt/soft/kafka211/kafka-logs' /opt/soft/kafka211/config/server.properties
  sed -i "123c zookeeper.connect=$1:2181" /opt/soft/kafka211/config/server.properties
  echo '#kafka' >> /etc/profile
  echo 'export KAFKA_HOME=/opt/soft/kafka211' >> /etc/profile
  echo 'export PATH=$PATH:$KAFKA_HOME/bin' >> /etc/profile
  echo 'setup kafka complete'
  # Usage: ./setup.sh 192.168.126.200 ; source /etc/profile
  # First: cd /opt/soft/kafka211/bin
  # Foreground start: kafka-server-start.sh /opt/soft/kafka211/config/server.properties
  # Background start: nohup kafka-server-start.sh /opt/soft/kafka211/config/server.properties > kafka.log 2>&1 &
  # Stop a foreground broker with Ctrl+C.
fi
# ---- shutdown order ------------------------------------------------------
# Stop HBase first:   stop-hbase.sh
# Then ZooKeeper:     zkServer.sh stop
# Leave the hive CLI with 'exit'; stop the metastore and hiveserver2
# services with Ctrl+C.
# Stop Hadoop/Spark:  stop-all.sh
# Power off:          shutdown now