One-click setup script: MySQL, Java (JDK), basic VM settings, Hadoop, Hive, Zeppelin, Aliyun yum mirror, static IP address, hosts file

#Change the hostname and update the hosts file
#Set a static IP address
#Disable the firewall
#Switch the yum repository to the Aliyun mirror
#Check whether a software folder exists; create it if not
#Install JDK
#Install MySQL 5.7
#Configure fully automatic passwordless SSH with expect
#Install Hadoop
#Configure the four site.xml files and hadoop-env.sh
#Install Hive
#Install Zeppelin

#!/bin/bash

#Change the hostname and update the hosts file ($1 = hostname, $2 = IP address)
modify_sysname(){
	hostnamectl set-hostname "$1"
	#Look for an existing entry in /etc/hosts; only append one if it is not there yet
	cfg=`grep "$2" /etc/hosts | grep -wF "$1"`
	if [ "$cfg" == "" ];then
		#Append the "IP hostname" mapping to /etc/hosts
		echo "$2 $1" >> /etc/hosts
	else echo "do nothing"
	fi
}

#Set a static IP address ($1 = the IP address to assign)
modify_staticip(){
	#Skip this step if the file has already been switched to a static configuration
	chk=`grep static /etc/sysconfig/network-scripts/ifcfg-ens33`
	if [ "$chk" == "" ];then
	#Replace dhcp with static in /etc/sysconfig/network-scripts/ifcfg-ens33
	sed -i 's/dhcp/static/' /etc/sysconfig/network-scripts/ifcfg-ens33
	echo "IPADDR=$1" >> /etc/sysconfig/network-scripts/ifcfg-ens33
	echo "NETMASK=255.255.255.0" >> /etc/sysconfig/network-scripts/ifcfg-ens33
	#Assume the gateway is x.x.x.2 (the VMware NAT default)
	echo "GATEWAY=${1%.*}.2" >> /etc/sysconfig/network-scripts/ifcfg-ens33
	echo "DNS1=114.114.114.114" >> /etc/sysconfig/network-scripts/ifcfg-ens33
	echo "DNS2=8.8.8.8" >> /etc/sysconfig/network-scripts/ifcfg-ens33
	fi
	systemctl restart network
}

#Disable the firewall
close_firewalld(){
	systemctl stop firewalld
	systemctl disable firewalld
}

#Switch the yum repository to the Aliyun mirror
modify_yumsource(){
	#If a backup of the repo file already exists, this step has been done before
	if [ -e /etc/yum.repos.d/CentOS-Base.repo_bak ];then
		echo "do nothing"
	else
	#Install wget (and vim) first
	yum install -y wget vim
	#Back up the original repo file and download the Aliyun one
	cd /etc/yum.repos.d/
	mv CentOS-Base.repo CentOS-Base.repo_bak
	wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
	yum clean all
	yum makecache
	fi
}

#Check whether the target folder under /opt/soft exists; create it if not
#(returns 0 if it already exists, 1 if it was just created)
check_soft_folder(){
	if [ -e /opt/soft/$1 ];then
		echo "/opt/soft/$1 folder already exists"
		return 0
	else
		mkdir -p /opt/soft/$1
		return 1
	fi
}

#Install the JDK
setup_jdk(){
	check_soft_folder jdk180
	if [ $? == 1 ];then
		#Find the JDK tar.gz package under /opt
		jdkName=`ls /opt/ | grep jdk`
		#Extract it into the target soft folder
		tar -zxf /opt/$jdkName -C /opt/soft/jdk180 --strip-components 1
		#Append the Java environment variables to /etc/profile
		echo "" >> /etc/profile
		echo "#java environment" >> /etc/profile
		echo "export JAVA_HOME=/opt/soft/jdk180" >> /etc/profile
		echo "export CLASSPATH=.:\$JAVA_HOME/jre/lib/rt.jar:\$JAVA_HOME/lib/dt.jar:\$JAVA_HOME/lib/tools.jar" >> /etc/profile
		echo "export PATH=\$PATH:\$JAVA_HOME/bin" >> /etc/profile
		source /etc/profile
	fi
}

#Install MySQL 5.7
setup_mysql(){
	#Check whether the bundled mariadb is still installed; if it is, MySQL has not been set up yet
	mdb=`rpm -qa | grep mariadb`
	if [ "$mdb" != "" ];then
		rpm -e --nodeps $mdb
		cd /opt/
		wget -c http://dev.mysql.com/get/mysql57-community-release-el7-10.noarch.rpm
		yum -y install mysql57-community-release-el7-10.noarch.rpm
		yum -y install mysql-community-server
		#Edit /etc/my.cnf to use utf8 and avoid garbled Chinese text
		sed -i '/socket/a character-set-server=utf8' /etc/my.cnf
		echo "[client]" >> /etc/my.cnf
		echo "default-character-set=utf8" >> /etc/my.cnf
		echo "[mysql]" >> /etc/my.cnf
		echo "default-character-set=utf8" >> /etc/my.cnf
		systemctl start mysqld.service
		#Read the temporary root password from the MySQL log
		pwdinfo=`grep "password" /var/log/mysqld.log | grep -w password`
		passwd=${pwdinfo#*localhost:}
		passwd=$(echo $passwd)
		#Relax the password policy and change the root password
		mysql -uroot -p"$passwd" --connect-expired-password -e "set global validate_password_policy=0"
		mysql -uroot -p"$passwd" --connect-expired-password -e "set global validate_password_length=1"
		mysql -uroot -p"$passwd" --connect-expired-password -e "ALTER USER 'root'@'localhost' IDENTIFIED BY 'okok'"
		echo "The temporary root password was:"
		echo "$passwd"
		#Allow remote root login
		mysql -uroot -pokok -e "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' IDENTIFIED BY 'okok'"
		mysql -uroot -pokok -e "flush privileges"
		#Restart the service
		systemctl restart mysqld.service
	fi
}

#Configure fully automatic passwordless SSH with expect ($1 = target hostname)
expect_ssh(){
	yum install -y expect
	echo "start!"
	myhost=$1
	#nologin.sh (listed at the end of this post) generates the key and copies it to the target host
	expect /opt/myshell/nologin.sh $myhost
	echo "end!"
}

#Install Hadoop
setup_hadoop(){
	check_soft_folder hadoop260
	if [ $? == 1 ];then
		#Find the Hadoop tar.gz package under /opt
		hadoopName=`ls /opt/ | grep hadoop`
		#Extract it into the target soft folder
		tar -zxf /opt/$hadoopName -C /opt/soft/hadoop260 --strip-components 1
		#Append the Hadoop environment variables to /etc/profile
		echo "" >> /etc/profile
		echo "#hadoop environment" >> /etc/profile
		echo "export HADOOP_HOME=/opt/soft/hadoop260" >> /etc/profile
		echo "export HADOOP_MAPRED_HOME=\$HADOOP_HOME" >> /etc/profile
		echo "export HADOOP_COMMON_HOME=\$HADOOP_HOME" >> /etc/profile
		echo "export HADOOP_HDFS_HOME=\$HADOOP_HOME" >> /etc/profile
		echo "export YARN_HOME=\$HADOOP_HOME" >> /etc/profile
		echo "export HADOOP_COMMON_LIB_NATIVE_DIR=\$HADOOP_HOME/lib/native" >> /etc/profile
		echo "export PATH=\$PATH:\$HADOOP_HOME/sbin:\$HADOOP_HOME/bin" >> /etc/profile
		echo "export HADOOP_INSTALL=\$HADOOP_HOME" >> /etc/profile
		source /etc/profile
	fi
}

#Configure the four site.xml files and hadoop-env.sh ($1 = the node's IP address)
setup_xml(){
	cd /opt/soft/hadoop260/etc/hadoop/
	sed -i "/<configuration>/a <property>\n<name>fs.defaultFS</name>\n<value>hdfs://$1:9000</value>\n</property>\n<property>\n<name>hadoop.tmp.dir</name>\n<value>/opt/soft/hadoop260/tmp</value>\n</property>\n<property>\n<name>hadoop.proxyuser.root.groups</name>\n<value>*</value>\n</property>\n<property>\n<name>hadoop.proxyuser.root.hosts</name>\n<value>*</value>\n</property>\n<property>\n<name>hadoop.proxyuser.root.users</name>\n<value>*</value>\n</property>" core-site.xml
	sed -i '/<configuration>/a <property>\n<name>dfs.replication</name>\n<value>1</value>\n</property>' hdfs-site.xml
	sed -i '/<configuration>/a <property>\n<name>yarn.resourcemanager.localhost</name>\n<value>localhost</value>\n</property>\n<property>\n<name>yarn.nodemanager.aux-services</name>\n<value>mapreduce_shuffle</value>\n</property>' yarn-site.xml
	mv mapred-site.xml.template mapred-site.xml
	sed -i '/<configuration>/a <property>\n<name>mapreduce.framework.name</name>\n<value>yarn</value>\n</property>' mapred-site.xml
	sed -i 's/${JAVA_HOME}/\/opt\/soft\/jdk180/g' hadoop-env.sh
	#Format the NameNode and start the cluster; expect answers the interactive prompts
	/usr/bin/expect << EOF
spawn hadoop namenode -format
expect {
"(Y or N)" {send "Y\r";exp_continue}
}
spawn start-all.sh
expect {
"(yes/no)?" {send "yes\r";exp_continue}
}
EOF
	
	jps
}

#Install Hive
setup_hive(){
	check_soft_folder hive110
	if [ $? == 1 ];then
		#Find the Hive tar.gz package under /opt
		hiveName=`ls /opt/ | grep hive`
		#Extract it into the target soft folder
		tar -zxf /opt/$hiveName -C /opt/soft/hive110 --strip-components 1
		#Append the Hive environment variables to /etc/profile
		echo "" >> /etc/profile
		echo "#hive environment" >> /etc/profile
		echo "export HIVE_HOME=/opt/soft/hive110" >> /etc/profile
		echo "export PATH=\$PATH:\$HIVE_HOME/bin" >> /etc/profile
		source /etc/profile
	fi
}

#Generate hive-site.xml pointing the metastore at the local MySQL, then initialize the schema
hive_xml_init(){
	cd /opt/soft/hive110/conf/
	touch hive-site.xml
	echo -e "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>\n<?xml-stylesheet type=\"text/xsl\" href=\"configuration.xsl\"?>\n<configuration>\n<property>\n<name>hive.metastore.warehouse.dir</name>\n<value>/hive110/warehouse</value>\n</property>\n<property>\n<name>hive.metastore.local</name>\n<value>true</value>\n</property>\n<property>\n<name>javax.jdo.option.ConnectionURL</name>\n<value>jdbc:mysql://127.0.0.1:3306/hive?createDatabaseIfNotExist=true</value>\n</property>\n<property>\n<name>javax.jdo.option.ConnectionDriverName</name>\n<value>com.mysql.jdbc.Driver</value>\n</property>\n<property>\n<name>javax.jdo.option.ConnectionUserName</name>\n<value>root</value>\n</property>\n<property>\n<name>javax.jdo.option.ConnectionPassword</name>\n<value>okok</value>\n</property>\n</configuration>" >> hive-site.xml
	#Copy the MySQL JDBC driver into Hive's lib folder and create the metastore tables in MySQL
	cp /opt/jar/mysql-connector-java-5.1.35.jar /opt/soft/hive110/lib
	schematool -dbType mysql -initSchema
}

#Install Zeppelin
setup_zeppelin(){
	check_soft_folder zeppelin081
	if [ $? == 1 ];then
		#Find the Zeppelin tgz package under /opt
		zeppelinName=`ls /opt/ | grep zeppelin`
		#Extract it into the target soft folder
		tar -zxf /opt/$zeppelinName -C /opt/soft/zeppelin081 --strip-components 1
		#Append the Zeppelin environment variables to /etc/profile
		echo "" >> /etc/profile
		echo "#zeppelin environment" >> /etc/profile
		echo "export ZEPPELIN_HOME=/opt/soft/zeppelin081" >> /etc/profile
		echo "export PATH=\$PATH:\$ZEPPELIN_HOME/bin" >> /etc/profile
		source /etc/profile
		#Start HiveServer2 in the background so Zeppelin's JDBC interpreter can connect to it
		nohup hive --service hiveserver2 &
		jps
	fi
}
#Configure Zeppelin and wire its JDBC interpreter to Hive
zeppelin_xml_init(){
	cd /opt/soft/zeppelin081/conf/
	cp zeppelin-site.xml.template zeppelin-site.xml
	sed -i '/<configuration>/a <property>\n<name>zeppelin.helium.registry</name>\n<value>helium</value>\n</property>' zeppelin-site.xml
	cp zeppelin-env.sh.template zeppelin-env.sh
	sed -i 's/#export JAVA_HOME=/export JAVA_HOME=\/opt\/soft\/jdk180/' zeppelin-env.sh
	sed -i 's/#export HADOOP_CONF_DIR=/export HADOOP_CONF_DIR=\/opt\/soft\/hadoop260\/etc\/hadoop/' zeppelin-env.sh
	zeppelin-daemon.sh start
	#Copy hive-site.xml and the jars Zeppelin's JDBC interpreter needs in order to reach Hive
	cp /opt/soft/hive110/conf/hive-site.xml /opt/soft/zeppelin081/conf/
	cp /opt/soft/hadoop260/share/hadoop/common/hadoop-common-2.6.0-cdh5.14.2.jar  /opt/soft/zeppelin081/interpreter/jdbc/
	cp /opt/soft/hive110/lib/hive-jdbc-1.1.0-cdh5.14.2-standalone.jar /opt/soft/zeppelin081/interpreter/jdbc/
	jps
}




#Run the installation step(s) matching the user's choice
custom_option(){
	case $1 in
		"1")
			modify_sysname $2 $3
			;;
		"2")
			modify_staticip $3
			;;
		"3")
			close_firewalld
			;;
		"4")
			modify_yumsource
			;;
		"5")
			setup_jdk
			;;
		"6")
			setup_mysql
			;;
		"7")
			expect_ssh $2
			;;
		"8")
			setup_hadoop
			;;
		"9")
			setup_xml $3
			;;
		"10")
			setup_hive
			;;
		"11")
			hive_xml_init
			;;
		"12")
			setup_zeppelin
			;;
		"13")
			zeppelin_xml_init
			;;
		"all")
			modify_sysname $2 $3
			modify_staticip $3
			close_firewalld
			modify_yumsource
			setup_jdk
			setup_mysql
			expect_ssh $2
			setup_hadoop
			setup_xml $3
			setup_hive
			hive_xml_init
			setup_zeppelin
			zeppelin_xml_init
			;;
		*)
		echo "please input 1~13 or all"
		;;
	esac
}

#Arguments: $1 must be the IP address, $2 the hostname, $3 the installation choice (1~13 or all)
custom_option $3 $2 $1

The script above is sys_setup.sh.
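
Each step can also be run on its own: the case statement maps choices 1 through 13 to the functions above, and the command-line argument order stays IP address, hostname, choice. For example, on the same node used in the usage notes at the bottom (the step numbers come straight from the case statement):

source /opt/myshell/sys_setup.sh 192.168.100.141 hd01 2    #step 2: modify_staticip only
source /opt/myshell/sys_setup.sh 192.168.100.141 hd01 8    #step 8: setup_hadoop only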

#!/bin/bash
my_start(){
	if [ "$1" == "start" ];then
		#start hadoop
		sh /opt/soft/hadoop260/sbin/start-dfs.sh
		sh /opt/soft/hadoop260/sbin/start-yarn.sh
		#start hive
		nohup /opt/soft/hive110/bin/hive --service hiveserver2 &
		#start zeppelin
		sh /opt/soft/zeppelin081/bin/zeppelin-daemon.sh start
		echo "start over"
	else
		#close zeppelin
		sh /opt/soft/zeppelin081/bin/zeppelin-daemon.sh stop
		#close hive
		hiveprocess=`jps | grep RunJar | awk '{print $1}'`
		kill -9 $hiveprocess
		#stop hadoop
		sh /opt/soft/hadoop260/sbin/stop-dfs.sh
		sh /opt/soft/hadoop260/sbin/stop-yarn.sh
		echo "stop over"
	fi
}
my_start $1

The script above is run.sh.
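
run.sh takes a single argument, and anything other than start falls through to the stop branch. A minimal usage sketch (the /opt/myshell/ location is an assumption matching where sys_setup.sh and nologin.sh live; the post does not state run.sh's path explicitly):

sh /opt/myshell/run.sh start    #bring up HDFS, YARN, HiveServer2 and Zeppelin
sh /opt/myshell/run.sh stop     #stop Zeppelin and HiveServer2, then HDFS and YARN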

#!/usr/bin/expect
#Generate an RSA key pair and copy the public key to the target host (the first argument)
set host [lindex $argv 0]
	spawn ssh-keygen -t rsa
	expect {
		"):" {send "\r";exp_continue}
		":" {send "\r";exp_continue}
	}
	#ssh-copy-id asks for the remote root password, assumed to be "ok" as stated in the usage notes
	spawn ssh-copy-id $host
	expect {
		"(yes/no)?" {send "yes\r";exp_continue}
		"password:" {send "ok\r";exp_continue}
	}

The script above is nologin.sh.
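
Once nologin.sh has run (sys_setup.sh calls it through expect_ssh in step 7), logging in to the target host should no longer ask for a password; a quick check, using the hostname from the example below:

ssh hd01 hostname    #should print hd01 without prompting for a password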

Usage notes

After downloading Zeppelin and the one-click JDK + MySQL + Hadoop + Hive installation packages,
extract them and place all of the resulting jar packages and folders on a fresh Linux node (with the root password set to ok).
Run ip a to find the new machine's IP address, connect with a tool such as Xshell or SecureCRT, and upload those jars and folders into the /opt/ directory.
Note: the command must be entered in the VMware console, not in Xshell, because the script changes the IP address, which would drop the Xshell session.
The command is:
source /opt/myshell/sys_setup.sh <desired IP address> <desired hostname> all

The IP address must be in the same subnet as the one configured in the VMware Virtual Network Editor; for example, I use the 192.168.100 segment:
source /opt/myshell/sys_setup.sh 192.168.100.141 hd01 all
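
After the script finishes, a rough sanity check (these are the processes the script's own jps calls are meant to show; exact names depend on the versions installed):

jps                                        #expect NameNode, DataNode, SecondaryNameNode, ResourceManager, NodeManager, RunJar (HiveServer2) and ZeppelinServer
mysql -uroot -pokok -e "show databases;"   #the hive metastore database created by schematool should be listed
#Zeppelin's web UI listens on port 8080 by default, e.g. http://192.168.100.141:8080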
