前言
利用脚本可实现多台虚拟机的JDK,Hadoop,Spark的快速安装配置。
一、免密设置
- 添加主机地址和主机名
[root@master01 ~]# vi /etc/hosts
192.168.22.100 master01
192.168.22.101 master02
192.168.22.102 slave01
192.168.22.110 slave02
- 生成密钥和公钥
[root@slave02 ~]# ssh-keygen -t rsa
- 运行结果(直接敲回车键)
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa):
Created directory '/root/.ssh'.
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:YpLNScO/E55odatyFow63WUqgUqNerWuFTK+FlWgol4 root@slave02
The key's randomart image is:
+---[RSA 2048]----+
| .. |
| . .. |
|. . .+ |
|.. .= + |
|. oE+.*oS . |
|..=o++o=o=o. |
| +.+.+oo==. |
|. +o+.+ =o |
| ooo.. =. |
+----[SHA256]-----+
- 本机免密(slave02端)
[root@slave02 ~]# cd .ssh/
[root@slave02 .ssh]# cat id_rsa.pub >> authorized_keys
[root@slave02 .ssh]# ls
authorized_keys id_rsa id_rsa.pub
- 远程免密(master01端)
[root@master01 ~]# cd .ssh/
[root@master01 .ssh]# ssh-copy-id -i id_rsa.pub root@slave02
- 运行结果(要输入slave02的密码)
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "id_rsa.pub"
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@slave02's password:
Number of key(s) added: 1
Now try logging into the machine, with: "ssh 'root@slave02'"
and check to make sure that only the key(s) you wanted were added.
- 远程登录slave02
[root@master01 ~]# ssh slave02
Last login: Mon Jan 11 16:18:32 2021 from master01
[root@slave02 ~]#
- Ctrl+D退出远程登录
[root@slave02 ~]# logout
Connection to slave02 closed.
[root@master01 ~]#
二、远程拷贝脚本
- 进入shell并创建allsend.sh
[root@single ~]# cd shell/
[root@single shell]# vim allsend.sh
- 将以下脚本写入allsend.sh
#!/bin/bash
# Distribute a file or directory to every other cluster node via scp.
# Usage: allsend.sh <source_path> <dest_path>
# The local host ($HOSTNAME) is skipped automatically.
if [ $# -lt 2 ]; then
  echo "please input 2 args : source and dest path" >&2
  exit 1
fi

# Nodes that should receive the copy.
SERS="master01 master02 slave01 slave02"

echo "start to scp distribute : $1 ..."
for i in $SERS; do
  # Never copy to ourselves.
  if [ "$i" != "$HOSTNAME" ]; then
    echo -n "---- $i ---- "
    if [ -f "$1" ]; then
      scp "$1" "root@$i:$2" >/dev/null
    elif [ -d "$1" ]; then
      # -r for directories; same dest form as the file branch
      # (the original used ":/$2" here, which broke relative dest paths).
      scp -r "$1" "root@$i:$2" >/dev/null
    fi
    echo ok
  fi
done
- 将software文件夹(包括了JDK,Hadoop,Spark的安装包)拷贝至各个虚拟机上
[root@single shell]# bash allsend.sh /opt/software/ /opt
- 运行结果
start to scp distribute : /opt/software/ ...
---- master02 ---- ok
---- slave01 ---- ok
---- slave02 ---- ok
- 查看结果(同步操作虚拟机)
三、配置环境变量脚本
- 进入profile.d并创建my.sh
[root@single ~]# cd /etc/profile.d
[root@single profile.d]# vi my.sh
- 将以下环境变量写入my.sh
# Cluster-wide environment for the JDK/Hadoop/Spark stack.
# Installed under /etc/profile.d so every login shell sources it.
export JAVA_HOME=/opt/software/jdk180
export HADOOP_HOME=/opt/software/hadoop/hadoop260
export HIVE_HOME=/opt/software/hadoop/hive110
export ZK_HOME=/opt/software/hadoop/zookeeper345
export HBASE_HOME=/opt/software/hadoop/hbase120
export SQOOP_HOME=/opt/software/hadoop/sqoop146
export SPARK_HOME=/opt/software/spark/spark244
export PATH=$SPARK_HOME/bin:$SQOOP_HOME/bin:$HBASE_HOME/bin:$ZK_HOME/bin:$ZK_HOME/sbin:$HIVE_HOME/bin:$HIVE_HOME/lib:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$JAVA_HOME/bin:$PATH
# The JVM reads CLASSPATH (no underscore); the original CLASS_PATH was silently ignored by java.
export CLASSPATH=.:$JAVA_HOME/lib/tools.jar:$JAVA_HOME/lib/dt.jar
export LOGDIR=$SQOOP_HOME/mylog/
- 远程拷贝my.sh至各个虚拟机上
[root@single ~]# cd shell
[root@single shell]# bash allsend.sh /etc/profile.d/my.sh /etc/profile.d
- 运行结果
start to scp distribute : /etc/profile.d/my.sh ...
---- master02 ---- ok
---- slave01 ---- ok
---- slave02 ---- ok
四、远程命令脚本
- 进入shell并创建allcmd.sh
[root@single ~]# cd shell/
[root@single shell]# vim allcmd.sh
- 将以下脚本写入allcmd.sh
#!/bin/bash
# Run one command on every cluster node over ssh.
# $1 is either a literal command (e.g. 'jps') or a key looked up in
# ~/start.cmd, whose lines have the form key=command (with underscores
# standing in for spaces in the command).
if [ $# -ne 1 ]; then
  echo "please input one sys cmd or key (like 'jps' or 'kfkStart' etc)" >&2
  exit 1
fi

FILE=~/start.cmd
LINE=$1

# If the mapping file exists, translate the key into its full command.
if [ -f "$FILE" ]; then
  # Anchor on "key=" so e.g. 'zkStart' cannot match 'zkStartAll' or a
  # key appearing elsewhere in a line; -m 1 stops at the first hit.
  CMD=$(grep -m 1 "^${LINE}=" "$FILE")
  if [ -n "$CMD" ]; then
    # Keep everything after the first '=' (safe even if the mapped
    # command itself contains '=' characters).
    LINE=${CMD#*=}
  fi
fi

SERS="master01 master02 slave01 slave02"
for i in $SERS; do
  echo "--------[ $LINE ] $i--------"
  # Underscores in start.cmd stand for spaces; restore them before running.
  ssh "root@$i" "${LINE//_/ }"
done
- 脚本中需要调用另外一个文件start.cmd,用来匹配脚本参数和对应的启动服务命令
zkStart=zkServer.sh_start
zkStop=zkServer.sh_stop
zkStat=zkServer.sh_status
kfkStart=kafka-server-start.sh_daemon_/opt/software/spark/kafka241/config/server.properties
kfkStop=kafka-server-stop.sh_stop
- 执行脚本
[root@single shell]# bash allcmd.sh jps
- 运行结果
------------------ [ jps ] master01 ------------------
12685 Jps
------------------ [ jps ] master02 ------------------
1928 Jps
------------------ [ jps ] slave01 ------------------
1837 Jps
------------------ [ jps ] slave02 ------------------
9540 Jps