1.配置高可用
1.1 启动zookeeper
自己写了zookeeper启动脚本(脚本如下,后面会写zookeeper课程)
#!/bin/bash
# zkmanage.sh — run a zkServer.sh action (start/stop/restart) on every
# ZooKeeper node in the ensemble, then report each node's status.
# Usage: zkmanage.sh <start|stop|restart>

# Fail fast with a usage message instead of ssh-ing a bare zkServer.sh call.
if [ $# -lt 1 ]; then
  echo "Usage: ${0##*/} <start|stop|restart>" >&2
  exit 1
fi

for host in bigdata01 bigdata02 bigdata03
do
  echo "${host}:${1}ing... ..."
  # 'source /etc/profile' so JAVA_HOME etc. are set in the non-login ssh shell.
  ssh "$host" "source /etc/profile;/root/training/zookeeper-3.4.6/bin/zkServer.sh $1"
done

# Give the ensemble a moment to settle (leader election) before asking for status.
sleep 2

for host in bigdata01 bigdata02 bigdata03
do
  ssh "$host" "source /etc/profile;/root/training/zookeeper-3.4.6/bin/zkServer.sh status"
done
启动命令:zkmanage.sh start
2.配置spark-env.sh实现高可用
# - SPARK_WORKER_CORES, to set the number of cores to use on this machine
# - SPARK_WORKER_MEMORY, to set how much total memory workers have to give executors (e.g. 1000m, 2g)
export JAVA_HOME=/root/training/jdk1.8.0_144/
#export SPARK_WORKER_CORES=2
#export SPARK_WORKER_MEMORY=2g
#export SPARK_MASTER_HOST=bigdata01   # not needed: with ZooKeeper HA the active master is elected, not fixed
#export SPARK_MASTER_PORT=7077        # default port is 7077
# Enable ZooKeeper-based standby-master recovery for Spark standalone HA:
# master state is kept under /spark in the 3-node ZooKeeper ensemble.
export SPARK_DAEMON_JAVA_OPTS="-Dspark.deploy.recoveryMode=ZOOKEEPER -Dspark.deploy.zookeeper.url=bigdata01:2181,bigdata02:2181,bigdata03:2181 -Dspark.deploy.zookeeper.dir=/spark"
快捷键 Ctrl+R:在bash中反向搜索历史命令,快速找回之前执行过的命令
for i in {2..4};do scp -r /root/training/spark-2.2.0-bin-hadoop2.7/conf/spark-env.sh bigdata0$i:$PWD ;done
3.启动高可用
先在zkCli.sh中执行 rmr /spark 删除旧的/spark节点,防止之前已经在zookeeper上注册过
bigdata01
cd /root/training/spark-2.2.0-bin-hadoop2.7
sbin/start-all.sh
bigdata02
cd /root/training/spark-2.2.0-bin-hadoop2.7
sbin/start-master.sh
http://bigdata01:8080/ (ALIVE)
http://bigdata02:8080/ (STANDBY)
4.测试高可用
jps
16711 Master
kill -9 16711
http://bigdata02:8080/ (ALIVE)
在bigdata01上重新启动Master:sbin/start-master.sh
http://bigdata01:8080/ (STANDBY)
5.停止高可用集群
sbin/stop-all.sh
sbin/stop-master.sh