1. 各组件启动顺序
zookeeper--hadoop--hbase--kafka--storm--spark
zookeeper-3.4.12.tar.gz
apache-storm-1.2.1.tar.gz
kafka_2.12-1.1.0.tgz
hbase-2.0.0-bin.tar.gz
mysql-connector-java-8.0.11.zip
apache-hive-2.3.3-bin.tar.gz
sqoop-1.4.7.tar.gz
2. 集群启动shell命令
#!/bin/bash
# Cluster start script: zookeeper -> hadoop -> hbase -> kafka -> storm -> spark.
# Node lists are space-separated strings iterated with unquoted word-splitting.
# nimbus node(s)
readonly nimbusServers='master'
# supervisor node(s)
readonly supervisorServers='master slave01 slave02'
# all nodes (zookeeper ensemble / kafka brokers)
readonly BROKERS="master slave01 slave02"
echo -e "\033[31m ==========================Start The Cluster============================= \033[0m"
echo -e "\033[31m =====================Starting Zookeeper Now !!!========================= \033[0m"
# Start a ZooKeeper server on every node. Report per-node success/failure
# instead of unconditionally printing "[ done ]" as the original did.
for broker in $BROKERS
do
    if ssh hadoop@"$broker" "source /etc/profile && /home/hadoop/tools/zookeeper/bin/zkServer.sh start"; then
        echo 从节点 $broker 启动zookeeper...[ done ]
    else
        echo 从节点 $broker 启动zookeeper...[ failed ] >&2
    fi
    sleep 1
done
echo -e "\033[31m ======================Starting Hadoop Now !!!============================ \033[0m"
# Start all HDFS and YARN daemons across the cluster from the master node.
/home/hadoop/tools/hadoop3/sbin/start-all.sh
# Start the MapReduce JobHistory server on this node.
/home/hadoop/tools/hadoop3/sbin/mr-jobhistory-daemon.sh start historyserver
echo -e "\033[31m =======================Starting HBase Now !!!=============================\033[0m"
# Start the HBase master and regionservers (HBase needs HDFS + zookeeper up).
/home/hadoop/tools/hbase/bin/start-hbase.sh
echo -e "\033[31m ========================Starting Kafka Now !!!============================\033[0m"
# Start a Kafka broker on every node. Use kafka-server-start.sh's built-in
# -daemon mode (nohup + background handled remotely) instead of the original
# double-backgrounded ssh, which left the broker attached to a dying session.
for broker in $BROKERS
do
    ssh hadoop@"$broker" "source /etc/profile && /home/hadoop/tools/kafka/bin/kafka-server-start.sh -daemon /home/hadoop/tools/kafka/config/server.properties"
    echo 从节点 $broker 启动kafka...[ done ]
    sleep 1
done
echo -e "\033[31m ==========================Starting Storm Now !!!=============================== \033[0m"
# Start every nimbus daemon. nohup keeps the daemon alive after the ssh
# session exits; a bare trailing '&' leaves it exposed to SIGHUP.
for nim in $nimbusServers
do
    ssh hadoop@"$nim" "source /etc/profile && nohup /home/hadoop/tools/storm/bin/storm nimbus >/dev/null 2>&1 &"
    echo 从节点 $nim 启动nimbus...[ done ]
    sleep 1
done
# Start the storm UI on every nimbus node (nohup: survive ssh exit).
for u in $nimbusServers
do
    ssh hadoop@"$u" "source /etc/profile && nohup /home/hadoop/tools/storm/bin/storm ui >/dev/null 2>&1 &"
    echo 从节点 $u 启动ui...[ done ]
    sleep 1
done
# Start a storm supervisor on every worker node (nohup: survive ssh exit).
for visor in $supervisorServers
do
    ssh hadoop@"$visor" "source /etc/profile && nohup /home/hadoop/tools/storm/bin/storm supervisor >/dev/null 2>&1 &"
    echo 从节点 $visor 启动supervisor...[ done ]
    sleep 1
done
echo -e "\033[31m =============================Starting Spark Now !!!============================ \033[0m"
/home/hadoop/tools/spark2/sbin/start-all.sh
# Quick visual health check: list the running JVMs on every node.
echo -e "\033[31m =====================The Result Of The Command \"master jps\" :================ \033[0m"
jps
for slave in slave01 slave02
do
    echo -e "\033[31m =====================The Result Of The Command \"$slave jps\" :================ \033[0m"
    ssh hadoop@$slave 'source /etc/profile && jps'
done
echo -e "\033[31m ======================================END======================================== \033[0m"
3. 集群停止shell命令
#!/bin/bash
# Cluster stop script: reverse of the start order
# (spark -> storm -> kafka -> hbase -> hadoop -> zookeeper).
# nimbus node(s)
readonly nimbusServers='master'
# supervisor node(s)
readonly supervisorServers='master slave01 slave02'
echo -e "\033[31m ===== Stoping The Cluster ====== \033[0m"
echo -e "\033[31m ========================================Stoping Spark Now !!!====================================== \033[0m"
# Stop the Spark master and all workers first (reverse of start order).
/home/hadoop/tools/spark2/sbin/stop-all.sh
echo -e "\033[31m ===================================Stoping Storm Now !!!==================================== \033[0m"
# Stop every nimbus and ui daemon. The original resolved the PID through a
# second, locally-expanded `ssh` inside backticks and killed only the first
# `head -n 1` match — race-prone and able to hit an unrelated process.
# pkill -f matches on the remote side and signals every matching storm JVM.
for nim in $nimbusServers
do
    echo 从节点 $nim 停止nimbus和ui...[ done ]
    ssh hadoop@"$nim" "pkill -9 -f 'storm.*nimbus'" >/dev/null 2>&1
    # the storm ui main class is ...ui.core, hence the 'core' pattern
    ssh hadoop@"$nim" "pkill -9 -f 'storm.*core'" >/dev/null 2>&1
done
# Stop every supervisor daemon (remote pkill -f instead of the original
# locally-expanded nested-ssh PID lookup, which was race-prone).
for visor in $supervisorServers
do
    echo 从节点 $visor 停止supervisor...[ done ]
    ssh hadoop@"$visor" "pkill -9 -f 'storm.*supervisor'" >/dev/null 2>&1
done
echo -e "\033[31m ======================================Stoping Kafka Now !!!===================================== \033[0m"
# Stop the Kafka broker on every node. Run the stop script synchronously:
# the original backgrounded it on both sides, so ssh could exit (taking the
# remote shell with it) before the broker had even been signalled.
for nim in $supervisorServers
do
    ssh hadoop@"$nim" "source /etc/profile && /home/hadoop/tools/kafka/bin/kafka-server-stop.sh"
    echo 从节点 $nim 停止Kafka...[ done ]
    sleep 1
done
echo -e "\033[31m ========================================Stoping HBase Now !!!======================================== \033[0m"
# Stop HBase while HDFS and zookeeper are still available.
/home/hadoop/tools/hbase/bin/stop-hbase.sh
echo -e "\033[31m ========================================Stopting Hadoop Now !!!======================================= \033[0m"
# Stop all HDFS and YARN daemons, then the MapReduce JobHistory server.
/home/hadoop/tools/hadoop3/sbin/stop-all.sh
/home/hadoop/tools/hadoop3/sbin/mr-jobhistory-daemon.sh stop historyserver
echo -e "\033[31m ========================================Stoping Zookeeper Now !!!===================================== \033[0m"
# Stop ZooKeeper on every node. Run synchronously so the jps verification
# below does not race with servers that are still shutting down (the
# original backgrounded the ssh and relied on a 1-second sleep).
for nim in $supervisorServers
do
    ssh hadoop@"$nim" "source /etc/profile && /home/hadoop/tools/zookeeper/bin/zkServer.sh stop"
    echo 从节点 $nim 停止zookeeper...[ done ]
    sleep 1
done
echo -e "\033[31m ========================================The Result Of The Command \"jps\" : =========================== \033[0m"
# Verify shutdown: list remaining JVMs on every node. Run ssh in the
# foreground so each node's output appears under its own header instead of
# interleaving with the next iteration (the sleep only papered over that).
for nim in $supervisorServers
do
    echo 从节点 $nim 查看jps...
    ssh hadoop@"$nim" "source /etc/profile && jps"
done
echo -e "\033[31m ======END======== \033[0m"