目录
拉取所有节点的jps myjps
- 在普通用户的家目录下新建bin目录,专门存放脚本
/home/admin/bin
- 在 bin 目录下 vim 文件 myjps.sh
#!/bin/bash
# myjps.sh — print the Java processes (jps) running on every cluster node.
for host in hadoop102 hadoop103 hadoop104
do
  echo "------------ $host ------------"
  ssh "$host" /opt/module/jdk1.8.0_212/bin/jps
done
- 赋予脚本可执行权限
sudo chmod u+x myjps.sh
同步文件脚本 xsync
同步命令 xsync [文件名]
#!/bin/bash
# xsync — distribute one or more files/directories to every cluster node via rsync.
# Usage: xsync <path> [more paths...]

#1. Require at least one argument.
if [ $# -lt 1 ]; then   # $#: number of positional arguments
  echo "Not Enough Argument!" >&2
  exit 1                # non-zero status so callers can detect the failure
fi
#2. Iterate over every cluster host.
for ((i=102; i<=104; i++))
do
  echo "*********************** hadoop$i ***********************"
  #3. Send each requested path in turn ("$@" keeps paths with spaces intact).
  for file in "$@"
  do
    #4. Only sync paths that actually exist.
    if [ -e "$file" ]; then
      #5. Resolve the physical parent directory (-P: follow no symlinks).
      pdir=$(cd -P "$(dirname "$file")"; pwd)
      #6. File name without the directory part.
      fname=$(basename "$file")
      ssh "hadoop$i" "mkdir -p $pdir"   # -p: no error if the dir already exists
      rsync -av "$pdir/$fname" "hadoop$i:$pdir"
    else
      echo "$file does not exist!" >&2
    fi
  done
done
如果要同步的文件(如 my_env.sh)属于 root 用户,需要用 sudo 执行,并写出 xsync 脚本的全路径(sudo 不会继承普通用户的 PATH):
sudo /home/atguigu/bin/xsync /etc/profile.d/my_env.sh
个别参数解释:
1) $#:获取输入参数的个数
2) $@:获取所有输入参数(可逐个遍历)
3) cd -P:进入物理目录(软链接会被解析为真实路径)
hadoop启停脚本 hdp.sh
脚本用法:
启动命令 hdp.sh start
关闭命令 hdp.sh stop
#!/bin/bash
# hdp.sh — start/stop the Hadoop cluster (HDFS on hadoop102, YARN on hadoop103,
# JobHistory server on hadoop102).
# Usage: hdp.sh start|stop
case $1 in
"start")
  echo "---------------------- 启动hdfs -------------------------"
  ssh hadoop102 "/opt/module/hadoop-3.1.3/sbin/start-dfs.sh"
  echo "---------------------- 启动yarn -------------------------"
  ssh hadoop103 "/opt/module/hadoop-3.1.3/sbin/start-yarn.sh"
  echo "---------------------- 启动历史服务器 -------------------------"
  ssh hadoop102 "/opt/module/hadoop-3.1.3/sbin/mr-jobhistory-daemon.sh start historyserver"
  ;;
"stop")
  # Shut down in the reverse order of startup.
  echo "---------------------- 关闭历史服务器 -------------------------"
  ssh hadoop102 "/opt/module/hadoop-3.1.3/sbin/mr-jobhistory-daemon.sh stop historyserver"
  echo "---------------------- 关闭yarn -------------------------"
  ssh hadoop103 "/opt/module/hadoop-3.1.3/sbin/stop-yarn.sh"
  echo "---------------------- 关闭hdfs -------------------------"
  ssh hadoop102 "/opt/module/hadoop-3.1.3/sbin/stop-dfs.sh"
  ;;
*)
  # Unknown/missing argument: report usage on stderr and exit non-zero
  # (previously printed "error!" to stdout and exited 0).
  echo "Usage: $0 start|stop" >&2
  exit 1
  ;;
esac
xcall.sh 命令
脚本用法
xcall.sh jps
#!/bin/bash
# xcall.sh — run the same command on every cluster node over ssh.
# Usage: xcall.sh <command> [args...]
if [ $# -lt 1 ]; then
  # Without a command we would ssh an empty command line to every host.
  echo "Usage: $0 <command> [args...]" >&2
  exit 1
fi
param="$*"   # join all arguments into one command line for the remote shell
for ((i=102; i<=104; i++))
do
  echo "------------ hadoop$i -------------"
  ssh "hadoop$i" "$param"
done
zk启停脚本 zk.sh
启动命令 zk.sh start
关闭命令 zk.sh stop
#!/bin/bash
# zk.sh — manage ZooKeeper on every cluster node.
# Usage: zk.sh start|stop|status
case $1 in
"start")
  for host in hadoop102 hadoop103 hadoop104
  do
    echo "----------------------- 启动$host 的zk ---------------------------"
    ssh "$host" "/opt/module/zookeeper/bin/zkServer.sh start"
  done
  ;;
"stop")
  for host in hadoop102 hadoop103 hadoop104
  do
    echo "----------------------- 关闭$host 的zk ---------------------------"
    ssh "$host" "/opt/module/zookeeper/bin/zkServer.sh stop"
  done
  ;;
"status")
  for host in hadoop102 hadoop103 hadoop104
  do
    echo "----------------------- zk status ---------------------------"
    ssh "$host" "/opt/module/zookeeper/bin/zkServer.sh status"
  done
  ;;
*)
  echo error!
  ;;
esac
flume 启停脚本 f1.sh-采集(脚本内提示为"采集flume",运行在 hadoop102/103)
#!/bin/bash
# f1.sh — start/stop the log-collection Flume agents on hadoop102/103
# (file-flume-kafka: tails log files and produces them to Kafka).
# Usage: f1.sh start|stop
case $1 in
"start")
  for host in hadoop102 hadoop103
  do
    echo " --------启动 $host 采集flume -------"
    # nohup + trailing & so the agent survives the ssh session;
    # all agent output is redirected to log1.txt on the remote host.
    ssh "$host" "nohup /opt/module/flume/bin/flume-ng agent --conf-file /opt/module/flume/conf/file-flume-kafka.conf --name a1 -Dflume.root.logger=INFO,LOGFILE >/opt/module/flume/log1.txt 2>&1 &"
  done
  ;;
"stop")
  for host in hadoop102 hadoop103
  do
    echo " --------停止 $host 采集flume -------"
    # \$2 is escaped so awk (not this script) expands it remotely as the PID column.
    # Plain kill (SIGTERM) lets Flume shut down cleanly and matches f2.sh
    # (previously kill -9, which can lose buffered events).
    ssh "$host" "ps -ef | grep file-flume-kafka | grep -v grep |awk '{print \$2}' | xargs -n1 kill"
  done
  ;;
*)
  echo "Usage: $0 start|stop" >&2
  exit 1
  ;;
esac
说明:
- $2 在双引号内部会被解析为脚本的第二个参数,但这里想表达的是 awk 的第二列,所以需要将它转义,用 \$2 表示。
flume 启停脚本 f2.sh-消费(脚本内提示为"消费flume",运行在 hadoop104)
#!/bin/bash
# f2.sh — start/stop the consumer Flume agent on hadoop104
# (kafka-flume-hdfs: reads from Kafka and writes to HDFS).
case $1 in
"start")
  for host in hadoop104
  do
    echo " --------启动 $host 消费flume-------"
    # nohup + trailing & keeps the agent alive after the ssh session ends;
    # agent output goes to log2.txt on the remote host.
    ssh "$host" "nohup /opt/module/flume/bin/flume-ng agent --conf-file /opt/module/flume/conf/kafka-flume-hdfs.conf --name a1 -Dflume.root.logger=INFO,LOGFILE >/opt/module/flume/log2.txt 2>&1 &"
  done
  ;;
"stop")
  for host in hadoop104
  do
    echo " --------停止 $host 消费flume-------"
    # \$2 is escaped so awk expands it remotely (the agent's PID).
    ssh "$host" "ps -ef | grep kafka-flume-hdfs | grep -v grep |awk '{print \$2}' | xargs -n1 kill"
  done
  ;;
esac
kafka启停脚本 kf.sh
启动命令 kf.sh start
关闭命令 kf.sh stop
#!/bin/bash
# kf.sh — start/stop the Kafka broker on every cluster node.
case $1 in
"start")
  for broker in hadoop102 hadoop103 hadoop104
  do
    echo " --------启动 $broker Kafka-------"
    # -daemon: run the broker detached on the remote host
    ssh "$broker" "/opt/module/kafka/bin/kafka-server-start.sh -daemon /opt/module/kafka/config/server.properties"
  done
  ;;
"stop")
  for broker in hadoop102 hadoop103 hadoop104
  do
    echo " --------停止 $broker Kafka-------"
    ssh "$broker" "/opt/module/kafka/bin/kafka-server-stop.sh "
  done
  ;;
esac
superset 启停脚本 状态查看
#! /bin/bash
# Superset start/stop/status helper script.
# superset_status — probe for a running gunicorn (superset) process.
# Returns 0 when superset is NOT running, 1 when it IS running
# (inverted convention; every caller in this script relies on it).
superset_status(){
  result=$(ps -ef | awk '/gunicorn/ && !/awk/{print $2}' | wc -l)
  if [[ "$result" -eq 0 ]]; then
    return 0   # no gunicorn process found
  else
    return 1   # gunicorn is running
  fi
}
superset_start(){
# superset_start — start Superset via gunicorn inside the "superset" conda env.
# The block below is copied from ~/.bashrc; it initializes conda so that
# `conda activate` works in this non-interactive shell.
# >>> conda initialize >>>
# !! Contents within this block are managed by 'conda init' !!
__conda_setup="$('/opt/module/miniconda3/bin/conda' 'shell.bash' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
eval "$__conda_setup"
else
if [ -f "/opt/module/miniconda3/etc/profile.d/conda.sh" ]; then
. "/opt/module/miniconda3/etc/profile.d/conda.sh"
else
# Last resort: just put conda's bin directory on PATH.
export PATH="/opt/module/miniconda3/bin:$PATH"
fi
fi
unset __conda_setup
# <<< conda initialize <<<
# superset_status returns 0 when superset is NOT running (inverted convention),
# so exit code 0 here means it is safe to start.
superset_status >/dev/null 2>&1
if [[ $? -eq 0 ]]; then
# 5 workers, 120s timeout, daemonized, bound to hadoop201:8787.
conda activate superset ; gunicorn --workers 5 --timeout 120 --bind hadoop201:8787 --daemon 'superset.app:create_app()'
else
echo "superset 正在运行"
fi
}
# superset_stop — kill every gunicorn worker, unless none is running.
superset_stop(){
  superset_status >/dev/null 2>&1
  # superset_status returns non-zero when superset IS running.
  if [[ $? -ne 0 ]]; then
    ps -ef | awk '/gunicorn/ && !/awk/{print $2}' | xargs kill -9
  else
    echo "superset 未在运行"
  fi
}
# Dispatch on the first argument: start|stop|restart|status.
case $1 in
start )
  echo "启动 Superset"
  superset_start
  ;;
stop )
  echo "停止 Superset"
  superset_stop
  ;;
restart )
  echo "重启 Superset"
  superset_stop
  superset_start
  ;;
status )
  # superset_status returns 0 when NOT running (inverted convention).
  superset_status >/dev/null 2>&1
  if [[ $? -eq 0 ]]; then
    echo "superset 未在运行"
  else
    echo "superset 正在运行"
  fi
  ;;
* )
  # Previously an unknown argument fell through silently with exit 0.
  echo "Usage: $0 start|stop|restart|status" >&2
  exit 1
  ;;
esac
总控
#!/bin/bash
# Master control script for the whole data pipeline.
# Start order: zk -> hadoop -> kafka -> collection flume -> consumer flume;
# stop order is the exact reverse.
# Usage: <script> start|stop
case $1 in
"start")
  echo "------------------------ 启动集群 ------------------------"
  echo "****** 启动zk ******"
  zk.sh start
  echo "****** 启动hadoop ******"
  hdp.sh start
  echo "****** 启动kafka ******"
  kf.sh start    # fixed: was kk.sh, but the kafka script defined above is kf.sh
  echo "****** 启动生产flume ******"
  f1.sh start
  echo "****** 启动消费flume ******"
  f2.sh start
  ;;
"stop")
  echo "------------------------ 关闭集群 ------------------------"   # fixed banner (was 启动集群)
  echo "****** 关闭消费flume ******"
  f2.sh stop
  echo "****** 关闭生产flume ******"
  f1.sh stop
  echo "****** 关闭kafka ******"
  kf.sh stop     # fixed: was kk.sh
  echo "****** 关闭hadoop ******"
  hdp.sh stop
  echo "****** 关闭zk ******"
  zk.sh stop
  ;;
*)
  echo "Usage: $0 start|stop" >&2
  exit 1
  ;;
esac