大数据中各种组件的启动停止脚本

#!/bin/bash
# zookeeper cluster control script.
# Usage: $0 {start|stop|status}
# Runs zkServer.sh with the requested sub-command on every cluster node.

case $1 in
"start"|"stop"|"status")
	# The three actions are identical except for the zkServer.sh
	# sub-command, so one arm handles all of them.
	for host in node01 node02 node03; do
		# NB: original had echo"..." (no space) — that is a
		# command-not-found, not an echo; fixed here.
		echo "----------$host-------------"
		ssh "$host" "/opt/apache/zookeeper/bin/zkServer.sh $1"
	done
	;;
*)
	echo "Usage: $0 {start|stop|status}" >&2
	;;
esac
	
	
	#!/bin/bash
#zookeeper 进程号
ZK_PID=`ps -ef|grep zookeeper|grep -v grep|awk '{print $2}'`
#kafka 进程号
KAFKA_PID=`ps -ef|grep kafka|grep -v grep|awk '{print $2}'`
#机器的hostname
NODE_NAME=`hostname`
#选址“启动服务”
case $1 in "start")
for i in node01 node02 node03
   {
#如果zookeeper进程存在
if [ -n "$ZK_PID" ];then
  echo "zkServer is already started"
else
  echo "starting zkServer.....";
  sh $i /opt/zookeeper/bin/zkServer.sh "start";
  if [ $? -eq 0 ]; then
    echo "zkServer of $NODE_NAME started Success!"
  else
    echo "zkServer of $NODE_NAME started failure!"
  fi
fi
#如果kafka进程存在
if [ -n "$KAFKA_PID" ];then
  echo "KAFKA Server is already started"
else
  echo "starting KAFKA Server.....";
#启动kafka
  sh $i "/opt/kafka/bin/kafka-server-start.sh -daemon /opt/kafka/config/server.properties"
  if [ $? -eq 0 ]; then
    echo "KAFKA Server of $NODE_NAME started Success!"
  else
    echo "KAFKA Server of $NODE_NAME started failure!"
  fi
fi
 
};;
#如果选址“停止服务”
"stop")
for i in node01 node02 node03
{
if [ -z "$KAFKA_PID" ];then
  echo "KAFKA Server is already stopped"
else
  echo "stopping KAFKA Server.....";
  sh $i "/opt/kafka/bin/kafka-server-stop.sh"
  if [ $? -eq 0 ]; then
    echo "KAFKA Server of $NODE_NAME stopped Success!"
  else
    echo "KAFKA Server of $NODE_NAME stopped failure!"
  fi
fi 
};;
esac


#!/bin/bash
# Start / stop the flume agent that consumes from kafka and sinks to HDFS.
# Usage: $0 {start|stop}
# Fixes vs. original: shebang typo, malformed case patterns ("start)"{ is a
# syntax error), remote command broken across raw newlines, "--nameal" for
# "--name a1", "/opt/hmoe/" path typo, and "xargs -nl" for "xargs -n1".

case $1 in
"start")
	for host in node03; do
		echo "--------启动flume $host 消费flume----------"
		# The remote command must be a single line so ssh passes it
		# through intact; nohup + & detaches the agent on the node.
		# NOTE(review): agent name "a1" recovered from the "--nameal"
		# typo — confirm against kafka-flume-hdfs.conf.
		ssh "$host" "nohup /home/software/flume/bin/flume-ng agent --conf-file /opt/home/software/flume/conf/kafka-flume-hdfs.conf --name a1 -Dflume.root.logger=INFO,LOGFILE >/opt/flume/log2.txt 2>&1 &"
	done
	;;
"stop")
	for host in node03; do
		echo "-------停止flume $host-------消费flume------"
		# Locate the agent by its config file name; \$2 keeps awk's
		# field reference from being expanded by the local shell.
		ssh "$host" "ps -ef | grep kafka-flume-hdfs | grep -v grep | awk '{print \$2}' | xargs -n1 kill"
	done
	;;
*)
	echo "Usage: $0 {start|stop}" >&2
	;;
esac

集群分发脚本:
#!/bin/bash
# Cluster file distribution script: rsync every given file/directory to the
# same absolute path on each cluster node.
# Usage: $0 file [file ...]
# Fixes vs. original: "mode02" host typo (node02 was silently skipped) and
# quoting of all expansions so paths with spaces survive.

#1 Exit when no argument is given.
if [ $# -lt 1 ]; then
    echo "Not Enough Argument!"
    exit
fi

#2. Iterate over every machine in the cluster
#   (equivalently: for host in node{01..03}).
for host in node01 node02 node03; do
    echo "====================    $host    ===================="
    #3. Send every requested path in turn.
    for file in "$@"; do
        #4 Skip paths that do not exist locally.
        if [ -e "$file" ]; then
            #5. Resolve the physical parent directory
            #   (-P follows symlinks to the real path).
            pdir=$(cd -P "$(dirname "$file")"; pwd)
            echo "pdir=$pdir"

            #6. File name without the directory part.
            fname=$(basename "$file")
            echo "fname=$fname"

            #7. Ensure the target directory exists on the remote host.
            ssh "$host" "mkdir -p $pdir"

            #8. Sync the file into the same path under $USER on the remote host.
            rsync -av "$pdir/$fname" "$USER@$host:$pdir"
        else
            echo "$file does not exists!"
        fi
    done
done

Hadoop停启动脚本:
#!/bin/bash
# Start / stop the whole hadoop cluster: hdfs (node01), yarn (node02) and
# the mapreduce history server (node01). Stop runs in reverse order.
# Usage: $0 {start|stop}
# Fix vs. original: "noed01" host typo in the stop branch.

if [ $# -lt 1 ]; then
	echo "No Args Input..."
	exit
fi

# Single place for the install path used by every remote command.
HADOOP_HOME=/usr/local/hadoop/hadoop-2.9.2

case $1 in
"start")
	echo " =================== 启动 hadoop 集群 ==================="
	echo " --------------- 启动 hdfs ---------------"
	ssh node01 "$HADOOP_HOME/sbin/start-dfs.sh"
	echo " --------------- 启动 yarn ---------------"
	ssh node02 "$HADOOP_HOME/sbin/start-yarn.sh"
	echo " --------------- 启动 historyserver ---------------"
	# NOTE(review): "mapred --daemon" is Hadoop 3.x syntax; on 2.9.2 this is
	# usually "sbin/mr-jobhistory-daemon.sh start historyserver" — verify
	# against the actual install before relying on it.
	ssh node01 "$HADOOP_HOME/bin/mapred --daemon start historyserver"
;;
"stop")
	echo " =================== 关闭 hadoop 集群 ==================="
	echo " --------------- 关闭 historyserver ---------------"
	ssh node01 "$HADOOP_HOME/bin/mapred --daemon stop historyserver"
	echo " --------------- 关闭 yarn ---------------"
	ssh node02 "$HADOOP_HOME/sbin/stop-yarn.sh"
	echo " --------------- 关闭 hdfs ---------------"
	ssh node01 "$HADOOP_HOME/sbin/stop-dfs.sh"
;;
*)
	echo "Input Args Error..."
;;
esac

kafka启动停止脚本:
#!/bin/bash
# Kafka cluster control: start or stop the broker on every node.
# Usage: $0 {start|stop}

case $1 in
"start")
	# Launch the broker daemon on each node with its local config.
	for host in node01 node02 node03; do
		echo "--------启动 $host-------kafka"
		ssh "$host" "/opt/kafka/bin/kafka-server-start.sh -daemon /opt/kafka/config/server.properties"
	done
	;;
"stop")
	# Ask each node's broker to shut down.
	for host in node01 node02 node03; do
		echo "--------停止 $host-------kafka"
		ssh "$host" "/opt/kafka/bin/kafka-server-stop.sh "
	done
	;;
esac

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值