One-Click Startup Scripts for a Big Data Cluster

1. One-click start

The startall.sh script below brings up the whole cluster: ZooKeeper, Hadoop, Hive, the Spark history server, Kafka, HBase, Redis, and ClickHouse.

vi startall.sh

#!/bin/bash
SPARK_HOME=/opt/module/spark
REDIS_HOME=/opt/module/redis
# Start services in dependency order: ZooKeeper first, then Hadoop and Hive,
# then the frameworks that sit on top of them.
/bin/zk_shtd.sh start
/bin/hadoop_shtd.sh start
/bin/hive_shtd.sh start
$SPARK_HOME/sbin/start-history-server.sh
/bin/kafka_shtd.sh start
/bin/hbase_shtd.sh start
$REDIS_HOME/bin/redis-server /opt/module/redis/etc/redis.conf
/bin/clickhouse-server_shtd.sh start
# Show the Java processes on every node so you can verify everything came up.
/bin/jpsall
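
For symmetry, a stop-all script can tear everything down in reverse order. This is only a minimal sketch: it assumes the same helper scripts and install paths as above, that Redis listens on its default port without a password, and the stopall.sh file name is just a suggestion.

vi stopall.sh

#!/bin/bash
SPARK_HOME=/opt/module/spark
REDIS_HOME=/opt/module/redis
# Stop in the reverse of the startup order.
/bin/clickhouse-server_shtd.sh stop
$REDIS_HOME/bin/redis-cli shutdown
/bin/hbase_shtd.sh stop
/bin/kafka_shtd.sh stop
$SPARK_HOME/sbin/stop-history-server.sh
/bin/hive_shtd.sh stop
/bin/hadoop_shtd.sh stop
/bin/zk_shtd.sh stop
/bin/jpsall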

2. ZooKeeper

vi /bin/zk_shtd.sh

#!/bin/bash
ZK_HOME=/opt/module/zookeeper
names=(master slave1 slave2)
if [ $# -lt 1 ]
then
    echo "No Args Input..."
    exit ;
fi

case $1 in
"start")
    for i in ${names[@]}
    do
        echo "=====================  $i  ======================="
        ssh $i "${ZK_HOME}/bin/zkServer.sh start"
    done
;;
"stop")
    for i in ${names[@]}
    do
        echo "=====================  $i  ======================="
        ssh $i "${ZK_HOME}/bin/zkServer.sh stop"
    done
;;
"status")
    for i in ${names[@]}
    do
        echo "=====================  $i  ======================="
        ssh $i "${ZK_HOME}/bin/zkServer.sh status"
    done
;;
*)
    echo "Input Args Error..."
;;
esac
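
After saving, make the script executable. Assuming passwordless SSH to master, slave1 and slave2 is already configured, it can then be run from any node:

chmod +x /bin/zk_shtd.sh
zk_shtd.sh start
zk_shtd.sh status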

3. Hadoop

vi /bin/hadoop_shtd.sh

#!/bin/bash
HADOOP_HOME=/opt/module/hadoop
names=(master slave1 slave2)
if [ $# -lt 1 ]
then
    echo "No Args Input..."
    exit ;
fi

case $1 in
"start")
        echo " =================== START hadoop ==================="

        echo " --------------- start  hdfs ---------------"
        ssh  ${names[0]} "$HADOOP_HOME/sbin/start-dfs.sh"
        echo " --------------- start yarn ---------------"
        ssh ${names[1]}  "$HADOOP_HOME/sbin/start-yarn.sh"
        echo " --------------- start historyserver ---------------"
        ssh ${names[0]}  "$HADOOP_HOME/bin/mapred --daemon start historyserver"
;;
"stop")
        echo " =================== STOP hadoop ==================="

        echo " --------------- stop historyserver ---------------"
        ssh ${names[0]}  "$HADOOP_HOME/bin/mapred --daemon stop historyserver"
        echo " ---------------stop yarn ---------------"
        ssh ${names[1]}  "$HADOOP_HOME/sbin/stop-yarn.sh"
        echo " ---------------stop hdfs ---------------"
        ssh ${names[0]}  "$HADOOP_HOME/sbin/stop-dfs.sh"
;;
*)
    echo "Input Args Error..."
;;
esac
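
After a start, the cluster can be spot-checked with the standard Hadoop CLI from any node that has the client configuration:

hdfs dfsadmin -report | head -n 20    # live DataNodes and capacity
yarn node -list                       # NodeManagers registered with the ResourceManager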

4. Hive

vi /bin/hive_shtd.sh

#!/bin/bash
HIVE_HOME=/opt/module/hive
HIVE_LOG_DIR=$HIVE_HOME/logs
if [ ! -d $HIVE_LOG_DIR ]
then
        mkdir -p $HIVE_LOG_DIR
fi
# check_process <process keyword> <listen port>
# Prints the pid(s) matching the keyword; returns 0 only when the process that
# owns the listening port (from netstat) is among them, i.e. the service is really up.
function check_process()
{
    pid=$(ps -ef 2>/dev/null | grep -v grep | grep -i $1 | awk '{print $2}')
    ppid=$(netstat -nltp 2>/dev/null | grep $2 | awk '{print $7}' | cut -d '/' -f 1)
    echo $pid
    [[ "$pid" =~ "$ppid" ]] && [ "$ppid" ] && return 0 || return 1
}

function hive_start()
{
    metapid=$(check_process HiveMetastore 9083)
    cmd="nohup hive --service metastore >$HIVE_LOG_DIR/metastore.log 2>&1 &"
    cmd=$cmd" hdfs dfsadmin -safemode wait >/dev/null 2>&1"
    [ -z "$metapid" ] && eval $cmd || echo "Metastore is already running"
    server2pid=$(check_process HiveServer2 10000)
    cmd="nohup hive --service hiveserver2 >$HIVE_LOG_DIR/hiveServer2.log 2>&1 &"
    [ -z "$server2pid" ] && eval $cmd || echo "HiveServer2 is already running"
}

function hive_stop()
{
    metapid=$(check_process HiveMetastore 9083)
    [ "$metapid" ] && kill $metapid || echo "Metastore is not running"
    server2pid=$(check_process HiveServer2 10000)
    [ "$server2pid" ] && kill $server2pid || echo "HiveServer2 is not running"
}

case $1 in
"start")
    hive_start
    ;;
"stop")
    hive_stop
    ;;
"restart")
    hive_stop
    sleep 2
    hive_start
    ;;
"status")
    check_process HiveMetastore 9083 >/dev/null && echo "Metastore is running" || echo "Metastore is not running"
    check_process HiveServer2 10000 >/dev/null && echo "HiveServer2 is running" || echo "HiveServer2 is not running"
    ;;
*)
    echo Invalid Args!
    echo 'Usage: '$(basename $0)' start|stop|restart|status'
    ;;
esac
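
Once the script reports both services up, a quick connection test with Beeline (the JDBC host and user name below are assumptions; adjust them to your environment):

beeline -u jdbc:hive2://master:10000 -n root -e "show databases;"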


--------------------------------------------------------
Alternative: a start-only version of the Hive script
#!/bin/bash
HIVE_HOME=/opt/module/hive
# make sure the log directory exists before redirecting output into it
mkdir -p ${HIVE_HOME}/logs
# start metastore
${HIVE_HOME}/bin/hive --service metastore >>${HIVE_HOME}/logs/metastore.log 2>&1 &
echo "start metastore"
# start hiveserver2
${HIVE_HOME}/bin/hiveserver2 >>${HIVE_HOME}/logs/hiveserver2.log 2>&1 &
echo "start hiveserver2"

5. Kafka

vi /bin/kafka_shtd.sh

#!/bin/bash
KAFKA_HOME=/opt/module/kafka
names=(master slave1 slave2)
if [ $# -lt 1 ]
then
  echo "Input Args Error....."
  exit
fi
case $1 in
start)
  # Give the services started earlier (ZooKeeper in particular) a moment to settle
  # before bringing up the brokers.
  sleep 10
;;
esac
for i in ${names[@]}
do

case $1 in
start)
  echo "==================START $i KAFKA==================="
  ssh $i $KAFKA_HOME/bin/kafka-server-start.sh -daemon $KAFKA_HOME/config/server.properties
;;
stop)
  echo "==================STOP $i KAFKA==================="
  ssh $i $KAFKA_HOME/bin/kafka-server-stop.sh
;;

*)
 echo "Input Args Error....."
 exit
;;
esac

done
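
To confirm the brokers are reachable, list topics against one of them. This assumes Kafka 2.2+ (which accepts --bootstrap-server; older releases use --zookeeper instead) and the default listener port 9092:

/opt/module/kafka/bin/kafka-topics.sh --bootstrap-server master:9092 --list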

6. HBase

vi /bin/hbase_shtd.sh

#!/bin/bash
HBASE_HOME=/opt/module/hbase
if [ $# -lt 1 ]
then
  echo "Input Args Error....."
  exit
fi


case $1 in
start)
  # Give HDFS and ZooKeeper (started earlier) a moment to settle before HBase starts.
  sleep 10
;;
esac


case $1 in
start)
  echo "==================START HBASE==================="
  $HBASE_HOME/bin/start-hbase.sh
;;
stop)
  echo "==================STOP HBASE==================="
  $HBASE_HOME/bin/stop-hbase.sh
;;

*)
 echo "Input Args Error....."
 exit
;;
esac
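
A quick health check after starting is to run the status command through the HBase shell:

echo "status" | /opt/module/hbase/bin/hbase shell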

7. ClickHouse

vi /bin/clickhouse-server_shtd.sh

#!/bin/bash
if [ $# -lt 1 ]
then
  echo "Input Args Error....."
  exit
fi

case $1 in
start)
  echo "==================START clickhouse==================="
  /etc/init.d/clickhouse-server start
;;
stop)
  echo "==================STOP clickhouse==================="
  /etc/init.d/clickhouse-server stop
;;
status)
  echo "==================STATUS clickhouse==================="
  systemctl status clickhouse-server
;;

*)
 echo "Input Args Error....."
 exit
;;
esac
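
Once the server is running, a one-line query through clickhouse-client confirms it accepts connections (assumes the default local listener and no password):

clickhouse-client --query "SELECT version()"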

8. xsync

vi /bin/xsync
If rsync is not installed ("rsync: command not found"), install it first: yum install rsync -y

#!/bin/bash
names=(master slave1 slave2)
if [ $# -lt 1 ]
then
  echo "not enough arguments!"
  exit;
fi
for host in ${names[@]}
do
  echo ========$host=========
  for file in $@
  do
    if [ -e $file ]
    then
      pdir=$(cd -P $(dirname $file);pwd)
      fname=$(basename $file)
      ssh $host "mkdir -p $pdir"
      rsync -av $pdir/$fname $host:$pdir
    else
      echo "$file does not exist!"
    fi
  done
done
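
A typical first use is to make xsync executable and push these helper scripts to every node:

chmod +x /bin/xsync
xsync /bin/zk_shtd.sh /bin/hadoop_shtd.sh /bin/kafka_shtd.sh /bin/jpsall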

9. jpsall

vi /bin/jpsall

#!/bin/bash
names=(master slave1 slave2)
for host in ${names[@]}
do
  echo "========$host========="
  ssh ${host} "jps"
done

10. Maxwell

#!/bin/bash
MAXWELL_HOME=/opt/module/maxwell

# Print the pid of the running Maxwell process (prints nothing if it is not running).
status_maxwell(){
    pid=`ps -ef | grep maxwell | grep -v grep | awk '{print $2}'`
    echo $pid
}

# Start Maxwell as a daemon unless a pid was already found.
start_maxwell(){
    if [[ -z $pid ]]
    then
        $MAXWELL_HOME/bin/maxwell --config $MAXWELL_HOME/config.properties --daemon
        echo "===== maxwell started ====="
    else
        echo "maxwell is already running; no need to start it again..."
    fi
}

# Kill the running Maxwell process, if any.
stop_maxwell(){
    if [[ -z $pid ]]
    then
        echo "==== maxwell is not running ===="
    else
        kill -9 $pid
        echo "killed maxwell, pid: $pid"
    fi
}

if [ $# -lt 1 ]
then
    pid=`status_maxwell`
    start_maxwell
else
    pid=`status_maxwell`
    case $1 in
    "start")
        start_maxwell
        ;;
    "stop")
        stop_maxwell
        ;;
    "status")
        if [[ -z $pid ]]
        then
            echo "maxwell is not running !!!"
        else
            echo "maxwell is running, pid: $pid"
        fi
        ;;
    *)
        echo "Usage: $(basename $0) start|stop|status"
        ;;
    esac
fi
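
Assuming the script is saved as, say, /bin/maxwell_shtd.sh (the file name is not given above) and made executable, usage mirrors the other helpers:

chmod +x /bin/maxwell_shtd.sh
maxwell_shtd.sh start
maxwell_shtd.sh status
maxwell_shtd.sh stop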
