一、背景介绍
平时工作中,因为负责的机器、集群服务等一些重复性的任务较多,而且又全是基于centos命令行下操作完成的,故想到是否可以将这些杂活全部自动化。如:
- 快速查看所有机器负载信息、磁盘容量、进程等关键信息。
- 快速重启一组机器、服务、部署django应用服务等等。
二、示例
from fabric.api import local, env, execute, roles, run, lcd, cd, settings
# Role definitions: map each logical role name to the SSH connection
# strings (user@host:port) Fabric should target for that role.
env.roledefs = dict(
    hadoopServer=[
        'hadoop@192.168.72.236:22',
        'hadoop@192.168.72.237:22',
        'hadoop@192.168.72.234:22',
        'hadoop@192.168.72.235:22',
    ],
    bigdataServer=[
        'bigdata@192.168.72.238:22',
        'bigdata@192.168.72.239:22',
        'bigdata@192.168.72.240:22',
    ],
)
@roles('bigdataServer', 'hadoopServer')
def show_fiseSize():
    """Print disk usage (``df -lh``) on every host of both roles.

    NOTE(review): the name looks like a typo of ``show_fileSize``, but it
    is kept unchanged because Fabric exposes it as the public task name.
    """
    run('df -lh')
@roles('bigdataServer', 'hadoopServer')
def show_load():
    """Report the load average of every host via ``uptime``."""
    run('uptime')
@roles('bigdataServer', 'hadoopServer')
def show_process():
    """List the running JVM processes on every host via ``jps``."""
    run('jps')
@roles('hadoopServer')
def control_start_hadoop():
    """Start the HDFS and YARN daemons on the hadoop hosts."""
    hadoop_home = '/home/hadoop/hadoop-2.7.2'
    # warn_only: a non-zero exit (e.g. daemons already running) must not abort.
    with cd(hadoop_home), settings(warn_only=True):
        run('sbin/start-dfs.sh')
        run('sbin/start-yarn.sh')
@roles('hadoopServer')
def control_stop_hadoop():
    """Stop the HDFS and YARN daemons on the hadoop hosts."""
    hadoop_home = '/home/hadoop/hadoop-2.7.2'
    # warn_only: a non-zero exit (e.g. daemons already stopped) must not abort.
    with cd(hadoop_home), settings(warn_only=True):
        run('sbin/stop-dfs.sh')
        run('sbin/stop-yarn.sh')
@roles('bigdataServer', 'hadoopServer')
def control_start_zookeeper():
    """Start the ZooKeeper server on every host of both roles."""
    zk_home = '/opt/zookeeper-3.4.8'
    with cd(zk_home), settings(warn_only=True):
        run('bin/zkServer.sh start')
@roles('bigdataServer', 'hadoopServer')
def control_stop_zookeeper():
    """Stop the ZooKeeper server on every host of both roles."""
    zk_home = '/opt/zookeeper-3.4.8'
    with cd(zk_home), settings(warn_only=True):
        run('bin/zkServer.sh stop')
@roles('hadoopServer')
def control_start_spark():
    """Start the Spark master/worker daemons on the hadoop hosts."""
    spark_home = '/home/hadoop/spark-2.2.0'
    with cd(spark_home), settings(warn_only=True):
        run('sbin/start-all.sh')
@roles('hadoopServer')
def control_stop_spark():
    """Stop the Spark master/worker daemons on the hadoop hosts."""
    spark_home = '/home/hadoop/spark-2.2.0'
    with cd(spark_home), settings(warn_only=True):
        run('sbin/stop-all.sh')
@roles('hadoopServer')
def control_start_hbase():
    """Start the HBase daemons on the hadoop hosts."""
    hbase_home = '/home/hadoop/hbase-1.2.6'
    with cd(hbase_home), settings(warn_only=True):
        run('bin/start-hbase.sh')
@roles('hadoopServer')
def control_stop_hbase():
    """Stop the HBase daemons on the hadoop hosts."""
    hbase_home = '/home/hadoop/hbase-1.2.6'
    with cd(hbase_home), settings(warn_only=True):
        run('bin/stop-hbase.sh')
@roles('bigdataServer')
def control_start_es():
    """Start Elasticsearch as a daemon, writing its PID to ./pid."""
    es_home = '/opt/elasticsearch-5.5.1'
    with cd(es_home), settings(warn_only=True):
        # -d daemonize; -p pid records the process id for control_stop_es.
        run('bin/elasticsearch -d -p pid')
@roles('bigdataServer')
def control_stop_es():
    """Kill the Elasticsearch process whose PID was written at startup.

    NOTE(review): ``kill -9`` gives ES no chance to shut down cleanly;
    a plain TERM would be gentler — left as-is to preserve behavior.
    """
    es_home = '/opt/elasticsearch-5.5.1'
    with cd(es_home), settings(warn_only=True):
        run('kill -9 `cat pid`')
@roles('bigdataServer')
def control_start_kafka():
    """Start the Kafka broker in daemon mode on the bigdata hosts."""
    kafka_home = '/opt/kafka_2.11-0.11.0.0'
    with cd(kafka_home), settings(warn_only=True):  # cd() changes the remote working dir
        run('bin/kafka-server-start.sh -daemon config/server.properties')  # run() executes remotely
@roles('bigdataServer')
def control_stop_kafka():
    """Stop the Kafka broker on the bigdata hosts."""
    kafka_home = '/opt/kafka_2.11-0.11.0.0'
    with cd(kafka_home), settings(warn_only=True):  # cd() changes the remote working dir
        run('bin/kafka-server-stop.sh')  # run() executes remotely
def _show_cluster_info():
    """Print disk, load and process information for every host."""
    overview_tasks = (
        ('==========显示各服务器磁盘信息==========', show_fiseSize),
        ('==========显示各服务器负载信息==========', show_load),
        ('==========显示各服务器进程信息==========', show_process),
    )
    for banner, task in overview_tasks:
        print(banner)
        execute(task)
        print('')


def _control_service(label, action, start_task, stop_task):
    """Print a banner and start or stop one service according to *action*.

    Any *action* other than 'start'/'stop' only prints the banner,
    matching the original if/elif fall-through behavior.
    """
    print('==========%s %s集群==========' % (action, label))
    if action == 'start':
        execute(start_task)
    elif action == 'stop':
        execute(stop_task)


def dotask(type='stop'):
    """Show cluster status, then start or stop every managed service.

    :param type: 'start' or 'stop'. The name shadows the ``type`` builtin
        but is kept for backward compatibility with existing invocations
        such as ``fab dotask:type=start``.
    """
    _show_cluster_info()
    # Table-driven dispatch replaces five copy-pasted if/elif blocks;
    # order matters and matches the original: kafka, es, hadoop, spark, hbase.
    services = (
        ('kafka', control_start_kafka, control_stop_kafka),
        ('es', control_start_es, control_stop_es),
        ('hadoop', control_start_hadoop, control_stop_hadoop),
        ('spark', control_start_spark, control_stop_spark),
        ('hbase', control_start_hbase, control_stop_hbase),
    )
    last = len(services) - 1
    for i, (label, start_task, stop_task) in enumerate(services):
        _control_service(label, type, start_task, stop_task)
        # The original prints a blank separator after every service except the last.
        if i != last:
            print('')
三、优点和缺点
声明:博客内容都是本人验证过的功能和使用总结,鉴于工作原因和时间关系,可能部分描述和总结不准确,望广大博友留言明示。本人也会对[原创]部分的内容不断迭代、更正。希望本博客内容能给大家前行的路上,带一点点光芒。