#!/usr/bin/env python
import commands
import time
# Hosts whose Impala / HDFS datanode / YARN nodemanager daemons this
# script watches over ssh.
monitor_host = ['dn1.h.cc.com', 'dn2.h.cc.com', 'dn3.h.cc.com']

# Maps '<host>_<service>_status' -> exit code of the remote process check
# (0 means the process was found running).
status_dic = {}
def monitor_cmd():
    """Probe impala-server, HDFS datanode and YARN nodemanager on every
    host in monitor_host over ssh, restart any service whose process is
    not running, and record each check's exit status in status_dic.

    A non-zero status from the remote ``ps aux | grep ...`` pipeline means
    no matching process was found, i.e. the service is down.
    """
    for h in monitor_host:
        print("########################## %s start time: %s ###########################\n\n"
              % (h, time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))

        impala_status, impala_result = commands.getstatusoutput(
            "ssh -p 22 %s 'ps aux | grep impala|grep -v grep'" % h)
        if impala_status != 0:
            commands.getstatusoutput(
                "ssh -p 22 %s '/etc/init.d/impala-server restart'" % h)
        # Status is recorded whether or not a restart was attempted.
        status_dic['%s_impala_status' % h] = impala_status

        dn_status, dn_result = commands.getstatusoutput(
            "ssh -p 22 %s 'ps aux | grep datanode|grep -v grep'" % h)
        if dn_status != 0:
            commands.getstatusoutput(
                "ssh -p 22 %s '/etc/init.d/hadoop-hdfs-datanode restart'" % h)
        status_dic['%s_dn_status' % h] = dn_status

        nm_status, nm_result = commands.getstatusoutput(
            "ssh -p 22 %s 'ps aux | grep nodemanager|grep -v grep'" % h)
        if nm_status != 0:
            # BUG FIX: the original command had a doubled
            # '/etc/init.d//etc/init.d/' prefix and could never restart
            # the nodemanager.
            commands.getstatusoutput(
                "ssh -p 22 %s '/etc/init.d/hadoop-yarn-nodemanager restart'" % h)
        status_dic['%s_nm_status' % h] = nm_status

        print("%s\n\n%s\n\n%s\n\n" % (impala_result, dn_result, nm_result))
        print("########################## %s end time: %s ###########################\n\n"
              % (h, time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
        # Give any just-restarted services a moment before the next host.
        time.sleep(10)
def loopTask():
    """Run monitor_cmd twice back to back, then print the collected
    status dictionary.

    The original used ``for i in range(1, 3)`` with an unused index and a
    Py2-only trailing-comma print; the pass count (two sweeps) is kept.
    """
    for _ in range(2):
        monitor_cmd()
    print(status_dic)
def reimpala():
    """Best-effort run of the impala control script; output and exit
    status are intentionally ignored.

    BUG FIX: the original wrapped the path in typographic quotes
    (a SyntaxError); replaced with plain ASCII quotes.
    """
    commands.getstatusoutput('/root/cron_impala_control.sh')
# Main loop: run the two-pass monitor sweep forever; whenever any recorded
# status is non-zero, note the failing key in hadoop_status.txt and rerun
# the impala control script.
while True:
    loopTask()
    for k, v in status_dic.items():
        # Guard clause replaces the original's redundant
        # "if v == 0: continue / else:" shape.
        if v != 0:
            # 'w' mode truncates, so the file only ever holds the most
            # recent failure. "with" guarantees the handle is closed
            # (the original leaked it on every iteration).
            with open('hadoop_status.txt', 'w') as f:
                # BUG FIX: message typo 'faild' -> 'failed'.
                f.write('%s status failed!!' % k)
            reimpala()