前言
使用场景:ceph某模块高可用实现方案
vim /etc/keepalived/keepalived.conf配置说明
! Configuration File for keepalived
global_defs {
notification_email {
admin@example.com
}
notification_email_from noreply_admin@example.com
smtp_server 127.0.0.1
smtp_connect_timeout 60
router_id BALANCE_ID # 标识名
enable_script_security # 设置脚本的可运行性
script_user root # 脚本执行用户
}
vrrp_instance VI_1 { # 实例1
state BACKUP #可以是MASTER或BACKUP,不过当其他节点keepalived启动时会将priority比较大的节点选举为MASTER
nopreempt # 非抢占模式
preempt_delay 1 # 抢占延时,默认零秒,这里设置1秒
interface ens33 # 网卡
virtual_router_id 1 # 虚拟路由ID,相同id的vrrp实例为一组,取值在0-255之间,用来区分多个instance的VRRP组播,同一网段内ID不能重复;主备必须为一样;
priority 150 # 用来选举master的,要成为master那么这个选项的值最好高于其他机器50个点,该项取值范围是1-255(在此范围之外会被识别成默认值100)
advert_int 1 # 检查间隔默认为1秒,即1秒进行一次master选举(可以认为是健康查检时间间隔)
authentication { # 认证区域,认证类型有PASS和HA(IPSEC),推荐使用PASS(密码只识别前8位)
auth_type PASS # 类型 推荐PASS
auth_pass 123456 # 密码
}
virtual_ipaddress { # 虚拟出的VIP
192.168.93.111
}
# 组播模式下所有的信息都会向224.0.0.18的组播地址发送,产生众多的无用信息,并且会产生干扰和冲突,建议使用如下单播模式
unicast_src_ip 192.168.93.101 # 单播模式本机地址
unicast_peer { # 单播模式目标地址
192.168.93.102
192.168.93.103
192.168.93.104
192.168.93.105
}
}
vrrp_instance VI_2 {
state BACKUP
nopreempt
preempt_delay 1
interface ens33
virtual_router_id 2
priority 110
advert_int 1
。。。
主节点执行代码
main_server.py
# -*- coding: utf-8 -*-
# Server-side driver: pushes a keepalived VRRP config to a set of ceph
# nodes in parallel over remoto/execnet (the client module is write_keep).
from collections import namedtuple
import socket
import os,sys
import traceback
import json
import remoto
import time,datetime
from concurrent.futures import ThreadPoolExecutor
# Payload shipped to every node: the handler name to run remotely plus the
# VRRP settings it needs (priority, VIP, network interface).
keepalivedData = namedtuple('keepalivedData', ['fun_name', 'priority', 'virtual_ipaddress','interface'])
import logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
# basicConfig already installed a root handler; force its level to DEBUG too.
logging.root.handlers[0].setLevel(logging.DEBUG)
#from . import write_keep
import write_keep
class RemoteParam(object):
    """Bundle describing one remote invocation.

    Carries the target host, the client module to push to the remote side,
    the remote function name, a logger, and an optional payload object.
    Attributes are read back by execute_thread().
    """

    def __init__(self, remote_host, remote_client, remote_fun_name, logger, param=None):
        # Pure data holder: stash everything as-is for later unpacking.
        (self.remote_host,
         self.remote_client,
         self.remote_fun_name,
         self.logger,
         self.param) = (remote_host, remote_client, remote_fun_name, logger, param)
class NewThreadPoolExecutor(ThreadPoolExecutor):
    """ThreadPoolExecutor with a map variant taking an overall deadline.

    Unlike Executor.map (whose timeout argument is keyword-only), new_map
    takes the timeout positionally and bounds the WHOLE batch: every result
    must arrive within `timeout` seconds of the call.
    """

    def new_map(self, fn, timeout, *iterables):
        """Map fn over the zipped iterables; yield results in submit order."""
        # Fix the deadline now, before any work is submitted.
        deadline = time.time()
        if timeout is not None:
            deadline = time.time() + timeout
        futures = [self.submit(fn, *args) for args in zip(*iterables)]

        # The generator body lives in a closure so that every future is
        # already submitted before the caller pulls the first result.
        def iter_results():
            try:
                # Reverse once, then pop from the tail: results come back
                # in the original submission order.
                futures.reverse()
                while futures:
                    # pop() drops our reference to the future before the
                    # (possibly long) wait in .result().
                    if timeout is None:
                        yield futures.pop().result()
                    else:
                        yield futures.pop().result(deadline - time.time())
            finally:
                # Cancel whatever was not consumed (early exit or failure).
                for pending in futures:
                    pending.cancel()

        return iter_results()
class JSONDataEncoder(json.JSONEncoder):
def default(self, obj):
d = {'__class__': obj.__class__.__name__, '__module__': obj.__module__}
d.update(obj.__dict__)
return d
class JSONDataDecoder(json.JSONDecoder):
    """Decoder that inverts JSONDataEncoder.

    Dicts tagged with '__class__'/'__module__' are re-instantiated through
    their class constructor; everything else decodes as plain data.
    """

    def __init__(self):
        json.JSONDecoder.__init__(self, object_hook=self.dict2obj)

    def dict2obj(self, d):
        """object_hook: turn tagged dicts back into instances."""
        # Untagged dicts pass through unchanged.
        if '__class__' not in d:
            return d
        class_name = d.pop('__class__')
        module = __import__(d.pop('__module__'))
        target = getattr(module, class_name)
        # NOTE(review): .encode('ascii') yields str keys on Python 2 but
        # bytes keys on Python 3 -- this hook looks Python-2-only; confirm.
        kwargs = dict((key.encode('ascii'), value) for key, value in d.items())
        return target(**kwargs)

    def decode2bj(self, class_name, json_data):
        """Decode json_data and rebuild the namedtuple named class_name.

        The class is looked up in this module's globals and populated
        positionally via ._make().
        """
        values = self.decode(json_data)
        return globals()[class_name]._make(values)
def remote_call(hostname, remote_client, fun_name, logger, param=None):
    """Run `fun_name` from module `remote_client` on `hostname` via remoto.

    `param`, when given, is JSON-encoded with JSONDataEncoder and passed as
    the single argument of the remote function.  Returns whatever string the
    remote function sent back over the channel.

    NOTE(review): `logger` is accepted but unused; kept for interface
    compatibility with callers.
    """
    conn = remoto.Connection(hostname)
    try:
        # Push the client module's source to the remote interpreter.
        remote_mgr = conn.import_module(remote_client)
        data = None if param is None else JSONDataEncoder().encode(param)
        if data is None:
            return getattr(remote_mgr, fun_name)()
        return getattr(remote_mgr, fun_name)(data)
    finally:
        # Always tear the connection down; the original leaked it whenever
        # the remote call raised.
        conn.exit()
def execute_thread(param):
    """Thread-pool entry point: unpack a RemoteParam and do the remote call."""
    p = param
    return remote_call(p.remote_host, p.remote_client, p.remote_fun_name,
                       p.logger, p.param)
def concurrent_execution_remote_call(logger, param_list, max_retry_time=1, default_works_num=10):
    """Fan the remote calls in `param_list` out over a thread pool.

    Each remote call must answer within 120 seconds.  Returns a dict shaped
    like get_result(): overall status (500 if any node reported 500), the
    joined failure descriptions, and the joined debug text.

    NOTE(review): `max_retry_time` is accepted but never used -- kept for
    interface compatibility.
    """
    try:
        executor = NewThreadPoolExecutor(max_workers=default_works_num)
        remote_result = {'status': 200, 'desc': '', 'debug': ''}
        try:
            for result in executor.new_map(execute_thread, 120, param_list):
                now_time = datetime.datetime.now().strftime('%F %T')
                logger.warning("[samba] %s remote debug : %s ", now_time, result.encode("utf-8"))
                # The remote side answers with a serialized dict literal.
                # eval() is kept for protocol compatibility; only trusted
                # cluster nodes may answer on this channel.
                result1 = eval(result.encode("utf-8"))
                if result1['status'] == 500:
                    remote_result['status'] = result1['status']
                    remote_result['desc'] = result1['desc'] if remote_result['desc'] == '' else remote_result['desc'] + ' \n\t' + result1['desc']
                    # Bug fix: the original collected debug text into a local
                    # that was never returned; surface it to the caller.
                    remote_result['debug'] += result1['debug']
                elif result1['status'] != 200:
                    # Non-fatal anomaly: keep the debug text but do not flip
                    # the overall status.
                    remote_result['debug'] += result1['debug']
        finally:
            # The original never shut the pool down, leaking worker threads.
            executor.shutdown()
        return remote_result
    except Exception as err:
        return get_result(500, ' {} ; {}'.format(err, traceback.format_exc()))
def get_host_name(host):
    """Reverse-resolve an IP address to its primary host name.

    Returns None when the address cannot be resolved.  The original let the
    resolver exception escape, so the `hostname is None` check in
    remote_execute() could never fire; catching the socket errors here makes
    that error path actually work.
    """
    try:
        return socket.gethostbyaddr(host)[0]
    except (socket.herror, socket.gaierror, socket.error):
        return None
def remote_execute(logger, ip_list, param, max_retry_time=3):
    """Resolve every IP to a host name and dispatch `param` to all of them.

    Fails fast with a 500 result as soon as one address cannot be resolved;
    otherwise delegates to concurrent_execution_remote_call().
    """
    params = []
    for addr in ip_list:
        name = get_host_name(addr)
        if name is None:
            return get_result(500, u'The host name with IP {} was not obtained'.format(addr))
        params.append(RemoteParam(name, write_keep, 'dispatcher', logger, param))
    return concurrent_execution_remote_call(logger, params, 1)
def get_result(status=200, desc=''):
    """Build the uniform result dict: status code, description, empty debug."""
    result = dict()
    result['status'] = status
    result['desc'] = desc
    result['debug'] = ''
    return result
# --- demo / driver code: push the keepalived config to several nodes ---
# First broadcast: both nodes get priority 100 for VIP 192.168.110.200.
keepaliveddata=keepalivedData('writeKeepalived',100,'192.168.110.200','ens33')
status=remote_execute(logger,['192.168.110.193','192.168.110.191'], keepaliveddata)
print status
# Then raise one node to priority 101 so it wins the MASTER election.
keepaliveddata=keepalivedData('writeKeepalived',101,'192.168.110.200','ens33')
status=remote_execute(logger,['192.168.110.193'], keepaliveddata)
print status
其他节点(客户端)执行代码
files_share_client.py
# -*- coding: utf-8 -*-
# Client-side module: its source is pushed to each node over remoto/execnet
# and executed there (see the __channelexec__ guard at the bottom).
import os,re
import traceback
import sys
import json
# Python 2 only: force utf-8 as the default codec for implicit conversions.
reload(sys)
sys.setdefaultencoding('utf-8')
# Make the ceph-mgr dashboard tree importable for JSONDataDecoder below.
os.chdir('/usr/share/ceph/mgr/dashboard')
sys.path.append('/usr/share/ceph/mgr/dashboard')
from services.manage.service.cephfs.files_share_samba.create_samba_fs_service import JSONDataDecoder
def get_result(status=200, desc='', debug=''):
    """Uniform result payload sent back to the server side."""
    return dict(status=status, desc=desc, debug=debug)
def init_config_file():
    """Ensure keepalived.conf includes our dedicated include file.

    Scans /etc/keepalived/keepalived.conf for an `include` of
    keepalived_mgr_servers.conf and appends one when missing.  Returns the
    text after the last `interface` keyword found in the main config, or
    None when the file is absent or has no interface line.
    """
    kl_conf_path = '/etc/keepalived/keepalived.conf'
    include_found = False
    interface = None
    if os.path.isfile(kl_conf_path):
        # with-blocks close the handles even when I/O raises; the original
        # leaked the file descriptors on any exception.
        with open(kl_conf_path, 'r') as f:
            lines = f.readlines()
        for line in lines:
            line = line.strip()
            # NOTE(review): re.L is effectively a no-op for these patterns
            # (and invalid on str patterns under Python 3); kept to
            # preserve the original Python-2 behavior.
            if re.search(r'include\s+/etc/keepalived/keepalived_mgr_servers.conf', line, re.L):
                include_found = True
            res = re.search(r'(?<=interface).+', line, re.L)
            if res:
                interface = res.group(0)
    if not include_found:
        with open(kl_conf_path, 'a') as f:
            f.write('include /etc/keepalived/keepalived_mgr_servers.conf\n')
    return interface
def writeKeepalived_config_on_node(param):
    """Render this node's VRRP instance and (re)load keepalived.

    `param` carries priority / virtual_ipaddress / interface.  The interface
    already configured in keepalived.conf wins over param.interface.
    Returns a get_result() dict (500 on reload/restart failure).
    """
    interface = init_config_file()
    kl_conf_path = "/etc/keepalived/keepalived_mgr_servers.conf"
    Keepalived_config = """
vrrp_instance VI_MGR_SERVISES {
    state BACKUP
    interface """ + (interface.strip() if interface else str(param.interface)) + """
    virtual_router_id 125
    priority """ + str(param.priority) + """
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 100
    }
    virtual_ipaddress {
        """ + str(param.virtual_ipaddress) + """
    }
}
"""
    # Close the handle even if the write fails (the original leaked it).
    with open(kl_conf_path, 'w') as f:
        f.write(Keepalived_config)
    if os.path.isfile("/var/run/keepalived.pid"):
        # keepalived is already running: SIGHUP makes it re-read the config.
        # NOTE(review): the shell command is a fixed string -- do not
        # interpolate user input here without switching to subprocess.
        cmd = "kill -HUP $(cat /var/run/keepalived.pid)"
        ret_code = os.system(cmd)
        if ret_code != 0:
            return get_result(500, 'failed to reload Keepalived')
    else:
        # Not running yet: a full restart picks up the new include file.
        cmd = "systemctl restart keepalived"
        ret_code = os.system(cmd)
        if ret_code != 0:
            return get_result(500, 'failed to restart Keepalived')
    return get_result(200, 'succeed!')
def writeKeepalived(param):
    """Top-level handler: write the config, mapping any crash to a 500 result."""
    try:
        return writeKeepalived_config_on_node(param)
    except Exception as err:
        detail = ' {} ; {}'.format(err, traceback.format_exc())
        return get_result(500, '', detail)
def dispatcher(data):
    """Entry point invoked from the server: decode the payload and run it.

    `data` is the JSON the server produced for a keepalivedData namedtuple;
    its fun_name field names the handler to run (e.g. 'writeKeepalived').
    Always answers with a JSON-encoded get_result() dict, never raises.
    """
    try:
        param = JSONDataDecoder().decode2bj('keepalivedData', data)
        # Security fix: look the handler up in this module's namespace
        # instead of eval()-ing the incoming string -- eval would execute an
        # arbitrary expression supplied by the peer.
        handler = globals().get(param.fun_name)
        if not callable(handler):
            ret_code = get_result(500, 'unknown function {}'.format(param.fun_name))
        else:
            ret_code = handler(param)
        return json.dumps(ret_code).decode("unicode-escape")
    except Exception as err:
        ret_code = get_result(500, '', ' {} ; {}'.format(err, traceback.format_exc()))
        return json.dumps(ret_code).decode("unicode-escape")
# execnet executes this module remotely with __name__ == '__channelexec__'.
if __name__ == '__channelexec__':
    # Each item received over the channel is an expression string such as
    # "dispatcher(...)"; eval-and-reply is the execnet/remoto call protocol.
    # NOTE(review): eval() here fully trusts the server end of the channel.
    for item in channel:
        channel.send(eval(item))