...
# Path bootstrap: make the parent directory importable so that sibling
# packages resolve when this file is run directly.
sys.path.append(
    os.path.abspath(os.path.join(os.path.dirname(__file__), "../")))
class Server(object):
    """
    Server class loads and starts Handlers and Collectors
    """

    def __init__(self, configfile):
        """Remember *configfile* and reset state; run() does the real work."""
        # Path of the configuration file; parsed later by run().
        self.configfile = configfile
        # Shared logger for the server and the processes it spawns.
        self.log = logging.getLogger('diamond')
        # Parsed configuration, populated in run().
        self.config = None
        # Inter-process metric queue, created in run().
        self.metric_queue = None
        # Handler instances and the queue-backed handler given to collectors,
        # both loaded in run().
        self.handlers = []
        self.handler_queue = []
        self.modules = {}
    def run(self):
        """
        Load handler and collector classes and then start collectors.

        Starts one daemon "Handlers" process that drains the shared metric
        queue through the configured handlers, then loops forever
        reconciling collector child processes against the enabled set in
        the config.  SIGHUP triggers a config reload.
        """
        #######################################################################
        # Config
        #######################################################################
        self.config = load_config(self.configfile)  # parse the config file
        # Discover collector classes on the configured path.
        collectors = load_collectors(self.config['server']['collectors_path'])
        metric_queue_size = int(self.config['server'].get('metric_queue_size',
                                                          16384))
        # Queue shared between collector processes (producers) and the
        # handler process (consumer).
        # NOTE(review): self.manager is not set in __init__ in this file —
        # presumably created elsewhere before run() is called; confirm.
        self.metric_queue = self.manager.Queue(maxsize=metric_queue_size)
        self.log.debug('metric_queue_size: %d', metric_queue_size)

        if 'handlers_path' in self.config['server']:
            handlers_path = self.config['server']['handlers_path']

            # Make an list if not one
            if isinstance(handlers_path, basestring):
                handlers_path = handlers_path.split(',')
                handlers_path = map(str.strip, handlers_path)
            self.config['server']['handlers_path'] = handlers_path

            load_include_path(handlers_path)

        if 'handlers' not in self.config['server']:
            self.log.critical('handlers missing from server section in config')
            sys.exit(1)
        handlers = self.config['server'].get('handlers')
        if isinstance(handlers, basestring):
            handlers = [handlers]

        # Prevent the Queue Handler from being a normal handler
        if 'diamond.handler.queue.QueueHandler' in handlers:
            handlers.remove('diamond.handler.queue.QueueHandler')

        # Load the real handler instances.
        self.handlers = load_handlers(self.config, handlers)

        QueueHandler = load_dynamic_class(
            'diamond.handler.queue.QueueHandler',
            Handler
        )

        # The queue-backed handler that collectors publish through.
        self.handler_queue = QueueHandler(
            config=self.config, queue=self.metric_queue, log=self.log)

        # Single daemon process; handler_process drains metric_queue
        # through self.handlers.
        process = multiprocessing.Process(
            name="Handlers",
            target=handler_process,
            args=(self.handlers, self.metric_queue, self.log),
        )
        process.daemon = True
        process.start()

        # Turn SIGHUP into SIGHUPException so the loop below can reload.
        if hasattr(signal, 'SIGHUP'):
            signal.signal(signal.SIGHUP, signal_to_exception)

        #######################################################################

        while True:
            try:
                # Names of all live child processes.
                active_children = multiprocessing.active_children()
                running_processes = []
                for process in active_children:
                    running_processes.append(process.name)
                running_processes = set(running_processes)

                ##############################################################
                # Collectors
                ##############################################################
                # Collectors that should be running per the config.
                running_collectors = []
                for collector, config in self.config['collectors'].iteritems():
                    if config.get('enabled', False) is not True:
                        continue
                    running_collectors.append(collector)
                running_collectors = set(running_collectors)

                # Collectors that are running but shouldn't be
                for process_name in running_processes - running_collectors:
                    if 'Collector' not in process_name:
                        continue
                    for process in active_children:
                        if process.name == process_name:
                            process.terminate()

                # Map short class name -> collector class.
                collector_classes = dict(
                    (cls.__name__.split('.')[-1], cls)
                    for cls in collectors.values()
                )

                load_delay = self.config['server'].get('collectors_load_delay',
                                                       1.0)
                # Enabled collectors that have no live process yet.
                for process_name in running_collectors - running_processes:
                    # To handle running multiple collectors concurrently, we
                    # split on white space and use the first word as the
                    # collector name to spin
                    collector_name = process_name.split()[0]

                    if 'Collector' not in collector_name:
                        continue

                    if collector_name not in collector_classes:
                        self.log.error('Can not find collector %s',
                                       collector_name)
                        continue
                    # Collectors publish only through the queue handler.
                    collector = initialize_collector(
                        collector_classes[collector_name],
                        name=process_name,
                        configfile=self.configfile,
                        handlers=[self.handler_queue])

                    if collector is None:
                        self.log.error('Failed to load collector %s',
                                       process_name)
                        continue

                    # Splay the loads
                    time.sleep(float(load_delay))

                    # One daemon process per collector.
                    process = multiprocessing.Process(
                        name=process_name,
                        target=collector_process,
                        args=(collector, self.metric_queue, self.log)
                    )
                    process.daemon = True
                    process.start()
                ##############################################################

                time.sleep(1)

            except SIGHUPException:
                # SIGHUP: reload config and collector classes, then
                # re-install the signal handler.
                # ignore further SIGHUPs for now
                original_sighup_handler = signal.getsignal(signal.SIGHUP)
                signal.signal(signal.SIGHUP, signal.SIG_IGN)

                self.log.info('Reloading state due to HUP')
                self.config = load_config(self.configfile)
                collectors = load_collectors(
                    self.config['server']['collectors_path']