# Use a Redis distributed lock so APScheduler can run on multiple nodes without duplicate job execution.
# coding: utf-8
from apscheduler.schedulers.tornado import TornadoScheduler
from apscheduler.events import EVENT_JOB_ERROR, EVENT_JOB_MISSED, EVENT_JOB_EXECUTED
from datetime import datetime, timedelta
from apscheduler.util import timedelta_seconds,TIMEOUT_MAX
from apscheduler.executors.base import MaxInstancesReachedError
from apscheduler.events import JobSubmissionEvent,EVENT_JOB_MAX_INSTANCES,EVENT_JOB_SUBMITTED
import six
from util.db_pool import get_redis
import session
#: constant indicating a scheduler's paused state (started but not processing jobs)
STATE_PAUSED = 2
# Redis key name used as the distributed scheduling lock/flag —
# presumably read/written via get_redis(); confirm against the lock logic below.
SCHEDU_FLAG = 'schedu_flag'
class Scheduler(TornadoScheduler):
def __new__(cls, *args, **kwargs):
    """Return the process-wide singleton ``Scheduler`` instance.

    ``*args``/``**kwargs`` are accepted for signature compatibility with
    callers but are intentionally NOT forwarded to ``object.__new__``:
    in Python 3, ``object.__new__`` raises ``TypeError`` when handed
    extra arguments it does not consume. ``__init__`` still receives the
    original arguments as usual.

    NOTE(review): not thread-safe — two threads racing the first call
    could each create an instance. Also, ``hasattr`` will find an
    ``_instance`` set on a parent class, so subclasses would share the
    parent's singleton; confirm that is the intended behavior.
    """
    if not hasattr(cls, '_instance'):
        # Pass only ``cls`` — forwarding *args/**kwargs here is the bug.
        cls._instance = super(Scheduler, cls).__new__(cls)
    return cls._instance
def _process_jobs(self):
"""
Iterates through jobs in every jobstore, starts jobs that are due and figures out how long
to wait for the next round.
If the ``get_due_jobs()`` call raises an exception, a new wakeup is scheduled in at least
`