from datetime import datetime

from pytz import utc

from apscheduler.events import (EVENT_JOB_MAX_INSTANCES, EVENT_JOB_SUBMITTED,
                                JobSubmissionEvent)
from apscheduler.executors.base import MaxInstancesReachedError
from apscheduler.executors.base import run_job as aps_run_job
from apscheduler.executors.pool import ProcessPoolExecutor
from apscheduler.schedulers.background import BackgroundScheduler
scheduler = None
def init_scheduler():
    """Create and start the global BackgroundScheduler singleton.

    Idempotent: if a scheduler has already been created, this is a no-op.
    The original version unconditionally rebound the global, which would
    leak the threads and process pool of a previously started scheduler
    on a second call.
    """
    global scheduler
    if scheduler is not None:
        # Already initialised; don't start (and leak) a second scheduler.
        return
    scheduler = BackgroundScheduler(
        executors={'processpool': ProcessPoolExecutor(10)})
    scheduler.start()
def get_job_by_id(job_id):
    """Look up a job on the global scheduler by its id.

    Returns the apscheduler Job for *job_id*, or a falsy value when the
    scheduler knows no such job. (``global`` is not needed here: the
    function only reads the module-level ``scheduler`` binding.)
    """
    return scheduler.get_job(job_id)
def run_job(job_id):
    """Run the job identified by *job_id* immediately, ignoring its trigger.

    Executes the job in-process via apscheduler's internal ``run_job``
    helper (imported as ``aps_run_job``), then dispatches the resulting
    events on the global scheduler so registered listeners still fire.

    NOTE(review): this definition is shadowed by the second ``run_job``
    defined later in this file, and ``ResultMaker`` is not defined
    anywhere in view — presumably a project-level response wrapper;
    confirm against callers.
    """
    job_exist = get_job_by_id(job_id)
    if job_exist:
        # Found the job, so run it right away.
        # Signature is run_job(job, jobstore_alias, run_times, logger_name);
        # it returns the execution events produced by the run.
        events = aps_run_job(job_exist, 'default', [datetime.now(utc)], __name__)
        # Hand the results to the scheduler's listeners.
        global scheduler
        for event in events:
            # _dispatch_event is a private scheduler API; it may change
            # between apscheduler versions.
            scheduler._dispatch_event(event)
        return ResultMaker(200, "success", [])
    else:
        return ResultMaker(404, "not found", [])
# This may not be widely useful — a quick search turned up no existing
# solution, so I wrote my own.
# apscheduler actually ships a way to run a job immediately, but the job
# has to be looked up first.
# The version above can run several instances of a job at the same point in
# time; to allow only one instance at a time, replicate the logic of the
# scheduler's _process_jobs method, as done below.
def run_job(job_id, jobstore_alias='default'):
    """Submit the job identified by *job_id* to its executor immediately.

    Mirrors the submission logic of apscheduler's
    ``BaseScheduler._process_jobs`` so that ``max_instances`` is honoured:
    if the job is already running at its instance limit, submission is
    skipped and an ``EVENT_JOB_MAX_INSTANCES`` event is dispatched instead
    of a second instance being started.

    Fix: the original body referenced ``MaxInstancesReachedError``,
    ``JobSubmissionEvent``, ``EVENT_JOB_MAX_INSTANCES`` and
    ``EVENT_JOB_SUBMITTED`` without importing them (NameError at runtime);
    the imports are now pulled in at module level.

    :param job_id: id of a job previously added to the global scheduler
    :param jobstore_alias: jobstore name reported in the dispatched
        ``JobSubmissionEvent`` (defaults to ``'default'``)
    :return: True if the job was found and submitted; False when the job
        is missing, its instance limit was reached, or submission failed
        (callers that ignored the old ``None`` return are unaffected)
    """
    global scheduler
    job = get_job_by_id(job_id)
    if not job:
        return False
    executor = scheduler._lookup_executor(job.executor)
    run_times = [datetime.now(utc)]
    try:
        executor.submit_job(job, run_times)
    except MaxInstancesReachedError:
        # Too many live instances of this job: skip, but tell listeners why.
        scheduler._logger.warning(
            'Execution of job "%s" skipped: maximum number of running '
            'instances reached (%d)', job, job.max_instances)
        event = JobSubmissionEvent(EVENT_JOB_MAX_INSTANCES, job.id,
                                   jobstore_alias, run_times)
        scheduler._dispatch_event(event)
        return False
    except BaseException:
        # Mirrors apscheduler's own broad catch in _process_jobs: log and
        # keep going rather than propagate executor failures to the caller.
        scheduler._logger.exception('Error submitting job "%s" to executor "%s"',
                                    job, job.executor)
        return False
    else:
        event = JobSubmissionEvent(EVENT_JOB_SUBMITTED, job.id,
                                   jobstore_alias, run_times)
        scheduler._dispatch_event(event)
        return True