Commit 398a74a4 authored by Vladislav Rykov

Periodic server maintenance task added. Confirmed config messages and data files are removed during the maintenance task.
parent aab39a2e
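
The commit message describes the maintenance task only at a high level; the diff below vendors the APScheduler job stores and schedulers it runs on. As a hedged sketch (run_maintenance, the job id, and the 24-hour interval are hypothetical, not taken from this commit), a periodic maintenance job could be registered like this:

from apscheduler.schedulers.background import BackgroundScheduler

def run_maintenance():
    # hypothetical body: remove confirmed config messages and stale data files
    pass

scheduler = BackgroundScheduler()
scheduler.add_job(run_maintenance, 'interval', hours=24,
                  id='server_maintenance', replace_existing=True)
scheduler.start()
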
from __future__ import absolute_import
from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError
from apscheduler.util import datetime_to_utc_timestamp
class MemoryJobStore(BaseJobStore):
"""
Stores jobs in an array in RAM. Provides no persistence support.
Plugin alias: ``memory``
"""
def __init__(self):
super(MemoryJobStore, self).__init__()
# list of (job, timestamp), sorted by next_run_time and job id (ascending)
self._jobs = []
self._jobs_index = {} # id -> (job, timestamp) lookup table
def lookup_job(self, job_id):
return self._jobs_index.get(job_id, (None, None))[0]
def get_due_jobs(self, now):
now_timestamp = datetime_to_utc_timestamp(now)
pending = []
for job, timestamp in self._jobs:
if timestamp is None or timestamp > now_timestamp:
break
pending.append(job)
return pending
def get_next_run_time(self):
return self._jobs[0][0].next_run_time if self._jobs else None
def get_all_jobs(self):
return [j[0] for j in self._jobs]
def add_job(self, job):
if job.id in self._jobs_index:
raise ConflictingIdError(job.id)
timestamp = datetime_to_utc_timestamp(job.next_run_time)
index = self._get_job_index(timestamp, job.id)
self._jobs.insert(index, (job, timestamp))
self._jobs_index[job.id] = (job, timestamp)
def update_job(self, job):
old_job, old_timestamp = self._jobs_index.get(job.id, (None, None))
if old_job is None:
raise JobLookupError(job.id)
# If the next run time has not changed, simply replace the job in its present index.
# Otherwise, reinsert the job to the list to preserve the ordering.
old_index = self._get_job_index(old_timestamp, old_job.id)
new_timestamp = datetime_to_utc_timestamp(job.next_run_time)
if old_timestamp == new_timestamp:
self._jobs[old_index] = (job, new_timestamp)
else:
del self._jobs[old_index]
new_index = self._get_job_index(new_timestamp, job.id)
self._jobs.insert(new_index, (job, new_timestamp))
self._jobs_index[old_job.id] = (job, new_timestamp)
def remove_job(self, job_id):
job, timestamp = self._jobs_index.get(job_id, (None, None))
if job is None:
raise JobLookupError(job_id)
index = self._get_job_index(timestamp, job_id)
del self._jobs[index]
del self._jobs_index[job.id]
def remove_all_jobs(self):
self._jobs = []
self._jobs_index = {}
def shutdown(self):
self.remove_all_jobs()
def _get_job_index(self, timestamp, job_id):
"""
Returns the index of the given job, or if it's not found, the index where the job should be
inserted based on the given timestamp.
:type timestamp: int
:type job_id: str
"""
lo, hi = 0, len(self._jobs)
timestamp = float('inf') if timestamp is None else timestamp
while lo < hi:
mid = (lo + hi) // 2
mid_job, mid_timestamp = self._jobs[mid]
mid_timestamp = float('inf') if mid_timestamp is None else mid_timestamp
if mid_timestamp > timestamp:
hi = mid
elif mid_timestamp < timestamp:
lo = mid + 1
elif mid_job.id > job_id:
hi = mid
elif mid_job.id < job_id:
lo = mid + 1
else:
return mid
return lo
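
A minimal usage sketch (the job function and id are illustrative). MemoryJobStore is APScheduler's default store, so configuring it explicitly is optional; _get_job_index keeps _jobs bisect-sorted by (timestamp, id) and maps a None run time to +infinity, so paused jobs sort last.

from apscheduler.jobstores.memory import MemoryJobStore
from apscheduler.schedulers.background import BackgroundScheduler

def tick():
    print('in-memory job ran')

scheduler = BackgroundScheduler(jobstores={'default': MemoryJobStore()})
scheduler.add_job(tick, 'interval', seconds=30, id='demo')
scheduler.start()  # jobs are lost when the process exits; no persistence
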
from __future__ import absolute_import
import warnings
from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError
from apscheduler.util import maybe_ref, datetime_to_utc_timestamp, utc_timestamp_to_datetime
from apscheduler.job import Job
try:
import cPickle as pickle
except ImportError: # pragma: nocover
import pickle
try:
from bson.binary import Binary
from pymongo.errors import DuplicateKeyError
from pymongo import MongoClient, ASCENDING
except ImportError: # pragma: nocover
raise ImportError('MongoDBJobStore requires PyMongo installed')
class MongoDBJobStore(BaseJobStore):
"""
Stores jobs in a MongoDB database. Any leftover keyword arguments are directly passed to
pymongo's `MongoClient
<http://api.mongodb.org/python/current/api/pymongo/mongo_client.html#pymongo.mongo_client.MongoClient>`_.
Plugin alias: ``mongodb``
:param str database: database to store jobs in
:param str collection: collection to store jobs in
:param client: a :class:`~pymongo.mongo_client.MongoClient` instance to use instead of
providing connection arguments
:param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the
highest available
"""
def __init__(self, database='apscheduler', collection='jobs', client=None,
pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args):
super(MongoDBJobStore, self).__init__()
self.pickle_protocol = pickle_protocol
if not database:
raise ValueError('The "database" parameter must not be empty')
if not collection:
raise ValueError('The "collection" parameter must not be empty')
if client:
self.client = maybe_ref(client)
else:
connect_args.setdefault('w', 1)
self.client = MongoClient(**connect_args)
self.collection = self.client[database][collection]
def start(self, scheduler, alias):
super(MongoDBJobStore, self).start(scheduler, alias)
self.collection.ensure_index('next_run_time', sparse=True)
@property
def connection(self):
warnings.warn('The "connection" member is deprecated -- use "client" instead',
DeprecationWarning)
return self.client
def lookup_job(self, job_id):
document = self.collection.find_one(job_id, ['job_state'])
return self._reconstitute_job(document['job_state']) if document else None
def get_due_jobs(self, now):
timestamp = datetime_to_utc_timestamp(now)
return self._get_jobs({'next_run_time': {'$lte': timestamp}})
def get_next_run_time(self):
document = self.collection.find_one({'next_run_time': {'$ne': None}},
projection=['next_run_time'],
sort=[('next_run_time', ASCENDING)])
return utc_timestamp_to_datetime(document['next_run_time']) if document else None
def get_all_jobs(self):
jobs = self._get_jobs({})
self._fix_paused_jobs_sorting(jobs)
return jobs
def add_job(self, job):
try:
self.collection.insert({
'_id': job.id,
'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
'job_state': Binary(pickle.dumps(job.__getstate__(), self.pickle_protocol))
})
except DuplicateKeyError:
raise ConflictingIdError(job.id)
def update_job(self, job):
changes = {
'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
'job_state': Binary(pickle.dumps(job.__getstate__(), self.pickle_protocol))
}
result = self.collection.update({'_id': job.id}, {'$set': changes})
if result and result['n'] == 0:
raise JobLookupError(job.id)
def remove_job(self, job_id):
result = self.collection.remove(job_id)
if result and result['n'] == 0:
raise JobLookupError(job_id)
def remove_all_jobs(self):
self.collection.remove()
def shutdown(self):
self.client.close()
def _reconstitute_job(self, job_state):
job_state = pickle.loads(job_state)
job = Job.__new__(Job)
job.__setstate__(job_state)
job._scheduler = self._scheduler
job._jobstore_alias = self._alias
return job
def _get_jobs(self, conditions):
jobs = []
failed_job_ids = []
for document in self.collection.find(conditions, ['_id', 'job_state'],
sort=[('next_run_time', ASCENDING)]):
try:
jobs.append(self._reconstitute_job(document['job_state']))
except BaseException:
self._logger.exception('Unable to restore job "%s" -- removing it',
document['_id'])
failed_job_ids.append(document['_id'])
# Remove all the jobs we failed to restore
if failed_job_ids:
self.collection.remove({'_id': {'$in': failed_job_ids}})
return jobs
def __repr__(self):
return '<%s (client=%s)>' % (self.__class__.__name__, self.client)
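
A minimal usage sketch, assuming a local MongoDB on the default port; the job function and ids are illustrative. Jobs destined for a persistent store must be module-level callables so their state can be pickled and later reconstituted.

from apscheduler.jobstores.mongodb import MongoDBJobStore
from apscheduler.schedulers.background import BackgroundScheduler

def heartbeat():
    print('mongo-backed job ran')

# Leftover keyword arguments (here: host) are passed through to MongoClient.
store = MongoDBJobStore(database='apscheduler', collection='jobs',
                        host='mongodb://localhost:27017')
scheduler = BackgroundScheduler(jobstores={'mongo': store})
scheduler.add_job(heartbeat, 'interval', minutes=5, jobstore='mongo',
                  id='mongo_demo', replace_existing=True)
scheduler.start()

replace_existing=True matters with persistent stores: without it, re-adding the same job id after a restart raises ConflictingIdError.
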
from __future__ import absolute_import
from datetime import datetime
from pytz import utc
import six
from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError
from apscheduler.util import datetime_to_utc_timestamp, utc_timestamp_to_datetime
from apscheduler.job import Job
try:
import cPickle as pickle
except ImportError: # pragma: nocover
import pickle
try:
from redis import Redis
except ImportError: # pragma: nocover
raise ImportError('RedisJobStore requires redis installed')
class RedisJobStore(BaseJobStore):
"""
Stores jobs in a Redis database. Any leftover keyword arguments are directly passed to redis's
:class:`~redis.StrictRedis`.
Plugin alias: ``redis``
:param int db: the database number to store jobs in
:param str jobs_key: key to store jobs in
:param str run_times_key: key to store the jobs' run times in
:param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the
highest available
"""
def __init__(self, db=0, jobs_key='apscheduler.jobs', run_times_key='apscheduler.run_times',
pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args):
super(RedisJobStore, self).__init__()
if db is None:
raise ValueError('The "db" parameter must not be empty')
if not jobs_key:
raise ValueError('The "jobs_key" parameter must not be empty')
if not run_times_key:
raise ValueError('The "run_times_key" parameter must not be empty')
self.pickle_protocol = pickle_protocol
self.jobs_key = jobs_key
self.run_times_key = run_times_key
self.redis = Redis(db=int(db), **connect_args)
def lookup_job(self, job_id):
job_state = self.redis.hget(self.jobs_key, job_id)
return self._reconstitute_job(job_state) if job_state else None
def get_due_jobs(self, now):
timestamp = datetime_to_utc_timestamp(now)
job_ids = self.redis.zrangebyscore(self.run_times_key, 0, timestamp)
if job_ids:
job_states = self.redis.hmget(self.jobs_key, *job_ids)
return self._reconstitute_jobs(six.moves.zip(job_ids, job_states))
return []
def get_next_run_time(self):
next_run_time = self.redis.zrange(self.run_times_key, 0, 0, withscores=True)
if next_run_time:
return utc_timestamp_to_datetime(next_run_time[0][1])
def get_all_jobs(self):
job_states = self.redis.hgetall(self.jobs_key)
jobs = self._reconstitute_jobs(six.iteritems(job_states))
paused_sort_key = datetime(9999, 12, 31, tzinfo=utc)
return sorted(jobs, key=lambda job: job.next_run_time or paused_sort_key)
def add_job(self, job):
if self.redis.hexists(self.jobs_key, job.id):
raise ConflictingIdError(job.id)
with self.redis.pipeline() as pipe:
pipe.multi()
pipe.hset(self.jobs_key, job.id, pickle.dumps(job.__getstate__(),
self.pickle_protocol))
if job.next_run_time:
pipe.zadd(self.run_times_key,
{job.id: datetime_to_utc_timestamp(job.next_run_time)})
pipe.execute()
def update_job(self, job):
if not self.redis.hexists(self.jobs_key, job.id):
raise JobLookupError(job.id)
with self.redis.pipeline() as pipe:
pipe.hset(self.jobs_key, job.id, pickle.dumps(job.__getstate__(),
self.pickle_protocol))
if job.next_run_time:
pipe.zadd(self.run_times_key,
{job.id: datetime_to_utc_timestamp(job.next_run_time)})
else:
pipe.zrem(self.run_times_key, job.id)
pipe.execute()
def remove_job(self, job_id):
if not self.redis.hexists(self.jobs_key, job_id):
raise JobLookupError(job_id)
with self.redis.pipeline() as pipe:
pipe.hdel(self.jobs_key, job_id)
pipe.zrem(self.run_times_key, job_id)
pipe.execute()
def remove_all_jobs(self):
with self.redis.pipeline() as pipe:
pipe.delete(self.jobs_key)
pipe.delete(self.run_times_key)
pipe.execute()
def shutdown(self):
self.redis.connection_pool.disconnect()
def _reconstitute_job(self, job_state):
job_state = pickle.loads(job_state)
job = Job.__new__(Job)
job.__setstate__(job_state)
job._scheduler = self._scheduler
job._jobstore_alias = self._alias
return job
def _reconstitute_jobs(self, job_states):
jobs = []
failed_job_ids = []
for job_id, job_state in job_states:
try:
jobs.append(self._reconstitute_job(job_state))
except BaseException:
self._logger.exception('Unable to restore job "%s" -- removing it', job_id)
failed_job_ids.append(job_id)
# Remove all the jobs we failed to restore
if failed_job_ids:
with self.redis.pipeline() as pipe:
pipe.hdel(self.jobs_key, *failed_job_ids)
pipe.zrem(self.run_times_key, *failed_job_ids)
pipe.execute()
return jobs
def __repr__(self):
return '<%s>' % self.__class__.__name__
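
A minimal usage sketch, assuming a local Redis server; names are illustrative. The store keeps pickled job states in the jobs_key hash and run times in the run_times_key sorted set, which is what get_due_jobs queries with zrangebyscore.

from apscheduler.jobstores.redis import RedisJobStore
from apscheduler.schedulers.background import BackgroundScheduler

def heartbeat():
    print('redis-backed job ran')

# db selects the Redis database; extra kwargs (host, port) go to Redis().
store = RedisJobStore(db=0, host='localhost', port=6379)
scheduler = BackgroundScheduler(jobstores={'redis': store})
scheduler.add_job(heartbeat, 'interval', minutes=5, jobstore='redis',
                  id='redis_demo', replace_existing=True)
scheduler.start()
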
from __future__ import absolute_import
from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError
from apscheduler.util import maybe_ref, datetime_to_utc_timestamp, utc_timestamp_to_datetime
from apscheduler.job import Job
try:
import cPickle as pickle
except ImportError: # pragma: nocover
import pickle
try:
from rethinkdb import RethinkDB
except ImportError: # pragma: nocover
raise ImportError('RethinkDBJobStore requires rethinkdb installed')
class RethinkDBJobStore(BaseJobStore):
"""
Stores jobs in a RethinkDB database. Any leftover keyword arguments are directly passed to
rethinkdb's `RethinkdbClient <http://www.rethinkdb.com/api/#connect>`_.
Plugin alias: ``rethinkdb``
:param str database: database to store jobs in
:param str table: table to store jobs in
:param client: a :class:`rethinkdb.net.Connection` instance to use instead of providing
connection arguments
:param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the
highest available
"""
def __init__(self, database='apscheduler', table='jobs', client=None,
pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args):
super(RethinkDBJobStore, self).__init__()
if not database:
raise ValueError('The "database" parameter must not be empty')
if not table:
raise ValueError('The "table" parameter must not be empty')
self.database = database
self.table_name = table
self.table = None
self.client = client
self.pickle_protocol = pickle_protocol
self.connect_args = connect_args
self.r = RethinkDB()
self.conn = None
def start(self, scheduler, alias):
super(RethinkDBJobStore, self).start(scheduler, alias)
if self.client:
self.conn = maybe_ref(self.client)
else:
self.conn = self.r.connect(db=self.database, **self.connect_args)
if self.database not in self.r.db_list().run(self.conn):
self.r.db_create(self.database).run(self.conn)
if self.table_name not in self.r.table_list().run(self.conn):
self.r.table_create(self.table_name).run(self.conn)
if 'next_run_time' not in self.r.table(self.table_name).index_list().run(self.conn):
self.r.table(self.table_name).index_create('next_run_time').run(self.conn)
self.table = self.r.db(self.database).table(self.table_name)
def lookup_job(self, job_id):
results = list(self.table.get_all(job_id).pluck('job_state').run(self.conn))
return self._reconstitute_job(results[0]['job_state']) if results else None
def get_due_jobs(self, now):
return self._get_jobs(self.r.row['next_run_time'] <= datetime_to_utc_timestamp(now))
def get_next_run_time(self):
results = list(
self.table
.filter(self.r.row['next_run_time'] != None) # noqa
.order_by(self.r.asc('next_run_time'))
.map(lambda x: x['next_run_time'])
.limit(1)
.run(self.conn)
)
return utc_timestamp_to_datetime(results[0]) if results else None
def get_all_jobs(self):
jobs = self._get_jobs()
self._fix_paused_jobs_sorting(jobs)
return jobs
def add_job(self, job):
job_dict = {
'id': job.id,
'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
'job_state': self.r.binary(pickle.dumps(job.__getstate__(), self.pickle_protocol))
}
results = self.table.insert(job_dict).run(self.conn)
if results['errors'] > 0:
raise ConflictingIdError(job.id)
def update_job(self, job):
changes = {
'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
'job_state': self.r.binary(pickle.dumps(job.__getstate__(), self.pickle_protocol))
}
results = self.table.get_all(job.id).update(changes).run(self.conn)
# "skipped" is True when any result counter is nonzero; if every counter is
# zero, the update matched no document and the job does not exist.
skipped = False in map(lambda x: results[x] == 0, results.keys())
if results['skipped'] > 0 or results['errors'] > 0 or not skipped:
raise JobLookupError(job.id)
def remove_job(self, job_id):
results = self.table.get_all(job_id).delete().run(self.conn)
if results['deleted'] + results['skipped'] != 1:
raise JobLookupError(job_id)
def remove_all_jobs(self):
self.table.delete().run(self.conn)
def shutdown(self):
self.conn.close()
def _reconstitute_job(self, job_state):
job_state = pickle.loads(job_state)
job = Job.__new__(Job)
job.__setstate__(job_state)
job._scheduler = self._scheduler
job._jobstore_alias = self._alias
return job
def _get_jobs(self, predicate=None):
jobs = []
failed_job_ids = []
query = (self.table.filter(self.r.row['next_run_time'] != None).filter(predicate) # noqa
if predicate else self.table)
query = query.order_by('next_run_time', 'id').pluck('id', 'job_state')
for document in query.run(self.conn):
try:
jobs.append(self._reconstitute_job(document['job_state']))
except Exception:
self._logger.exception('Unable to restore job "%s" -- removing it', document['id'])
failed_job_ids.append(document['id'])
# Remove all the jobs we failed to restore
if failed_job_ids:
self.r.expr(failed_job_ids).for_each(
lambda job_id: self.table.get_all(job_id).delete()).run(self.conn)
return jobs
def __repr__(self):
connection = self.conn
return '<%s (connection=%s)>' % (self.__class__.__name__, connection)
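
A minimal usage sketch, assuming a local RethinkDB instance; names are illustrative. start() creates the database, table, and next_run_time index on first use, so no manual schema setup is needed.

from apscheduler.jobstores.rethinkdb import RethinkDBJobStore
from apscheduler.schedulers.background import BackgroundScheduler

def heartbeat():
    print('rethinkdb-backed job ran')

# Leftover keyword arguments (host, port) are passed to r.connect().
store = RethinkDBJobStore(database='apscheduler', table='jobs',
                          host='localhost', port=28015)
scheduler = BackgroundScheduler(jobstores={'rethinkdb': store})
scheduler.add_job(heartbeat, 'interval', minutes=5, jobstore='rethinkdb',
                  id='rethink_demo', replace_existing=True)
scheduler.start()
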
from __future__ import absolute_import
from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError
from apscheduler.util import maybe_ref, datetime_to_utc_timestamp, utc_timestamp_to_datetime
from apscheduler.job import Job
try:
import cPickle as pickle
except ImportError: # pragma: nocover
import pickle
try:
from sqlalchemy import (
create_engine, Table, Column, MetaData, Unicode, Float, LargeBinary, select)
from sqlalchemy.exc import IntegrityError
from sqlalchemy.sql.expression import null
except ImportError: # pragma: nocover
raise ImportError('SQLAlchemyJobStore requires SQLAlchemy installed')
class SQLAlchemyJobStore(BaseJobStore):
"""
Stores jobs in a database table using SQLAlchemy.
The table will be created if it doesn't exist in the database.
Plugin alias: ``sqlalchemy``
:param str url: connection string (see
:ref:`SQLAlchemy documentation <sqlalchemy:database_urls>` on this)
:param engine: an SQLAlchemy :class:`~sqlalchemy.engine.Engine` to use instead of creating a
new one based on ``url``
:param str tablename: name of the table to store jobs in
:param metadata: a :class:`~sqlalchemy.schema.MetaData` instance to use instead of creating a
new one
:param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the
highest available
:param str tableschema: name of the (existing) schema in the target database where the table
should be
:param dict engine_options: keyword arguments to :func:`~sqlalchemy.create_engine`
(ignored if ``engine`` is given)
"""
def __init__(self, url=None, engine=None, tablename='apscheduler_jobs', metadata=None,
pickle_protocol=pickle.HIGHEST_PROTOCOL, tableschema=None, engine_options=None):
super(SQLAlchemyJobStore, self).__init__()
self.pickle_protocol = pickle_protocol
metadata = maybe_ref(metadata) or MetaData()
if engine:
self.engine = maybe_ref(engine)
elif url:
self.engine = create_engine(url, **(engine_options or {}))
else:
raise ValueError('Need either "engine" or "url" defined')
# 191 = max key length in MySQL for InnoDB/utf8mb4 tables,
# 25 = precision that translates to an 8-byte float
self.jobs_t = Table(
tablename, metadata,
Column('id', Unicode(191, _warn_on_bytestring=False), primary_key=True),
Column('next_run_time', Float(25), index=True),
Column('job_state', LargeBinary, nullable=False),
schema=tableschema
)
def start(self, scheduler, alias):
super(SQLAlchemyJobStore, self).start(scheduler, alias)
self.jobs_t.create(self.engine, True)
def lookup_job(self, job_id):
selectable = select([self.jobs_t.c.job_state]).where(self.jobs_t.c.id == job_id)
job_state = self.engine.execute(selectable).scalar()
return self._reconstitute_job(job_state) if job_state else None
def get_due_jobs(self, now):
timestamp = datetime_to_utc_timestamp(now)
return self._get_jobs(self.jobs_t.c.next_run_time <= timestamp)
def get_next_run_time(self):
selectable = select([self.jobs_t.c.next_run_time]).\
where(self.jobs_t.c.next_run_time != null()).\
order_by(self.jobs_t.c.next_run_time).limit(1)
next_run_time = self.engine.execute(selectable).scalar()
return utc_timestamp_to_datetime(next_run_time)
def get_all_jobs(self):
jobs = self._get_jobs()
self._fix_paused_jobs_sorting(jobs)
return jobs
def add_job(self, job):
insert = self.jobs_t.insert().values(**{
'id': job.id,
'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
'job_state': pickle.dumps(job.__getstate__(), self.pickle_protocol)
})
try:
self.engine.execute(insert)
except IntegrityError:
raise ConflictingIdError(job.id)
def update_job(self, job):
update = self.jobs_t.update().values(**{
'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
'job_state': pickle.dumps(job.__getstate__(), self.pickle_protocol)
}).where(self.jobs_t.c.id == job.id)
result = self.engine.execute(update)
if result.rowcount == 0:
raise JobLookupError(job.id)
def remove_job(self, job_id):
delete = self.jobs_t.delete().where(self.jobs_t.c.id == job_id)
result = self.engine.execute(delete)
if result.rowcount == 0:
raise JobLookupError(job_id)
def remove_all_jobs(self):
delete = self.jobs_t.delete()
self.engine.execute(delete)
def shutdown(self):
self.engine.dispose()
def _reconstitute_job(self, job_state):
job_state = pickle.loads(job_state)
job_state['jobstore'] = self
job = Job.__new__(Job)
job.__setstate__(job_state)
job._scheduler = self._scheduler
job._jobstore_alias = self._alias
return job
def _get_jobs(self, *conditions):
jobs = []
selectable = select([self.jobs_t.c.id, self.jobs_t.c.job_state]).\
order_by(self.jobs_t.c.next_run_time)
selectable = selectable.where(*conditions) if conditions else selectable
failed_job_ids = set()
for row in self.engine.execute(selectable):
try:
jobs.append(self._reconstitute_job(row.job_state))
except BaseException:
self._logger.exception('Unable to restore job "%s" -- removing it', row.id)
failed_job_ids.add(row.id)
# Remove all the jobs we failed to restore
if failed_job_ids:
delete = self.jobs_t.delete().where(self.jobs_t.c.id.in_(failed_job_ids))
self.engine.execute(delete)
return jobs
def __repr__(self):
return '<%s (url=%s)>' % (self.__class__.__name__, self.engine.url)
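
A minimal usage sketch using SQLite; the URL and names are illustrative. The apscheduler_jobs table is created on scheduler start if it does not exist, and next_run_time is stored as a UTC timestamp in an indexed Float(25) column, so get_due_jobs is a simple range query.

from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
from apscheduler.schedulers.background import BackgroundScheduler

def heartbeat():
    print('sql-backed job ran')

store = SQLAlchemyJobStore(url='sqlite:///jobs.sqlite')
scheduler = BackgroundScheduler(jobstores={'default': store})
scheduler.add_job(heartbeat, 'interval', minutes=5, id='sql_demo',
                  replace_existing=True)
scheduler.start()
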
from __future__ import absolute_import
import os
from datetime import datetime
from pytz import utc
from kazoo.exceptions import NoNodeError, NodeExistsError
from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError
from apscheduler.util import maybe_ref, datetime_to_utc_timestamp, utc_timestamp_to_datetime
from apscheduler.job import Job
try:
import cPickle as pickle
except ImportError: # pragma: nocover
import pickle
try:
from kazoo.client import KazooClient
except ImportError: # pragma: nocover
raise ImportError('ZooKeeperJobStore requires Kazoo installed')
class ZooKeeperJobStore(BaseJobStore):
"""
Stores jobs in a ZooKeeper tree. Any leftover keyword arguments are directly passed to
kazoo's `KazooClient
<http://kazoo.readthedocs.io/en/latest/api/client.html>`_.
Plugin alias: ``zookeeper``
:param str path: path to store jobs in
:param client: a :class:`~kazoo.client.KazooClient` instance to use instead of
providing connection arguments
:param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the
highest available
"""
def __init__(self, path='/apscheduler', client=None, close_connection_on_exit=False,
pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args):
super(ZooKeeperJobStore, self).__init__()
self.pickle_protocol = pickle_protocol
self.close_connection_on_exit = close_connection_on_exit
if not path:
raise ValueError('The "path" parameter must not be empty')
self.path = path
if client:
self.client = maybe_ref(client)
else:
self.client = KazooClient(**connect_args)
self._ensured_path = False
def _ensure_paths(self):
if not self._ensured_path:
self.client.ensure_path(self.path)
self._ensured_path = True
def start(self, scheduler, alias):
super(ZooKeeperJobStore, self).start(scheduler, alias)
if not self.client.connected:
self.client.start()
def lookup_job(self, job_id):
self._ensure_paths()
node_path = os.path.join(self.path, job_id)
try:
content, _ = self.client.get(node_path)
doc = pickle.loads(content)
job = self._reconstitute_job(doc['job_state'])
return job
except BaseException:
return None
def get_due_jobs(self, now):
timestamp = datetime_to_utc_timestamp(now)
jobs = [job_def['job'] for job_def in self._get_jobs()
if job_def['next_run_time'] is not None and job_def['next_run_time'] <= timestamp]
return jobs
def get_next_run_time(self):
next_runs = [job_def['next_run_time'] for job_def in self._get_jobs()
if job_def['next_run_time'] is not None]
return utc_timestamp_to_datetime(min(next_runs)) if len(next_runs) > 0 else None
def get_all_jobs(self):
jobs = [job_def['job'] for job_def in self._get_jobs()]
self._fix_paused_jobs_sorting(jobs)
return jobs
def add_job(self, job):
self._ensure_paths()
node_path = os.path.join(self.path, str(job.id))
value = {
'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
'job_state': job.__getstate__()
}
data = pickle.dumps(value, self.pickle_protocol)
try:
self.client.create(node_path, value=data)
except NodeExistsError:
raise ConflictingIdError(job.id)
def update_job(self, job):
self._ensure_paths()
node_path = os.path.join(self.path, str(job.id))
changes = {
'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
'job_state': job.__getstate__()
}
data = pickle.dumps(changes, self.pickle_protocol)
try:
self.client.set(node_path, value=data)
except NoNodeError:
raise JobLookupError(job.id)
def remove_job(self, job_id):
self._ensure_paths()
node_path = os.path.join(self.path, str(job_id))
try:
self.client.delete(node_path)
except NoNodeError:
raise JobLookupError(job_id)
def remove_all_jobs(self):
try:
self.client.delete(self.path, recursive=True)
except NoNodeError:
pass
self._ensured_path = False
def shutdown(self):
if self.close_connection_on_exit:
self.client.stop()
self.client.close()
def _reconstitute_job(self, job_state):
job = Job.__new__(Job)
job.__setstate__(job_state)
job._scheduler = self._scheduler
job._jobstore_alias = self._alias
return job
def _get_jobs(self):
self._ensure_paths()
jobs = []
failed_job_ids = []
all_ids = self.client.get_children(self.path)
for node_name in all_ids:
try:
node_path = os.path.join(self.path, node_name)
content, stat = self.client.get(node_path)
doc = pickle.loads(content)
job_def = {
'job_id': node_name,
'next_run_time': doc['next_run_time'] if doc['next_run_time'] else None,
'job_state': doc['job_state'],
'job': self._reconstitute_job(doc['job_state']),
'creation_time': stat.ctime
}
jobs.append(job_def)
except BaseException:
self._logger.exception('Unable to restore job "%s" -- removing it', node_name)
failed_job_ids.append(node_name)
# Remove all the jobs we failed to restore
if failed_job_ids:
for failed_id in failed_job_ids:
self.remove_job(failed_id)
paused_sort_key = datetime(9999, 12, 31, tzinfo=utc)
return sorted(jobs, key=lambda job_def: (job_def['job'].next_run_time or paused_sort_key,
job_def['creation_time']))
def __repr__(self):
return '<%s (client=%s)>' % (self.__class__.__name__, self.client)
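
A minimal usage sketch, assuming a local ZooKeeper ensemble; names are illustrative. Each job is stored as a child znode of path, so remove_all_jobs is a recursive delete of that subtree.

from apscheduler.jobstores.zookeeper import ZooKeeperJobStore
from apscheduler.schedulers.background import BackgroundScheduler

def heartbeat():
    print('zookeeper-backed job ran')

# hosts is forwarded to KazooClient via **connect_args.
store = ZooKeeperJobStore(path='/apscheduler', hosts='localhost:2181')
scheduler = BackgroundScheduler(jobstores={'zookeeper': store})
scheduler.add_job(heartbeat, 'interval', minutes=5, jobstore='zookeeper',
                  id='zk_demo', replace_existing=True)
scheduler.start()
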
class SchedulerAlreadyRunningError(Exception):
"""Raised when attempting to start or configure the scheduler when it's already running."""
def __str__(self):
return 'Scheduler is already running'
class SchedulerNotRunningError(Exception):
"""Raised when attempting to shutdown the scheduler when it's not running."""
def __str__(self):
return 'Scheduler is not running'
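
These two exceptions guard scheduler state transitions; a short sketch of when the first one fires:

from apscheduler.schedulers import SchedulerAlreadyRunningError
from apscheduler.schedulers.background import BackgroundScheduler

scheduler = BackgroundScheduler()
scheduler.start()
try:
    scheduler.start()  # starting an already-running scheduler
except SchedulerAlreadyRunningError as exc:
    print(exc)  # "Scheduler is already running"
scheduler.shutdown()
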
from __future__ import absolute_import
from functools import wraps, partial
from apscheduler.schedulers.base import BaseScheduler
from apscheduler.util import maybe_ref
try:
import asyncio
except ImportError: # pragma: nocover
try:
import trollius as asyncio
except ImportError:
raise ImportError(
'AsyncIOScheduler requires either Python 3.4 or the asyncio package installed')
def run_in_event_loop(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
wrapped = partial(func, self, *args, **kwargs)
self._eventloop.call_soon_threadsafe(wrapped)
return wrapper
class AsyncIOScheduler(BaseScheduler):
"""
A scheduler that runs on an asyncio (:pep:`3156`) event loop.
The default executor can run jobs based on native coroutines (``async def``).
Extra options:
============== =============================================================
``event_loop`` AsyncIO event loop to use (defaults to the global event loop)
============== =============================================================
"""
_eventloop = None
_timeout = None
@run_in_event_loop
def shutdown(self, wait=True):
super(AsyncIOScheduler, self).shutdown(wait)
self._stop_timer()
def _configure(self, config):
self._eventloop = maybe_ref(config.pop('event_loop', None)) or asyncio.get_event_loop()
super(AsyncIOScheduler, self)._configure(config)
def _start_timer(self, wait_seconds):
self._stop_timer()
if wait_seconds is not None:
self._timeout = self._eventloop.call_later(wait_seconds, self.wakeup)
def _stop_timer(self):
if self._timeout:
self._timeout.cancel()
del self._timeout
@run_in_event_loop
def wakeup(self):
self._stop_timer()
wait_seconds = self._process_jobs()
self._start_timer(wait_seconds)
def _create_default_executor(self):
from apscheduler.executors.asyncio import AsyncIOExecutor
return AsyncIOExecutor()
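
A minimal usage sketch, assuming Python 3 with a native asyncio loop (the job coroutine is illustrative). start() only registers the wakeup timer via call_later; the program itself must run the event loop, and calls from other threads are marshalled onto it with call_soon_threadsafe.

import asyncio
from apscheduler.schedulers.asyncio import AsyncIOScheduler

async def tick():
    print('tick')

scheduler = AsyncIOScheduler()
scheduler.add_job(tick, 'interval', seconds=5)
scheduler.start()
try:
    asyncio.get_event_loop().run_forever()
except (KeyboardInterrupt, SystemExit):
    pass
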
from __future__ import absolute_import
from threading import Thread, Event
from apscheduler.schedulers.base import BaseScheduler
from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.util import asbool
class BackgroundScheduler(BlockingScheduler):
"""
A scheduler that runs in the background using a separate thread
(:meth:`~apscheduler.schedulers.base.BaseScheduler.start` will return immediately).
Extra options:
========== =============================================================================
``daemon`` Set the ``daemon`` option in the background thread (defaults to ``True``, see
`the documentation
<https://docs.python.org/3.4/library/threading.html#thread-objects>`_
for further details)
========== =============================================================================
"""
_thread = None
def _configure(self, config):
self._daemon = asbool(config.pop('daemon', True))
super(BackgroundScheduler, self)._configure(config)
def start(self, *args, **kwargs):
self._event = Event()
BaseScheduler.start(self, *args, **kwargs)
self._thread = Thread(target=self._main_loop, name='APScheduler')
self._thread.daemon = self._daemon
self._thread.start()
def shutdown(self, *args, **kwargs):
super(BackgroundScheduler, self).shutdown(*args, **kwargs)
self._thread.join()
del self._thread
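
A minimal usage sketch; the job function is illustrative. start() returns immediately because _main_loop runs in the dedicated 'APScheduler' thread, so the main thread must stay alive (the thread is a daemon by default).

import time
from apscheduler.schedulers.background import BackgroundScheduler

def tick():
    print('tick')

scheduler = BackgroundScheduler()
scheduler.add_job(tick, 'interval', seconds=10)
scheduler.start()  # returns immediately; the loop runs in the background thread
try:
    while True:
        time.sleep(1)
except (KeyboardInterrupt, SystemExit):
    scheduler.shutdown()  # sets the event and joins the thread
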
from __future__ import absolute_import
from threading import Event
from apscheduler.schedulers.base import BaseScheduler, STATE_STOPPED
from apscheduler.util import TIMEOUT_MAX
class BlockingScheduler(BaseScheduler):
"""
A scheduler that runs in the foreground
(:meth:`~apscheduler.schedulers.base.BaseScheduler.start` will block).
"""
_event = None
def start(self, *args, **kwargs):
self._event = Event()
super(BlockingScheduler, self).start(*args, **kwargs)
self._main_loop()
def shutdown(self, wait=True):
super(BlockingScheduler, self).shutdown(wait)
self._event.set()
def _main_loop(self):
wait_seconds = TIMEOUT_MAX
while self.state != STATE_STOPPED:
self._event.wait(wait_seconds)
self._event.clear()
wait_seconds = self._process_jobs()
def wakeup(self):
self._event.set()
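
A minimal usage sketch; the job function is illustrative. Here start() blocks in _main_loop, waiting on the internal Event between job batches, until shutdown() sets it.

from apscheduler.schedulers.blocking import BlockingScheduler

def tick():
    print('tick')

scheduler = BlockingScheduler()
scheduler.add_job(tick, 'interval', minutes=1)
try:
    scheduler.start()  # blocks until shutdown() or interrupt
except (KeyboardInterrupt, SystemExit):
    pass
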