9 | 9 |
10 | 10 | from celery import current_app, schedules
11 | 11 | from celery.beat import ScheduleEntry, Scheduler
| 12 | +from celery.signals import beat_init
12 | 13 | from celery.utils.log import get_logger
| 14 | +from redis.asyncio.lock import Lock
13 | 15 | from sqlalchemy import select
14 | 16 | from sqlalchemy.exc import DatabaseError, InterfaceError
15 | 17 |
28 | 30 | # This scheduler must wake up more frequently than the regular 5 minutes because it needs to take external changes to the schedule into account
29 | 31 | DEFAULT_MAX_INTERVAL = 5  # seconds
30 | 32 |
| 33 | +# Schedule lock duration, to avoid duplicate task creation
| 34 | +DEFAULT_MAX_LOCK_TIMEOUT = 300  # seconds
| 35 | +
| 36 | +# Lock check interval; should be shorter than the schedule lock duration
| 37 | +DEFAULT_LOCK_INTERVAL = 60  # seconds
| 38 | +
| 39 | +# Copied from:
| 40 | +# https://github.com/andymccurdy/redis-py/blob/master/redis/lock.py#L33
| 41 | +# Changes:
| 42 | +# The second line from the bottom: the original Lua script extends the lock
| 43 | +# by (remaining time + additional time), while the script here resets the
| 44 | +# TTL to the expected expiration time.
| 45 | +# KEYS[1] - lock name
| 46 | +# ARGV[1] - token
| 47 | +# ARGV[2] - additional milliseconds
| 48 | +# return 1 if the lock's time was extended, otherwise 0
| 49 | +LUA_EXTEND_TO_SCRIPT = """
| 50 | +    local token = redis.call('get', KEYS[1])
| 51 | +    if not token or token ~= ARGV[1] then
| 52 | +        return 0
| 53 | +    end
| 54 | +    local expiration = redis.call('pttl', KEYS[1])
| 55 | +    if not expiration then
| 56 | +        expiration = 0
| 57 | +    end
| 58 | +    if expiration < 0 then
| 59 | +        return 0
| 60 | +    end
| 61 | +    redis.call('pexpire', KEYS[1], ARGV[2])
| 62 | +    return 1
| 63 | +"""
| 64 | +
31 | 65 | logger = get_logger('fba.schedulers')
32 | 66 |
33 | 67 |
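
A note on the script above (not part of the diff): redis-py's stock Lock.extend(additional_time) adds the extra time to whatever TTL remains, so a periodic extension loop would keep growing the lock's lifetime, whereas LUA_EXTEND_TO_SCRIPT resets the TTL to exactly ARGV[2] milliseconds on each call. A minimal sketch of invoking it directly, assuming an async redis-py client on localhost and the module's LUA_EXTEND_TO_SCRIPT constant in scope; the key and token names are hypothetical:

import asyncio
from redis.asyncio import Redis

async def demo_extend_to() -> None:
    client = Redis()
    # Hypothetical lock key/token, set with an initial TTL of 120 s
    await client.set('demo:beat_lock', 'token-1', px=120_000)
    extend_to = client.register_script(LUA_EXTEND_TO_SCRIPT)
    # Resets the TTL to exactly 300 000 ms while 'token-1' still owns the key,
    # rather than adding 300 000 ms to the ~120 000 ms that remain
    assert await extend_to(keys=['demo:beat_lock'], args=['token-1', 300_000]) == 1
    assert await client.pttl('demo:beat_lock') <= 300_000

asyncio.run(demo_extend_to())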

@@ -260,13 +294,18 @@ def _unpack_options(
260 | 294 |
261 | 295 |
262 | 296 | class DatabaseScheduler(Scheduler):
| 297 | +    """Database scheduler"""
| 298 | +
263 | 299 |     Entry = ModelEntry
264 | 300 |
265 | 301 |     _schedule = None
266 | 302 |     _last_update = None
267 | 303 |     _initial_read = True
268 | 304 |     _heap_invalidated = False
269 | 305 |
| 306 | +    lock: Lock | None = None
| 307 | +    lock_key = f'{settings.CELERY_REDIS_PREFIX}:beat_lock'
| 308 | +
270 | 309 |     def __init__(self, *args, **kwargs):
271 | 310 |         self.app = kwargs['app']
272 | 311 |         self._dirty = set()

@@ -315,6 +354,16 @@ def reserve(self, entry):
315 | 354 |         self._dirty.add(new_entry.name)
316 | 355 |         return new_entry
317 | 356 |
| 357 | +    def close(self):
| 358 | +        """Override the parent method"""
| 359 | +        if self.lock:
| 360 | +            logger.info('beat: Releasing lock')
| 361 | +            if run_await(self.lock.owned)():
| 362 | +                run_await(self.lock.release)()
| 363 | +            self.lock = None
| 364 | +
| 365 | +        super().close()
| 366 | +
318 | 367 |     def sync(self):
319 | 368 |         """Override the parent method"""
320 | 369 |         _tried = set()
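
For reference (not part of the diff), owned() and release() used in close() above are the standard redis.asyncio.lock.Lock methods, and the lock is released only while this process still holds the token, so a beat instance whose lock has already expired cannot release a lock now held by another instance. The same pattern in a plain async context, with a hypothetical key name:

import asyncio
from redis.asyncio import Redis

async def demo_release_if_owned() -> None:
    client = Redis()
    lock = client.lock('demo:beat_lock', timeout=300)
    await lock.acquire()
    try:
        ...  # hold the lock while work is running
    finally:
        # Mirror DatabaseScheduler.close(): release only if we still own the token
        if await lock.owned():
            await lock.release()

asyncio.run(demo_release_if_owned())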

@@ -401,3 +450,48 @@ def schedule(self) -> dict[str, ModelEntry]:
401 | 450 |
402 | 451 |         # logger.debug(self._schedule)
403 | 452 |         return self._schedule
| 453 | +
| 454 | +
| 455 | +async def extend_scheduler_lock(lock):
| 456 | +    """
| 457 | +    Extend the scheduler lock
| 458 | +
| 459 | +    :param lock: scheduler lock
| 460 | +    :return:
| 461 | +    """
| 462 | +    while True:
| 463 | +        await asyncio.sleep(DEFAULT_LOCK_INTERVAL)
| 464 | +        if lock:
| 465 | +            try:
| 466 | +                await lock.extend(DEFAULT_MAX_LOCK_TIMEOUT)
| 467 | +            except Exception as e:
| 468 | +                logger.error(f'Failed to extend lock: {e}')
| 469 | +
| 470 | +
| 471 | +@beat_init.connect
| 472 | +def acquire_distributed_beat_lock(sender=None, *args, **kwargs):
| 473 | +    """
| 474 | +    Try to acquire the lock at beat startup
| 475 | +
| 476 | +    :param sender: the signal sender (the beat Service instance)
| 477 | +    :return:
| 478 | +    """
| 479 | +    scheduler = sender.scheduler
| 480 | +    if not scheduler.lock_key:
| 481 | +        return
| 482 | +
| 483 | +    logger.debug('beat: Acquiring lock...')
| 484 | +    lock = redis_client.lock(
| 485 | +        scheduler.lock_key,
| 486 | +        timeout=DEFAULT_MAX_LOCK_TIMEOUT,
| 487 | +        sleep=scheduler.max_interval,
| 488 | +    )
| 489 | +    # Overwrite redis-py's extend script, which would add the additional
| 490 | +    # timeout instead of extending to a new timeout
| 491 | +    lock.lua_extend = redis_client.register_script(LUA_EXTEND_TO_SCRIPT)
| 492 | +    run_await(lock.acquire)()
| 493 | +    logger.info('beat: Acquired lock')
| 494 | +    scheduler.lock = lock
| 495 | +
| 496 | +    loop = asyncio.get_event_loop()
| 497 | +    loop.create_task(extend_scheduler_lock(scheduler.lock))
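
Net effect of the two functions above (illustration only, not part of the diff): the first beat process to fire beat_init stores its token under lock_key with a 300-second TTL and then pushes the TTL back up every 60 seconds, while any additional beat process blocks inside lock.acquire() until that key expires or is released in close(). A small sketch for inspecting the key from another process, assuming the same Redis instance; the prefix shown is hypothetical and should match settings.CELERY_REDIS_PREFIX:

import asyncio
from redis.asyncio import Redis

async def inspect_beat_lock() -> None:
    client = Redis()
    key = 'fba:beat_lock'  # hypothetical; the real key is f'{settings.CELERY_REDIS_PREFIX}:beat_lock'
    token = await client.get(key)  # token of the beat process currently holding the lock
    ttl = await client.pttl(key)   # counts down from 300 000 ms and is extended roughly every 60 s
    print(token, ttl)

asyncio.run(inspect_beat_lock())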