Is there a standard way to debounce Celery tasks?
For example, so that a task can be "started" several times, but will only run once after some delay:
def debounce_task(task):
    if task_is_queued(task):
        return
    task.apply_async(countdown=30)
Answer 0 (Score: 6)
Here's how we do it with Redis counters. All of this could probably be generalized into a decorator, but we only use it for a specific task (webhooks).
Your public-facing task is the one you call from other functions. It needs to increment a key in Redis. The key is formed from your function's arguments, whatever they are (this ensures the counter is unique among individual tasks):
@task
def your_public_task(*args, **kwargs):
    cache_key = make_public_task_cache_key(*args, **kwargs)
    get_redis().incr(cache_key)
    _your_task.apply_async(args=args, kwargs=kwargs, countdown=settings.QUEUE_DELAY)
Note that the cache key function is shared (you want the same cache key in each function), and so is the countdown setting.
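The make_public_task_cache_key and get_redis helpers aren't shown in the answer; a minimal sketch of them, assuming plain redis-py and a key derived by hashing the serialized arguments, could look like this:

import hashlib
import json

import redis


def get_redis():
    # One shared client; decode_responses keeps values as str, so the
    # `counter == '0'` comparison in the task below works as written.
    return redis.Redis(host='localhost', port=6379, db=0, decode_responses=True)


def make_public_task_cache_key(*args, **kwargs):
    # Any deterministic serialization of the arguments works; the only
    # requirement is that the same call always maps to the same key.
    payload = json.dumps([args, sorted(kwargs.items())], default=str)
    return 'debounce:your_public_task:' + hashlib.sha1(payload.encode()).hexdigest()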
Then, the task that actually executes the code does the following:
@task
def _your_task(*args, **kwargs):
    cache_key = make_public_task_cache_key(*args, **kwargs)
    counter = get_redis().getset(cache_key, 0)
    # redis makes the zero a string.
    if counter == '0':
        return
    # ... execute your actual task code.
This lets you hit your_public_task.delay(..) as many times as you like within QUEUE_DELAY, and it will only fire once.
Answer 1 (Score: 1)
Here's how we do it with Mongo.
NOTE: I had to make the design more forgiving, since Celery tasks aren't guaranteed to execute at the exact moment the eta is met or the countdown runs out.
Also, Mongo expiring indexes are only cleaned up about once a minute, so you can't base the design around records being deleted the moment the eta comes up.
Anyway, the flow goes something like this:

1. my_task is called.
2. preflight increments a call counter and returns it as the flight_id.
3. _my_task is scheduled to execute after TTL seconds.
4. When _my_task runs, it checks whether its flight_id is still the current one. If it isn't, it aborts.

@celery.task(track_started=False, ignore_result=True)
def my_task(my_arg):
    flight_id = preflight(inflight_collection, 'my_task', HASH(my_arg), TTL)
    _my_task.apply_async((my_arg,), {'flight_id': flight_id}, countdown=TTL)


@celery.task(track_started=False, ignore_result=True)
def _my_task(my_arg, flight_id=None):
    if not check_for_takeoff(inflight_collection, 'my_task', HASH(my_arg), flight_id):
        return
    # ... actual work ... #
Library code:
import datetime

import pymongo

TTL = 5 * 60      # Run tasks after 5 minutes
EXPIRY = 6 * TTL  # This needs to be much larger than TTL.

# We need to store a list of task-executions currently pending.
# `db` is assumed to be a pymongo Database handle (see the sketch below).
inflight_collection = db['celery_In_Flight']
inflight_collection.create_index([('fn', pymongo.ASCENDING,),
                                  ('key', pymongo.ASCENDING,)])
inflight_collection.create_index('eta', expireAfterSeconds=EXPIRY)


def preflight(collection, fn, key, ttl):
    eta = datetime.datetime.now() + datetime.timedelta(seconds=ttl)
    result = collection.find_one_and_update({
        'fn': fn,
        'key': key,
    }, {
        '$set': {
            'eta': eta
        },
        '$inc': {
            'flightId': 1
        }
    }, upsert=True, return_document=pymongo.ReturnDocument.AFTER)
    print('Preflight[{}][{}] = {}'.format(fn, key, result['flightId']))
    return result['flightId']


def check_for_takeoff(collection, fn, key, flight_id):
    result = collection.find_one({
        'fn': fn,
        'key': key
    })
    # The record may already have expired; in that case the task is clear to run.
    ready = result is None or result['flightId'] == flight_id
    print('Check[{}][{}] = {}, {}'.format(fn, key, result and result['flightId'], ready))
    return ready
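The db handle and the HASH helper aren't shown in the answer; a minimal sketch of those assumptions, using a local MongoDB and a SHA-1 digest of the argument, might be:

import hashlib

import pymongo

# Assumed MongoDB connection; adjust the URI and database name to your setup.
client = pymongo.MongoClient('mongodb://localhost:27017/')
db = client['my_celery_db']


def HASH(value):
    # Any stable digest of the task argument works; it only has to be
    # deterministic so repeated calls map to the same in-flight record.
    return hashlib.sha1(repr(value).encode()).hexdigest()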
Answer 2 (Score: 1)
bartek has the right idea, using Redis's atomic counters (which should be readily available if your broker is Redis). Although his solution is throttling rather than debouncing, the difference is minor (getset vs decr).
Queuing up the task:
conn = get_redis()
conn.incr(key)
task.apply_async(args=args, kwargs=kwargs, countdown=countdown)
Then in the task:
conn = get_redis()
counter = conn.decr(key)
if counter > 0:
    # task is still queued
    return
# continue on to rest of task
It's hard to turn this into a decorator, because you need to decorate both the task and the code that calls the task. So you'd need one decorator that goes before the Celery @task decorator and one that goes after it.
For now I've just made some helper functions for calling the task, plus one that checks at the start of the task, as sketched below.
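Those helpers aren't shown in the answer; a minimal sketch of what they might look like (debounce_apply_async and should_run are illustrative names, reusing the get_redis helper from the code above):

def debounce_apply_async(task, key, args=None, kwargs=None, countdown=30):
    # Call this instead of task.apply_async(): bump the counter, then schedule.
    conn = get_redis()
    conn.incr(key)
    task.apply_async(args=args, kwargs=kwargs, countdown=countdown)


def should_run(key):
    # Call this at the top of the task body: only the copy that brings the
    # counter down to zero (i.e. the last one queued) does the actual work.
    conn = get_redis()
    return conn.decr(key) <= 0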
Answer 3 (Score: 0)
Here's the solution I came up with: https://gist.github.com/wolever/3cf2305613052f3810a271e09d42e35c
And copied here, for posterity:
import time

import redis


def get_redis_connection():
    # Any redis-py client works here; decode_responses keeps keys and values as str.
    return redis.Redis(decode_responses=True)


class TaskDebouncer(object):
    """ A simple Celery task debouncer.

        Usage::

            def debounce_process_corpus(corpus):
                # Only one task with ``key`` will be allowed to execute at a
                # time. For example, if the task was resizing an image, the key
                # might be the image's URL.
                key = "process_corpus:%s" %(corpus.id, )
                TaskDebouncer.delay(
                    key, process_corpus, args=[corpus.id], countdown=0,
                )

            @task(bind=True)
            def process_corpus(self, corpus_id, debounce_key=None):
                debounce = TaskDebouncer(debounce_key, keepalive=30)

                corpus = Corpus.load(corpus_id)

                try:
                    for item in corpus:
                        item.process()

                        # If ``debounce.keepalive()`` isn't called every
                        # ``keepalive`` interval (the ``keepalive=30`` in the
                        # call to ``TaskDebouncer(...)``) the task will be
                        # considered dead and another one will be allowed to
                        # start.
                        debounce.keepalive()

                finally:
                    # ``finalize()`` will mark the task as complete and allow
                    # subsequent tasks to execute. If it returns true, there
                    # was another attempt to start a task with the same key
                    # while this task was running. Depending on your business
                    # logic, this might indicate that the task should be
                    # retried.
                    needs_retry = debounce.finalize()

                if needs_retry:
                    raise self.retry(max_retries=None)

    """

    def __init__(self, key, keepalive=60):
        if key:
            self.key = key.partition("!")[0]
            self.run_key = key
        else:
            self.key = None
            self.run_key = None

        self._keepalive = keepalive

        self.cxn = get_redis_connection()

        self.init()
        self.keepalive()

    @classmethod
    def delay(cls, key, task, args=None, kwargs=None, countdown=30):
        cxn = get_redis_connection()
        now = int(time.time())
        first = cxn.set(key, now, nx=True, ex=countdown + 10)
        if not first:
            now = cxn.get(key)

        run_key = "%s!%s" %(key, now)
        if first:
            kwargs = dict(kwargs or {})
            kwargs["debounce_key"] = run_key
            task.apply_async(args=args, kwargs=kwargs, countdown=countdown)

        return (first, run_key)

    def init(self):
        self.initial = self.key and self.cxn.get(self.key)

    def keepalive(self, expire=None):
        if self.key is None:
            return
        expire = expire if expire is not None else self._keepalive
        self.cxn.expire(self.key, expire)

    def is_out_of_date(self):
        if self.key is None:
            return False
        return self.cxn.get(self.key) != self.initial

    def finalize(self):
        if self.key is None:
            return False

        with self.cxn.pipeline() as pipe:
            while True:
                try:
                    pipe.watch(self.key)
                    if pipe.get(self.key) != self.initial:
                        return True
                    pipe.multi()
                    pipe.delete(self.key)
                    pipe.execute()
                    break
                except redis.WatchError:
                    continue

        return False
Answer 4 (Score: 0)
Here's a more fleshed-out solution based on https://stackoverflow.com/a/28157498/4391298, turned into a decorator and reaching into the Kombu connection pool to reuse your Redis counter.
import logging
from functools import wraps

# Not strictly required
from django.core.exceptions import ImproperlyConfigured
from django.core.cache.utils import make_template_fragment_key

from celery.utils import gen_task_name

LOGGER = logging.getLogger(__name__)


def debounced_task(**options):
    """Debounced task decorator."""

    try:
        countdown = options.pop('countdown')
    except KeyError:
        raise ImproperlyConfigured("Debounced tasks require a countdown")

    def factory(func):
        """Decorator factory."""
        try:
            name = options.pop('name')
        except KeyError:
            name = gen_task_name(app, func.__name__, func.__module__)

        @wraps(func)
        def inner(*args, **kwargs):
            """Decorated function."""
            key = make_template_fragment_key(name, [args, kwargs])
            with app.pool.acquire_channel(block=True) as (_, channel):
                depth = channel.client.decr(key)

                if depth <= 0:
                    try:
                        func(*args, **kwargs)
                    except:
                        # The task failed (or is going to retry), set the
                        # count back to where it was
                        channel.client.set(key, depth)
                        raise
                else:
                    LOGGER.debug("%s calls pending to %s",
                                 depth, name)

        task = app._task_from_fun(inner, **options, name=name + '__debounced')

        @wraps(func)
        def debouncer(*args, **kwargs):
            """
            Debouncer that calls the real task.
            This is the task we are scheduling."""
            key = make_template_fragment_key(name, [args, kwargs])
            with app.pool.acquire_channel(block=True) as (_, channel):
                # Mark this key to expire after the countdown, in case our
                # task never runs or runs too many times, we want to clean
                # up our Redis to eventually resolve the issue.
                channel.client.expire(key, countdown + 10)

                depth = channel.client.incr(key)

                LOGGER.debug("Requesting %s in %i seconds (depth=%s)",
                             name, countdown, depth)
                task.si(*args, **kwargs).apply_async(countdown=countdown)

        return app._task_from_fun(debouncer, **options, name=name)

    return factory
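Usage isn't shown in the answer, but it would presumably look something like the sketch below. deliver_webhook is just an illustrative task, and app is assumed to be your Celery application instance, since the decorator references it:

import requests  # illustrative; any work could go in the task body


@debounced_task(countdown=30)
def deliver_webhook(url, payload):
    """Of a burst of calls within the countdown, only the last one does the work."""
    requests.post(url, json=payload, timeout=10)


# Calling the wrapping task (directly, or via .delay()) bumps the Redis counter
# and schedules the real, debounced task to run after the countdown:
deliver_webhook('https://example.com/hook', {'event': 'updated'})
deliver_webhook('https://example.com/hook', {'event': 'updated'})  # coalesces with the first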