| repo (string, 856 classes) | pull_number (int64, 3–127k) | instance_id (string, length 12–58) | issue_numbers (sequence, length 1–5) | base_commit (string, length 40) | patch (string, length 67–1.54M) | test_patch (string, length 0–107M) | problem_statement (string, length 3–307k) | hints_text (string, length 0–908k) | created_at (timestamp[s]) |
|---|---|---|---|---|---|---|---|---|---|
celery/celery | 6,147 | celery__celery-6147 | [
"6143"
] | 4e2a59afd8c8ef70bfe387e470531e8bf87c1587 | diff --git a/celery/backends/cassandra.py b/celery/backends/cassandra.py
--- a/celery/backends/cassandra.py
+++ b/celery/backends/cassandra.py
@@ -3,6 +3,7 @@
from __future__ import absolute_import, unicode_literals
import sys
+import threading
from celery import states
from celery.exceptions import ImproperlyConfigured
@@ -14,6 +15,7 @@
import cassandra
import cassandra.auth
import cassandra.cluster
+ import cassandra.query
except ImportError: # pragma: no cover
cassandra = None # noqa
@@ -123,17 +125,11 @@ def __init__(self, servers=None, keyspace=None, table=None, entry_ttl=None,
raise ImproperlyConfigured(E_NO_SUCH_CASSANDRA_AUTH_PROVIDER)
self.auth_provider = auth_provider_class(**auth_kwargs)
- self._connection = None
+ self._cluster = None
self._session = None
self._write_stmt = None
self._read_stmt = None
- self._make_stmt = None
-
- def process_cleanup(self):
- if self._connection is not None:
- self._connection.shutdown() # also shuts down _session
- self._connection = None
- self._session = None
+ self._lock = threading.RLock()
def _get_connection(self, write=False):
"""Prepare the connection for action.
@@ -141,14 +137,17 @@ def _get_connection(self, write=False):
Arguments:
write (bool): are we a writer?
"""
- if self._connection is not None:
+ if self._session is not None:
return
+ self._lock.acquire()
try:
- self._connection = cassandra.cluster.Cluster(
+ if self._session is not None:
+ return
+ self._cluster = cassandra.cluster.Cluster(
self.servers, port=self.port,
auth_provider=self.auth_provider,
**self.cassandra_options)
- self._session = self._connection.connect(self.keyspace)
+ self._session = self._cluster.connect(self.keyspace)
# We're forced to do concatenation below, as formatting would
# blow up on superficial %s that'll be processed by Cassandra
@@ -172,25 +171,27 @@ def _get_connection(self, write=False):
# Anyway; if you're doing anything critical, you should
# have created this table in advance, in which case
# this query will be a no-op (AlreadyExists)
- self._make_stmt = cassandra.query.SimpleStatement(
+ make_stmt = cassandra.query.SimpleStatement(
Q_CREATE_RESULT_TABLE.format(table=self.table),
)
- self._make_stmt.consistency_level = self.write_consistency
+ make_stmt.consistency_level = self.write_consistency
try:
- self._session.execute(self._make_stmt)
+ self._session.execute(make_stmt)
except cassandra.AlreadyExists:
pass
except cassandra.OperationTimedOut:
# a heavily loaded or gone Cassandra cluster failed to respond.
# leave this class in a consistent state
- if self._connection is not None:
- self._connection.shutdown() # also shuts down _session
+ if self._cluster is not None:
+ self._cluster.shutdown() # also shuts down _session
- self._connection = None
+ self._cluster = None
self._session = None
raise # we did fail after all - reraise
+ finally:
+ self._lock.release()
def _store_result(self, task_id, result, state,
traceback=None, request=None, **kwargs):
| diff --git a/t/unit/backends/test_cassandra.py b/t/unit/backends/test_cassandra.py
--- a/t/unit/backends/test_cassandra.py
+++ b/t/unit/backends/test_cassandra.py
@@ -10,7 +10,12 @@
from celery.exceptions import ImproperlyConfigured
from celery.utils.objects import Bunch
-CASSANDRA_MODULES = ['cassandra', 'cassandra.auth', 'cassandra.cluster']
+CASSANDRA_MODULES = [
+ 'cassandra',
+ 'cassandra.auth',
+ 'cassandra.cluster',
+ 'cassandra.query',
+]
@mock.module(*CASSANDRA_MODULES)
@@ -66,7 +71,6 @@ def test_get_task_meta_for(self, *modules):
mod.cassandra = Mock()
x = mod.CassandraBackend(app=self.app)
- x._connection = True
session = x._session = Mock()
execute = session.execute = Mock()
result_set = Mock()
@@ -83,24 +87,24 @@ def test_get_task_meta_for(self, *modules):
meta = x._get_task_meta_for('task_id')
assert meta['status'] == states.PENDING
+ def test_as_uri(self):
+ # Just ensure as_uri works properly
+ from celery.backends import cassandra as mod
+ mod.cassandra = Mock()
+
+ x = mod.CassandraBackend(app=self.app)
+ x.as_uri()
+ x.as_uri(include_password=False)
+
def test_store_result(self, *modules):
from celery.backends import cassandra as mod
mod.cassandra = Mock()
x = mod.CassandraBackend(app=self.app)
- x._connection = True
session = x._session = Mock()
session.execute = Mock()
x._store_result('task_id', 'result', states.SUCCESS)
- def test_process_cleanup(self, *modules):
- from celery.backends import cassandra as mod
- x = mod.CassandraBackend(app=self.app)
- x.process_cleanup()
-
- assert x._connection is None
- assert x._session is None
-
def test_timeouting_cluster(self):
# Tests behavior when Cluster.connect raises
# cassandra.OperationTimedOut.
@@ -128,40 +132,65 @@ def shutdown(self):
with pytest.raises(OTOExc):
x._store_result('task_id', 'result', states.SUCCESS)
- assert x._connection is None
+ assert x._cluster is None
assert x._session is None
- x.process_cleanup() # shouldn't raise
-
- def test_please_free_memory(self):
- # Ensure that Cluster object IS shut down.
+ def test_create_result_table(self):
+ # Tests behavior when session.execute raises
+ # cassandra.AlreadyExists.
from celery.backends import cassandra as mod
- class RAMHoggingCluster(object):
+ class OTOExc(Exception):
+ pass
- objects_alive = 0
+ class FaultySession(object):
+ def __init__(self, *args, **kwargs):
+ pass
+
+ def execute(self, *args, **kwargs):
+ raise OTOExc()
+
+ class DummyCluster(object):
def __init__(self, *args, **kwargs):
pass
def connect(self, *args, **kwargs):
- RAMHoggingCluster.objects_alive += 1
- return Mock()
-
- def shutdown(self):
- RAMHoggingCluster.objects_alive -= 1
+ return FaultySession()
mod.cassandra = Mock()
+ mod.cassandra.cluster = Mock()
+ mod.cassandra.cluster.Cluster = DummyCluster
+ mod.cassandra.AlreadyExists = OTOExc
+
+ x = mod.CassandraBackend(app=self.app)
+ x._get_connection(write=True)
+ assert x._session is not None
+
+ def test_init_session(self):
+ # Tests behavior when Cluster.connect works properly
+ from celery.backends import cassandra as mod
+
+ class DummyCluster(object):
+
+ def __init__(self, *args, **kwargs):
+ pass
+
+ def connect(self, *args, **kwargs):
+ return Mock()
+ mod.cassandra = Mock()
mod.cassandra.cluster = Mock()
- mod.cassandra.cluster.Cluster = RAMHoggingCluster
+ mod.cassandra.cluster.Cluster = DummyCluster
- for x in range(0, 10):
- x = mod.CassandraBackend(app=self.app)
- x._store_result('task_id', 'result', states.SUCCESS)
- x.process_cleanup()
+ x = mod.CassandraBackend(app=self.app)
+ assert x._session is None
+ x._get_connection(write=True)
+ assert x._session is not None
- assert RAMHoggingCluster.objects_alive == 0
+ s = x._session
+ x._get_connection()
+ assert s is x._session
def test_auth_provider(self):
# Ensure valid auth_provider works properly, and invalid one raises
| [CassandraBackend.process_cleanup] is unnecessary
<!--
Please fill this template entirely and do not erase parts of it.
We reserve the right to close without a response
bug reports which are incomplete.
-->
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [ ] This has already been asked to the [discussion group](https://groups.google.com/forum/#!forum/celery-users) first.
- [x] I have read the relevant section in the
[contribution guide](http://docs.celeryproject.org/en/latest/contributing.html#other-bugs)
on reporting bugs.
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the bug was already fixed in the master branch.
- [x] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
## Mandatory Debugging Information
- [x] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [x] I have included the contents of ``pip freeze`` in the issue.
- [ ] I have included all the versions of all the external dependencies required
to reproduce this bug.
## Optional Debugging Information
<!--
Try some of the below if you think they are relevant.
It will help us figure out the scope of the bug and how many users it affects.
-->
- [ ] I have tried reproducing the issue on more than one Python version
and/or implementation.
- [ ] I have tried reproducing the issue on more than one message broker and/or
result backend.
- [ ] I have tried reproducing the issue on more than one version of the message
broker and/or result backend.
- [ ] I have tried reproducing the issue on more than one operating system.
- [ ] I have tried reproducing the issue on more than one workers pool.
- [ ] I have tried reproducing the issue with autoscaling, retries,
ETA/Countdown & rate limits disabled.
- [ ] I have tried reproducing the issue after downgrading
and/or upgrading Celery and its dependencies.
## Related Issues and Possible Duplicates
<!--
Please make sure to search and mention any related issues
or possible duplicates to this issue as requested by the checklist above.
This may or may not include issues in other repositories that the Celery project
maintains or other repositories that are dependencies of Celery.
If you don't know how to mention issues, please refer to Github's documentation
on the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests
-->
#### Related Issues
- None
#### Possible Duplicates
- None
## Environment & Settings
<!-- Include the contents of celery --version below -->
**Celery version**:
<!-- Include the output of celery -A proj report below -->
<details>
<summary><b><code>celery report</code> Output:</b></summary>
<p>
```
software -> celery:4.4.3 (cliffs) kombu:4.6.9 py:3.7.6
billiard:3.6.3.0 py-amqp:2.6.0
platform -> system:Linux arch:64bit, ELF
kernel version:4.15.0-101-generic imp:CPython
loader -> celery.loaders.app.AppLoader
settings -> transport:amqp results:cassandra
Queue: <class 'kombu.entity.Queue'>
RoundRobinPolicy: <class 'cassandra.policies.RoundRobinPolicy'>
broker_url: 'amqp://guest:********@localhost:5672//'
cassandra_auth_kwargs: {
'password': '********', 'username': 'cassandra'}
cassandra_auth_provider: 'PlainTextAuthProvider'
cassandra_entry_ttl: 86400
cassandra_keyspace: '********'
cassandra_options: {
'load_balancing_policy': <cassandra.policies.RoundRobinPolicy object at 0x7f09de9a34d0>}
cassandra_servers: ['127.0.0.1']
cassandra_table: 'celery_jobs_result'
ignore_result: False
result_backend: 'cassandra'
worker_concurrency: 4
```
</p>
</details>
# Steps to Reproduce
## Required Dependencies
<!-- Please fill the required dependencies to reproduce this issue -->
* **Minimal Python Version**: N/A or Unknown
* **Minimal Celery Version**: N/A or Unknown
* **Minimal Kombu Version**: N/A or Unknown
* **Minimal Broker Version**: N/A or Unknown
* **Minimal Result Backend Version**: N/A or Unknown
* **Minimal OS and/or Kernel Version**: N/A or Unknown
* **Minimal Broker Client Version**: N/A or Unknown
* **Minimal Result Backend Client Version**: N/A or Unknown
### Python Packages
<!-- Please fill the contents of pip freeze below -->
<details>
<summary><b><code>pip freeze</code> Output:</b></summary>
<p>
```
amqp==2.6.0
attrs==19.3.0
billiard==3.6.3.0
case==1.5.3
cassandra-driver==3.20.2
future==0.18.2
gevent==20.5.2
greenlet==0.4.15
importlib-metadata==1.6.0
kombu==4.6.9
linecache2==1.0.0
mock==4.0.2
more-itertools==8.3.0
nose==1.3.7
packaging==20.4
pluggy==0.13.1
py==1.8.1
pyparsing==2.4.7
pytest==5.4.2
pytz==2020.1
redis==3.5.2
six==1.15.0
traceback2==1.4.0
unittest2==1.1.0
vine==1.3.0
wcwidth==0.1.9
zipp==3.1.0
zope.event==4.4
zope.interface==5.1.0
```
</p>
</details>
### Other Dependencies
<!--
Please provide system dependencies, configuration files
and other dependency information if applicable
-->
<details>
<p>
N/A
</p>
</details>
## Minimally Reproducible Test Case
<!--
Please provide a reproducible test case.
Refer to the Reporting Bugs section in our contribution guide.
We prefer submitting test cases in the form of a PR to our integration test suite.
If you can provide one, please mention the PR number below.
If not, please attach the most minimal code example required to reproduce the issue below.
If the test case is too large, please include a link to a gist or a repository below.
-->
<details>
<p>
```python
```
</p>
</details>
# Expected Behavior
CassandraBackend works fine
# Actual Behavior
## in prefork pool or solo:
it works, though not well, I think
## in threads or gevent pool:
these errors appear frequently:
<p>
```
[2020-06-02 21:52:19,592: ERROR/MainProcess] Task add[4bd528a0-c1ba-456c-b74e-7dee054616bb] raised unexpected: AttributeError("'NoneType' object has no attribute 'execute'")
Traceback (most recent call last):
File "/home/bx/project/celery/celery/app/trace.py", line 480, in trace_task
uuid, retval, task_request, publish_result,
File "/home/bx/project/celery/celery/backends/base.py", line 158, in mark_as_done
self.store_result(task_id, result, state, request=request)
File "/home/bx/project/celery/celery/backends/base.py", line 443, in store_result
request=request, **kwargs)
File "/home/bx/project/celery/celery/backends/cassandra.py", line 201, in _store_result
self._session.execute(self._write_stmt, (
AttributeError: 'NoneType' object has no attribute 'execute'
```
</p>
and
<p>
```
[2020-06-02 21:52:29,773: ERROR/MainProcess] Task add[27c980d3-126a-430d-a8e0-92d749ceb735] raised unexpected: OperationTimedOut("errors=errors={'Connection defunct by heartbeat': 'Client request timeout. See Session.execute[_async](timeout)'}, last_host=127.0.0.1:9042, last_host=None")
Traceback (most recent call last):
File "/home/bx/project/celery/celery/app/trace.py", line 480, in trace_task
uuid, retval, task_request, publish_result,
File "/home/bx/project/celery/celery/backends/base.py", line 158, in mark_as_done
self.store_result(task_id, result, state, request=request)
File "/home/bx/project/celery/celery/backends/base.py", line 443, in store_result
request=request, **kwargs)
File "/home/bx/project/celery/celery/backends/cassandra.py", line 207, in _store_result
buf_t(self.encode(self.current_task_children(request)))
File "cassandra/cluster.py", line 2345, in cassandra.cluster.Session.execute
File "cassandra/cluster.py", line 4304, in cassandra.cluster.ResponseFuture.result
cassandra.OperationTimedOut: errors=errors={'Connection defunct by heartbeat': 'Client request timeout. See Session.execute[_async](timeout)'}, last_host=127.0.0.1:9042, last_host=None
```
</p>
# Fix it
Remove the method `CassandraBackend.process_cleanup` and it works fine, because all tasks should share one instance of `cassandra.cluster.Cluster` and its session instead of creating a new one every time. That is the correct way to use cassandra-driver.
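A minimal sketch of that sharing pattern (class and helper names are hypothetical; this is not the actual patch): one cluster/session per backend, guarded by double-checked locking so concurrent threads or greenlets never race to create it.

```python
import threading

class SharedSession(object):
    """Create one driver session lazily and reuse it for every task."""

    def __init__(self, cluster_factory):
        self._cluster_factory = cluster_factory  # e.g. cassandra.cluster.Cluster
        self._session = None
        self._lock = threading.RLock()

    def get(self):
        if self._session is not None:  # fast path: no locking once created
            return self._session
        with self._lock:
            if self._session is None:  # re-check under the lock
                self._session = self._cluster_factory().connect()
            return self._session
```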
| are you up for a PR?
@auvipy I am improving it and will open a PR later
looking forward to it. | 2020-06-03T09:38:41 |
celery/celery | 6,223 | celery__celery-6223 | [
"4558"
] | d537be48e41cec1336e8e35f6db271b5f635adb7 | diff --git a/celery/bin/base.py b/celery/bin/base.py
--- a/celery/bin/base.py
+++ b/celery/bin/base.py
@@ -474,7 +474,7 @@ def prepare_parser(self, parser):
return parser
def setup_app_from_commandline(self, argv):
- preload_options = self.parse_preload_options(argv)
+ preload_options, remaining_options = self.parse_preload_options(argv)
quiet = preload_options.get('quiet')
if quiet is not None:
self.quiet = quiet
@@ -510,18 +510,18 @@ def setup_app_from_commandline(self, argv):
elif self.app is None:
self.app = self.get_app(loader=loader)
if self.enable_config_from_cmdline:
- argv = self.process_cmdline_config(argv)
+ remaining_options = self.process_cmdline_config(remaining_options)
else:
self.app = Celery(fixups=[])
self._handle_user_preload_options(argv)
- return argv
+ return remaining_options
def _handle_user_preload_options(self, argv):
user_preload = tuple(self.app.user_options['preload'] or ())
if user_preload:
- user_options = self._parse_preload_options(argv, user_preload)
+ user_options, _ = self._parse_preload_options(argv, user_preload)
signals.user_preload_options.send(
sender=self, app=self.app, options=user_options,
)
@@ -550,8 +550,8 @@ def _parse_preload_options(self, args, options):
args = [arg for arg in args if arg not in ('-h', '--help')]
parser = self.Parser()
self.add_compat_options(parser, options)
- namespace, _ = parser.parse_known_args(args)
- return vars(namespace)
+ namespace, unknown_args = parser.parse_known_args(args)
+ return vars(namespace), unknown_args
def add_append_opt(self, acc, opt, value):
default = opt.default or []
diff --git a/celery/bin/celery.py b/celery/bin/celery.py
--- a/celery/bin/celery.py
+++ b/celery/bin/celery.py
@@ -435,6 +435,13 @@ def on_usage_error(self, exc, command=None):
)))
def _relocate_args_from_start(self, argv, index=0):
+ """Move options to the end of args.
+
+ This rewrites:
+ -l debug worker -c 3
+ to:
+ worker -c 3 -l debug
+ """
if argv:
rest = []
while index < len(argv):
@@ -466,9 +473,6 @@ def _relocate_args_from_start(self, argv, index=0):
# we assume the first argument in argv[i:] is the command
# name.
return argv[index:] + rest
- # if there are no more arguments then the last arg in rest'
- # must be the command.
- [rest.pop()] + rest
return []
def prepare_prog_name(self, name):
| diff --git a/t/unit/bin/test_base.py b/t/unit/bin/test_base.py
--- a/t/unit/bin/test_base.py
+++ b/t/unit/bin/test_base.py
@@ -353,7 +353,7 @@ class TestCommand(Command):
def add_preload_arguments(self, parser):
parser.add_argument('-s', action='store', dest='silent')
cmd = TestCommand()
- acc = cmd.parse_preload_options(['-s', 'yes'])
+ acc, _ = cmd.parse_preload_options(['-s', 'yes'])
assert acc.get('silent') == 'yes'
def test_parse_preload_options_with_equals_and_append(self):
@@ -363,7 +363,7 @@ class TestCommand(Command):
def add_preload_arguments(self, parser):
parser.add_argument('--zoom', action='append', default=[])
cmd = Command()
- acc = cmd.parse_preload_options(['--zoom=1', '--zoom=2'])
+ acc, _ = cmd.parse_preload_options(['--zoom=1', '--zoom=2'])
assert acc, {'zoom': ['1' == '2']}
@@ -371,6 +371,6 @@ def test_parse_preload_options_without_equals_and_append(self):
cmd = Command()
opt = Option('--zoom', action='append', default=[])
cmd.preload_options = (opt,)
- acc = cmd.parse_preload_options(['--zoom', '1', '--zoom', '2'])
+ acc, _ = cmd.parse_preload_options(['--zoom', '1', '--zoom', '2'])
assert acc, {'zoom': ['1' == '2']}
diff --git a/t/unit/bin/test_celery.py b/t/unit/bin/test_celery.py
--- a/t/unit/bin/test_celery.py
+++ b/t/unit/bin/test_celery.py
@@ -16,6 +16,13 @@
from celery.platforms import EX_FAILURE, EX_OK, EX_USAGE
+class MyApp(object):
+ user_options = {'preload': None}
+
+
+APP = MyApp() # <-- Used by test_short_and_long_arguments_be_the_same
+
+
class test__main__:
def test_main(self):
@@ -204,6 +211,17 @@ def test_handle_argv(self):
x.handle_argv('celery', ['start', 'foo'])
x.execute.assert_called_with('start', ['start', 'foo'])
+ def test_short_and_long_arguments_be_the_same(self):
+ for arg in "--app", "-A":
+ appstr = '.'.join([__name__, 'APP'])
+ x = CeleryCommand(app=self.app)
+ x.execute = Mock()
+ with pytest.raises(SystemExit):
+ x.execute_from_commandline(['celery', arg, appstr, 'worker'])
+ assert x.execute.called
+ assert x.execute.call_args[0]
+ assert x.execute.call_args[0][0] == "worker"
+
def test_execute(self):
x = CeleryCommand(app=self.app)
Help = x.commands['help'] = Mock()
| Unlike what the docs state, --app does not behave like -A
## Steps to reproduce
* Try to run `celery --app foo *anything*`.
* Look at the "celery usage" output instead of the "*anything*" expected output.
## Expected behavior
"-A foo" and "--app foo" to behave the same way.
## Actual behavior
"-A foo" works, while "--app foo" just outputs the "celery usage" screen
## Environment
```
software -> celery:4.1.0 (latentcall) kombu:4.1.0 py:3.6.3
billiard:3.5.0.3 py-amqp:2.2.2
platform -> system:Darwin arch:64bit imp:CPython
loader -> celery.loaders.app.AppLoader
settings -> transport:amqp results:django-db
```
What's the status with celery 4.2.1?
Exact same with v4.2.1 (windowlicker).
There is in fact a behavioral difference that depends on argument position:
`celery worker -A foo` works
`celery worker --app foo` works
`celery -A foo worker` works
`celery --app foo worker` does not work and shows the celery usage
do you have any suggested improvement in mind?
Well, I think it should behave the same whatever the order of arguments is. I struggled and lost a bit of time because I first chose the way that does not work (you know, Murphy's law). I tried to look at the argument-parsing code of celery to see if I could patch it, but it wasn't clear to me how it worked, hence opening the issue. If you point me in the right direction (like where it should be done, and probably how it is tested today), I may be able to submit a patch.
We're adding both `-A` and `--app` here: https://github.com/celery/celery/blob/eeda18611ceed2560145f95ada4977a1b825d282/celery/bin/base.py#L299
They should be entirely interchangeable.
We haven't done anything else to define that argument.
The relevant test which verifies this passes (and you are welcome to run the tests yourself):
https://github.com/celery/celery/blob/f3ef0df5a6e4008d73fdc6e7b6c02c7edf361d5a/t/unit/bin/test_base.py#L184
I cannot reproduce this behavior locally on my Linux machine.
Could this be a bug in argparse that only reproduces on OSX? I find it unlikely but possible.
Unless you can provide me with new information on how to reproduce this bug (e.g. it only happens on OSX or with some certain order of arguments) I think it's safe to close this one.
This does not mean that there is no problem to be fixed. We're just not sure it's a problem within Celery's codebase.
@thedrow Thanks for the infos.
It does indeed appear with a certain order of arguments: using "--app x" (not "--app=x", notice the lack of an equals sign) *before the celery subcommand* (worker, inspect, ...). It is 100% reproducible on Linux; here is how.
I launch a brand-new Linux container (Docker) from the official "python:3" image, started with `docker run -it --rm python:3 bash`, and here is the "bootstrap" script I run by hand after launching the container:
```
$ pip install celery
$ cat > foo.py
from celery import Celery
app = Celery("foo")
^D
```
(edited for copypaste mistake)
Minimalist, right? Now run in this image:
```
# celery -A foo report
software -> celery:4.2.1 (windowlicker) kombu:4.2.1 py:3.7.0
billiard:3.5.0.4 py-amqp:2.3.2
platform -> system:Linux arch:64bit imp:CPython
loader -> celery.loaders.app.AppLoader
settings -> transport:amqp results:disabled
```
Works as expected. Let's switch to `--app foo`:
```
# celery --app foo report
usage: celery <command> [options]
Show help screen and exit.
... etc ...
```
Note that it does work with `--app=foo`, just not with `--app foo` (notice the lack of equal sign). But celery usage says: `Global Options: -A APP, --app APP`.
I don't agree that it's "plain argparse". I think the culprit code is somehow related to `CeleryCommand._relocate_args_from_start`, which (as far as I understand) changes the `sys.argv` order before those parameters are passed to `argparse`, so that global arguments are moved after the celery command (like "worker", "inspect", etc).
I understand the goal, but it does not work 100% with arguments as understood by `argparse`, nor as advertised by the celery usage help.
Behaviour is 100% the same on OSX and Linux. I tried to write a unit test, but my lack of understanding of the celery codebase (and especially of what MockCommand does) made me fail at that.
(note that this bug probably also affects all other global options that take arguments when passed before the command name, like `--broker BROKER`, `--result-backend RESULT_BACKEND`, `--loader LOADER`, `--config CONFIG`, `--workdir WORKDIR` ...)
Example:
* `# celery report -A foo --workdir .` works
* `# celery --workdir . report -A foo` shows celery usage
* `# celery --workdir=. report -A foo` works again
(see https://github.com/celery/celery/blob/master/celery/bin/celery.py#L438:L473)
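To illustrate the failure mode, here is a deliberately simplified, hypothetical sketch of such a relocation pass (not Celery's actual `_relocate_args_from_start`): a scanner that moves options behind the first bare token cannot know that `--app` consumes the next token, so the option's value gets mistaken for the command name.

```python
def relocate(argv):
    """Naive 'move global options after the command' pass."""
    rest, i = [], 0
    while i < len(argv):
        arg = argv[i]
        if arg.startswith('--'):
            rest.append(arg)        # '--app=foo' keeps its value; '--app foo' does not
        elif arg.startswith('-'):
            rest.append(arg)        # short option: assume it takes a value
            i += 1
            if i < len(argv):
                rest.append(argv[i])
        else:
            return argv[i:] + rest  # first bare token is taken as the command
        i += 1
    return []

print(relocate(['-A', 'foo', 'worker']))     # ['worker', '-A', 'foo']     -> ok
print(relocate(['--app=foo', 'worker']))     # ['worker', '--app=foo']     -> ok
print(relocate(['--app', 'foo', 'worker']))  # ['foo', 'worker', '--app']  -> 'foo' becomes the command
```

Hedged reading: the real implementation does track values for short options, which would explain why `-A foo` and `--app=foo` survive relocation while the space-separated long form does not.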
Thank you for the analysis. This is indeed a bug and I am able to reproduce it as you said.
I used ```celery --app=foo report``` which is why I thought this was not reproducible.
I did see the "=" sign in answers, hence the small emphasis on not using it.
Note that I'd gladly work on a fix, but the complexity added by the argparse-related hack to reorder arguments, and the fact that for now the tests do not check cases with arguments before and after the subcommand, make me slightly uncomfortable about submitting a patch, as I don't know how to test it.
However, I do understand why the hack exists, as this is a struggle I often have with argparse (having "global arguments" work both before and after the subcommand). I think it's a UX defect of argparse, and I understand that celery wants to work around it. But this is something that can really annoy users, especially users like me who only use celery every *insert long period of time here* (and Murphy's law forces me to use the only case that does not work, by default ...).
If you can give a few pointers on how to test cases with arguments before and after the subcommand in the unit tests, I can give it a shot. It looks like the test you pointed me to skips argument parsing, as MockCommand overrides `parse_options`:
```
class MockCommand(Command):
    mock_args = ('arg1', 'arg2', 'arg3')

    def parse_options(self, prog_name, arguments, command=None):
        options = {'foo': 'bar', 'prog_name': prog_name}
        return options, self.mock_args
``` | 2020-07-12T22:46:19 |
celery/celery | 6,251 | celery__celery-6251 | [
"6250"
] | 8b520d188e61be8dc7809932ba86d97ca986778c | diff --git a/celery/backends/base.py b/celery/backends/base.py
--- a/celery/backends/base.py
+++ b/celery/backends/base.py
@@ -7,7 +7,6 @@
using K/V semantics like _get and _put.
"""
from __future__ import absolute_import, unicode_literals
-from future.utils import raise_with_traceback
from datetime import datetime, timedelta
import sys
@@ -29,7 +28,7 @@
from celery.exceptions import (ChordError, ImproperlyConfigured,
NotRegistered, TaskRevokedError, TimeoutError,
BackendGetMetaError, BackendStoreError)
-from celery.five import PY3, items
+from celery.five import PY3, items, reraise
from celery.result import (GroupResult, ResultBase, ResultSet,
allow_join_result, result_from_tuple)
from celery.utils.collections import BufferMap
@@ -454,7 +453,11 @@ def store_result(self, task_id, result, state,
self.max_sleep_between_retries_ms, True) / 1000
self._sleep(sleep_amount)
else:
- raise_with_traceback(BackendStoreError("failed to store result on the backend", task_id=task_id, state=state))
+ reraise(
+ BackendStoreError,
+ BackendStoreError("failed to store result on the backend", task_id=task_id, state=state),
+ traceback,
+ )
else:
raise
@@ -521,6 +524,7 @@ def get_task_meta(self, task_id, cache=True):
meta = self._get_task_meta_for(task_id)
break
except Exception as exc:
+ tb = sys.exc_info()[2]
if self.always_retry and self.exception_safe_to_retry(exc):
if retries < self.max_retries:
retries += 1
@@ -532,7 +536,11 @@ def get_task_meta(self, task_id, cache=True):
self.max_sleep_between_retries_ms, True) / 1000
self._sleep(sleep_amount)
else:
- raise_with_traceback(BackendGetMetaError("failed to get meta", task_id=task_id))
+ reraise(
+ BackendGetMetaError,
+ BackendGetMetaError("failed to get meta", task_id=task_id),
+ tb,
+ )
else:
raise
| dependency on future module breaks many things (including vim ultisnips)
the dependency on future-0.18.2 adds many packages that squat on py3 stdlib names, e.g.:
```
/home/graingert/.virtualenvs/celery2/lib/python2.7/site-packages/_dummy_thread/*
/home/graingert/.virtualenvs/celery2/lib/python2.7/site-packages/_markupbase/*
/home/graingert/.virtualenvs/celery2/lib/python2.7/site-packages/_thread/*
/home/graingert/.virtualenvs/celery2/lib/python2.7/site-packages/builtins/*
/home/graingert/.virtualenvs/celery2/lib/python2.7/site-packages/copyreg/*
/home/graingert/.virtualenvs/celery2/lib/python2.7/site-packages/future-0.18.2.dist-info/*
/home/graingert/.virtualenvs/celery2/lib/python2.7/site-packages/future/*
/home/graingert/.virtualenvs/celery2/lib/python2.7/site-packages/html/*
/home/graingert/.virtualenvs/celery2/lib/python2.7/site-packages/http/*
/home/graingert/.virtualenvs/celery2/lib/python2.7/site-packages/libfuturize/*
/home/graingert/.virtualenvs/celery2/lib/python2.7/site-packages/libpasteurize/*
/home/graingert/.virtualenvs/celery2/lib/python2.7/site-packages/past/*
/home/graingert/.virtualenvs/celery2/lib/python2.7/site-packages/queue/*
/home/graingert/.virtualenvs/celery2/lib/python2.7/site-packages/reprlib/*
/home/graingert/.virtualenvs/celery2/lib/python2.7/site-packages/socketserver/*
/home/graingert/.virtualenvs/celery2/lib/python2.7/site-packages/tkinter/*
/home/graingert/.virtualenvs/celery2/lib/python2.7/site-packages/winreg/*
/home/graingert/.virtualenvs/celery2/lib/python2.7/site-packages/xmlrpc/*
```
this breaks packages that rely on feature detection of these modules.
I propose replacing future with six because:
* future does not provide a wheel; six has a wheel
* future's last release was Oct 31, 2019; six's was also Oct 31, 2019
* future is 829.2 kB; six is 11.0 kB
* celery already depends on six, via kombu and importlib-metadata, so swapping future out for six would save the full 829.2 kB, rather than just 818.2 kB
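For context, the only `future` helper this swap touches in `celery/backends/base.py` is `raise_with_traceback`, which maps directly onto `six.reraise` (re-exported via `celery.five.reraise`). A minimal sketch of the equivalence; `fetch_meta` is a made-up stand-in for a failing backend call:

```python
import sys

import six

def fetch_meta():
    raise KeyError('missing task meta')  # stand-in for a failing backend call

try:
    fetch_meta()
except KeyError:
    tb = sys.exc_info()[2]
    # future:  raise_with_traceback(RuntimeError('failed to get meta'))
    # six equivalent, preserving the traceback of the original failure:
    six.reraise(RuntimeError, RuntimeError('failed to get meta'), tb)
```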
<!--
Please fill this template entirely and do not erase parts of it.
We reserve the right to close without a response
bug reports which are incomplete.
-->
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [ ] This has already been asked to the [discussion group](https://groups.google.com/forum/#!forum/celery-users) first.
- [x] I have read the relevant section in the
[contribution guide](http://docs.celeryproject.org/en/latest/contributing.html#other-bugs)
on reporting bugs.
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [ ] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [ ] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the bug was already fixed in the master branch.
- [ ] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
## Mandatory Debugging Information
- [ ] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [ ] I have included the contents of ``pip freeze`` in the issue.
- [ ] I have included all the versions of all the external dependencies required
to reproduce this bug.
## Optional Debugging Information
<!--
Try some of the below if you think they are relevant.
It will help us figure out the scope of the bug and how many users it affects.
-->
- [ ] I have tried reproducing the issue on more than one Python version
and/or implementation.
- [ ] I have tried reproducing the issue on more than one message broker and/or
result backend.
- [ ] I have tried reproducing the issue on more than one version of the message
broker and/or result backend.
- [ ] I have tried reproducing the issue on more than one operating system.
- [ ] I have tried reproducing the issue on more than one workers pool.
- [ ] I have tried reproducing the issue with autoscaling, retries,
ETA/Countdown & rate limits disabled.
- [ ] I have tried reproducing the issue after downgrading
and/or upgrading Celery and its dependencies.
## Related Issues and Possible Duplicates
<!--
Please make sure to search and mention any related issues
or possible duplicates to this issue as requested by the checklist above.
This may or may not include issues in other repositories that the Celery project
maintains or other repositories that are dependencies of Celery.
If you don't know how to mention issues, please refer to Github's documentation
on the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests
-->
#### Related Issues
- None
#### Possible Duplicates
- None
## Environment & Settings
<!-- Include the contents of celery --version below -->
**Celery version**:
<!-- Include the output of celery -A proj report below -->
<details>
<summary><b><code>celery report</code> Output:</b></summary>
<p>
```
```
</p>
</details>
# Steps to Reproduce
## Required Dependencies
<!-- Please fill the required dependencies to reproduce this issue -->
* **Minimal Python Version**: N/A or Unknown
* **Minimal Celery Version**: N/A or Unknown
* **Minimal Kombu Version**: N/A or Unknown
* **Minimal Broker Version**: N/A or Unknown
* **Minimal Result Backend Version**: N/A or Unknown
* **Minimal OS and/or Kernel Version**: N/A or Unknown
* **Minimal Broker Client Version**: N/A or Unknown
* **Minimal Result Backend Client Version**: N/A or Unknown
### Python Packages
<!-- Please fill the contents of pip freeze below -->
<details>
<summary><b><code>pip freeze</code> Output:</b></summary>
<p>
```
```
</p>
</details>
### Other Dependencies
<!--
Please provide system dependencies, configuration files
and other dependency information if applicable
-->
<details>
<p>
N/A
</p>
</details>
## Minimally Reproducible Test Case
<!--
Please provide a reproducible test case.
Refer to the Reporting Bugs section in our contribution guide.
We prefer submitting test cases in the form of a PR to our integration test suite.
If you can provide one, please mention the PR number below.
If not, please attach the most minimal code example required to reproduce the issue below.
If the test case is too large, please include a link to a gist or a repository below.
-->
<details>
<p>
```python
```
</p>
</details>
# Expected Behavior
<!-- Describe in detail what you expect to happen -->
# Actual Behavior
<!--
Describe in detail what actually happened.
Please include a backtrace and surround it with triple backticks (```).
In addition, include the Celery daemon logs, the broker logs,
the result backend logs and system logs below if they will help us debug
the issue.
-->
| 2020-07-28T15:33:47 |
||
celery/celery | 6,259 | celery__celery-6259 | [
"6258"
] | c57100beb179621f4f8f4f33098d0d748ad54a0e | diff --git a/celery/backends/mongodb.py b/celery/backends/mongodb.py
--- a/celery/backends/mongodb.py
+++ b/celery/backends/mongodb.py
@@ -180,14 +180,12 @@ def encode(self, data):
def decode(self, data):
if self.serializer == 'bson':
return data
-
- payload = self.encode(data)
- return super(MongoBackend, self).decode(payload)
+ return super(MongoBackend, self).decode(data)
def _store_result(self, task_id, result, state,
traceback=None, request=None, **kwargs):
"""Store return value and state of an executed task."""
- meta = self._get_result_meta(result=result, state=state,
+ meta = self._get_result_meta(result=self.encode(result), state=state,
traceback=traceback, request=request)
# Add the _id for mongodb
meta['_id'] = task_id
| diff --git a/t/unit/backends/test_mongodb.py b/t/unit/backends/test_mongodb.py
--- a/t/unit/backends/test_mongodb.py
+++ b/t/unit/backends/test_mongodb.py
@@ -1,9 +1,11 @@
from __future__ import absolute_import, unicode_literals
import datetime
+import sys
from pickle import dumps, loads
import pytest
+import pytz
from case import ANY, MagicMock, Mock, mock, patch, sentinel, skip
from kombu.exceptions import EncodeError
try:
@@ -12,7 +14,7 @@
ConfigurationError = None
from celery import states, uuid
-from celery.backends.mongodb import InvalidDocument, MongoBackend
+from celery.backends.mongodb import InvalidDocument, MongoBackend, Binary
from celery.exceptions import ImproperlyConfigured
COLLECTION = 'taskmeta_celery'
@@ -530,20 +532,138 @@ def test_regression_worker_startup_info(self):
assert worker.startup_info()
[email protected](scope="function")
+def mongo_backend_factory(app):
+ """Return a factory that creates MongoBackend instance with given serializer, including BSON."""
+
+ def create_mongo_backend(serializer):
+ # NOTE: `bson` is the only mongodb-specific serializer and can be set only directly on the MongoBackend instance.
+ if serializer == "bson":
+ backend = MongoBackend(app=app)
+ backend.serializer = serializer
+ else:
+ app.conf.accept_content = ['json', 'pickle', 'msgpack', 'yaml']
+ app.conf.result_serializer = serializer
+ backend = MongoBackend(app=app)
+ return backend
+
+ yield create_mongo_backend
+
+
@skip.unless_module('pymongo')
[email protected]("serializer,encoded_into", [
+ ('bson', int),
+ ('json', str),
+ ('pickle', Binary),
+ ('msgpack', Binary),
+ ('yaml', str),
+])
class test_MongoBackend_no_mock:
- def test_encode_decode(self, app):
- backend = MongoBackend(app=app)
- data = {'foo': 1}
- assert backend.decode(backend.encode(data))
- backend.serializer = 'bson'
- assert backend.encode(data) == data
- assert backend.decode(data) == data
-
- def test_de(self, app):
- backend = MongoBackend(app=app)
- data = {'foo': 1}
- assert backend.encode(data)
- backend.serializer = 'bson'
- assert backend.encode(data) == data
+ def test_encode(self, mongo_backend_factory, serializer, encoded_into):
+ backend = mongo_backend_factory(serializer=serializer)
+ assert isinstance(backend.encode(10), encoded_into)
+
+ def test_encode_decode(self, mongo_backend_factory, serializer, encoded_into):
+ backend = mongo_backend_factory(serializer=serializer)
+ decoded = backend.decode(backend.encode(12))
+ assert decoded == 12
+
+
+class _MyTestClass(object):
+
+ def __init__(self, a):
+ self.a = a
+
+ def __eq__(self, other):
+ assert self.__class__ == type(other)
+ return self.a == other.a
+
+
+SUCCESS_RESULT_TEST_DATA = [
+ # json types
+ {
+ "result": "A simple string",
+ "serializers": ["bson", "pickle", "yaml", "json", "msgpack"],
+ },
+ {
+ "result": 100,
+ "serializers": ["bson", "pickle", "yaml", "json", "msgpack"],
+ },
+ {
+ "result": 9.1999999999999999,
+ "serializers": ["bson", "pickle", "yaml", "json", "msgpack"],
+ },
+ {
+ "result": {"foo": "simple result"},
+ "serializers": ["bson", "pickle", "yaml", "json", "msgpack"],
+ },
+ {
+ "result": ["a", "b"],
+ "serializers": ["bson", "pickle", "yaml", "json", "msgpack"],
+ },
+ {
+ "result": False,
+ "serializers": ["bson", "pickle", "yaml", "json", "msgpack"],
+ },
+ {
+ "result": None,
+ "serializers": ["bson", "pickle", "yaml", "json", "msgpack"],
+ },
+ # advanced essential types
+ {
+ "result": datetime.datetime(2000, 1, 1, 0, 0, 0, 0),
+ "serializers": ["bson", "pickle", "yaml"],
+ },
+ {
+ "result": datetime.datetime(2000, 1, 1, 0, 0, 0, 0, tzinfo=pytz.utc),
+ "serializers": ["pickle", "yaml"],
+ },
+ # custom types
+ {
+ "result": _MyTestClass("Hi!"),
+ "serializers": ["pickle"],
+ },
+]
+
+
[email protected]_module('pymongo')
+class test_MongoBackend_store_get_result:
+
+ @pytest.fixture(scope="function", autouse=True)
+ def fake_mongo_collection_patch(self, monkeypatch):
+ """A fake collection with serialization experience close to MongoDB."""
+ bson = pytest.importorskip("bson")
+
+ class FakeMongoCollection(object):
+ def __init__(self):
+ self.data = {}
+
+ def replace_one(self, task_id, meta, upsert=True):
+ self.data[task_id['_id']] = bson.encode(meta)
+
+ def find_one(self, task_id):
+ return bson.decode(self.data[task_id['_id']])
+
+ monkeypatch.setattr(MongoBackend, "collection", FakeMongoCollection())
+
+ @pytest.mark.parametrize("serializer,result_type,result", [
+ (s, type(i['result']), i['result']) for i in SUCCESS_RESULT_TEST_DATA for s in i['serializers']]
+ )
+ def test_encode_success_results(self, mongo_backend_factory, serializer, result_type, result):
+ backend = mongo_backend_factory(serializer=serializer)
+ backend.store_result(TASK_ID, result, 'SUCCESS')
+ recovered = backend.get_result(TASK_ID)
+ if sys.version_info.major == 2 and isinstance(recovered, str):
+ result_type = str # workaround for python 2 compatibility and `unicode_literals`
+ assert type(recovered) == result_type
+ assert recovered == result
+
+ @pytest.mark.parametrize("serializer", ["bson", "pickle", "yaml", "json", "msgpack"])
+ def test_encode_exception_error_results(self, mongo_backend_factory, serializer):
+ backend = mongo_backend_factory(serializer=serializer)
+ exception = Exception("Basic Exception")
+ backend.store_result(TASK_ID, exception, 'FAILURE')
+ recovered = backend.get_result(TASK_ID)
+ assert type(recovered) == type(exception)
+ assert recovered.args == exception.args
| MongoDB result backend serialization logic issue
<!--
Please fill this template entirely and do not erase parts of it.
We reserve the right to close without a response
bug reports which are incomplete.
-->
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [ ] This has already been asked to the [discussion group](https://groups.google.com/forum/#!forum/celery-users) first.
- [x] I have read the relevant section in the
[contribution guide](http://docs.celeryproject.org/en/latest/contributing.html#other-bugs)
on reporting bugs.
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the bug was already fixed in the master branch.
- [x] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
## Mandatory Debugging Information
- [ ] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [x] I have included the contents of ``pip freeze`` in the issue.
- [x] I have included all the versions of all the external dependencies required
to reproduce this bug.
## Optional Debugging Information
<!--
Try some of the below if you think they are relevant.
It will help us figure out the scope of the bug and how many users it affects.
-->
- [ ] I have tried reproducing the issue on more than one Python version
and/or implementation.
- [ ] I have tried reproducing the issue on more than one message broker and/or
result backend.
- [ ] I have tried reproducing the issue on more than one version of the message
broker and/or result backend.
- [ ] I have tried reproducing the issue on more than one operating system.
- [ ] I have tried reproducing the issue on more than one workers pool.
- [ ] I have tried reproducing the issue with autoscaling, retries,
ETA/Countdown & rate limits disabled.
- [ ] I have tried reproducing the issue after downgrading
and/or upgrading Celery and its dependencies.
## Related Issues and Possible Duplicates
<!--
Please make sure to search and mention any related issues
or possible duplicates to this issue as requested by the checklist above.
This may or may not include issues in other repositories that the Celery project
maintains or other repositories that are dependencies of Celery.
If you don't know how to mention issues, please refer to Github's documentation
on the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests
-->
#### Related Issues
- In PR https://github.com/celery/celery/pull/5661, result encoding was removed from the `_store_result` call.
- In PR https://github.com/celery/celery/pull/5918, the missing result encoding on store was addressed by adding an `encode` call inside the `decode` call.
#### Possible Duplicates
- None
## Environment & Settings
<!-- Include the contents of celery --version below -->
**Celery version**:
<!-- Include the output of celery -A proj report below -->
<details>
<summary><b><code>celery report</code> Output:</b></summary>
<p>
```
```
</p>
</details>
# Steps to Reproduce
## Required Dependencies
<!-- Please fill the required dependencies to reproduce this issue -->
* **Minimal Python Version**: N/A or Unknown
* **Minimal Celery Version**: N/A or Unknown
* **Minimal Kombu Version**: N/A or Unknown
* **Minimal Broker Version**: N/A or Unknown
* **Minimal Result Backend Version**: N/A or Unknown
* **Minimal OS and/or Kernel Version**: N/A or Unknown
* **Minimal Broker Client Version**: N/A or Unknown
* **Minimal Result Backend Client Version**: N/A or Unknown
### Python Packages
<!-- Please fill the contents of pip freeze below -->
<details>
<summary><b><code>pip freeze</code> Output:</b></summary>
<p>
```
```
</p>
</details>
### Other Dependencies
<!--
Please provide system dependencies, configuration files
and other dependency information if applicable
-->
<details>
<p>
N/A
</p>
</details>
## Minimally Reproducible Test Case
<!--
Please provide a reproducible test case.
Refer to the Reporting Bugs section in our contribution guide.
We prefer submitting test cases in the form of a PR to our integration test suite.
If you can provide one, please mention the PR number below.
If not, please attach the most minimal code example required to reproduce the issue below.
If the test case is too large, please include a link to a gist or a repository below.
-->
<details>
<p>
```python
```
</p>
</details>
# Expected Behavior
`store_result` uses the configured `serializer`.
# Actual Behavior
`store_result` ignores `serializer`, so serialization errors occur for data types that the chosen serializer would actually support.
For more details please refer to tests in https://github.com/celery/celery/pull/6259
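As a hedged sketch of the expected contract (helper names here are hypothetical): the configured serializer should be applied exactly once on write and reversed once on read, with `bson` acting as a pass-through because pymongo serializes natively.

```python
def store(backend, value):
    # e.g. pickle -> bson.Binary, json/yaml -> str; 'bson' passes through
    return backend.encode(value)

def fetch(backend, stored):
    if backend.serializer == 'bson':
        return stored  # pymongo already returned native BSON-mapped types
    return backend.decode(stored)
```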
| 2020-07-29T16:26:46 |
|
celery/celery | 6,288 | celery__celery-6288 | [
"6280"
] | 0df06a87dc4658edcf155a6dafb4ed34d9751cc9 | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -213,10 +213,7 @@ def run_tests(self):
entry_points={
'console_scripts': [
'celery = celery.__main__:main',
- ],
- 'pytest11': [
- 'celery = celery.contrib.pytest',
- ],
+ ]
},
project_urls={
"Documentation": "http://docs.celeryproject.org/en/latest/index.html",
| diff --git a/requirements/test.txt b/requirements/test.txt
--- a/requirements/test.txt
+++ b/requirements/test.txt
@@ -1,6 +1,7 @@
case>=1.3.1
pytest~=4.6; python_version < '3.0'
pytest~=6.0; python_version >= '3.0'
+pytest-celery
pytest-timeout~=1.4.2
boto3>=1.9.178
python-dateutil<2.8.1,>=2.1; python_version < '3.0'
| stop loading pytest plugin by default
currently celery, by default, will add a set of fixtures and a mark, which can be confusing to new developers on a project: they run `pytest --fixtures` and begin using them even though we've not yet opted in and started configuring them.
An alternative is to use an approach like https://github.com/aio-libs/pytest-aiohttp/ where there is a shim package that just lists an entrypoint and depends on aiohttp.
Those that don't want to install the shim package can add `pytest_plugins = 'celery.contrib.pytest'` to their root `conftest.py` (or pass `-p celery.contrib.pytest`)
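A hedged sketch of such a shim distribution (package name and version assumed, not necessarily the real pytest-celery): it carries nothing but the entry point that celery's own setup.py currently declares, so the plugin only auto-loads when the shim is installed.

```python
# setup.py for a hypothetical shim package, mirroring pytest-aiohttp
from setuptools import setup

setup(
    name='pytest-celery',          # assumed name
    version='0.0.0',
    install_requires=['celery'],
    entry_points={
        'pytest11': ['celery = celery.contrib.pytest'],
    },
)
```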
<!--
Please fill this template entirely and do not erase parts of it.
We reserve the right to close without a response
feature requests which are incomplete.
-->
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
- [ ] I have checked the [issues list](https://github.com/celery/celery/issues?utf8=%E2%9C%93&q=is%3Aissue+label%3A%22Issue+Type%3A+Feature+Request%22+)
for similar or identical feature requests.
- [ ] I have checked the [pull requests list](https://github.com/celery/celery/pulls?utf8=%E2%9C%93&q=is%3Apr+label%3A%22PR+Type%3A+Feature%22+)
for existing proposed implementations of this feature.
- [ ] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the if the same feature was already implemented in the
master branch.
- [ ] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
## Related Issues and Possible Duplicates
<!--
Please make sure to search and mention any related issues
or possible duplicates to this issue as requested by the checklist above.
This may or may not include issues in other repositories that the Celery project
maintains or other repositories that are dependencies of Celery.
If you don't know how to mention issues, please refer to Github's documentation
on the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests
-->
#### Related Issues
- None
#### Possible Duplicates
- None
# Brief Summary
<!--
Please include a brief summary of what the feature does
and why it is needed.
-->
# Design
## Architectural Considerations
<!--
If more components other than Celery are involved,
describe them here and the effect it would have on Celery.
-->
None
## Proposed Behavior
<!--
Please describe in detail how this feature is going to behave.
Describe what happens in case of failures as well if applicable.
-->
## Proposed UI/UX
<!--
Please provide your ideas for the API, CLI options,
configuration key names etc. that will be introduced for this feature.
-->
## Diagrams
<!--
Please include any diagrams that might be relevant
to the implementation of this feature such as:
* Class Diagrams
* Sequence Diagrams
* Activity Diagrams
You can drag and drop images into the text box to attach them to this issue.
-->
N/A
## Alternatives
<!--
If you have considered any alternative implementations
describe them in detail below.
-->
None
| I'll address this later but note that we have a template for feature requests/enhancements.
@thedrow I'm happy to make the PR, I'm just looking for a concept ack
We can create a pytest-celery shim package for Celery 5.0.0 if you'd like. | 2020-08-05T11:35:09 |
celery/celery | 6,294 | celery__celery-6294 | [
"6290"
] | 302bc8e6349cdfd723abb558dd5a330052ccffdd | diff --git a/celery/backends/redis.py b/celery/backends/redis.py
--- a/celery/backends/redis.py
+++ b/celery/backends/redis.py
@@ -405,7 +405,7 @@ def _chord_zset(self):
transport_options = self.app.conf.get(
'result_backend_transport_options', {}
)
- return transport_options.get('result_chord_ordered', False)
+ return transport_options.get('result_chord_ordered', True)
def on_chord_part_return(self, request, state, result,
propagate=None, **kwargs):
| diff --git a/t/integration/test_canvas.py b/t/integration/test_canvas.py
--- a/t/integration/test_canvas.py
+++ b/t/integration/test_canvas.py
@@ -854,7 +854,7 @@ def test_chord_on_error(self, manager):
redis_connection = get_redis_connection()
# The redis key is either a list or zset depending on configuration
if manager.app.conf.result_backend_transport_options.get(
- 'result_chord_ordered', False
+ 'result_chord_ordered', True
):
job_results = redis_connection.zrange(j_key, 0, 3)
else:
diff --git a/t/unit/backends/test_redis.py b/t/unit/backends/test_redis.py
--- a/t/unit/backends/test_redis.py
+++ b/t/unit/backends/test_redis.py
@@ -623,9 +623,9 @@ def test_on_chord_part_return(self, restore):
for i in range(10):
self.b.on_chord_part_return(tasks[i].request, states.SUCCESS, i)
- assert self.b.client.rpush.call_count
- self.b.client.rpush.reset_mock()
- assert self.b.client.lrange.call_count
+ assert self.b.client.zadd.call_count
+ self.b.client.zadd.reset_mock()
+ assert self.b.client.zrangebyscore.call_count
jkey = self.b.get_key_for_group('group_id', '.j')
tkey = self.b.get_key_for_group('group_id', '.t')
self.b.client.delete.assert_has_calls([call(jkey), call(tkey)])
@@ -683,9 +683,9 @@ def test_on_chord_part_return_no_expiry(self, restore):
for i in range(10):
self.b.on_chord_part_return(tasks[i].request, states.SUCCESS, i)
- assert self.b.client.rpush.call_count
- self.b.client.rpush.reset_mock()
- assert self.b.client.lrange.call_count
+ assert self.b.client.zadd.call_count
+ self.b.client.zadd.reset_mock()
+ assert self.b.client.zrangebyscore.call_count
jkey = self.b.get_key_for_group('group_id', '.j')
tkey = self.b.get_key_for_group('group_id', '.t')
self.b.client.delete.assert_has_calls([call(jkey), call(tkey)])
@@ -805,7 +805,7 @@ def test_on_chord_part_return__ChordError(self):
with self.chord_context(1) as (_, request, callback):
self.b.client.pipeline = ContextMock()
raise_on_second_call(self.b.client.pipeline, ChordError())
- self.b.client.pipeline.return_value.rpush().llen().get().expire(
+ self.b.client.pipeline.return_value.zadd().zcount().get().expire(
).expire().execute.return_value = (1, 1, 0, 4, 5)
task = self.app._tasks['add'] = Mock(name='add_task')
self.b.on_chord_part_return(request, states.SUCCESS, 10)
@@ -849,7 +849,7 @@ def test_on_chord_part_return__other_error(self):
with self.chord_context(1) as (_, request, callback):
self.b.client.pipeline = ContextMock()
raise_on_second_call(self.b.client.pipeline, RuntimeError())
- self.b.client.pipeline.return_value.rpush().llen().get().expire(
+ self.b.client.pipeline.return_value.zadd().zcount().get().expire(
).expire().execute.return_value = (1, 1, 0, 4, 5)
task = self.app._tasks['add'] = Mock(name='add_task')
self.b.on_chord_part_return(request, states.SUCCESS, 10)
| Make redis use of sorted sets for group results opt-out
Following up on #6245 - can we make the use of sorted sets opt-out for 5.0 as discussed? In particular, this would simply be a change to the default value of the config setting, plus the associated changes to tests and docs merged in #6245.
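For example (a sketch assuming the default flips to sorted sets in 5.0), opting back out would be a one-line setting:

```python
app.conf.result_backend_transport_options = {
    'result_chord_ordered': False,  # restore the pre-5.0 list-based chord counting
}
```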
| ping @thedrow - if you're on board with getting this into 5.0, would you please add it to the milestone? I'll get a PR up for you later today or tomorrow, won't be a complex one. Thanks! | 2020-08-10T00:23:24 |
celery/celery | 6,298 | celery__celery-6298 | [
"6296"
] | ea37db1410c83271e06d78a564983cba3732a1b1 | diff --git a/celery/backends/database/session.py b/celery/backends/database/session.py
--- a/celery/backends/database/session.py
+++ b/celery/backends/database/session.py
@@ -1,14 +1,21 @@
"""SQLAlchemy session."""
+import time
+
from kombu.utils.compat import register_after_fork
from sqlalchemy import create_engine
+from sqlalchemy.exc import DatabaseError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy.pool import NullPool
+from celery.utils.time import get_exponential_backoff_interval
+
ResultModelBase = declarative_base()
__all__ = ('SessionManager',)
+PREPARE_MODELS_MAX_RETRIES = 10
+
def _after_fork_cleanup_session(session):
session._after_fork()
@@ -50,7 +57,25 @@ def create_session(self, dburi, short_lived_sessions=False, **kwargs):
def prepare_models(self, engine):
if not self.prepared:
- ResultModelBase.metadata.create_all(engine)
+ # SQLAlchemy will check if the items exist before trying to
+ # create them, which is a race condition. If it raises an error
+ # in one iteration, the next may pass all the existence checks
+ # and the call will succeed.
+ retries = 0
+ while True:
+ try:
+ ResultModelBase.metadata.create_all(engine)
+ except DatabaseError:
+ if retries < PREPARE_MODELS_MAX_RETRIES:
+ sleep_amount_ms = get_exponential_backoff_interval(
+ 10, retries, 1000, True
+ )
+ time.sleep(sleep_amount_ms / 1000)
+ retries += 1
+ else:
+ raise
+ else:
+ break
self.prepared = True
def session_factory(self, dburi, **kwargs):
| diff --git a/t/unit/backends/test_database.py b/t/unit/backends/test_database.py
--- a/t/unit/backends/test_database.py
+++ b/t/unit/backends/test_database.py
@@ -13,7 +13,8 @@
from celery.backends.database import (DatabaseBackend, retry, session, # noqa
session_cleanup)
from celery.backends.database.models import Task, TaskSet # noqa
-from celery.backends.database.session import SessionManager # noqa
+from celery.backends.database.session import ( # noqa
+ PREPARE_MODELS_MAX_RETRIES, ResultModelBase, SessionManager)
from t import skip # noqa
@@ -398,3 +399,28 @@ def test_coverage_madness(self):
SessionManager()
finally:
session.register_after_fork = prev
+
+ @patch('celery.backends.database.session.create_engine')
+ def test_prepare_models_terminates(self, create_engine):
+ """SessionManager.prepare_models has retry logic because the creation
+ of database tables by multiple workers is racy. This test patches
+ the used method to always raise, so we can verify that it does
+ eventually terminate.
+ """
+ from sqlalchemy.dialects.sqlite import dialect
+ from sqlalchemy.exc import DatabaseError
+
+ sqlite = dialect.dbapi()
+ manager = SessionManager()
+ engine = manager.get_engine('dburi')
+
+ def raise_err(bind):
+ raise DatabaseError("", "", [], sqlite.DatabaseError)
+
+ patch_create_all = patch.object(
+ ResultModelBase.metadata, 'create_all', side_effect=raise_err)
+
+ with pytest.raises(DatabaseError), patch_create_all as mock_create_all:
+ manager.prepare_models(engine)
+
+ assert mock_create_all.call_count == PREPARE_MODELS_MAX_RETRIES + 1
| Database backend race condition during table creation
# Checklist
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [ ] This has already been asked to the [discussion group](https://groups.google.com/forum/#!forum/celery-users) first.
- [x] I have read the relevant section in the
[contribution guide](http://docs.celeryproject.org/en/latest/contributing.html#other-bugs)
on reporting bugs.
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the bug was already fixed in the master branch.
- [x] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
## Mandatory Debugging Information
- [x] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [ ] I have included the contents of ``pip freeze`` in the issue. **N/A**
- [ ] I have included all the versions of all the external dependencies required
to reproduce this bug. **N/A**
## Optional Debugging Information
- [ ] I have tried reproducing the issue on more than one Python version
and/or implementation.
- [ ] I have tried reproducing the issue on more than one message broker and/or
result backend.
- [ ] I have tried reproducing the issue on more than one version of the message
broker and/or result backend.
- [ ] I have tried reproducing the issue on more than one operating system.
- [ ] I have tried reproducing the issue on more than one workers pool.
- [ ] I have tried reproducing the issue with autoscaling, retries,
ETA/Countdown & rate limits disabled.
- [ ] I have tried reproducing the issue after downgrading
and/or upgrading Celery and its dependencies.
## Related Issues and Possible Duplicates
#### Related Issues
- https://github.com/celery/celery/issues/4653
#### Possible Duplicates
- None
## Environment & Settings
**Celery version**: 4.4.7 (cliffs)
<details>
<summary><b><code>celery report</code> Output:</b></summary>
<p>
```
software -> celery:4.4.7 (cliffs) kombu:4.6.11 py:3.8.2
billiard:3.6.3.0 py-amqp:2.6.1
platform -> system:Darwin arch:64bit
kernel version:16.7.0 imp:CPython
loader -> celery.loaders.app.AppLoader
settings -> transport:amqp results:db+postgresql://<redacted>
include: ['redacted']
accept_content: ['redacted-custom']
database_table_names: {
'group': 'celery_group', 'task': 'celery_task'}
result_serializer: 'redacted-custom'
task_serializer: 'redacted-custom'
task_track_started: True
broker_url: 'amqp://<redacted>'
result_backend: 'db+postgresql://<redacted>'
```
</p>
</details>
# Steps to Reproduce
When celery uses a database result backend, the following line can be called multiple times from different processes:
https://github.com/celery/celery/blob/9a6c2923e859b6993227605610255bd632c1ae68/celery/backends/database/session.py#L56
This is a race condition because SQLAlchemy first checks if the tables/sequences exist and then tries to create them. It causes errors like this (at least on PostgreSQL):
```
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/redacted.py", line 168, in _redacted
result = async_result.get()
File "/usr/local/lib/python3.7/site-packages/celery/result.py", line 226, in get
self.maybe_throw(callback=callback)
File "/usr/local/lib/python3.7/site-packages/celery/result.py", line 342, in maybe_throw
self.throw(value, self._to_remote_traceback(tb))
File "/usr/local/lib/python3.7/site-packages/celery/result.py", line 335, in throw
self.on_ready.throw(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/vine/promises.py", line 244, in throw
reraise(type(exc), exc, tb)
File "/usr/local/lib/python3.7/site-packages/vine/five.py", line 195, in reraise
raise value
Exception: <class 'sqlalchemy.exc.IntegrityError'>(('(psycopg2.errors.UniqueViolation) duplicate key value violates unique constraint "pg_type_typname_nsp_index"\nDETAIL: Key (typname, typnamespace)=(taskset_id_sequence, 2200) already exists.\n',))
```
One workaround is to force the table creation ahead of time as was proposed by a user in the issue I linked: https://github.com/celery/celery/issues/4653#issuecomment-400029147.
I think Celery should handle this itself. A possible solution would catch `IntegrityError` and try again until `create_all` succeeds (perhaps with a limited number of retries and some sleeps, unlike this bare snippet):
```python
def prepare_models(self, engine):
from sqlalchemy.exc import IntegrityError
if not self.prepared:
while True:
try:
ResultModelBase.metadata.create_all(engine)
except IntegrityError:
continue
else:
break
self.prepared = True
```
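A bounded variant of that loop, for anyone wanting the retry cap and sleeps mentioned above (same names as the snippet; the constants are illustrative, and the merged fix takes a similar retry-with-backoff approach):
```python
import time

from sqlalchemy.exc import IntegrityError

MAX_RETRIES = 10  # illustrative cap so a persistent failure still surfaces

def prepare_models(self, engine):
    if not self.prepared:
        for attempt in range(MAX_RETRIES + 1):
            try:
                ResultModelBase.metadata.create_all(engine)
            except IntegrityError:
                if attempt == MAX_RETRIES:
                    raise  # a real schema problem - don't loop forever
                time.sleep(0.1 * 2 ** attempt)  # simple exponential backoff
            else:
                break
        self.prepared = True
```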
## Minimally Reproducible Test Case
This example doesn't use celery at all, but shows that calling create_all in multiple processes can cause the error. It's a race condition, so you might need to try it multiple times or play around with the number of processes:
<details>
<p>
Requires a local postgres, and this database must be created:
```
createdb racetest
```
```python
from concurrent.futures import ProcessPoolExecutor, as_completed
from sqlalchemy import Column, Integer, Table, MetaData, create_engine
metadata = MetaData()
tbl1 = Table('tbl1', metadata, Column('id', Integer, primary_key=True))
def create_all(url):
engine = create_engine(url)
metadata.create_all(bind=engine)
def main():
url = 'postgresql:///racetest'
engine = create_engine(url)
# Make sure schema is empty before we start
metadata.drop_all(bind=engine)
with ProcessPoolExecutor(max_workers=50) as executor:
futures = []
for _ in range(50):
future = executor.submit(create_all, url)
futures.append(future)
for fut in as_completed(futures):
fut.result()
if __name__ == '__main__':
main()
```
</p>
</details>
| you are welcome to contribute a fix for this | 2020-08-12T13:26:03 |
celery/celery | 6,330 | celery__celery-6330 | [
"6166",
"6166"
] | 465d26766d6d959e9f871bf3663d3491e4b82883 | diff --git a/celery/backends/redis.py b/celery/backends/redis.py
--- a/celery/backends/redis.py
+++ b/celery/backends/redis.py
@@ -328,6 +328,15 @@ def _params_from_url(self, url, defaults):
connparams.update(query)
return connparams
+ @cached_property
+ def retry_policy(self):
+ retry_policy = super().retry_policy
+ if "retry_policy" in self._transport_options:
+ retry_policy = retry_policy.copy()
+ retry_policy.update(self._transport_options['retry_policy'])
+
+ return retry_policy
+
def on_task_call(self, producer, task_id):
if not task_join_will_block():
self.result_consumer.consume_from(task_id)
@@ -401,10 +410,11 @@ def apply_chord(self, header_result, body, **kwargs):
@cached_property
def _chord_zset(self):
- transport_options = self.app.conf.get(
- 'result_backend_transport_options', {}
- )
- return transport_options.get('result_chord_ordered', True)
+ return self._transport_options.get('result_chord_ordered', True)
+
+ @cached_property
+ def _transport_options(self):
+ return self.app.conf.get('result_backend_transport_options', {})
def on_chord_part_return(self, request, state, result,
propagate=None, **kwargs):
@@ -530,12 +540,8 @@ def _get_sentinel_instance(self, **params):
connparams = params.copy()
hosts = connparams.pop("hosts")
- result_backend_transport_opts = self.app.conf.get(
- "result_backend_transport_options", {})
- min_other_sentinels = result_backend_transport_opts.get(
- "min_other_sentinels", 0)
- sentinel_kwargs = result_backend_transport_opts.get(
- "sentinel_kwargs", {})
+ min_other_sentinels = self._transport_options.get("min_other_sentinels", 0)
+ sentinel_kwargs = self._transport_options.get("sentinel_kwargs", {})
sentinel_instance = self.sentinel.Sentinel(
[(cp['host'], cp['port']) for cp in hosts],
@@ -548,9 +554,7 @@ def _get_sentinel_instance(self, **params):
def _get_pool(self, **params):
sentinel_instance = self._get_sentinel_instance(**params)
- result_backend_transport_opts = self.app.conf.get(
- "result_backend_transport_options", {})
- master_name = result_backend_transport_opts.get("master_name", None)
+ master_name = self._transport_options.get("master_name", None)
return sentinel_instance.master_for(
service_name=master_name,
| diff --git a/t/unit/backends/test_redis.py b/t/unit/backends/test_redis.py
--- a/t/unit/backends/test_redis.py
+++ b/t/unit/backends/test_redis.py
@@ -523,6 +523,29 @@ def test_on_connection_error(self, logger):
assert self.b.on_connection_error(10, exc, intervals, 3) == 30
logger.error.assert_called_with(self.E_LOST, 3, 10, 'in 30.00 seconds')
+ @patch('celery.backends.redis.retry_over_time')
+ def test_retry_policy_conf(self, retry_over_time):
+ self.app.conf.result_backend_transport_options = dict(
+ retry_policy=dict(
+ max_retries=2,
+ interval_start=0,
+ interval_step=0.01,
+ ),
+ )
+ b = self.Backend(app=self.app)
+
+ def fn():
+ return 1
+
+ # We don't want to re-test retry_over_time, just check we called it
+ # with the expected args
+ b.ensure(fn, (),)
+
+ retry_over_time.assert_called_with(
+ fn, b.connection_errors, (), {}, ANY,
+ max_retries=2, interval_start=0, interval_step=0.01, interval_max=1
+ )
+
def test_incr(self):
self.b.client = Mock(name='client')
self.b.incr('foo')
| Expose config option to set retry_policy on (Redis) ResultBackend
# Checklist
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?utf8=%E2%9C%93&q=is%3Aissue+label%3A%22Issue+Type%3A+Feature+Request%22+) for similar or identical feature requests.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?utf8=%E2%9C%93&q=is%3Apr+label%3A%22PR+Type%3A+Feature%22+) for existing proposed implementations of this feature.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/master) to find out if the same feature was already implemented in the master branch.
- [x] I have included all related issues and possible duplicate issues in this issue (If there are none, check this box anyway).
# Brief Summary
In debugging another problem I noticed that the default connection timeout for the Redis result backend is unconfigurable: the timeout is hardcoded at 20s (20 x 1s retries).
This only appears to be an issue for the Redis backend (RPC is the only other backend that appears to do anything in `on_task_call`).
`task.apply_async()` calls these functions
https://github.com/celery/celery/blob/d3863d909759f1fd618f2a1af1766ce54c16d39b/celery/app/base.py#L776-L780
https://github.com/celery/celery/blob/bf6139bf651b20bc04b895a5f6eb8d50320bc252/celery/backends/redis.py#L342-L344
https://github.com/celery/celery/blob/bf6139bf651b20bc04b895a5f6eb8d50320bc252/celery/backends/redis.py#L167-L177
https://github.com/celery/celery/blob/bf6139bf651b20bc04b895a5f6eb8d50320bc252/celery/backends/redis.py#L124-L133
`_ensure` is set via https://github.com/celery/celery/blob/bf6139bf651b20bc04b895a5f6eb8d50320bc252/celery/backends/redis.py#L97
to be https://github.com/celery/celery/blob/bf6139bf651b20bc04b895a5f6eb8d50320bc252/celery/backends/redis.py#L352-L358
But `ensure` is never passed any kwargs, nor is it set anywhere other than in the Backend superclass https://github.com/celery/celery/blob/bf6139bf651b20bc04b895a5f6eb8d50320bc252/celery/backends/base.py#L106-L111
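For reference, the hardcoded default policy in question (as defined on the base backend at the link above, and mirrored verbatim in the AMQP backend) is:
```python
retry_policy = {
    'max_retries': 20,
    'interval_start': 0,
    'interval_step': 1,
    'interval_max': 1,
}
```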
# Design
The "most obvious" way of doing this would be to add a new top-level `result_backend_options` config key that feeds in to the constructor.
| I'll be looking forward to a PR
Will find time to open one.
I'm still not that familiar with the internals of Celery/Kombu so wanted to check this all through first
| 2020-09-02T17:00:06 |
celery/celery | 6,342 | celery__celery-6342 | [
"6341"
] | 05da357502a109c05b35392391299d75d181ccab | diff --git a/celery/canvas.py b/celery/canvas.py
--- a/celery/canvas.py
+++ b/celery/canvas.py
@@ -1047,8 +1047,16 @@ class group(Signature):
@classmethod
def from_dict(cls, d, app=None):
+ # We need to mutate the `kwargs` element in place to avoid confusing
+ # `freeze()` implementations which end up here and expect to be able to
+ # access elements from that dictionary later and refer to objects
+ # canonicalized here
+ orig_tasks = d["kwargs"]["tasks"]
+ d["kwargs"]["tasks"] = rebuilt_tasks = type(orig_tasks)((
+ maybe_signature(task, app=app) for task in orig_tasks
+ ))
return _upgrade(
- d, group(d['kwargs']['tasks'], app=app, **d['options']),
+ d, group(rebuilt_tasks, app=app, **d['options']),
)
def __init__(self, *tasks, **options):
diff --git a/t/integration/tasks.py b/t/integration/tasks.py
--- a/t/integration/tasks.py
+++ b/t/integration/tasks.py
@@ -1,6 +1,6 @@
from time import sleep
-from celery import Task, chain, chord, group, shared_task
+from celery import Signature, Task, chain, chord, group, shared_task
from celery.exceptions import SoftTimeLimitExceeded
from celery.utils.log import get_task_logger
@@ -244,3 +244,67 @@ def run(self):
if self.request.retries:
return self.request.retries
raise ValueError()
+
+
+# The signatures returned by these tasks wouldn't actually run because the
+# arguments wouldn't be fulfilled - we never actually delay them so it's fine
+@shared_task
+def return_nested_signature_chain_chain():
+ return chain(chain([add.s()]))
+
+
+@shared_task
+def return_nested_signature_chain_group():
+ return chain(group([add.s()]))
+
+
+@shared_task
+def return_nested_signature_chain_chord():
+ return chain(chord([add.s()], add.s()))
+
+
+@shared_task
+def return_nested_signature_group_chain():
+ return group(chain([add.s()]))
+
+
+@shared_task
+def return_nested_signature_group_group():
+ return group(group([add.s()]))
+
+
+@shared_task
+def return_nested_signature_group_chord():
+ return group(chord([add.s()], add.s()))
+
+
+@shared_task
+def return_nested_signature_chord_chain():
+ return chord(chain([add.s()]), add.s())
+
+
+@shared_task
+def return_nested_signature_chord_group():
+ return chord(group([add.s()]), add.s())
+
+
+@shared_task
+def return_nested_signature_chord_chord():
+ return chord(chord([add.s()], add.s()), add.s())
+
+
+@shared_task
+def rebuild_signature(sig_dict):
+ sig_obj = Signature.from_dict(sig_dict)
+
+ def _recurse(sig):
+ if not isinstance(sig, Signature):
+ raise TypeError("{!r} is not a signature object".format(sig))
+ # Most canvas types have a `tasks` attribute
+ if isinstance(sig, (chain, group, chord)):
+ for task in sig.tasks:
+ _recurse(task)
+ # `chord`s also have a `body` attribute
+ if isinstance(sig, chord):
+ _recurse(sig.body)
+ _recurse(sig_obj)
| diff --git a/requirements/test.txt b/requirements/test.txt
--- a/requirements/test.txt
+++ b/requirements/test.txt
@@ -1,6 +1,7 @@
case>=1.3.1
pytest~=6.0
pytest-celery
+pytest-subtests
pytest-timeout~=1.4.2
boto3>=1.9.178
moto==1.3.7
diff --git a/t/integration/test_canvas.py b/t/integration/test_canvas.py
--- a/t/integration/test_canvas.py
+++ b/t/integration/test_canvas.py
@@ -9,6 +9,7 @@
from celery.exceptions import TimeoutError
from celery.result import AsyncResult, GroupResult, ResultSet
+from . import tasks
from .conftest import get_active_redis_channels, get_redis_connection
from .tasks import (ExpectedException, add, add_chord_to_chord, add_replaced,
add_to_all, add_to_all_to_chord, build_chain_inside_task,
@@ -1095,3 +1096,101 @@ def test_nested_chord_group_chain_group_tail(self, manager):
)
res = sig.delay()
assert res.get(timeout=TIMEOUT) == [[42, 42]]
+
+
+class test_signature_serialization:
+ """
+ Confirm nested signatures can be rebuilt after passing through a backend.
+
+ These tests are expected to finish and return `None` or raise an exception
+ in the error case. The exception indicates that some element of a nested
+ signature object was not properly deserialized from its dictionary
+ representation, and would explode later on if it were used as a signature.
+ """
+ def test_rebuild_nested_chain_chain(self, manager):
+ sig = chain(
+ tasks.return_nested_signature_chain_chain.s(),
+ tasks.rebuild_signature.s()
+ )
+ sig.delay().get(timeout=TIMEOUT)
+
+ def test_rebuild_nested_chain_group(self, manager):
+ sig = chain(
+ tasks.return_nested_signature_chain_group.s(),
+ tasks.rebuild_signature.s()
+ )
+ sig.delay().get(timeout=TIMEOUT)
+
+ def test_rebuild_nested_chain_chord(self, manager):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ sig = chain(
+ tasks.return_nested_signature_chain_chord.s(),
+ tasks.rebuild_signature.s()
+ )
+ sig.delay().get(timeout=TIMEOUT)
+
+ def test_rebuild_nested_group_chain(self, manager):
+ sig = chain(
+ tasks.return_nested_signature_group_chain.s(),
+ tasks.rebuild_signature.s()
+ )
+ sig.delay().get(timeout=TIMEOUT)
+
+ def test_rebuild_nested_group_group(self, manager):
+ sig = chain(
+ tasks.return_nested_signature_group_group.s(),
+ tasks.rebuild_signature.s()
+ )
+ sig.delay().get(timeout=TIMEOUT)
+
+ def test_rebuild_nested_group_chord(self, manager):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ sig = chain(
+ tasks.return_nested_signature_group_chord.s(),
+ tasks.rebuild_signature.s()
+ )
+ sig.delay().get(timeout=TIMEOUT)
+
+ def test_rebuild_nested_chord_chain(self, manager):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ sig = chain(
+ tasks.return_nested_signature_chord_chain.s(),
+ tasks.rebuild_signature.s()
+ )
+ sig.delay().get(timeout=TIMEOUT)
+
+ def test_rebuild_nested_chord_group(self, manager):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ sig = chain(
+ tasks.return_nested_signature_chord_group.s(),
+ tasks.rebuild_signature.s()
+ )
+ sig.delay().get(timeout=TIMEOUT)
+
+ def test_rebuild_nested_chord_chord(self, manager):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ sig = chain(
+ tasks.return_nested_signature_chord_chord.s(),
+ tasks.rebuild_signature.s()
+ )
+ sig.delay().get(timeout=TIMEOUT)
diff --git a/t/unit/tasks/test_canvas.py b/t/unit/tasks/test_canvas.py
--- a/t/unit/tasks/test_canvas.py
+++ b/t/unit/tasks/test_canvas.py
@@ -2,6 +2,7 @@
from unittest.mock import MagicMock, Mock, call, patch, sentinel
import pytest
+import pytest_subtests # noqa: F401
from celery._state import _task_stack
from celery.canvas import (Signature, _chain, _maybe_group, chain, chord,
@@ -326,13 +327,9 @@ def test_from_dict_no_tasks(self):
def test_from_dict_full_subtasks(self):
c = chain(self.add.si(1, 2), self.add.si(3, 4), self.add.si(5, 6))
-
serialized = json.loads(json.dumps(c))
-
deserialized = chain.from_dict(serialized)
-
- for task in deserialized.tasks:
- assert isinstance(task, Signature)
+ assert all(isinstance(task, Signature) for task in deserialized.tasks)
@pytest.mark.usefixtures('depends_on_current_app')
def test_app_falls_back_to_default(self):
@@ -345,9 +342,8 @@ def test_handles_dicts(self):
)
c.freeze()
tasks, _ = c._frozen
- for task in tasks:
- assert isinstance(task, Signature)
- assert task.app is self.app
+ assert all(isinstance(task, Signature) for task in tasks)
+ assert all(task.app is self.app for task in tasks)
def test_groups_in_chain_to_chord(self):
g1 = group([self.add.s(2, 2), self.add.s(4, 4)])
@@ -698,6 +694,30 @@ def test_from_dict(self):
x['args'] = None
assert group.from_dict(dict(x))
+ def test_from_dict_deep_deserialize(self):
+ original_group = group([self.add.s(1, 2)] * 42)
+ serialized_group = json.loads(json.dumps(original_group))
+ deserialized_group = group.from_dict(serialized_group)
+ assert all(
+ isinstance(child_task, Signature)
+ for child_task in deserialized_group.tasks
+ )
+
+ def test_from_dict_deeper_deserialize(self):
+ inner_group = group([self.add.s(1, 2)] * 42)
+ outer_group = group([inner_group] * 42)
+ serialized_group = json.loads(json.dumps(outer_group))
+ deserialized_group = group.from_dict(serialized_group)
+ assert all(
+ isinstance(child_task, Signature)
+ for child_task in deserialized_group.tasks
+ )
+ assert all(
+ isinstance(grandchild_task, Signature)
+ for child_task in deserialized_group.tasks
+ for grandchild_task in child_task.tasks
+ )
+
def test_call_empty_group(self):
x = group(app=self.app)
assert not len(x())
@@ -1005,22 +1025,35 @@ def test_repr(self):
x.kwargs['body'] = None
assert 'without body' in repr(x)
- def test_freeze_tasks_body_is_group(self):
- # Confirm that `group index` is passed from a chord to elements of its
- # body when the chord itself is encapsulated in a group
+ def test_freeze_tasks_body_is_group(self, subtests):
+ # Confirm that `group index` values counting up from 0 are set for
+ # elements of a chord's body when the chord is encapsulated in a group
body_elem = self.add.s()
- chord_body = group([body_elem])
+ chord_body = group([body_elem] * 42)
chord_obj = chord(self.add.s(), body=chord_body)
top_group = group([chord_obj])
# We expect the body to be the signature we passed in before we freeze
- (embedded_body_elem, ) = chord_obj.body.tasks
- assert embedded_body_elem is body_elem
- assert embedded_body_elem.options == dict()
- # When we freeze the chord, its body will be clones and options set
+ with subtests.test(msg="Validate body tasks are retained"):
+ assert all(
+ embedded_body_elem is body_elem
+ for embedded_body_elem in chord_obj.body.tasks
+ )
+ # We also expect the body to have no initial options - since all of the
+ # embedded body elements are confirmed to be `body_elem` this is valid
+ assert body_elem.options == {}
+ # When we freeze the chord, its body will be cloned and options set
top_group.freeze()
- (embedded_body_elem, ) = chord_obj.body.tasks
- assert embedded_body_elem is not body_elem
- assert embedded_body_elem.options["group_index"] == 0 # 0th task
+ with subtests.test(
+ msg="Validate body group indicies count from 0 after freezing"
+ ):
+ assert all(
+ embedded_body_elem is not body_elem
+ for embedded_body_elem in chord_obj.body.tasks
+ )
+ assert all(
+ embedded_body_elem.options["group_index"] == i
+ for i, embedded_body_elem in enumerate(chord_obj.body.tasks)
+ )
def test_freeze_tasks_is_not_group(self):
x = chord([self.add.s(2, 2)], body=self.add.s(), app=self.app)
@@ -1050,6 +1083,117 @@ def chord_add():
_state.task_join_will_block = fixture_task_join_will_block
result.task_join_will_block = fixture_task_join_will_block
+ def test_from_dict(self):
+ header = self.add.s(1, 2)
+ original_chord = chord(header=header)
+ rebuilt_chord = chord.from_dict(dict(original_chord))
+ assert isinstance(rebuilt_chord, chord)
+
+ def test_from_dict_with_body(self):
+ header = body = self.add.s(1, 2)
+ original_chord = chord(header=header, body=body)
+ rebuilt_chord = chord.from_dict(dict(original_chord))
+ assert isinstance(rebuilt_chord, chord)
+
+ def test_from_dict_deep_deserialize(self, subtests):
+ header = body = self.add.s(1, 2)
+ original_chord = chord(header=header, body=body)
+ serialized_chord = json.loads(json.dumps(original_chord))
+ deserialized_chord = chord.from_dict(serialized_chord)
+ with subtests.test(msg="Verify chord is deserialized"):
+ assert isinstance(deserialized_chord, chord)
+ with subtests.test(msg="Validate chord header tasks is deserialized"):
+ assert all(
+ isinstance(child_task, Signature)
+ for child_task in deserialized_chord.tasks
+ )
+ with subtests.test(msg="Verify chord body is deserialized"):
+ assert isinstance(deserialized_chord.body, Signature)
+
+ def test_from_dict_deep_deserialize_group(self, subtests):
+ header = body = group([self.add.s(1, 2)] * 42)
+ original_chord = chord(header=header, body=body)
+ serialized_chord = json.loads(json.dumps(original_chord))
+ deserialized_chord = chord.from_dict(serialized_chord)
+ with subtests.test(msg="Verify chord is deserialized"):
+ assert isinstance(deserialized_chord, chord)
+ # A header which is a group gets unpacked into the chord's `tasks`
+ with subtests.test(
+ msg="Validate chord header tasks are deserialized and unpacked"
+ ):
+ assert all(
+ isinstance(child_task, Signature)
+ and not isinstance(child_task, group)
+ for child_task in deserialized_chord.tasks
+ )
+ # A body which is a group remains as it we passed in
+ with subtests.test(
+ msg="Validate chord body is deserialized and not unpacked"
+ ):
+ assert isinstance(deserialized_chord.body, group)
+ assert all(
+ isinstance(body_child_task, Signature)
+ for body_child_task in deserialized_chord.body.tasks
+ )
+
+ def test_from_dict_deeper_deserialize_group(self, subtests):
+ inner_group = group([self.add.s(1, 2)] * 42)
+ header = body = group([inner_group] * 42)
+ original_chord = chord(header=header, body=body)
+ serialized_chord = json.loads(json.dumps(original_chord))
+ deserialized_chord = chord.from_dict(serialized_chord)
+ with subtests.test(msg="Verify chord is deserialized"):
+ assert isinstance(deserialized_chord, chord)
+ # A header which is a group gets unpacked into the chord's `tasks`
+ with subtests.test(
+ msg="Validate chord header tasks are deserialized and unpacked"
+ ):
+ assert all(
+ isinstance(child_task, group)
+ for child_task in deserialized_chord.tasks
+ )
+ assert all(
+ isinstance(grandchild_task, Signature)
+ for child_task in deserialized_chord.tasks
+ for grandchild_task in child_task.tasks
+ )
+ # A body which is a group remains as it we passed in
+ with subtests.test(
+ msg="Validate chord body is deserialized and not unpacked"
+ ):
+ assert isinstance(deserialized_chord.body, group)
+ assert all(
+ isinstance(body_child_task, group)
+ for body_child_task in deserialized_chord.body.tasks
+ )
+ assert all(
+ isinstance(body_grandchild_task, Signature)
+ for body_child_task in deserialized_chord.body.tasks
+ for body_grandchild_task in body_child_task.tasks
+ )
+
+ def test_from_dict_deep_deserialize_chain(self, subtests):
+ header = body = chain([self.add.s(1, 2)] * 42)
+ original_chord = chord(header=header, body=body)
+ serialized_chord = json.loads(json.dumps(original_chord))
+ deserialized_chord = chord.from_dict(serialized_chord)
+ with subtests.test(msg="Verify chord is deserialized"):
+ assert isinstance(deserialized_chord, chord)
+ # A header which is a chain gets unpacked into the chord's `tasks`
+ with subtests.test(
+ msg="Validate chord header tasks are deserialized and unpacked"
+ ):
+ assert all(
+ isinstance(child_task, Signature)
+ and not isinstance(child_task, chain)
+ for child_task in deserialized_chord.tasks
+ )
+ # A body which is a chain gets mutatated into the hidden `_chain` class
+ with subtests.test(
+ msg="Validate chord body is deserialized and not unpacked"
+ ):
+ assert isinstance(deserialized_chord.body, _chain)
+
class test_maybe_signature(CanvasCase):
| Chords contained in a group raise `AttributeError`s during freezing if passed through a backend
When calling `self.replace()` with a signature reconstructed from a serialized dictionary (e.g. after having been passed through a backend), if that signature is (or contains) a group which itself contains a chord, celery explodes after attempting to treat deeply nested dictionaries as signature objects. See below for a minimal repro I got together today.
My gut feel is that instantiating a signature from a dictionary may not be recursing down through the structure far enough and it leaves some of the encapsulated tasks as dicts. I've also noticed that groups containing a group also break in the same way, but I think that's because there's an internal promotion to a chord happening somewhere.
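A sketch of that shallow deserialization (assuming some registered task `add`; before the fix the inner element comes back as a plain dict rather than a `Signature`):
```python
import json

from celery import Signature, group

sig = group([group([add.s(1, 2)])])  # nested canvas structure
wire = json.loads(json.dumps(sig))   # round-trip, as a result backend would
rebuilt = Signature.from_dict(wire)

print(type(rebuilt))                 # celery.canvas.group
print(type(rebuilt.tasks[0]))        # dict (pre-fix) -> AttributeError later
```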
# Checklist
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [ ] This has already been asked to the [discussion group](https://groups.google.com/forum/#!forum/celery-users) first.
- [x] I have read the relevant section in the
[contribution guide](http://docs.celeryproject.org/en/latest/contributing.html#other-bugs)
on reporting bugs.
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the bug was already fixed in the master branch.
- [x] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
## Mandatory Debugging Information
- [ ] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
- [ ] I have verified that the issue exists against the `master` branch of Celery.
- [ ] I have included the contents of ``pip freeze`` in the issue.
- [ ] I have included all the versions of all the external dependencies required
to reproduce this bug.
## Optional Debugging Information
- [ ] I have tried reproducing the issue on more than one Python version
and/or implementation.
- [ ] I have tried reproducing the issue on more than one message broker and/or
result backend.
- [ ] I have tried reproducing the issue on more than one version of the message
broker and/or result backend.
- [ ] I have tried reproducing the issue on more than one operating system.
- [ ] I have tried reproducing the issue on more than one workers pool.
- [ ] I have tried reproducing the issue with autoscaling, retries,
ETA/Countdown & rate limits disabled.
- [ ] I have tried reproducing the issue after downgrading
and/or upgrading Celery and its dependencies.
## Related Issues and Possible Duplicates
#### Related Issues
- #4015
#### Possible Duplicates
- None
## Environment & Settings
**Celery version**:
<details>
<summary><b><code>celery report</code> Output:</b></summary>
<p>
```
```
</p>
</details>
# Steps to Reproduce
## Required Dependencies
* **Minimal Python Version**: N/A or Unknown
* **Minimal Celery Version**: N/A or Unknown
* **Minimal Kombu Version**: N/A or Unknown
* **Minimal Broker Version**: N/A or Unknown
* **Minimal Result Backend Version**: N/A or Unknown
* **Minimal OS and/or Kernel Version**: N/A or Unknown
* **Minimal Broker Client Version**: N/A or Unknown
* **Minimal Result Backend Client Version**: N/A or Unknown
### Python Packages
<details>
<summary><b><code>pip freeze</code> Output:</b></summary>
<p>
```
amqp==5.0.1
billiard==3.6.3.0
# Editable install with no version control (celery==5.0.0rc3)
-e /home/maybe/tmp/capp/venv/lib/python3.8/site-packages
click==7.1.2
click-didyoumean==0.0.3
click-repl==0.1.6
future==0.18.2
kombu==5.0.2
prompt-toolkit==3.0.7
pytz==2020.1
redis==3.5.3
six==1.15.0
vine==5.0.0
wcwidth==0.2.5
```
</p>
</details>
### Other Dependencies
<details>
<p>
N/A
</p>
</details>
## Minimally Reproducible Test Case
<details>
<p>
```python
import celery
app = celery.Celery("app", backend="redis://")
@app.task
def foo(*_):
return 42
@app.task(bind=True)
def replace_with(self, sig):
assert isinstance(sig, dict)
sig = celery.Signature.from_dict(sig)
raise self.replace(sig)
if __name__ == "__main__":
sig = celery.group(
celery.group(foo.s()),
)
res = sig.delay()
print(res.get())
sig.freeze()
res = replace_with.delay(sig)
print(res.get())
```
</p>
</details>
# Expected Behavior
It shouldn't explode. Presumably tasks within the group/chord should be signatures rather than dicts.
# Actual Behavior
Stack trace in the worker output:
```
[2020-09-08 12:44:05,453: DEBUG/MainProcess] Task accepted: app.replace_with[dcea02fd-23a3-404a-9fdd-b213eb51c0d1] pid:453431
[2020-09-08 12:44:05,457: ERROR/ForkPoolWorker-8] Task app.replace_with[dcea02fd-23a3-404a-9fdd-b213eb51c0d1] raised unexpected: AttributeError("'dict' object has no attribute '_app'")
Traceback (most recent call last):
File "/home/maybe/tmp/capp/venv/lib64/python3.8/site-packages/kombu/utils/objects.py", line 41, in __get__
return obj.__dict__[self.__name__]
KeyError: 'app'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/maybe/tmp/capp/venv/lib64/python3.8/site-packages/celery/app/trace.py", line 409, in trace_task
R = retval = fun(*args, **kwargs)
File "/home/maybe/tmp/capp/venv/lib64/python3.8/site-packages/celery/app/trace.py", line 701, in __protected_call__
return self.run(*args, **kwargs)
File "/home/maybe/tmp/capp/app.py", line 13, in replace_with
raise self.replace(sig)
File "/home/maybe/tmp/capp/venv/lib64/python3.8/site-packages/celery/app/task.py", line 894, in replace
sig.freeze(self.request.id)
File "/home/maybe/tmp/capp/venv/lib64/python3.8/site-packages/celery/canvas.py", line 1302, in freeze
self.tasks = group(self.tasks, app=self.app)
File "/home/maybe/tmp/capp/venv/lib64/python3.8/site-packages/kombu/utils/objects.py", line 43, in __get__
value = obj.__dict__[self.__name__] = self.__get(obj)
File "/home/maybe/tmp/capp/venv/lib64/python3.8/site-packages/celery/canvas.py", line 1456, in app
return self._get_app(self.body)
File "/home/maybe/tmp/capp/venv/lib64/python3.8/site-packages/celery/canvas.py", line 1466, in _get_app
app = tasks[0]._app
AttributeError: 'dict' object has no attribute '_app'
```
| This patch seems to fix the issue for the test case, and the idea might just need to be replicated to other signature types that contain other signatures (e.g. chords, chains):
```patch
*** celery/canvas.py 2020-09-08 12:03:57.627712549 +1000
--- venv/lib/python3.8/site-packages/celery/canvas.py 2020-09-08 12:58:24.277149667 +1000
***************
*** 1022,1029 ****
@classmethod
def from_dict(cls, d, app=None):
return _upgrade(
! d, group(d['kwargs']['tasks'], app=app, **d['options']),
)
def __init__(self, *tasks, **options):
--- 1022,1033 ----
@classmethod
def from_dict(cls, d, app=None):
+ tasks = (
+ Signature.from_dict(dict(t))
+ for t in d['kwargs']['tasks']
+ )
return _upgrade(
! d, group(tasks, app=app, **d['options']),
)
def __init__(self, *tasks, **options):
***************
```
Edit: In fact, it looks like this issue may have already been fixed for `chain`s
https://github.com/celery/celery/blob/465d26766d6d959e9f871bf3663d3491e4b82883/celery/canvas.py#L583-L590
This is the commit which fixed the same issue for chains: 230b1ff7cb59b720464ffa9cc76fb19b4366d775 and #4015 extended the original fix to deserialize all tasks in a chain. | 2020-09-08T03:29:53 |
celery/celery | 6,357 | celery__celery-6357 | [
"5144"
] | 5a0c45857640f2415567736ad7ad2b7ae69e1304 | diff --git a/celery/app/log.py b/celery/app/log.py
--- a/celery/app/log.py
+++ b/celery/app/log.py
@@ -221,7 +221,7 @@ def _detect_handler(self, logfile=None):
logfile = sys.__stderr__ if logfile is None else logfile
if hasattr(logfile, 'write'):
return logging.StreamHandler(logfile)
- return WatchedFileHandler(logfile)
+ return WatchedFileHandler(logfile, encoding='utf-8')
def _has_handler(self, logger):
return any(
| Logger set to 'ascii' instead of 'utf-8': UnicodeDecodeError 'ascii'
## Checklist
Report:
```
software -> celery:4.2.1 (windowlicker) kombu:4.2.1 py:3.6.5
billiard:3.5.0.4 py-amqp:2.3.2
platform -> system:Linux arch:64bit imp:CPython
loader -> celery.loaders.app.AppLoader
settings -> transport:amqp results:indicore.celery_app:Backend
broker_url: 'amqp://indico:********@rabbitmq:5672//'
task_serializer: 'msgpack-numpy'
result_serializer: 'msgpack-numpy'
enable_utc: True
worker_send_task_events: True
result_expires: 86400
task_always_eager: False
accept_content: ['application/x-msgpack']
result_backend: 'indicore.celery_app:Backend'
redis_port: 6379
redis_host: 'celery-redis'
redis_max_connections: 1000
broker_transport_options: {
'confirm_publish': True}
broker_heartbeat: 20
broker_connection_max_retries: None
task_queue_ha_policy: 'all'
```
## Steps to reproduce
Celery is logging the success result of a task that includes characters outside ascii encoding.
## Expected behavior
I expect the celery logger to use 'utf-8' encoding rather than ascii. I haven't touched the celery logging, nor do I have a python logging setup separately at the moment. I am using Python3.
## Actual behavior
I receive the following traceback:
```
[2018-10-24 15:35:00,541: WARNING/ForkPoolWorker-7] --- Logging error ---
[2018-10-24 15:35:00,541: WARNING/ForkPoolWorker-7] Traceback (most recent call last):
[2018-10-24 15:35:00,541: WARNING/ForkPoolWorker-7] File "/usr/lib/python3.6/logging/__init__.py", line 994, in emit
stream.write(msg)
[2018-10-24 15:35:00,541: WARNING/ForkPoolWorker-7] UnicodeEncodeError: 'ascii' codec can't encode characters in position 923-928: ordinal not in range(128)
[2018-10-24 15:35:00,541: WARNING/ForkPoolWorker-7] Call stack:
[2018-10-24 15:35:00,542: WARNING/ForkPoolWorker-7] File "/usr/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
[2018-10-24 15:35:00,542: WARNING/ForkPoolWorker-7] File "/usr/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
[2018-10-24 15:35:00,542: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/celery/__main__.py", line 20, in <module>
main()
[2018-10-24 15:35:00,542: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/celery/__main__.py", line 16, in main
_main()
[2018-10-24 15:35:00,542: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/celery/bin/celery.py", line 322, in main
cmd.execute_from_commandline(argv)
[2018-10-24 15:35:00,542: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/celery/bin/celery.py", line 496, in execute_from_commandline
super(CeleryCommand, self).execute_from_commandline(argv)))
[2018-10-24 15:35:00,542: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/celery/bin/base.py", line 275, in execute_from_commandline
return self.handle_argv(self.prog_name, argv[1:])
[2018-10-24 15:35:00,542: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/celery/bin/celery.py", line 488, in handle_argv
return self.execute(command, argv)
[2018-10-24 15:35:00,542: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/celery/bin/celery.py", line 420, in execute
).run_from_argv(self.prog_name, argv[1:], command=argv[0])
[2018-10-24 15:35:00,542: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/celery/bin/worker.py", line 223, in run_from_argv
return self(*args, **options)
[2018-10-24 15:35:00,543: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/celery/bin/base.py", line 238, in __call__
ret = self.run(*args, **kwargs)
[2018-10-24 15:35:00,543: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/celery/bin/worker.py", line 258, in run
worker.start()
[2018-10-24 15:35:00,543: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/celery/worker/worker.py", line 205, in start
self.blueprint.start(self)
[2018-10-24 15:35:00,543: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/celery/bootsteps.py", line 119, in start
step.start(parent)
[2018-10-24 15:35:00,543: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/celery/bootsteps.py", line 369, in start
return self.obj.start()
[2018-10-24 15:35:00,543: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/celery/concurrency/base.py", line 131, in start
self.on_start()
[2018-10-24 15:35:00,543: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/celery/concurrency/prefork.py", line 112, in on_start
**self.options)
[2018-10-24 15:35:00,543: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/celery/concurrency/asynpool.py", line 432, in __init__
super(AsynPool, self).__init__(processes, *args, **kwargs)
[2018-10-24 15:35:00,543: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/billiard/pool.py", line 1007, in __init__
self._create_worker_process(i)
[2018-10-24 15:35:00,543: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/celery/concurrency/asynpool.py", line 449, in _create_worker_process
return super(AsynPool, self)._create_worker_process(i)
[2018-10-24 15:35:00,543: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/billiard/pool.py", line 1116, in _create_worker_process
w.start()
[2018-10-24 15:35:00,543: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/billiard/process.py", line 124, in start
self._popen = self._Popen(self)
[2018-10-24 15:35:00,543: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/billiard/context.py", line 333, in _Popen
return Popen(process_obj)
[2018-10-24 15:35:00,543: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/billiard/popen_fork.py", line 24, in __init__
self._launch(process_obj)
[2018-10-24 15:35:00,543: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/billiard/popen_fork.py", line 79, in _launch
code = process_obj._bootstrap()
[2018-10-24 15:35:00,543: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/billiard/process.py", line 327, in _bootstrap
self.run()
[2018-10-24 15:35:00,543: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/billiard/process.py", line 114, in run
self._target(*self._args, **self._kwargs)
[2018-10-24 15:35:00,543: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/billiard/pool.py", line 289, in __call__
sys.exit(self.workloop(pid=pid))
[2018-10-24 15:35:00,544: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/billiard/pool.py", line 358, in workloop
result = (True, prepare_result(fun(*args, **kwargs)))
[2018-10-24 15:35:00,544: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/celery/app/trace.py", line 549, in _fast_trace_task
uuid, args, kwargs, request,
[2018-10-24 15:35:00,544: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/celery/app/trace.py", line 458, in trace_task
'runtime': T,
[2018-10-24 15:35:00,544: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/celery/app/trace.py", line 124, in info
logger.info(fmt, context, extra={'data': context})
```
# Additional Info
- I ran `sys.getdefaultencoding()` just before `celery/app/trace.py` line 124 and the value is "utf-8" as I expected.
- I do have LANG set up properly on the machine
- the machine's default locale is also set the same
- I also added LANG in front of the celery bin in celeryd.
- I can manually print `context` once I've decoded it as "utf-8", which is successfully redirected to the celery logger from what I can see.
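For what it's worth, the handler-level behaviour can be demonstrated outside Celery (log path illustrative); pinning the encoding is what the eventual one-line fix above does:
```python
import logging
from logging.handlers import WatchedFileHandler

logger = logging.getLogger("demo")
# Without an explicit encoding the handler falls back to the locale's
# preferred encoding, which may resolve to ASCII; utf-8 avoids that.
logger.addHandler(WatchedFileHandler("/tmp/demo.log", encoding="utf-8"))
logger.warning("테스트")  # safe regardless of LANG
```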
| I've done some more debugging and found that `context` becomes a string here. Msgpack serializes my return value as bytes, but somewhere along the way to the logger, the bytes seem to be cast to a string. It looks something like `"b'.. bytes...'"`
At the moment, I'm working around this by patching the `info` method in `celery/app/trace.py` with the following:
```python3
import copy

# orig_info refers to the original celery.app.trace.info being wrapped
def info(fmt, context):
"""Log 'fmt % context' with severity 'INFO'.
'context' is also passed in extra with key 'data' for custom handlers.
"""
if isinstance(context["return_value"], str):
context_copy = copy.deepcopy(context)
context_copy["return_value"] = context_copy["return_value"].encode("utf-8")
return orig_info(fmt, context_copy)
return orig_info(fmt, context)
```
There must be a way to resolve this issue that I am unaware of. I would love guidance on where I should look. Thank you!
Do you happen to have a test case for reproducing this issue?
2 years too late but I'm currently suffering with this issue so, here is a test case.
```
from celery import shared_task
from celery.utils.log import get_task_logger
logger = get_task_logger(__name__)
@shared_task
def test():
logger.debug("테스트")
```
We test this in our integration suite.
Does this always reproduce or only when writing to a file? | 2020-09-21T10:36:52 |
|
celery/celery | 6,360 | celery__celery-6360 | [
"6356"
] | 802ead0379767c1032e441b6c6275db263939963 | diff --git a/celery/backends/amqp.py b/celery/backends/amqp.py
deleted file mode 100644
--- a/celery/backends/amqp.py
+++ /dev/null
@@ -1,322 +0,0 @@
-"""The old AMQP result backend, deprecated and replaced by the RPC backend."""
-import socket
-import time
-from collections import deque
-from operator import itemgetter
-
-from kombu import Consumer, Exchange, Producer, Queue
-
-from celery import states
-from celery.exceptions import TimeoutError
-from celery.utils import deprecated
-from celery.utils.log import get_logger
-
-from .base import BaseBackend
-
-__all__ = ('BacklogLimitExceeded', 'AMQPBackend')
-
-logger = get_logger(__name__)
-
-
-class BacklogLimitExceeded(Exception):
- """Too much state history to fast-forward."""
-
-
-def repair_uuid(s):
- # Historically the dashes in UUIDS are removed from AMQ entity names,
- # but there's no known reason to. Hopefully we'll be able to fix
- # this in v4.0.
- return '{}-{}-{}-{}-{}'.format(s[:8], s[8:12], s[12:16], s[16:20], s[20:])
-
-
-class NoCacheQueue(Queue):
- can_cache_declaration = False
-
-
-class AMQPBackend(BaseBackend):
- """The AMQP result backend.
-
- Deprecated: Please use the RPC backend or a persistent backend.
- """
-
- Exchange = Exchange
- Queue = NoCacheQueue
- Consumer = Consumer
- Producer = Producer
-
- BacklogLimitExceeded = BacklogLimitExceeded
-
- persistent = True
- supports_autoexpire = True
- supports_native_join = True
-
- retry_policy = {
- 'max_retries': 20,
- 'interval_start': 0,
- 'interval_step': 1,
- 'interval_max': 1,
- }
-
- def __init__(self, app, connection=None, exchange=None, exchange_type=None,
- persistent=None, serializer=None, auto_delete=True, **kwargs):
- deprecated.warn(
- 'The AMQP result backend', deprecation='4.0', removal='5.0',
- alternative='Please use RPC backend or a persistent backend.')
- super().__init__(app, **kwargs)
- conf = self.app.conf
- self._connection = connection
- self.persistent = self.prepare_persistent(persistent)
- self.delivery_mode = 2 if self.persistent else 1
- exchange = exchange or conf.result_exchange
- exchange_type = exchange_type or conf.result_exchange_type
- self.exchange = self._create_exchange(
- exchange, exchange_type, self.delivery_mode,
- )
- self.serializer = serializer or conf.result_serializer
- self.auto_delete = auto_delete
-
- def _create_exchange(self, name, type='direct', delivery_mode=2):
- return self.Exchange(name=name,
- type=type,
- delivery_mode=delivery_mode,
- durable=self.persistent,
- auto_delete=False)
-
- def _create_binding(self, task_id):
- name = self.rkey(task_id)
- return self.Queue(
- name=name,
- exchange=self.exchange,
- routing_key=name,
- durable=self.persistent,
- auto_delete=self.auto_delete,
- expires=self.expires,
- )
-
- def revive(self, channel):
- pass
-
- def rkey(self, task_id):
- return task_id.replace('-', '')
-
- def destination_for(self, task_id, request):
- if request:
- return self.rkey(task_id), request.correlation_id or task_id
- return self.rkey(task_id), task_id
-
- def store_result(self, task_id, result, state,
- traceback=None, request=None, **kwargs):
- """Send task return value and state."""
- routing_key, correlation_id = self.destination_for(task_id, request)
- if not routing_key:
- return
-
- payload = {'task_id': task_id, 'status': state,
- 'result': self.encode_result(result, state),
- 'traceback': traceback,
- 'children': self.current_task_children(request)}
- if self.app.conf.find_value_for_key('extended', 'result'):
- payload['name'] = getattr(request, 'task_name', None)
- payload['args'] = getattr(request, 'args', None)
- payload['kwargs'] = getattr(request, 'kwargs', None)
- payload['worker'] = getattr(request, 'hostname', None)
- payload['retries'] = getattr(request, 'retries', None)
- payload['queue'] = request.delivery_info.get('routing_key')\
- if hasattr(request, 'delivery_info') \
- and request.delivery_info else None
-
- with self.app.amqp.producer_pool.acquire(block=True) as producer:
- producer.publish(
- payload,
- exchange=self.exchange,
- routing_key=routing_key,
- correlation_id=correlation_id,
- serializer=self.serializer,
- retry=True, retry_policy=self.retry_policy,
- declare=self.on_reply_declare(task_id),
- delivery_mode=self.delivery_mode,
- )
-
- def on_reply_declare(self, task_id):
- return [self._create_binding(task_id)]
-
- def wait_for(self, task_id, timeout=None, cache=True,
- no_ack=True, on_interval=None,
- READY_STATES=states.READY_STATES,
- PROPAGATE_STATES=states.PROPAGATE_STATES,
- **kwargs):
- cached_meta = self._cache.get(task_id)
- if cache and cached_meta and \
- cached_meta['status'] in READY_STATES:
- return cached_meta
- try:
- return self.consume(task_id, timeout=timeout, no_ack=no_ack,
- on_interval=on_interval)
- except socket.timeout:
- raise TimeoutError('The operation timed out.')
-
- def get_task_meta(self, task_id, backlog_limit=1000):
- # Polling and using basic_get
- with self.app.pool.acquire_channel(block=True) as (_, channel):
- binding = self._create_binding(task_id)(channel)
- binding.declare()
-
- prev = latest = acc = None
- for i in range(backlog_limit): # spool ffwd
- acc = binding.get(
- accept=self.accept, no_ack=False,
- )
- if not acc: # no more messages
- break
- if acc.payload['task_id'] == task_id:
- prev, latest = latest, acc
- if prev:
- # backends are not expected to keep history,
- # so we delete everything except the most recent state.
- prev.ack()
- prev = None
- else:
- raise self.BacklogLimitExceeded(task_id)
-
- if latest:
- payload = self._cache[task_id] = self.meta_from_decoded(
- latest.payload)
- latest.requeue()
- return payload
- else:
- # no new state, use previous
- try:
- return self._cache[task_id]
- except KeyError:
- # result probably pending.
- return {'status': states.PENDING, 'result': None}
- poll = get_task_meta # XXX compat
-
- def drain_events(self, connection, consumer,
- timeout=None, on_interval=None, now=time.monotonic, wait=None):
- wait = wait or connection.drain_events
- results = {}
-
- def callback(meta, message):
- if meta['status'] in states.READY_STATES:
- results[meta['task_id']] = self.meta_from_decoded(meta)
-
- consumer.callbacks[:] = [callback]
- time_start = now()
-
- while 1:
- # Total time spent may exceed a single call to wait()
- if timeout and now() - time_start >= timeout:
- raise socket.timeout()
- try:
- wait(timeout=1)
- except socket.timeout:
- pass
- if on_interval:
- on_interval()
- if results: # got event on the wanted channel.
- break
- self._cache.update(results)
- return results
-
- def consume(self, task_id, timeout=None, no_ack=True, on_interval=None):
- wait = self.drain_events
- with self.app.pool.acquire_channel(block=True) as (conn, channel):
- binding = self._create_binding(task_id)
- with self.Consumer(channel, binding,
- no_ack=no_ack, accept=self.accept) as consumer:
- while 1:
- try:
- return wait(
- conn, consumer, timeout, on_interval)[task_id]
- except KeyError:
- continue
-
- def _many_bindings(self, ids):
- return [self._create_binding(task_id) for task_id in ids]
-
- def get_many(self, task_ids, timeout=None, no_ack=True,
- on_message=None, on_interval=None,
- now=time.monotonic, getfields=itemgetter('status', 'task_id'),
- READY_STATES=states.READY_STATES,
- PROPAGATE_STATES=states.PROPAGATE_STATES, **kwargs):
- with self.app.pool.acquire_channel(block=True) as (conn, channel):
- ids = set(task_ids)
- cached_ids = set()
- mark_cached = cached_ids.add
- for task_id in ids:
- try:
- cached = self._cache[task_id]
- except KeyError:
- pass
- else:
- if cached['status'] in READY_STATES:
- yield task_id, cached
- mark_cached(task_id)
- ids.difference_update(cached_ids)
- results = deque()
- push_result = results.append
- push_cache = self._cache.__setitem__
- decode_result = self.meta_from_decoded
-
- def _on_message(message):
- body = decode_result(message.decode())
- if on_message is not None:
- on_message(body)
- state, uid = getfields(body)
- if state in READY_STATES:
- push_result(body) \
- if uid in task_ids else push_cache(uid, body)
-
- bindings = self._many_bindings(task_ids)
- with self.Consumer(channel, bindings, on_message=_on_message,
- accept=self.accept, no_ack=no_ack):
- wait = conn.drain_events
- popleft = results.popleft
- while ids:
- wait(timeout=timeout)
- while results:
- state = popleft()
- task_id = state['task_id']
- ids.discard(task_id)
- push_cache(task_id, state)
- yield task_id, state
- if on_interval:
- on_interval()
-
- def reload_task_result(self, task_id):
- raise NotImplementedError(
- 'reload_task_result is not supported by this backend.')
-
- def reload_group_result(self, task_id):
- """Reload group result, even if it has been previously fetched."""
- raise NotImplementedError(
- 'reload_group_result is not supported by this backend.')
-
- def save_group(self, group_id, result):
- raise NotImplementedError(
- 'save_group is not supported by this backend.')
-
- def restore_group(self, group_id, cache=True):
- raise NotImplementedError(
- 'restore_group is not supported by this backend.')
-
- def delete_group(self, group_id):
- raise NotImplementedError(
- 'delete_group is not supported by this backend.')
-
- def __reduce__(self, args=(), kwargs=None):
- kwargs = kwargs if kwargs else {}
- kwargs.update(
- connection=self._connection,
- exchange=self.exchange.name,
- exchange_type=self.exchange.type,
- persistent=self.persistent,
- serializer=self.serializer,
- auto_delete=self.auto_delete,
- expires=self.expires,
- )
- return super().__reduce__(args, kwargs)
-
- def as_uri(self, include_password=True):
- return 'amqp://'
| diff --git a/t/unit/app/test_backends.py b/t/unit/app/test_backends.py
--- a/t/unit/app/test_backends.py
+++ b/t/unit/app/test_backends.py
@@ -3,7 +3,6 @@
import pytest
from celery.app import backends
-from celery.backends.amqp import AMQPBackend
from celery.backends.cache import CacheBackend
from celery.exceptions import ImproperlyConfigured
@@ -11,7 +10,6 @@
class test_backends:
@pytest.mark.parametrize('url,expect_cls', [
- ('amqp://', AMQPBackend),
('cache+memory://', CacheBackend),
])
def test_get_backend_aliases(self, url, expect_cls, app):
diff --git a/t/unit/backends/test_amqp.py b/t/unit/backends/test_amqp.py
deleted file mode 100644
--- a/t/unit/backends/test_amqp.py
+++ /dev/null
@@ -1,305 +0,0 @@
-import pickle
-from contextlib import contextmanager
-from datetime import timedelta
-from pickle import dumps, loads
-from queue import Empty, Queue
-from unittest.mock import Mock
-
-import pytest
-from billiard.einfo import ExceptionInfo
-from case import mock
-
-from celery import states, uuid
-from celery.app.task import Context
-from celery.backends.amqp import AMQPBackend
-from celery.result import AsyncResult
-
-
-class SomeClass:
-
- def __init__(self, data):
- self.data = data
-
-
-class test_AMQPBackend:
-
- def setup(self):
- self.app.conf.result_cache_max = 100
-
- def create_backend(self, **opts):
- opts = dict({'serializer': 'pickle', 'persistent': True}, **opts)
- return AMQPBackend(self.app, **opts)
-
- def test_destination_for(self):
- b = self.create_backend()
- request = Mock()
- assert b.destination_for('id', request) == (
- b.rkey('id'), request.correlation_id,
- )
-
- def test_store_result__no_routing_key(self):
- b = self.create_backend()
- b.destination_for = Mock()
- b.destination_for.return_value = None, None
- b.store_result('id', None, states.SUCCESS)
-
- def test_mark_as_done(self):
- tb1 = self.create_backend(max_cached_results=1)
- tb2 = self.create_backend(max_cached_results=1)
-
- tid = uuid()
-
- tb1.mark_as_done(tid, 42)
- assert tb2.get_state(tid) == states.SUCCESS
- assert tb2.get_result(tid) == 42
- assert tb2._cache.get(tid)
- assert tb2.get_result(tid), 42
-
- @pytest.mark.usefixtures('depends_on_current_app')
- def test_pickleable(self):
- assert loads(dumps(self.create_backend()))
-
- def test_revive(self):
- tb = self.create_backend()
- tb.revive(None)
-
- def test_is_pickled(self):
- tb1 = self.create_backend()
- tb2 = self.create_backend()
-
- tid2 = uuid()
- result = {'foo': 'baz', 'bar': SomeClass(12345)}
- tb1.mark_as_done(tid2, result)
- # is serialized properly.
- rindb = tb2.get_result(tid2)
- assert rindb.get('foo') == 'baz'
- assert rindb.get('bar').data == 12345
-
- def test_mark_as_failure(self):
- tb1 = self.create_backend()
- tb2 = self.create_backend()
-
- tid3 = uuid()
- try:
- raise KeyError('foo')
- except KeyError as exception:
- einfo = ExceptionInfo()
- tb1.mark_as_failure(tid3, exception, traceback=einfo.traceback)
- assert tb2.get_state(tid3) == states.FAILURE
- assert isinstance(tb2.get_result(tid3), KeyError)
- assert tb2.get_traceback(tid3) == einfo.traceback
-
- def test_repair_uuid(self):
- from celery.backends.amqp import repair_uuid
- for i in range(10):
- tid = uuid()
- assert repair_uuid(tid.replace('-', '')) == tid
-
- def test_expires_is_int(self):
- b = self.create_backend(expires=48)
- q = b._create_binding('x1y2z3')
- assert q.expires == 48
-
- def test_expires_is_float(self):
- b = self.create_backend(expires=48.3)
- q = b._create_binding('x1y2z3')
- assert q.expires == 48.3
-
- def test_expires_is_timedelta(self):
- b = self.create_backend(expires=timedelta(minutes=1))
- q = b._create_binding('x1y2z3')
- assert q.expires == 60
-
- @mock.sleepdeprived()
- def test_store_result_retries(self):
- iterations = [0]
- stop_raising_at = [5]
-
- def publish(*args, **kwargs):
- if iterations[0] > stop_raising_at[0]:
- return
- iterations[0] += 1
- raise KeyError('foo')
-
- backend = AMQPBackend(self.app)
- from celery.app.amqp import Producer
- prod, Producer.publish = Producer.publish, publish
- try:
- with pytest.raises(KeyError):
- backend.retry_policy['max_retries'] = None
- backend.store_result('foo', 'bar', 'STARTED')
-
- with pytest.raises(KeyError):
- backend.retry_policy['max_retries'] = 10
- backend.store_result('foo', 'bar', 'STARTED')
- finally:
- Producer.publish = prod
-
- def test_poll_no_messages(self):
- b = self.create_backend()
- assert b.get_task_meta(uuid())['status'] == states.PENDING
-
- @contextmanager
- def _result_context(self):
- results = Queue()
-
- class Message:
- acked = 0
- requeued = 0
-
- def __init__(self, **merge):
- self.payload = dict({'status': states.STARTED,
- 'result': None}, **merge)
- self.properties = {'correlation_id': merge.get('task_id')}
- self.body = pickle.dumps(self.payload)
- self.content_type = 'application/x-python-serialize'
- self.content_encoding = 'binary'
-
- def ack(self, *args, **kwargs):
- self.acked += 1
-
- def requeue(self, *args, **kwargs):
- self.requeued += 1
-
- class MockBinding:
-
- def __init__(self, *args, **kwargs):
- self.channel = Mock()
-
- def __call__(self, *args, **kwargs):
- return self
-
- def declare(self):
- pass
-
- def get(self, no_ack=False, accept=None):
- try:
- m = results.get(block=False)
- if m:
- m.accept = accept
- return m
- except Empty:
- pass
-
- def is_bound(self):
- return True
-
- class MockBackend(AMQPBackend):
- Queue = MockBinding
-
- backend = MockBackend(self.app, max_cached_results=100)
- backend._republish = Mock()
-
- yield results, backend, Message
-
- def test_backlog_limit_exceeded(self):
- with self._result_context() as (results, backend, Message):
- for i in range(1001):
- results.put(Message(task_id='id', status=states.RECEIVED))
- with pytest.raises(backend.BacklogLimitExceeded):
- backend.get_task_meta('id')
-
- def test_poll_result(self):
- with self._result_context() as (results, backend, Message):
- tid = uuid()
- # FFWD's to the latest state.
- state_messages = [
- Message(task_id=tid, status=states.RECEIVED, seq=1),
- Message(task_id=tid, status=states.STARTED, seq=2),
- Message(task_id=tid, status=states.FAILURE, seq=3),
- ]
- for state_message in state_messages:
- results.put(state_message)
- r1 = backend.get_task_meta(tid)
- # FFWDs to the last state.
- assert r1['status'] == states.FAILURE
- assert r1['seq'] == 3
-
- # Caches last known state.
- tid = uuid()
- results.put(Message(task_id=tid))
- backend.get_task_meta(tid)
- assert tid, backend._cache in 'Caches last known state'
-
- assert state_messages[-1].requeued
-
- # Returns cache if no new states.
- results.queue.clear()
- assert not results.qsize()
- backend._cache[tid] = 'hello'
- # returns cache if no new states.
- assert backend.get_task_meta(tid) == 'hello'
-
- def test_drain_events_decodes_exceptions_in_meta(self):
- tid = uuid()
- b = self.create_backend(serializer='json')
- b.store_result(tid, RuntimeError('aap'), states.FAILURE)
- result = AsyncResult(tid, backend=b)
-
- with pytest.raises(Exception) as excinfo:
- result.get()
-
- assert excinfo.value.__class__.__name__ == 'RuntimeError'
- assert str(excinfo.value) == 'aap'
-
- def test_no_expires(self):
- b = self.create_backend(expires=None)
- app = self.app
- app.conf.result_expires = None
- b = self.create_backend(expires=None)
- q = b._create_binding('foo')
- assert q.expires is None
-
- def test_process_cleanup(self):
- self.create_backend().process_cleanup()
-
- def test_reload_task_result(self):
- with pytest.raises(NotImplementedError):
- self.create_backend().reload_task_result('x')
-
- def test_reload_group_result(self):
- with pytest.raises(NotImplementedError):
- self.create_backend().reload_group_result('x')
-
- def test_save_group(self):
- with pytest.raises(NotImplementedError):
- self.create_backend().save_group('x', 'x')
-
- def test_restore_group(self):
- with pytest.raises(NotImplementedError):
- self.create_backend().restore_group('x')
-
- def test_delete_group(self):
- with pytest.raises(NotImplementedError):
- self.create_backend().delete_group('x')
-
-
-class test_AMQPBackend_result_extended:
- def setup(self):
- self.app.conf.result_extended = True
-
- def test_store_result(self):
- b = AMQPBackend(self.app)
- tid = uuid()
-
- request = Context(args=(1, 2, 3), kwargs={'foo': 'bar'},
- task_name='mytask', retries=2,
- hostname='celery@worker_1',
- delivery_info={'routing_key': 'celery'})
-
- b.store_result(tid, {'fizz': 'buzz'}, states.SUCCESS, request=request)
-
- meta = b.get_task_meta(tid)
- assert meta == {
- 'args': [1, 2, 3],
- 'children': [],
- 'kwargs': {'foo': 'bar'},
- 'name': 'mytask',
- 'queue': 'celery',
- 'result': {'fizz': 'buzz'},
- 'retries': 2,
- 'status': 'SUCCESS',
- 'task_id': tid,
- 'traceback': None,
- 'worker': 'celery@worker_1',
- }
| Remove the AMQP result backend
<!--
Please fill this template entirely and do not erase parts of it.
We reserve the right to close without a response
enhancement requests which are incomplete.
-->
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Enhancement%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical enhancement to an existing feature.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22Issue+Type%3A+Enhancement%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed enhancements.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the same enhancement was already implemented in the
master branch.
- [x] I have included all related issues and possible duplicate issues in this issue
(If there are none, check this box anyway).
## Related Issues and Possible Duplicates
<!--
Please make sure to search and mention any related issues
or possible duplicates to this issue as requested by the checklist above.
This may or may not include issues in other repositories that the Celery project
maintains or other repositories that are dependencies of Celery.
If you don't know how to mention issues, please refer to Github's documentation
on the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests
-->
#### Related Issues
- None
#### Possible Duplicates
- None
# Brief Summary
<!--
Please include a brief summary of what the enhancement is
and why it is needed.
-->
We need to remove the AMQP result backend as it was marked as deprecated in 4.0.
A deprecation message was printed for our users and therefore we can remove it.
# Design
## Architectural Considerations
<!--
If more components other than Celery are involved,
describe them here and the effect it would have on Celery.
-->
None
## Proposed Behavior
<!--
Please describe in detail how this enhancement is going to change the behavior
of an existing feature.
Describe what happens in case of failures as well if applicable.
-->
The AMQP result backend will no longer work and an error will be presented to the user.
## Proposed UI/UX
<!--
Please provide your ideas for the API, CLI options,
configuration key names etc. that will be adjusted for this enhancement.
-->
When the backend is initialized, an `ImproperlyConfigured` exception is raised with the following message:
```
The AMQP result backend has been removed in 5.0.
Please use the RPC result backend or choose another result backend.
This message will be removed in 6.0.
```
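A minimal sketch of how such a fail-fast stub could look (the class below is an assumption for illustration, not an actual implementation):
```python
# Hypothetical fail-fast stub for the removed backend (illustration only).
from celery.exceptions import ImproperlyConfigured

E_AMQP_BACKEND_REMOVED = (
    'The AMQP result backend has been removed in 5.0.\n'
    'Please use the RPC result backend or choose another result backend.\n'
    'This message will be removed in 6.0.'
)


class AMQPBackend:
    """Raises immediately when the removed backend is selected."""

    def __init__(self, app, **kwargs):
        raise ImproperlyConfigured(E_AMQP_BACKEND_REMOVED)
```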
## Diagrams
<!--
Please include any diagrams that might be relevant
to the implementation of this enhancement such as:
* Class Diagrams
* Sequence Diagrams
* Activity Diagrams
You can drag and drop images into the text box to attach them to this issue.
-->
N/A
## Alternatives
<!--
If you have considered any alternative implementations
describe them in detail below.
-->
None
| 2020-09-23T13:31:16 |
|
celery/celery | 6,378 | celery__celery-6378 | [
"6363"
] | f05e82a32a737c4222ece0b446e7fb2fd8cc883f | diff --git a/celery/bin/base.py b/celery/bin/base.py
--- a/celery/bin/base.py
+++ b/celery/bin/base.py
@@ -39,8 +39,7 @@ def __init__(self, app, no_color, workdir, quiet=False):
@cached_property
def OK(self):
- return self.style("OK", fg="green", bold=True) \
-
+ return self.style("OK", fg="green", bold=True)
@cached_property
def ERROR(self):
@@ -72,7 +71,7 @@ def error(self, message=None, **kwargs):
kwargs['color'] = False
click.echo(message, **kwargs)
else:
- click.echo(message, **kwargs)
+ click.secho(message, **kwargs)
def pretty(self, n):
if isinstance(n, list):
diff --git a/celery/bin/celery.py b/celery/bin/celery.py
--- a/celery/bin/celery.py
+++ b/celery/bin/celery.py
@@ -2,6 +2,7 @@
import os
import click
+import click.exceptions
from click.types import ParamType
from click_didyoumean import DYMGroup
@@ -104,7 +105,8 @@ def celery(ctx, app, broker, result_backend, loader, config, workdir,
os.environ['CELERY_RESULT_BACKEND'] = result_backend
if config:
os.environ['CELERY_CONFIG_MODULE'] = config
- ctx.obj = CLIContext(app=app, no_color=no_color, workdir=workdir, quiet=quiet)
+ ctx.obj = CLIContext(app=app, no_color=no_color, workdir=workdir,
+ quiet=quiet)
# User options
worker.params.extend(ctx.obj.app.user_options.get('worker', []))
@@ -139,6 +141,32 @@ def report(ctx):
celery.add_command(shell)
celery.add_command(multi)
+# Monkey-patch click to display a custom error
+# when -A or --app are used as sub-command options instead of as options
+# of the global command.
+
+previous_show_implementation = click.exceptions.NoSuchOption.show
+
+WRONG_APP_OPTION_USAGE_MESSAGE = """You are using `{option_name}` as an option of the {info_name} sub-command:
+celery {info_name} {option_name} celeryapp <...>
+
+The support for this usage was removed in Celery 5.0. Instead you should use `{option_name}` as a global option:
+celery {option_name} celeryapp {info_name} <...>"""
+
+
+def _show(self, file=None):
+ if self.option_name in ('-A', '--app'):
+ self.ctx.obj.error(
+ WRONG_APP_OPTION_USAGE_MESSAGE.format(
+ option_name=self.option_name,
+ info_name=self.ctx.info_name),
+ fg='red'
+ )
+ previous_show_implementation(self, file=file)
+
+
+click.exceptions.NoSuchOption.show = _show
+
def main() -> int:
"""Start celery umbrella command.
| celery worker command option -A is not working with version 5.0.0
Hi, I am running the celery worker command as follows on Windows:
```
pipenv run celery worker -A <celery_instance_file> -l info
```
I checked the version of the celery module installed in Python; it says 5.0.0:
```python
# Python interpreter
>>> import celery
>>> celery.__version__
'5.0.0'
```
The error is as follows:
```
Usage: celery worker [OPTIONS]
Try 'celery worker --help' for help.

Error: no such option: -A
```
I checked the documentation of celery version 5.0.0, and it still lists the `-A` option:
https://docs.celeryproject.org/en/latest/reference/cli.html
So let me know if this is a limitation on Windows or if this is a bug.
| Hi,
here is an example celery run command:
```bash
celery -A celery_main worker \
--loglevel INFO \
--without-gossip \
--without-mingle \
-O fair \
-P eventlet \
-c 10
```
I get the same error on Ubuntu 20.04.1 after upgrading to Celery 5.0.0.
Problem also occurs when using `--app`
I am passing the app as the first parameter after the celery binary:
```
ExecStart=/bin/sh -c '${CELERY_BIN} -A ${CELERY_APP} multi start ${CELERYD_NODES} \
--pidfile=${CELERYD_PID_FILE} \
--logfile=${CELERYD_LOG_FILE} --loglevel=${CELERYD_LOG_LEVEL} ${CELERYD_OPTS}'
```
Ignore my previous comment; apparently when I tried that, it was erroneously with 4.4.7.
I had the same problem in celery beat
```
celery v4.4.7 :
celery beat -A celery_main --loglevel=info
celery v5.0.0 :
celery -A celery_main beat -s celerybeat-schedule.db --loglevel INFO
``` | 2020-09-30T12:17:24 |
|
celery/celery | 6,394 | celery__celery-6394 | [
"6362"
] | 66d2ea51ca8dff22dc11e6fd6119a3beedd83b51 | diff --git a/celery/bin/worker.py b/celery/bin/worker.py
--- a/celery/bin/worker.py
+++ b/celery/bin/worker.py
@@ -302,6 +302,7 @@ def worker(ctx, hostname=None, pool_cls=None, app=None, uid=None, gid=None,
executable = params.pop('executable')
argv = ['-m', 'celery', 'worker']
for arg, value in params.items():
+ arg = arg.replace("_", "-")
if isinstance(value, bool) and value:
argv.append(f'--{arg}')
else:
| [5.0.0] Celery does NOT start with both -D and -E options enabled
Hi,
I've just installed Celery 5.0.0 and noticed that it doesn't start when both the `-D` and `-E` options are enabled.
The `-E` option on its own works fine:
```
$ celery -A sample.celery worker -E
-------------- [email protected] v5.0.0 (singularity)
--- ***** -----
-- ******* ---- Darwin-19.6.0-x86_64-i386-64bit 2020-09-24 15:59:59
- *** --- * ---
- ** ---------- [config]
- ** ---------- .> app: sample:0x7fc944afebd0
- ** ---------- .> transport: amqp://guest:**@192.168.0.101:5672//
- ** ---------- .> results: disabled://
- *** --- * --- .> concurrency: 4 (prefork)
-- ******* ---- .> task events: ON
--- ***** -----
-------------- [queues]
.> celery exchange=celery(direct) key=celery
```
But with both `-D` and `-E` the worker doesn't start. It exits with exit code 0 and nothing happens...
```
$ celery -A sample.celery worker -D -E
```
**Info:**
Python 3.7.9
Celery 5.0.0 on macOS 10.15 Catalina.
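For illustration, a minimal sketch of the failure mode that the one-line fix in the patch above addresses (values are illustrative): Click stores parameter names with underscores, while the CLI flags use dashes, so naively rebuilding the flags for the detached process produces options the CLI rejects.
```python
# Sketch of the argv-rebuilding bug when detaching (illustrative values).
params = {'task_events': True}  # Click parameter names use underscores

broken = [f'--{arg}' for arg, value in params.items() if value]
print(broken)  # ['--task_events'] -> rejected: no such option

fixed = [f'--{arg.replace("_", "-")}' for arg, value in params.items() if value]
print(fixed)   # ['--task-events'] -> accepted by the CLI
```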
| Celery has some breaking changes in v5; please check them.
> Celery has some breaking changes in v5; please check them.
This sounds like a bug.
When detaching, all options should be preserved.
I just checked it myself. This is indeed a bug.
I'm looking into it.
The problem is where we reconstruct the arguments for the detached process.
I'm on it. | 2020-10-06T16:28:49 |
|
celery/celery | 6,396 | celery__celery-6396 | [
"6395"
] | 66d2ea51ca8dff22dc11e6fd6119a3beedd83b51 | diff --git a/celery/bin/base.py b/celery/bin/base.py
--- a/celery/bin/base.py
+++ b/celery/bin/base.py
@@ -220,6 +220,7 @@ def __init__(self):
super().__init__(('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL', 'FATAL'))
def convert(self, value, param, ctx):
+ value = value.upper()
value = super().convert(value, param, ctx)
return mlevel(value)
| Using lowercase loglevel argument like in the documentation throws error
<!--
Please fill this template entirely and do not erase parts of it.
We reserve the right to close without a response
bug reports which are incomplete.
-->
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?utf8=%E2%9C%93&q=is%3Aissue+label%3A%22Category%3A+Documentation%22+)
for similar or identical bug reports.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the bug was already fixed in the master branch.
- [x] I have included all related issues and possible duplicate issues in this issue
(If there are none, check this box anyway).
## Related Issues and Possible Duplicates
<!--
Please make sure to search and mention any related issues
or possible duplicates to this issue as requested by the checklist above.
This may or may not include issues in other repositories that the Celery project
maintains or other repositories that are dependencies of Celery.
If you don't know how to mention issues, please refer to Github's documentation
on the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests
-->
#### Related Issues
- None
#### Possible Duplicates
- None
# Description
<!--
Please describe what's missing or incorrect about our documentation.
Include links and/or screenshots which will aid us to resolve the issue.
-->
When starting workers like [some examples in the documentation](https://docs.celeryproject.org/en/stable/userguide/workers.html#starting-the-worker) that use lowercase `info` as the loglevel argument, I get this error: `Error: Invalid value for '-l' / '--loglevel': invalid choice: info. (choose from DEBUG, INFO, WARNING, ERROR, CRITICAL, FATAL)`. I assume this is because of the switch to Click in 5.0.
# Suggestions
<!-- Please provide us suggestions for how to fix the documentation -->
Review documentation for instances of lowercase loglevel being passed and update them to use uppercase.
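A minimal sketch of one way to accept either case, assuming a custom Click `ParamType` like the `LogLevel` type in `celery/bin/base.py` (simplified from the patch above):
```python
# Simplified sketch of a case-insensitive log-level choice for Click.
import click


class LogLevel(click.Choice):
    def __init__(self):
        super().__init__(('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL', 'FATAL'))

    def convert(self, value, param, ctx):
        # Normalize case before validating, so 'info' and 'INFO' both pass.
        return super().convert(value.upper(), param, ctx)
```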
| Actually this is a bug and not a documentation bug.
I didn't intend to break it. | 2020-10-06T17:25:41 |
|
celery/celery | 6,401 | celery__celery-6401 | [
"6397"
] | 8a92b7128bc921d4332fe01486accc243115aba8 | diff --git a/celery/apps/multi.py b/celery/apps/multi.py
--- a/celery/apps/multi.py
+++ b/celery/apps/multi.py
@@ -78,7 +78,7 @@ def __init__(self, args):
self.namespaces = defaultdict(lambda: OrderedDict())
def parse(self):
- rargs = list(self.args)
+ rargs = [arg for arg in self.args if arg]
pos = 0
while pos < len(rargs):
arg = rargs[pos]
diff --git a/celery/bin/worker.py b/celery/bin/worker.py
--- a/celery/bin/worker.py
+++ b/celery/bin/worker.py
@@ -10,7 +10,8 @@
from celery import concurrency
from celery.bin.base import (COMMA_SEPARATED_LIST, LOG_LEVEL,
CeleryDaemonCommand, CeleryOption)
-from celery.platforms import EX_FAILURE, detached, maybe_drop_privileges
+from celery.platforms import (EX_FAILURE, EX_OK, detached,
+ maybe_drop_privileges)
from celery.utils.log import get_logger
from celery.utils.nodenames import default_nodename, host_format, node_format
@@ -99,6 +100,7 @@ def detach(path, argv, logfile=None, pidfile=None, uid=None,
if executable is not None:
path = executable
os.execv(path, [path] + argv)
+ return EX_OK
except Exception: # pylint: disable=broad-except
if app is None:
from celery import current_app
@@ -107,7 +109,7 @@ def detach(path, argv, logfile=None, pidfile=None, uid=None,
'ERROR', logfile, hostname=hostname)
logger.critical("Can't exec %r", ' '.join([path] + argv),
exc_info=True)
- return EX_FAILURE
+ return EX_FAILURE
@click.command(cls=CeleryDaemonCommand,
@@ -290,36 +292,23 @@ def worker(ctx, hostname=None, pool_cls=None, app=None, uid=None, gid=None,
"Unable to parse extra configuration from command line.\n"
f"Reason: {e}", ctx=ctx)
if kwargs.get('detach', False):
- params = ctx.params.copy()
- params.pop('detach')
- params.pop('logfile')
- params.pop('pidfile')
- params.pop('uid')
- params.pop('gid')
- umask = params.pop('umask')
- workdir = ctx.obj.workdir
- params.pop('hostname')
- executable = params.pop('executable')
- argv = ['-m', 'celery', 'worker']
- for arg, value in params.items():
- arg = arg.replace("_", "-")
- if isinstance(value, bool) and value:
- argv.append(f'--{arg}')
- else:
- if value is not None:
- argv.append(f'--{arg}')
- argv.append(str(value))
- return detach(sys.executable,
- argv,
- logfile=logfile,
- pidfile=pidfile,
- uid=uid, gid=gid,
- umask=umask,
- workdir=workdir,
- app=app,
- executable=executable,
- hostname=hostname)
- return
+ argv = ['-m', 'celery'] + sys.argv[1:]
+ if '--detach' in argv:
+ argv.remove('--detach')
+ if '-D' in argv:
+ argv.remove('-D')
+
+ return detach(sys.executable,
+ argv,
+ logfile=logfile,
+ pidfile=pidfile,
+ uid=uid, gid=gid,
+ umask=kwargs.get('umask', None),
+ workdir=kwargs.get('workdir', None),
+ app=app,
+ executable=kwargs.get('executable', None),
+ hostname=hostname)
+
maybe_drop_privileges(uid=uid, gid=gid)
worker = app.Worker(
hostname=hostname, pool_cls=pool_cls, loglevel=loglevel,
| SystemD unit files don't work with Celery 5
<!--
Please fill this template entirely and do not erase parts of it.
We reserve the right to close without a response
bug reports which are incomplete.
-->
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [x] This has already been asked to the [discussion group](https://groups.google.com/forum/#!forum/celery-users) first.
- [x] I have read the relevant section in the
[contribution guide](http://docs.celeryproject.org/en/latest/contributing.html#other-bugs)
on reporting bugs.
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the bug was already fixed in the master branch.
- [x] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
## Mandatory Debugging Information
- [x] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [x] I have included the contents of ``pip freeze`` in the issue.
- [x] I have included all the versions of all the external dependencies required
to reproduce this bug.
## Optional Debugging Information
<!--
Try some of the below if you think they are relevant.
It will help us figure out the scope of the bug and how many users it affects.
-->
- [ ] I have tried reproducing the issue on more than one Python version
and/or implementation.
- [ ] I have tried reproducing the issue on more than one message broker and/or
result backend.
- [ ] I have tried reproducing the issue on more than one version of the message
broker and/or result backend.
- [ ] I have tried reproducing the issue on more than one operating system.
- [ ] I have tried reproducing the issue on more than one workers pool.
- [ ] I have tried reproducing the issue with autoscaling, retries,
ETA/Countdown & rate limits disabled.
- [ ] I have tried reproducing the issue after downgrading
and/or upgrading Celery and its dependencies.
## Related Issues and Possible Duplicates
<!--
Please make sure to search and mention any related issues
or possible duplicates to this issue as requested by the checklist above.
This may or may not include issues in other repositories that the Celery project
maintains or other repositories that are dependencies of Celery.
If you don't know how to mention issues, please refer to Github's documentation
on the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests
-->
#### Related Issues
- #6381
#### Possible Duplicates
- None
## Environment & Settings
<!-- Include the contents of celery --version below -->
**Celery version**: 5.0.0 (singularity)
<!-- Include the output of celery -A proj report below -->
<details>
<summary><b><code>celery report</code> Output:</b></summary>
<p>
```
celery report
software -> celery:5.0.0 (singularity) kombu:5.0.2 py:3.8.2
billiard:3.6.3.0 py-amqp:5.0.1
platform -> system:Linux arch:64bit, ELF
kernel version:5.4.0-48-generic imp:CPython
loader -> celery.loaders.default.Loader
settings -> transport:amqp results:disabled
deprecated_settings: None
```
</p>
</details>
# Steps to Reproduce
## Required Dependencies
<!-- Please fill the required dependencies to reproduce this issue -->
* **Minimal Python Version**: N/A or Unknown
* **Minimal Celery Version**: 5.0.0
* **Minimal Kombu Version**: N/A or Unknown
* **Minimal Broker Version**: N/A or Unknown
* **Minimal Result Backend Version**: N/A or Unknown
* **Minimal OS and/or Kernel Version**: N/A or Unknown
* **Minimal Broker Client Version**: N/A or Unknown
* **Minimal Result Backend Client Version**: N/A or Unknown
### Python Packages
<!-- Please fill the contents of pip freeze below -->
<details>
<summary><b><code>pip freeze</code> Output:</b></summary>
<p>
```
alabaster==0.7.12
amqp==5.0.1
appdirs==1.4.4
attrs==19.3.0
autoflake==1.3.1
autopep8==1.5.4
aws-xray-sdk==0.95
azure-common==1.1.5
azure-nspkg==3.0.2
azure-storage==0.36.0
azure-storage-common==1.1.0
azure-storage-nspkg==3.1.0
Babel==2.8.0
backcall==0.2.0
bandit==1.6.2
basho-erlastic==2.1.1
billiard==3.6.3.0
bleach==3.1.5
boto==2.49.0
boto3==1.13.26
botocore==1.16.26
bump2version==1.0.0
bumpversion==0.6.0
case==1.5.3
cassandra-driver==3.20.2
-e [email protected]:celery/celery.git@eab4bc3d28b5dc547bb6dfac981b01595c565c3a#egg=celery
certifi==2020.4.5.1
cffi==1.14.0
cfgv==3.1.0
chardet==3.0.4
click==7.1.2
click-didyoumean==0.0.3
click-repl==0.1.6
cmake-setuptools==0.1.3
codecov==2.1.8
colorama==0.4.3
couchbase==2.5.12
coverage==5.2
cryptography==3.0
DateTime==4.3
decorator==4.4.2
distlib==0.3.1
dnspython==1.16.0
docker==4.2.1
docutils==0.15.2
ecdsa==0.15
elasticsearch==7.8.1
ephem==3.7.7.1
eventlet==0.26.1
filelock==3.0.12
future==0.18.2
gevent==20.6.2
gitdb==4.0.5
GitPython==3.1.7
greenlet==0.4.16
identify==1.4.19
idna==2.9
imagesize==1.2.0
importlib-metadata==1.6.1
iniconfig==1.0.1
ipython==7.17.0
ipython-genutils==0.2.0
isort==5.0.9
jedi==0.17.2
jeepney==0.4.3
Jinja2==2.11.2
jmespath==0.10.0
jsondiff==1.1.1
jsonpickle==1.4.1
keyring==21.2.1
kombu==5.0.2
linecache2==1.0.0
MarkupSafe==1.1.1
mock==4.0.2
monotonic==1.5
more-itertools==8.3.0
moto==1.3.7
msgpack==1.0.0
nodeenv==1.4.0
nose==1.3.7
packaging==20.4
parso==0.7.1
pbr==5.4.5
pexpect==4.8.0
pickleshare==0.7.5
pkginfo==1.5.0.1
pluggy==0.13.1
pre-commit==2.5.1
prompt-toolkit==3.0.7
ptyprocess==0.6.0
py==1.9.0
pyaml==20.4.0
pyArango==1.3.4
pycodestyle==2.6.0
pycouchdb==1.14.1
pycparser==2.20
pycryptodome==3.9.7
pycurl==7.43.0.5
pydocumentdb==2.3.2
pyflakes==2.2.0
Pygments==2.6.1
pylibmc==1.6.1
pymongo==3.11.0
pyparsing==2.4.7
pytest==6.0.1
pytest-celery==0.0.0a1
pytest-cov==2.10.0
pytest-rerunfailures==9.0
pytest-sugar==0.9.4
pytest-timeout==1.4.2
pytest-travis-fold==1.3.0
python-consul==1.1.0
python-dateutil==2.8.1
python-jose==2.0.2
python-memcached==1.59
python3-protobuf==2.5.0
pytz==2020.1
pyupgrade==2.6.2
PyYAML==5.3.1
readme-renderer==26.0
redis==3.5.3
requests==2.23.0
requests-toolbelt==0.9.1
responses==0.10.14
rfc3986==1.4.0
riak==2.7.0
s3transfer==0.3.3
SecretStorage==3.1.2
simplejson==3.17.2
six==1.15.0
smmap==3.0.4
snowballstemmer==2.0.0
softlayer-messaging==1.0.3
Sphinx==3.0.4
sphinx-celery==2.0.0
sphinx-click==2.5.0
sphinx-testing==0.7.2
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==1.0.3
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.4
SQLAlchemy==1.3.18
stevedore==3.2.0
tblib==1.7.0
termcolor==1.1.0
tokenize-rt==4.0.0
toml==0.10.1
tox==3.20.0
tqdm==4.48.0
traceback2==1.4.0
traitlets==4.3.3
twine==3.2.0
typer==0.3.0
unittest2==1.1.0
urllib3==1.25.9
vine==5.0.0
virtualenv==20.0.31
wcwidth==0.2.5
webencodings==0.5.1
websocket-client==0.57.0
Werkzeug==1.0.1
wrapt==1.12.1
xar==19.4.22
xmltodict==0.12.0
zipp==3.1.0
zope.event==4.4
zope.interface==5.1.0
```
</p>
</details>
### Other Dependencies
<!--
Please provide system dependencies, configuration files
and other dependency information if applicable
-->
<details>
<p>
A recent version of SystemD.
</p>
</details>
## Minimally Reproducible Test Case
<!--
Please provide a reproducible test case.
Refer to the Reporting Bugs section in our contribution guide.
We prefer submitting test cases in the form of a PR to our integration test suite.
If you can provide one, please mention the PR number below.
If not, please attach the most minimal code example required to reproduce the issue below.
If the test case is too large, please include a link to a gist or a repository below.
-->
<details>
<p>
```python
```
</p>
</details>
# Expected Behavior
<!-- Describe in detail what you expect to happen -->
The service should start without error.
# Actual Behavior
<!--
Describe in detail what actually happened.
Please include a backtrace and surround it with triple backticks (```).
In addition, include the Celery daemon logs, the broker logs,
the result backend logs and system logs below if they will help us debug
the issue.
-->
The service crashes with the following error:
```
okt 06 18:13:53 nezzybuild systemd[1]: Started Celery Service.
okt 06 18:13:53 nezzybuild systemd[1]: celery.service: Main process exited, code=exited, status=2/INVALIDARGUMENT
okt 06 18:13:53 nezzybuild systemd[1]: celery.service: Failed with result 'exit-code'.
```
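For context, the patch above re-executes the original command line instead of reconstructing it from parsed parameters; a minimal sketch of that idea (simplified, not the full implementation):
```python
# Simplified sketch: reuse the original argv for the detached child process.
import sys


def build_detached_argv():
    argv = ['-m', 'celery'] + sys.argv[1:]
    # Strip the detach flags so the re-executed worker runs in the foreground.
    for flag in ('--detach', '-D'):
        if flag in argv:
            argv.remove(flag)
    return argv
```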
| @mfonville Can you please share your `celery.conf` file?
Also could you please check journald?
Paste the exact output of `journalctl -xe --unit celery.service`.
Mine is (as expected):
```
Oct 06 21:01:46 ubuntu systemd[1]: Starting Celery Service...
-- Subject: A start job for unit celery.service has begun execution
-- Defined-By: systemd
-- Support: http://www.ubuntu.com/support
--
-- A start job for unit celery.service has begun execution.
--
-- The job identifier is 6969.
Oct 06 21:01:46 ubuntu celery[72244]: Usage: celery [OPTIONS] COMMAND [ARGS]...
Oct 06 21:01:46 ubuntu celery[72244]: Error: Invalid value for '-A' / '--app': No module named 'proj'
Oct 06 21:01:46 ubuntu systemd[1]: celery.service: Control process exited, code=exited, status=2/INVALIDARGUMENT
```
My `celery.conf` (with the name of my Django app masked):
```
# Name of nodes to start
# here we have a single node
CELERYD_NODES="w1"
# or we could have three nodes:
#CELERYD_NODES="w1 w2 w3"
# Absolute or relative path to the 'celery' command:
CELERY_BIN="/opt/mydjangoapp/env/bin/celery"
# App instance to use
# comment out this line if you don't use an app
CELERY_APP="mydjangoapp"
# or fully qualified:
#CELERY_APP="proj.tasks:app"
# How to call manage.py
CELERYD_MULTI="multi"
# Extra command-line arguments to the worker
CELERYD_OPTS="--time-limit=300 --concurrency=8"
# - %n will be replaced with the first part of the nodename.
# - %I will be replaced with the current child process index
# and is important when using the prefork pool to avoid race conditions.
CELERYD_PID_FILE="/var/run/celery/%n.pid"
CELERYD_LOG_FILE="/var/log/celery/%n%I.log"
CELERYD_LOG_LEVEL="INFO"
# you may wish to add these options for Celery Beat
CELERYBEAT_PID_FILE="/var/run/celery/beat.pid"
CELERYBEAT_LOG_FILE="/var/log/celery/beat.log"
# For Celery 5 beat with django
DJANGO_SETTINGS_MODULE=mydjangoapp.settings
CELERYD_CHDIR="/opt/mdyjangoapp"
```
And the log:
```
-- The job identifier is 1177 and the job result is done.
okt 06 21:13:09 nezzybuild systemd[1]: Starting Celery Service...
-- Subject: A start job for unit celery.service has begun execution
-- Defined-By: systemd
-- Support: http://www.ubuntu.com/support
--
-- A start job for unit celery.service has begun execution.
--
-- The job identifier is 1177.
okt 06 21:13:09 nezzybuild sh[4802]: celery multi v5.0.0 (singularity)
okt 06 21:13:09 nezzybuild sh[4802]: > Starting nodes...
okt 06 21:13:09 nezzybuild sh[4802]: > w1@nezzybuild: OK
okt 06 21:13:09 nezzybuild systemd[1]: Started Celery Service.
-- Subject: A start job for unit celery.service has finished successfully
-- Defined-By: systemd
-- Support: http://www.ubuntu.com/support
--
-- A start job for unit celery.service has finished successfully.
--
-- The job identifier is 1177.
okt 06 21:13:09 nezzybuild systemd[1]: celery.service: Main process exited, code=exited, status=2/INVALIDARGUMENT
-- Subject: Unit process exited
-- Defined-By: systemd
-- Support: http://www.ubuntu.com/support
--
-- An ExecStart= process belonging to unit celery.service has exited.
--
-- The process' exit code is 'exited' and its exit status is 2.
okt 06 21:13:09 nezzybuild systemd[1]: celery.service: Failed with result 'exit-code'.
-- Subject: Unit failed
-- Defined-By: systemd
-- Support: http://www.ubuntu.com/support
--
-- The unit celery.service has entered the 'failed' state with result 'exit-code'.
lines 2944-2979/2979 (END)
```
(with the patch from #6388)
I think that your problem is due to #6362.
What happens if you remove `--time-limit=300`?
The worker starts correctly, right?
Even without `--time-limit` (but with no other parameters removed) it gives an error:
```
-- The job identifier is 4893 and the job result is done.
okt 07 11:54:35 nezzybuild systemd[1]: Starting Celery Service...
-- Subject: A start job for unit celery.service has begun execution
-- Defined-By: systemd
-- Support: http://www.ubuntu.com/support
--
-- A start job for unit celery.service has begun execution.
--
-- The job identifier is 4893.
okt 07 11:54:36 nezzybuild sh[243146]: celery multi v5.0.0 (singularity)
okt 07 11:54:36 nezzybuild sh[243146]: > Starting nodes...
okt 07 11:54:36 nezzybuild sh[243146]: > w1@nezzybuild: OK
okt 07 11:54:36 nezzybuild systemd[1]: Started Celery Service.
-- Subject: A start job for unit celery.service has finished successfully
-- Defined-By: systemd
-- Support: http://www.ubuntu.com/support
--
-- A start job for unit celery.service has finished successfully.
--
-- The job identifier is 4893.
okt 07 11:54:36 nezzybuild systemd[1]: celery.service: Main process exited, code=exited, status=2/INVALIDARGUMENT
-- Subject: Unit process exited
-- Defined-By: systemd
-- Support: http://www.ubuntu.com/support
--
-- An ExecStart= process belonging to unit celery.service has exited.
--
-- The process' exit code is 'exited' and its exit status is 2.
okt 07 11:54:36 nezzybuild systemd[1]: celery.service: Failed with result 'exit-code'.
-- Subject: Unit failed
-- Defined-By: systemd
-- Support: http://www.ubuntu.com/support
--
-- The unit celery.service has entered the 'failed' state with result 'exit-code'.
```
Is there something in Celery's log file that indicates why the process exited?
No, nothing visible in the log :-/
Maybe the command itself could report in its output the parameters it has interpreted? That way the user could verify any discrepancies.
And maybe someone from the Click project could take a look (e.g. by asking them on their Discord) and offer suggestions, because I have the feeling that Click might have a more straightforward solution for `multi` and other commands that accept a set of recursive options.
No. This is definitely our problem and I now know why this happens.
I'm working on a fix.
I have found some problems but not the problem.
I think we should take a closer look at our process detaching code.
For future reference, this command exits without error but doesn't really start the process:
```
/home/thedrow/.virtualenvs/celery/bin/python -m celery -A myapp worker --detach --pidfile=/home/thedrow/Documents/Projects/celery/examples/app/worker.pid --logfile=/home/thedrow/Documents/Projects/celery/examples/app/worker%I.log --loglevel=INFO -n worker@ubuntu --executable=/home/thedrow/.virtualenvs/celery/bin/python
```
Your working directory should be `/path/to/git/clone/celery/examples/app`. | 2020-10-08T15:54:36 |
|
celery/celery | 6,416 | celery__celery-6416 | [
"6414"
] | 60ba37900a038420aec0fc76e60c55989f66c718 | diff --git a/celery/app/base.py b/celery/app/base.py
--- a/celery/app/base.py
+++ b/celery/app/base.py
@@ -206,6 +206,8 @@ class name.
task_cls = 'celery.app.task:Task'
registry_cls = 'celery.app.registry:TaskRegistry'
+ #: Thread local storage.
+ _local = None
_fixups = None
_pool = None
_conf = None
@@ -229,6 +231,9 @@ def __init__(self, main=None, loader=None, backend=None,
changes=None, config_source=None, fixups=None, task_cls=None,
autofinalize=True, namespace=None, strict_typing=True,
**kwargs):
+
+ self._local = threading.local()
+
self.clock = LamportClock()
self.main = main
self.amqp_cls = amqp or self.amqp_cls
@@ -727,7 +732,7 @@ def send_task(self, name, args=None, kwargs=None, countdown=None,
task_id, name, args, kwargs, countdown, eta, group_id, group_index,
expires, retries, chord,
maybe_list(link), maybe_list(link_error),
- reply_to or self.oid, time_limit, soft_time_limit,
+ reply_to or self.thread_oid, time_limit, soft_time_limit,
self.conf.task_send_sent_event,
root_id, parent_id, shadow, chain,
argsrepr=options.get('argsrepr'),
@@ -1185,15 +1190,28 @@ def oid(self):
# which would not work if each thread has a separate id.
return oid_from(self, threads=False)
+ @property
+ def thread_oid(self):
+ """Per-thread unique identifier for this app."""
+ try:
+ return self._local.oid
+ except AttributeError:
+ self._local.oid = new_oid = oid_from(self, threads=True)
+ return new_oid
+
@cached_property
def amqp(self):
"""AMQP related functionality: :class:`~@amqp`."""
return instantiate(self.amqp_cls, app=self)
- @cached_property
+ @property
def backend(self):
"""Current backend instance."""
- return self._get_backend()
+ try:
+ return self._local.backend
+ except AttributeError:
+ self._local.backend = new_backend = self._get_backend()
+ return new_backend
@property
def conf(self):
diff --git a/celery/backends/rpc.py b/celery/backends/rpc.py
--- a/celery/backends/rpc.py
+++ b/celery/backends/rpc.py
@@ -338,5 +338,5 @@ def binding(self):
@cached_property
def oid(self):
- # cached here is the app OID: name of queue we receive results on.
- return self.app.oid
+ # cached here is the app thread OID: name of queue we receive results on.
+ return self.app.thread_oid
diff --git a/celery/canvas.py b/celery/canvas.py
--- a/celery/canvas.py
+++ b/celery/canvas.py
@@ -296,7 +296,7 @@ def freeze(self, _id=None, group_id=None, chord=None,
if parent_id:
opts['parent_id'] = parent_id
if 'reply_to' not in opts:
- opts['reply_to'] = self.app.oid
+ opts['reply_to'] = self.app.thread_oid
if group_id and "group_id" not in opts:
opts['group_id'] = group_id
if chord:
| diff --git a/t/unit/app/test_app.py b/t/unit/app/test_app.py
--- a/t/unit/app/test_app.py
+++ b/t/unit/app/test_app.py
@@ -2,6 +2,7 @@
import itertools
import os
import ssl
+import uuid
from copy import deepcopy
from datetime import datetime, timedelta
from pickle import dumps, loads
@@ -17,6 +18,7 @@
from celery.app import base as _appbase
from celery.app import defaults
from celery.exceptions import ImproperlyConfigured
+from celery.backends.base import Backend
from celery.loaders.base import unconfigured
from celery.platforms import pyimplementation
from celery.utils.collections import DictAttribute
@@ -987,6 +989,63 @@ class CustomCelery(type(self.app)):
app = CustomCelery(set_as_current=False)
assert isinstance(app.tasks, TaskRegistry)
+ def test_oid(self):
+ # Test that oid is global value.
+ oid1 = self.app.oid
+ oid2 = self.app.oid
+ uuid.UUID(oid1)
+ uuid.UUID(oid2)
+ assert oid1 == oid2
+
+ def test_global_oid(self):
+ # Test that oid is global value also within threads
+ main_oid = self.app.oid
+ uuid.UUID(main_oid)
+ from concurrent.futures import ThreadPoolExecutor
+ with ThreadPoolExecutor(max_workers=1) as executor:
+ future = executor.submit(lambda: self.app.oid)
+ thread_oid = future.result()
+ uuid.UUID(thread_oid)
+ assert main_oid == thread_oid
+
+ def test_thread_oid(self):
+ # Test that thread_oid is global value in single thread.
+ oid1 = self.app.thread_oid
+ oid2 = self.app.thread_oid
+ uuid.UUID(oid1)
+ uuid.UUID(oid2)
+ assert oid1 == oid2
+
+ def test_backend(self):
+ # Test that app.bakend returns the same backend in single thread
+ backend1 = self.app.backend
+ backend2 = self.app.backend
+ assert isinstance(backend1, Backend)
+ assert isinstance(backend2, Backend)
+ assert backend1 is backend2
+
+ def test_thread_backend(self):
+ # Test that app.bakend returns the new backend for each thread
+ main_backend = self.app.backend
+ from concurrent.futures import ThreadPoolExecutor
+ with ThreadPoolExecutor(max_workers=1) as executor:
+ future = executor.submit(lambda: self.app.backend)
+ thread_backend = future.result()
+ assert isinstance(main_backend, Backend)
+ assert isinstance(thread_backend, Backend)
+ assert main_backend is not thread_backend
+
+ def test_thread_oid_is_local(self):
+ # Test that thread_oid is local to thread.
+ main_oid = self.app.thread_oid
+ uuid.UUID(main_oid)
+ from concurrent.futures import ThreadPoolExecutor
+ with ThreadPoolExecutor(max_workers=1) as executor:
+ future = executor.submit(lambda: self.app.thread_oid)
+ thread_oid = future.result()
+ uuid.UUID(thread_oid)
+ assert main_oid != thread_oid
+
class test_defaults:
diff --git a/t/unit/backends/test_rpc.py b/t/unit/backends/test_rpc.py
--- a/t/unit/backends/test_rpc.py
+++ b/t/unit/backends/test_rpc.py
@@ -1,3 +1,4 @@
+import uuid
from unittest.mock import Mock, patch
import pytest
@@ -28,8 +29,22 @@ def setup(self):
def test_oid(self):
oid = self.b.oid
oid2 = self.b.oid
+ assert uuid.UUID(oid)
assert oid == oid2
- assert oid == self.app.oid
+ assert oid == self.app.thread_oid
+
+ def test_oid_threads(self):
+ # Verify that two RPC backends executed in different threads
+ # has different oid.
+ oid = self.b.oid
+ from concurrent.futures import ThreadPoolExecutor
+ with ThreadPoolExecutor(max_workers=1) as executor:
+ future = executor.submit(lambda: RPCBackend(app=self.app).oid)
+ thread_oid = future.result()
+ assert uuid.UUID(oid)
+ assert uuid.UUID(thread_oid)
+ assert oid == self.app.thread_oid
+ assert thread_oid != oid
def test_interface(self):
self.b.on_reply_declare('task_id')
diff --git a/t/unit/tasks/test_chord.py b/t/unit/tasks/test_chord.py
--- a/t/unit/tasks/test_chord.py
+++ b/t/unit/tasks/test_chord.py
@@ -1,5 +1,5 @@
from contextlib import contextmanager
-from unittest.mock import Mock, patch, sentinel
+from unittest.mock import Mock, patch, sentinel, PropertyMock
import pytest
@@ -294,9 +294,8 @@ def adds(self, sig, lazy=False):
return self.add_to_chord(sig, lazy)
self.adds = adds
+ @patch('celery.Celery.backend', new=PropertyMock(name='backend'))
def test_add_to_chord(self):
- self.app.backend = Mock(name='backend')
-
sig = self.add.s(2, 2)
sig.delay = Mock(name='sig.delay')
self.adds.request.group = uuid()
@@ -333,8 +332,8 @@ def test_add_to_chord(self):
class test_Chord_task(ChordCase):
+ @patch('celery.Celery.backend', new=PropertyMock(name='backend'))
def test_run(self):
- self.app.backend = Mock()
self.app.backend.cleanup = Mock()
self.app.backend.cleanup.__name__ = 'cleanup'
Chord = self.app.tasks['celery.chord']
diff --git a/t/unit/tasks/test_result.py b/t/unit/tasks/test_result.py
--- a/t/unit/tasks/test_result.py
+++ b/t/unit/tasks/test_result.py
@@ -708,19 +708,19 @@ def test_get_nested_without_native_join(self):
]),
]),
])
- ts.app.backend = backend
- vals = ts.get()
- assert vals == [
- '1.1',
- [
- '2.1',
+ with patch('celery.Celery.backend', new=backend):
+ vals = ts.get()
+ assert vals == [
+ '1.1',
[
- '3.1',
- '3.2',
- ]
- ],
- ]
+ '2.1',
+ [
+ '3.1',
+ '3.2',
+ ]
+ ],
+ ]
def test_getitem(self):
subs = [MockAsyncResultSuccess(uuid(), app=self.app),
@@ -771,15 +771,16 @@ def test_join_native(self):
results = [self.app.AsyncResult(uuid(), backend=backend)
for i in range(10)]
ts = self.app.GroupResult(uuid(), results)
- ts.app.backend = backend
- backend.ids = [result.id for result in results]
- res = ts.join_native()
- assert res == list(range(10))
- callback = Mock(name='callback')
- assert not ts.join_native(callback=callback)
- callback.assert_has_calls([
- call(r.id, i) for i, r in enumerate(ts.results)
- ])
+
+ with patch('celery.Celery.backend', new=backend):
+ backend.ids = [result.id for result in results]
+ res = ts.join_native()
+ assert res == list(range(10))
+ callback = Mock(name='callback')
+ assert not ts.join_native(callback=callback)
+ callback.assert_has_calls([
+ call(r.id, i) for i, r in enumerate(ts.results)
+ ])
def test_join_native_raises(self):
ts = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())])
@@ -813,9 +814,9 @@ def test_iter_native(self):
results = [self.app.AsyncResult(uuid(), backend=backend)
for i in range(10)]
ts = self.app.GroupResult(uuid(), results)
- ts.app.backend = backend
- backend.ids = [result.id for result in results]
- assert len(list(ts.iter_native())) == 10
+ with patch('celery.Celery.backend', new=backend):
+ backend.ids = [result.id for result in results]
+ assert len(list(ts.iter_native())) == 10
def test_join_timeout(self):
ar = MockAsyncResultSuccess(uuid(), app=self.app)
diff --git a/t/unit/test_canvas.py b/t/unit/test_canvas.py
new file mode 100644
--- /dev/null
+++ b/t/unit/test_canvas.py
@@ -0,0 +1,33 @@
+import uuid
+
+
+class test_Canvas:
+
+ def test_freeze_reply_to(self):
+ # Tests that Canvas.freeze() correctly
+ # creates reply_to option
+
+ @self.app.task
+ def test_task(a, b):
+ return
+
+ s = test_task.s(2, 2)
+ s.freeze()
+
+ from concurrent.futures import ThreadPoolExecutor
+
+ def foo():
+ s = test_task.s(2, 2)
+ s.freeze()
+ return self.app.thread_oid, s.options['reply_to']
+ with ThreadPoolExecutor(max_workers=1) as executor:
+ future = executor.submit(foo)
+ t_reply_to_app, t_reply_to_opt = future.result()
+
+ assert uuid.UUID(s.options['reply_to'])
+ assert uuid.UUID(t_reply_to_opt)
+ # reply_to must be equal to thread_oid of Application
+ assert self.app.thread_oid == s.options['reply_to']
+ assert t_reply_to_app == t_reply_to_opt
+ # reply_to must be thread-relative.
+ assert t_reply_to_opt != s.options['reply_to']
| Celery backends are not thread-safe
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [x] This has already been asked to the [discussion group](https://groups.google.com/forum/#!forum/celery-users) first.
- [x] I have read the relevant section in the
[contribution guide](http://docs.celeryproject.org/en/latest/contributing.html#other-bugs)
on reporting bugs.
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the bug was already fixed in the master branch.
- [X] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
#### Related Issues
https://github.com/celery/py-amqp/issues/330
#### Possible Duplicates
- #1779, #2066
## Minimally Reproducible Test Case
See https://github.com/celery/py-amqp/issues/330#issuecomment-706148277
# Expected Behavior
Celery should be callable from multiple threads, e.g. from a gunicorn container.
# Actual Behavior
Due to race conditions inside backends, Celery stalls while serving data or returns "0x01 while expecting 0xce" errors, among others (depending on the backend); see https://github.com/celery/py-amqp/issues/330, #1779, #2066
# Details
The issue is caused by sharing resources between threads and can be broken into two separate sub-issues:
1. Underlying Connection is shared between threads:
When the `rpc` backend is used, the kombu/py-amqp transport is used underneath. That transport supports **only one connection per thread**. Unfortunately, since a single backend object is used in all threads, all threads share a single connection, causing "0x01 while expecting 0xce" errors or other crashes:
https://github.com/celery/celery/blob/05da357502a109c05b35392391299d75d181ccab/celery/app/base.py#L1166-L1167
2. Single `oid` is shared between threads:
This unique identifier is used to define the reply-to queue of the `rpc` backend. Again, since the `oid` is global, **all** threads end up with a **single** result queue, so responses are never received:
https://github.com/celery/celery/blob/05da357502a109c05b35392391299d75d181ccab/celery/app/base.py#L1152-L1153
# Possible solution
Store these global variables in thread-local storage:
https://docs.python.org/3/library/threading.html#thread-local-data
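A minimal sketch of that approach, mirroring the patch at the top of this record (`_get_backend` and `_new_oid` stand in for the real helpers):
```python
# Sketch: per-thread backend and oid via threading.local (simplified).
import threading


class App:
    def __init__(self):
        self._local = threading.local()  # one attribute namespace per thread

    @property
    def backend(self):
        # Each thread lazily builds and caches its own backend, so the
        # underlying connection is never shared across threads.
        try:
            return self._local.backend
        except AttributeError:
            self._local.backend = self._get_backend()
            return self._local.backend

    @property
    def thread_oid(self):
        # Per-thread reply-to identifier, giving each thread its own
        # result queue for the rpc backend.
        try:
            return self._local.oid
        except AttributeError:
            self._local.oid = self._new_oid()
            return self._local.oid

    def _get_backend(self):
        ...  # construct a fresh backend/connection for the calling thread

    def _new_oid(self):
        ...  # generate a unique identifier for the calling thread
```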
| 2020-10-15T17:48:22 |
|
celery/celery | 6,419 | celery__celery-6419 | [
"6186"
] | 05da357502a109c05b35392391299d75d181ccab | diff --git a/celery/app/base.py b/celery/app/base.py
--- a/celery/app/base.py
+++ b/celery/app/base.py
@@ -428,6 +428,7 @@ def _task_from_fun(self, fun, name=None, base=None, bind=False, **options):
'_decorated': True,
'__doc__': fun.__doc__,
'__module__': fun.__module__,
+ '__annotations__': fun.__annotations__,
'__header__': staticmethod(head_from_fun(fun, bound=bind)),
'__wrapped__': run}, **options))()
# for some reason __qualname__ cannot be set in type()
| diff --git a/t/unit/app/test_app.py b/t/unit/app/test_app.py
--- a/t/unit/app/test_app.py
+++ b/t/unit/app/test_app.py
@@ -494,6 +494,16 @@ def foo():
finally:
_imports.MP_MAIN_FILE = None
+ def test_can_get_type_hints_for_tasks(self):
+ import typing
+
+ with self.Celery() as app:
+ @app.task
+ def foo(parameter: int) -> None:
+ pass
+
+ assert typing.get_type_hints(foo) == {'parameter': int, 'return': type(None)}
+
def test_annotate_decorator(self):
from celery.app.task import Task
| @app.task should copy __annotations__ from the task function like @functools.wraps
<!--
Please fill this template entirely and do not erase parts of it.
We reserve the right to close without a response
feature requests which are incomplete.
-->
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?utf8=%E2%9C%93&q=is%3Aissue+label%3A%22Issue+Type%3A+Feature+Request%22+)
for similar or identical feature requests.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?utf8=%E2%9C%93&q=is%3Apr+label%3A%22PR+Type%3A+Feature%22+)
for existing proposed implementations of this feature.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the same feature was already implemented in the
master branch.
- [x] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
## Related Issues and Possible Duplicates
<!--
Please make sure to search and mention any related issues
or possible duplicates to this issue as requested by the checklist above.
This may or may not include issues in other repositories that the Celery project
maintains or other repositories that are dependencies of Celery.
If you don't know how to mention issues, please refer to Github's documentation
on the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests
-->
#### Related Issues
- None
#### Possible Duplicates
- None
# Brief Summary
<!--
Please include a brief summary of what the feature does
and why it is needed.
-->
# Design
## Architectural Considerations
<!--
If more components other than Celery are involved,
describe them here and the effect it would have on Celery.
-->
None
## Proposed Behavior
<!--
Please describe in detail how this feature is going to behave.
Describe what happens in case of failures as well if applicable.
-->
Celery tasks created with `@app.task` should support `typing.get_type_hints(my_celery_task)`.
It can be solved by copying the `__annotations__` from the decorated function when available. This is done by `functools.wraps()` in Python 3.0+, if I understand [PEP 3107](https://www.python.org/dev/peps/pep-3107/) correctly. Without this, `typing.get_type_hints(my_celery_task)` raises an exception.
It is my understanding that some other similar properties that `functools.wraps` handles are already copied: https://github.com/celery/celery/blob/d3863d909759f1fd618f2a1af1766ce54c16d39b/celery/app/base.py#L453-L454
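For example, with the proposed change applied (a small sketch; `add` is a made-up task):
```python
import typing

from celery import Celery

app = Celery('example')


@app.task
def add(x: int, y: int) -> int:
    return x + y


# Copying __annotations__ onto the task object makes this work:
assert typing.get_type_hints(add) == {'x': int, 'y': int, 'return': int}
```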
## Proposed UI/UX
<!--
Please provide your ideas for the API, CLI options,
configuration key names etc. that will be introduced for this feature.
-->
## Diagrams
<!--
Please include any diagrams that might be relevant
to the implementation of this feature such as:
* Class Diagrams
* Sequence Diagrams
* Activity Diagrams
You can drag and drop images into the text box to attach them to this issue.
-->
N/A
## Alternatives
<!--
If you have considered any alternative implementations
describe them in detail below.
-->
None
| we will consider this in the near future
Thanks! Please let me know if help is needed for this to be implemented.
For Celery 4.5, which will be Python 3.6+ only.
For anyone finding this before it's fixed, I have the following work-around:
```
@functools.wraps(decorated_function, assigned=('__annotations__',))
@celery_app.task(bind=True, base=base, priority=priority, name=name)
def task(): ...
```
I think that's certainly desirable for Celery 5. We can also backport to 4.x.
@davidparsson Care to help us with that?
@thedrow, I guess you're asking me although you pinged someone else. Yes, I'll give it a shot, but it might be a few weeks until I find the time.
I mentioned the wrong guy by accident. My apologies.
Edited for clarity. | 2020-10-16T09:06:10 |
celery/celery | 6,440 | celery__celery-6440 | [
"6361",
"6361"
] | a2498d37aa40614a2eecb3dddcae61754056b5c9 | diff --git a/celery/app/amqp.py b/celery/app/amqp.py
--- a/celery/app/amqp.py
+++ b/celery/app/amqp.py
@@ -46,7 +46,6 @@ class Queues(dict):
create_missing (bool): By default any unknown queues will be
added automatically, but if this flag is disabled the occurrence
of unknown queues in `wanted` will raise :exc:`KeyError`.
- ha_policy (Sequence, str): Default HA policy for queues with none set.
max_priority (int): Default x-max-priority for queues with none set.
"""
@@ -55,14 +54,13 @@ class Queues(dict):
_consume_from = None
def __init__(self, queues=None, default_exchange=None,
- create_missing=True, ha_policy=None, autoexchange=None,
+ create_missing=True, autoexchange=None,
max_priority=None, default_routing_key=None):
dict.__init__(self)
self.aliases = WeakValueDictionary()
self.default_exchange = default_exchange
self.default_routing_key = default_routing_key
self.create_missing = create_missing
- self.ha_policy = ha_policy
self.autoexchange = Exchange if autoexchange is None else autoexchange
self.max_priority = max_priority
if queues is not None and not isinstance(queues, Mapping):
@@ -122,10 +120,6 @@ def _add(self, queue):
queue.exchange = self.default_exchange
if not queue.routing_key:
queue.routing_key = self.default_routing_key
- if self.ha_policy:
- if queue.queue_arguments is None:
- queue.queue_arguments = {}
- self._set_ha_policy(queue.queue_arguments)
if self.max_priority is not None:
if queue.queue_arguments is None:
queue.queue_arguments = {}
@@ -133,13 +127,6 @@ def _add(self, queue):
self[queue.name] = queue
return queue
- def _set_ha_policy(self, args):
- policy = self.ha_policy
- if isinstance(policy, (list, tuple)):
- return args.update({'ha-mode': 'nodes',
- 'ha-params': list(policy)})
- args['ha-mode'] = policy
-
def _set_max_priority(self, args):
if 'x-max-priority' not in args and self.max_priority is not None:
return args.update({'x-max-priority': self.max_priority})
@@ -251,7 +238,7 @@ def create_task_message(self):
def send_task_message(self):
return self._create_task_sender()
- def Queues(self, queues, create_missing=None, ha_policy=None,
+ def Queues(self, queues, create_missing=None,
autoexchange=None, max_priority=None):
# Create new :class:`Queues` instance, using queue defaults
# from the current configuration.
@@ -259,8 +246,6 @@ def Queues(self, queues, create_missing=None, ha_policy=None,
default_routing_key = conf.task_default_routing_key
if create_missing is None:
create_missing = conf.task_create_missing_queues
- if ha_policy is None:
- ha_policy = conf.task_queue_ha_policy
if max_priority is None:
max_priority = conf.task_queue_max_priority
if not queues and conf.task_default_queue:
@@ -271,7 +256,7 @@ def Queues(self, queues, create_missing=None, ha_policy=None,
else autoexchange)
return self.queues_cls(
queues, self.default_exchange, create_missing,
- ha_policy, autoexchange, max_priority, default_routing_key,
+ autoexchange, max_priority, default_routing_key,
)
def Router(self, queues=None, create_missing=None):
diff --git a/celery/app/defaults.py b/celery/app/defaults.py
--- a/celery/app/defaults.py
+++ b/celery/app/defaults.py
@@ -267,7 +267,6 @@ def __repr__(self):
type='dict', old={'celery_task_publish_retry_policy'},
),
queues=Option(type='dict'),
- queue_ha_policy=Option(None, type='string'),
queue_max_priority=Option(None, type='int'),
reject_on_worker_lost=Option(type='bool'),
remote_tracebacks=Option(False, type='bool'),
| diff --git a/t/unit/app/test_amqp.py b/t/unit/app/test_amqp.py
--- a/t/unit/app/test_amqp.py
+++ b/t/unit/app/test_amqp.py
@@ -89,23 +89,6 @@ def test_setitem_adds_default_exchange(self):
q['foo'] = queue
assert q['foo'].exchange == q.default_exchange
- @pytest.mark.parametrize('ha_policy,qname,q,qargs,expected', [
- (None, 'xyz', 'xyz', None, None),
- (None, 'xyz', 'xyz', {'x-foo': 'bar'}, {'x-foo': 'bar'}),
- ('all', 'foo', Queue('foo'), None, {'ha-mode': 'all'}),
- ('all', 'xyx2',
- Queue('xyx2', queue_arguments={'x-foo': 'bar'}),
- None,
- {'ha-mode': 'all', 'x-foo': 'bar'}),
- (['A', 'B', 'C'], 'foo', Queue('foo'), None, {
- 'ha-mode': 'nodes',
- 'ha-params': ['A', 'B', 'C']}),
- ])
- def test_with_ha_policy(self, ha_policy, qname, q, qargs, expected):
- queues = Queues(ha_policy=ha_policy, create_missing=False)
- queues.add(q, queue_arguments=qargs)
- assert queues[qname].queue_arguments == expected
-
def test_select_add(self):
q = Queues()
q.select(['foo', 'bar'])
@@ -118,11 +101,6 @@ def test_deselect(self):
q.deselect('bar')
assert sorted(q._consume_from.keys()) == ['foo']
- def test_with_ha_policy_compat(self):
- q = Queues(ha_policy='all')
- q.add('bar')
- assert q['bar'].queue_arguments == {'ha-mode': 'all'}
-
def test_add_default_exchange(self):
ex = Exchange('fff', 'fanout')
q = Queues(default_exchange=ex)
@@ -143,12 +121,6 @@ def test_alias(self):
({'max_priority': 10},
'moo', Queue('moo', queue_arguments=None),
{'x-max-priority': 10}),
- ({'ha_policy': 'all', 'max_priority': 5},
- 'bar', 'bar',
- {'ha-mode': 'all', 'x-max-priority': 5}),
- ({'ha_policy': 'all', 'max_priority': 5},
- 'xyx2', Queue('xyx2', queue_arguments={'x-max-priority': 2}),
- {'ha-mode': 'all', 'x-max-priority': 2}),
({'max_priority': None},
'foo2', 'foo2',
None),
@@ -255,10 +227,6 @@ def test_countdown_negative(self):
with pytest.raises(ValueError):
self.app.amqp.as_task_v2(uuid(), 'foo', countdown=-1232132323123)
- def test_Queues__with_ha_policy(self):
- x = self.app.amqp.Queues({}, ha_policy='all')
- assert x.ha_policy == 'all'
-
def test_Queues__with_max_priority(self):
x = self.app.amqp.Queues({}, max_priority=23)
assert x.max_priority == 23
| RabbitMQ task_queue_ha_policy has no effect
<!--
Please fill this template entirely and do not erase parts of it.
We reserve the right to close without a response
bug reports which are incomplete.
-->
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?utf8=%E2%9C%93&q=is%3Aissue+label%3A%22Category%3A+Documentation%22+)
for similar or identical bug reports.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the bug was already fixed in the master branch.
- [x] I have included all related issues and possible duplicate issues in this issue
(If there are none, check this box anyway).
## Related Issues and Possible Duplicates
<!--
Please make sure to search and mention any related issues
or possible duplicates to this issue as requested by the checklist above.
This may or may not include issues in other repositories that the Celery project
maintains or other repositories that are dependencies of Celery.
If you don't know how to mention issues, please refer to Github's documentation
on the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests
-->
#### Related Issues
- None
#### Possible Duplicates
- None
# Description
The RabbitMQ setting [task_queue_ha_policy](https://docs.celeryproject.org/en/stable/userguide/configuration.html#task-queue-ha-policy) no longer has any effect, as [described in the release notes for version 3](https://www.rabbitmq.com/blog/2012/11/19/breaking-things-with-rabbitmq-3-0/).
This was [reported to the mailing list here](https://groups.google.com/u/1/g/celery-users/c/hiZPiz2JWo8/m/2_q_Q5sM0BIJ) (in 2013!) and the answer then was that it's deprecated and will be removed.
# Suggestions
Remove the configuration parameter and maybe reference the blog post and/or docs on how to set a policy.
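For reference, since RabbitMQ 3.0 mirroring is configured on the broker side with a policy, e.g. (an illustrative command; the policy name and queue pattern are arbitrary):
```
rabbitmqctl set_policy ha-celery "^celery" '{"ha-mode":"all"}'
```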
| contributions are welcome
@auvipy @safwanrahman The fix that has been merged here does not address the problem.
The [RabbitMQ release notes](https://www.rabbitmq.com/blog/2012/11/19/breaking-things-with-rabbitmq-3-0/) describe how **it is no longer possible** to configure HA at queue declaration time. You have to use a policy, which is set on the broker and cannot be controlled by the client.
| 2020-10-26T20:45:36 |
celery/celery | 6,447 | celery__celery-6447 | [
"6386"
] | 7c3da03a07882ca86b801ad78dd509a67cba60af | diff --git a/celery/concurrency/asynpool.py b/celery/concurrency/asynpool.py
--- a/celery/concurrency/asynpool.py
+++ b/celery/concurrency/asynpool.py
@@ -84,6 +84,7 @@ def unpack_from(fmt, iobuf, unpack=unpack): # noqa
SCHED_STRATEGIES = {
None: SCHED_STRATEGY_FAIR,
+ 'default': SCHED_STRATEGY_FAIR,
'fast': SCHED_STRATEGY_FCFS,
'fcfs': SCHED_STRATEGY_FCFS,
'fair': SCHED_STRATEGY_FAIR,
| worker with concurrency running only a single task while multiple tasks reserved
<!--
Please fill this template entirely and do not erase parts of it.
We reserve the right to close without a response
bug reports which are incomplete.
-->
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [ ] This has already been asked to the [discussion group](https://groups.google.com/forum/#!forum/celery-users) first.
- [x] I have read the relevant section in the
[contribution guide](http://docs.celeryproject.org/en/latest/contributing.html#other-bugs)
on reporting bugs.
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the bug was already fixed in the master branch.
- [x] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
## Mandatory Debugging Information
- [x] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [x] I have included the contents of ``pip freeze`` in the issue.
- [x] I have included all the versions of all the external dependencies required
to reproduce this bug.
## Optional Debugging Information
<!--
Try some of the below if you think they are relevant.
It will help us figure out the scope of the bug and how many users it affects.
-->
- [ ] I have tried reproducing the issue on more than one Python version
and/or implementation.
- [ ] I have tried reproducing the issue on more than one message broker and/or
result backend.
- [ ] I have tried reproducing the issue on more than one version of the message
broker and/or result backend.
- [ ] I have tried reproducing the issue on more than one operating system.
- [ ] I have tried reproducing the issue on more than one workers pool.
- [x] I have tried reproducing the issue with autoscaling, retries,
ETA/Countdown & rate limits disabled.
- [x] I have tried reproducing the issue after downgrading
and/or upgrading Celery and its dependencies.
## Related Issues and Possible Duplicates
<!--
Please make sure to search and mention any related issues
or possible duplicates to this issue as requested by the checklist above.
This may or may not include issues in other repositories that the Celery project
maintains or other repositories that are dependencies of Celery.
If you don't know how to mention issues, please refer to Github's documentation
on the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests
-->
#### Related Issues
- None
#### Possible Duplicates
- None
## Environment & Settings
<!-- Include the contents of celery --version below -->
**Celery Version**: 5.0.0 (singularity)
<!-- Include the output of celery -A proj report below -->
<details>
<summary><b><code>celery report</code> Output:</b></summary>
<p>
```
root@ad36e1cf0d81:/app# celery -A app.worker report
software -> celery:5.0.0 (singularity) kombu:5.0.2 py:3.7.5
billiard:3.6.3.0 py-amqp:5.0.1
platform -> system:Linux arch:64bit
kernel version:5.4.0-1025-aws imp:CPython
loader -> celery.loaders.app.AppLoader
settings -> transport:amqp results:disabled
broker_url: 'amqp://prediction_celery:********@broker:5672//'
deprecated_settings: None
task_routes: {
'app.worker.*': {'queue': 'high_memory_usage'}}
task_serializer: 'pickle'
accept_content: ['json', 'pickle']
broker_transport_options: {
'interval_max': 0.5,
'interval_start': 0,
'interval_step': 0.2,
'max_retries': 3}
worker_prefetch_multiplier: 1
software -> celery:5.0.0 (singularity) kombu:5.0.2 py:3.7.5
billiard:3.6.3.0 py-amqp:5.0.1
platform -> system:Linux arch:64bit
kernel version:5.4.0-1025-aws imp:CPython
loader -> celery.loaders.default.Loader
settings -> transport:amqp results:disabled
deprecated_settings: None
```
</p>
</details>
# Steps to Reproduce
## Required Dependencies
<!-- Please fill the required dependencies to reproduce this issue -->
* **Minimal Python Version**: 3.7.5
* **Minimal Celery Version**: 5.0.0
* **Minimal Kombu Version**: 5.0.2
* **Minimal Broker Version**: RabbitMQ version: 3.8.9
* **Minimal Result Backend Version**: N/A or Unknown
* **Minimal OS and/or Kernel Version**: N/A or Unknown
* **Minimal Broker Client Version**: N/A or Unknown
* **Minimal Result Backend Client Version**: N/A or Unknown
### Python Packages
<!-- Please fill the contents of pip freeze below -->
<details>
<summary><b><code>pip freeze</code> Output:</b></summary>
<p>
```
amqp==5.0.1
bidalgo-common==0.0
billiard==3.6.3.0
boto3==1.10.45
botocore==1.13.45
celery==5.0.0
certifi==2020.6.20
chardet==3.0.4
click==7.1.2
click-didyoumean==0.0.3
click-repl==0.1.6
colormath==3.0.0
common==0.1
coverage==5.0.3
creative-analysis===-module.-creative-analysis.version-.from.-tmp-pip-req-build-v4szy4xr-creative-analysis-version.py-
custom-stats==0.1
cycler==0.10.0
dataclasses==0.6
dataclasses-json==0.3.7
decorator==4.4.2
docutils==0.15.2
idna==2.8
importlib-metadata==2.0.0
jmespath==0.10.0
joblib==0.16.0
kiwisolver==1.2.0
kombu==5.0.2
marshmallow==3.8.0
marshmallow-enum==1.5.1
matplotlib==3.1.1
mypy-extensions==0.4.3
mysqlclient==2.0.1
networkx==2.5
nlp-tools==0.0
nltk==3.4.5
numpy==1.16.5
pandas==0.25.1
patsy==0.5.1
Pillow==5.4.1
prompt-toolkit==3.0.7
pydantic==1.6.1
PyMySQL==0.9.3
pyparsing==2.4.7
python-dateutil==2.8.1
python-dotenv==0.10.3
pytz==2020.1
redis==3.3.11
requests==2.22.0
s3transfer==0.2.1
scikit-learn==0.21.3
scipy==1.3.1
six==1.15.0
SQLAlchemy==1.3.19
statsmodels==0.11.0
stringcase==1.2.0
text2digits==0.0.9
typing-extensions==3.7.4.3
typing-inspect==0.6.0
urllib3==1.25.10
vine==5.0.0
wcwidth==0.2.5
Werkzeug==1.0.1
zipp==3.2.0
```
</p>
</details>
### Other Dependencies
<!--
Please provide system dependencies, configuration files
and other dependency information if applicable
-->
<details>
<p>
Status of node rabbit@f1b3c114b8d6 ...
Runtime
OS PID: 275
OS: Linux
Uptime (seconds): 759
Is under maintenance?: false
RabbitMQ version: 3.8.9
Node name: rabbit@f1b3c114b8d6
Erlang configuration: Erlang/OTP 23 [erts-11.1] [source] [64-bit] [smp:2:2] [ds:2:2:10] [async-threads:64]
Erlang processes: 669 used, 1048576 limit
Scheduler run queue: 1
Cluster heartbeat timeout (net_ticktime): 60
</p>
</details>
## Minimally Reproducible Test Case
<!--
Please provide a reproducible test case.
Refer to the Reporting Bugs section in our contribution guide.
We prefer submitting test cases in the form of a PR to our integration test suite.
If you can provide one, please mention the PR number below.
If not, please attach the most minimal code example required to reproduce the issue below.
If the test case is too large, please include a link to a gist or a repository below.
-->
<details>
<p>
task.py:
```
import os
from celery import Celery
from time import sleep
celery_app = Celery("worker", broker=os.environ['CELERY_BROKER_URL'])
@celery_app.task()
def run():
sleep(60)
return
```
run.py:
```
from task import run
if __name__ == '__main__':
run.apply_async()
run.apply_async()
```
1. IMPORTANT - Start the broker - **the bug happens only when the first tasks run after the broker starts.**
2. Run celery:
```$ celery -A task worker --pool=prefork --concurrency=2```
3. Run tasks:
```$ python run.py```
4. inspect active tasks:
```$ celery -A task inspect active```
5. inspect reserved tasks:
```$ celery -A task inspect reserved```
</p>
</details>
# Expected Behavior
<!-- Describe in detail what you expect to happen -->
Worker is running 2 active tasks
# Actual Behavior
<!--
Describe in detail what actually happened.
Please include a backtrace and surround it with triple backticks (```).
In addition, include the Celery daemon logs, the broker logs,
the result backend logs and system logs below if they will help us debug
the issue.
-->
The worker is running only 1 active task and you can see 1 reserved task.
The same test performed on 4.4.7 (cliffs) worked as expected.
Even in 5.0.0, when you run again:
3. Run tasks:
```$ python run.py```
4. inspect active tasks:
```$ celery -A task inspect active```
5. inspect reserved tasks:
```$ celery -A task inspect reserved```
It will work as expected - you will see 2 active tasks and 0 reserved.
| Seeing the same on Ubuntu 20 and Python 3.7.
Started testing v5 today and found my performance tanked.
Facing the same issue on ubuntu 16.04 and python3.6.
Celery v5.0.0 executes a single task while others sit idle as reserved. Shifting back to 4.4.7 gives the expected behavior.
I'll try to reproduce the problem.
> Started testing v5 today and found my performance tanked.
In the future, if you can, please try our release candidates.
I'd rather we find these issues before GA. :smile:
Seeing the same here in the Python 3.6.10 Docker image
Seeing the same issue with oraculum (https://pagure.io/fedora-qa/oraculum).
Celery 4.4.7 works as expected. Only one worker is getting tasks done with Celery 5.0.0. We're using the Redis backend for Celery.
Same here. When is this fix expected to be released?
Nvm this is just an issue, not an MR.
Same here, returned to 4.4.7 and the work can run tasks concurrently. Tested in CentOS 7 - Python 3.6.8 and macOS 10.14 - Python 3.7.1.
My celery code:
```python
import time
from celery import Celery

app = Celery('hello', broker='redis://localhost:6379/3')


@app.task
def long(x):
    print(f'sleep {x}')
    time.sleep(x)
    print('done sleep')
```
With 5.0.1: *(screenshot: only one task executing at a time)*

With 4.4.7: *(screenshot: tasks executing concurrently)*
Took me two days to realize this is not a problem with my configuration, my system, or my knowledge of Celery. 🤦♂️
I also experienced this problem. Upgraded from 4.4.7 to 5.0.1 and performance plummeted because workers aren't executing tasks concurrently. Reverted back to 4.4.7 and the problem went away immediately.
That's kind of a breaking issue. Any chance this could be addressed as a separate small bugfix release? 5.0.2 with just this fix?
I've been digging into this today and have some observations:
* `inspect active` shows that only a single worker PID is running tasks
* sometimes two tasks will run concurrently with different worker PIDs if the task queue was empty
* after tasks begin to serialize, only one worker will handle tasks
* On my box with concurrency >= 6, worker 6 appears to always win whatever race to whatever mutex this is
* this is stable across restarts of the worker, I've not checked across reboots
* the other worker which sometimes gets tasks also seems to be deterministic - 2 for concurrency 6,7; 7 for concurrency > 7
* this misbehaviour is relevant to prefork only AFAICT - gevent and eventlet seem fine
I'm running a tweaked variant of @reorx's MRTC so I can do `celery -A app worker` and have it queue tasks for me at the same time as starting a worker, or running `python app.py` to queue more tasks.
```python
import sys
import time
from celery import Celery

app = Celery("app", broker="redis://", backend="redis://")


@app.task
def long_(x):
    print(f"sleep {x}")
    time.sleep(x)
    print("done sleep")


if "inspect" not in sys.argv:
    for _ in range(10):
        long_.s(1).delay()
```
I am able to trick the prefork worker into running tasks concurrently if I shuffle the list of events returned by the kombu poller to the hub, prior to the event handling loop around
https://github.com/celery/kombu/blob/b01a448a4c85753600cd331bd8af618de4b0ee67/kombu/asynchronous/hub.py#L305
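Roughly, the experiment looked like this (a hypothetical, self-contained sketch of the idea, not kombu's actual loop):
```python
import random


def dispatch_events(events, handlers):
    # kombu's hub iterates poller events in the order they are returned;
    # shuffling first removes the bias toward whichever file descriptors
    # happen to land at the front of that list.
    events = list(events)
    random.shuffle(events)
    for fileno, event in events:
        handlers[fileno](event)
```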
A bit of instrumentation around that loop suggests that the `poll_timeout` is being set to a pretty high value and we're seemingly always ending up choosing to deliver any pending request to the same one or two workers which happen to sort at the start of the list of events. This seems like some sort of failure to round-robin in either the hub or celery's use of it, but I can't spot what could have caused it between 4.4.7 and now. Perhaps @thedrow or @matusvalo have some insight? | 2020-10-30T00:44:37 |
|
celery/celery | 6,452 | celery__celery-6452 | [
"6451"
] | f1145a2d91bd525f8e0f7a5662c9093e02fbf5a8 | diff --git a/celery/app/task.py b/celery/app/task.py
--- a/celery/app/task.py
+++ b/celery/app/task.py
@@ -8,7 +8,7 @@
from celery import current_app, group, states
from celery._state import _task_stack
-from celery.canvas import signature
+from celery.canvas import _chain, signature
from celery.exceptions import (Ignore, ImproperlyConfigured,
MaxRetriesExceededError, Reject, Retry)
from celery.local import class_property
@@ -880,6 +880,11 @@ def replace(self, sig):
link=self.request.callbacks,
link_error=self.request.errbacks,
)
+ elif isinstance(sig, _chain):
+ if not sig.tasks:
+ raise ImproperlyConfigured(
+ "Cannot replace with an empty chain"
+ )
if self.request.chain:
# We need to freeze the new signature with the current task's ID to
diff --git a/t/integration/tasks.py b/t/integration/tasks.py
--- a/t/integration/tasks.py
+++ b/t/integration/tasks.py
@@ -100,6 +100,11 @@ def replace_with_chain_which_raises(self, *args, link_msg=None):
return self.replace(c)
+@shared_task(bind=True)
+def replace_with_empty_chain(self, *_):
+ return self.replace(chain())
+
+
@shared_task(bind=True)
def add_to_all(self, nums, val):
"""Add the given value to all supplied numbers."""
| diff --git a/t/integration/test_canvas.py b/t/integration/test_canvas.py
--- a/t/integration/test_canvas.py
+++ b/t/integration/test_canvas.py
@@ -6,7 +6,7 @@
from celery import chain, chord, group, signature
from celery.backends.base import BaseKeyValueStoreBackend
-from celery.exceptions import TimeoutError
+from celery.exceptions import ImproperlyConfigured, TimeoutError
from celery.result import AsyncResult, GroupResult, ResultSet
from . import tasks
@@ -15,9 +15,10 @@
add_to_all, add_to_all_to_chord, build_chain_inside_task,
chord_error, collect_ids, delayed_sum,
delayed_sum_with_soft_guard, fail, identity, ids,
- print_unicode, raise_error, redis_echo, retry_once,
- return_exception, return_priority, second_order_replace1,
- tsum, replace_with_chain, replace_with_chain_which_raises)
+ print_unicode, raise_error, redis_echo,
+ replace_with_chain, replace_with_chain_which_raises,
+ replace_with_empty_chain, retry_once, return_exception,
+ return_priority, second_order_replace1, tsum)
RETRYABLE_EXCEPTIONS = (OSError, ConnectionError, TimeoutError)
@@ -584,6 +585,13 @@ def test_chain_with_eb_replaced_with_chain_with_eb(self, manager):
assert redis_connection.blpop('redis-echo', min(1, TIMEOUT)) is None
redis_connection.delete('redis-echo')
+ def test_replace_chain_with_empty_chain(self, manager):
+ r = chain(identity.s(1), replace_with_empty_chain.s()).delay()
+
+ with pytest.raises(ImproperlyConfigured,
+ match="Cannot replace with an empty chain"):
+ r.get(timeout=TIMEOUT)
+
class test_result_set:
| Unclear error when replacing with an empty chain
<!--
Please fill this template entirely and do not erase parts of it.
We reserve the right to close without a response
bug reports which are incomplete.
-->
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [x] This has already been asked to the [discussion group](https://groups.google.com/forum/#!forum/celery-users) first.
- [x] I have read the relevant section in the
[contribution guide](http://docs.celeryproject.org/en/latest/contributing.html#other-bugs)
on reporting bugs.
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the bug was already fixed in the master branch.
- [x] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
## Mandatory Debugging Information
- [x] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [x] I have included the contents of ``pip freeze`` in the issue.
- [x] I have included all the versions of all the external dependencies required
to reproduce this bug.
## Optional Debugging Information
<!--
Try some of the below if you think they are relevant.
It will help us figure out the scope of the bug and how many users it affects.
-->
- [x] I have tried reproducing the issue on more than one Python version
and/or implementation.
- [ ] I have tried reproducing the issue on more than one message broker and/or
result backend.
- [ ] I have tried reproducing the issue on more than one version of the message
broker and/or result backend.
- [ ] I have tried reproducing the issue on more than one operating system.
- [ ] I have tried reproducing the issue on more than one workers pool.
- [ ] I have tried reproducing the issue with autoscaling, retries,
ETA/Countdown & rate limits disabled.
- [ ] I have tried reproducing the issue after downgrading
and/or upgrading Celery and its dependencies.
## Related Issues and Possible Duplicates
<!--
Please make sure to search and mention any related issues
or possible duplicates to this issue as requested by the checklist above.
This may or may not include issues in other repositories that the Celery project
maintains or other repositories that are dependencies of Celery.
If you don't know how to mention issues, please refer to Github's documentation
on the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests
-->
#### Related Issues
- #6189
#### Possible Duplicates
- None
## Environment & Settings
<!-- Include the contents of celery --version below -->
**Celery version**: 5.0.1 (singularity)
<!-- Include the output of celery -A proj report below -->
<details>
<summary><b><code>celery report</code> Output:</b></summary>
<p>
```
software -> celery:5.0.1 (singularity) kombu:5.0.2 py:3.9.0
billiard:3.6.3.0 py-amqp:5.0.1
platform -> system:Linux arch:64bit, ELF
kernel version:5.8.0-26-generic imp:CPython
loader -> celery.loaders.default.Loader
settings -> transport:amqp results:disabled
deprecated_settings: None
```
</p>
</details>
# Steps to Reproduce
## Required Dependencies
<!-- Please fill the required dependencies to reproduce this issue -->
* **Minimal Python Version**: N/A or Unknown
* **Minimal Celery Version**: 4.x
* **Minimal Kombu Version**: N/A or Unknown
* **Minimal Broker Version**: N/A or Unknown
* **Minimal Result Backend Version**: N/A or Unknown
* **Minimal OS and/or Kernel Version**: N/A or Unknown
* **Minimal Broker Client Version**: N/A or Unknown
* **Minimal Result Backend Client Version**: N/A or Unknown
### Python Packages
<!-- Please fill the contents of pip freeze below -->
<details>
<summary><b><code>pip freeze</code> Output:</b></summary>
<p>
```
```
</p>
</details>
### Other Dependencies
<!--
Please provide system dependencies, configuration files
and other dependency information if applicable
-->
<details>
<p>
N/A
</p>
</details>
## Minimally Reproducible Test Case
<!--
Please provide a reproducible test case.
Refer to the Reporting Bugs section in our contribution guide.
We prefer submitting test cases in the form of a PR to our integration test suite.
If you can provide one, please mention the PR number below.
If not, please attach the most minimal code example required to reproduce the issue below.
If the test case is too large, please include a link to a gist or a repository below.
-->
<details>
<p>
```python
from celery import shared_task, chain


@shared_task
def foo(*args):
    return 'foo'


@shared_task(bind=True)
def replace_with_empty_chain(self, *args):
    self.replace(chain())


chain(foo.s(), replace_with_empty_chain.s()).delay()
```
</p>
</details>
# Expected Behavior
<!-- Describe in detail what you expect to happen -->
A clear error message is raised.
# Actual Behavior
<!--
Describe in detail what actually happened.
Please include a backtrace and surround it with triple backticks (```).
In addition, include the Celery daemon logs, the broker logs,
the result backend logs and system logs below if they will help us debug
the issue.
-->
An `IndexError` is raised with a message that doesn't explain what went wrong.
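With the patch above applied, the same scenario instead fails fast with an explicit message when collecting the result (a short illustration mirroring the integration test; `result` stands for the chain's `AsyncResult`):
```python
from celery.exceptions import ImproperlyConfigured

# result: the AsyncResult returned by the chain above (hypothetical)
try:
    result.get(timeout=10)
except ImproperlyConfigured as exc:
    print(exc)  # Cannot replace with an empty chain
```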
| 2020-11-01T16:33:22 |
|
celery/celery | 6,457 | celery__celery-6457 | [
"6445"
] | f50cf7d9944558167b85c14d73e8f790da251730 | diff --git a/celery/bin/celery.py b/celery/bin/celery.py
--- a/celery/bin/celery.py
+++ b/celery/bin/celery.py
@@ -1,5 +1,6 @@
"""Celery Command Line Interface."""
import os
+import pathlib
import traceback
import click
@@ -94,6 +95,9 @@ def convert(self, value, param, ctx):
help_group="Global Options")
@click.option('--workdir',
cls=CeleryOption,
+ type=pathlib.Path,
+ callback=lambda _, __, wd: os.chdir(wd) if wd else None,
+ is_eager=True,
help_group="Global Options")
@click.option('-C',
'--no-color',
@@ -121,8 +125,6 @@ def celery(ctx, app, broker, result_backend, loader, config, workdir,
click.echo(ctx.get_help())
ctx.exit()
- if workdir:
- os.chdir(workdir)
if loader:
# Default app takes loader from this env (Issue #1066).
os.environ['CELERY_LOADER'] = loader
| workdir option no longer works after upgrade to 5.0.1
<!--
Please fill this template entirely and do not erase parts of it.
We reserve the right to close without a response
bug reports which are incomplete.
-->
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [x] This has already been asked to the [discussion group](https://groups.google.com/forum/#!forum/celery-users) first.
- [x] I have read the relevant section in the
[contribution guide](http://docs.celeryproject.org/en/latest/contributing.html#other-bugs)
on reporting bugs.
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the bug was already fixed in the master branch.
- [x] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
## Mandatory Debugging Information
- [ ] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [x] I have included the contents of ``pip freeze`` in the issue.
- [x] I have included all the versions of all the external dependencies required
to reproduce this bug.
## Optional Debugging Information
<!--
Try some of the below if you think they are relevant.
It will help us figure out the scope of the bug and how many users it affects.
-->
- [ ] I have tried reproducing the issue on more than one Python version
and/or implementation.
- [ ] I have tried reproducing the issue on more than one message broker and/or
result backend.
- [ ] I have tried reproducing the issue on more than one version of the message
broker and/or result backend.
- [ ] I have tried reproducing the issue on more than one operating system.
- [ ] I have tried reproducing the issue on more than one workers pool.
- [ ] I have tried reproducing the issue with autoscaling, retries,
ETA/Countdown & rate limits disabled.
- [x] I have tried reproducing the issue after downgrading
and/or upgrading Celery and its dependencies.
## Related Issues and Possible Duplicates
<!--
Please make sure to search and mention any related issues
or possible duplicates to this issue as requested by the checklist above.
This may or may not include issues in other repositories that the Celery project
maintains or other repositories that are dependencies of Celery.
If you don't know how to mention issues, please refer to Github's documentation
on the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests
-->
#### Related Issues
- None
#### Possible Duplicates
- None
## Environment & Settings
<!-- Include the contents of celery --version below -->
**Celery version**:
<!-- Include the output of celery -A proj report below -->
<details>
<summary><b><code>celery report</code> Output:</b></summary>
<p>
software -> celery:5.0.1 (singularity) kombu:5.0.2 py:3.8.6
billiard:3.6.3.0 redis:3.5.3
platform -> system:Linux arch:64bit, ELF
kernel version:4.14.200-155.322.amzn2.x86_64 imp:CPython
loader -> celery.loaders.app.AppLoader
settings -> transport:redis results:****************
</p>
</details>
# Steps to Reproduce
## Required Dependencies
<!-- Please fill the required dependencies to reproduce this issue -->
* **Minimal Python Version**: N/A or Unknown
* **Minimal Celery Version**: 5.0.1
* **Minimal Kombu Version**: N/A or Unknown
* **Minimal Broker Version**: N/A or Unknown
* **Minimal Result Backend Version**: N/A or Unknown
* **Minimal OS and/or Kernel Version**: N/A or Unknown
* **Minimal Broker Client Version**: N/A or Unknown
* **Minimal Result Backend Client Version**: N/A or Unknown
### Python Packages
<!-- Please fill the contents of pip freeze below -->
<details>
<summary><b><code>pip freeze</code> Output:</b></summary>
<p>
```
```
</p>
</details>
### Other Dependencies
<!--
Please provide system dependencies, configuration files
and other dependency information if applicable
-->
<details>
<p>
N/A
</p>
</details>
## Minimally Reproducible Test Case
<!--
Please provide a reproducible test case.
Refer to the Reporting Bugs section in our contribution guide.
We prefer submitting test cases in the form of a PR to our integration test suite.
If you can provide one, please mention the PR number below.
If not, please attach the most minimal code example required to reproduce the issue below.
If the test case is too large, please include a link to a gist or a repository below.
-->
<details>
<p>
```python
```
</p>
</details>
# Expected Behavior
<!-- Describe in detail what you expect to happen -->
I think we are supposed to be able to run `celery multi start` from other directories when the --workdir option is passed.
# Actual Behavior
<!--
Describe in detail what actually happened.
Please include a backtrace and surround it with triple backticks (```).
In addition, include the Celery daemon logs, the broker logs,
the result backend logs and system logs below if they will help us debug
the issue.
-->
After upgrading from 4.x to 5.0.1, the `celery multi start` command stopped working when not run from the workdir, even with the workdir option passed in the command. The daemon service could not start as a result.
/opt/Cranberry/venv/bin/celery multi start worker1 --workdir=/opt/Cranberry/ --app=Cranberry
Error from the command above:
Error: Invalid value for '-A' / '--app':
Unable to load celery application.
Module 'cranberry' has no attribute 'celery'
> [email protected]: * Child terminated with exit code 2
FAILED
I inserted print('Workdir: ', workdir) at: https://github.com/celery/celery/blob/7c3da03a07882ca86b801ad78dd509a67cba60af/celery/bin/celery.py#L123
And it prints:
Workdir: None
Any help on this is much appreciated!
Update 14:50:
Tried to debug it a little more and it seems multi ends up calling subprocess with "celery --app=Cranberry worker --workdir=/opt/Cranberry/ ..."
The command goes to celery.bin.celery.APP.convert.
I tried to print ctx.params there, and notice that the --workdir param only shows up when it's before --app in command:
> (venv) 2 ec2-user@ip:/opt$ celery --app=Cranberry --workdir=/opt/Cranberry/ worker
> app convert: Cranberry {}
> Usage: celery [OPTIONS] COMMAND [ARGS]...
>
> Error: Invalid value for '-A' / '--app':
> Unable to load celery application.
> The module Cranberry was not found.
>
> (venv) 2 ec2-user@:/opt$ celery --workdir=/opt/Cranberry/ --app=Cranberry worker
> app convert: Cranberry {'workdir': '/opt/Cranberry/'}
> Usage: celery [OPTIONS] COMMAND [ARGS]...
>
> Error: Invalid value for '-A' / '--app':
And this param does not seem to be used for os.chdir or get passed to find_app.
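A minimal sketch of why the ordering matters with click (hypothetical and simplified; the real `APP` parameter type lives in `celery.bin.celery`):
```python
import click


class App(click.ParamType):
    name = 'application'

    def convert(self, value, param, ctx):
        # convert() runs as soon as --app is parsed; options that appear
        # later on the command line are not in ctx.params yet, which is
        # why --workdir is missing here whenever it follows --app.
        print('app convert:', value, ctx.params)
        return value


@click.command()
@click.option('--app', type=App())
@click.option('--workdir')
def celery(app, workdir):
    pass


if __name__ == '__main__':
    celery()
```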
| @thedrow - looks like we do the chdir after we load the app since the click refactor. IIUC we need to respect `ctx.params` before we attempt to find the app. I have a draft PR at #6446
> @thedrow - looks like we do the chdir after we load the app since the click refactor.
Why is that the case? It's rather a surprising side effect.
@maybe-sybr @thedrow Thank you for the attention on this! I tried the code change in the PR, and it seems there's still a problem with argument order: when --app appears before --workdir in the command, the --workdir option does not get parsed into ctx.params.
Unfortunately that's how the celeryd script works. It puts --workdir before --app, and then in celery.apps.multi the order gets reversed. https://github.com/celery/celery/blob/0833a270fae4738e128d56a63d0c4446ba0b1927/extra/generic-init.d/celeryd#L257
https://github.com/celery/celery/blob/0833a270fae4738e128d56a63d0c4446ba0b1927/celery/apps/multi.py#L178
I can modify the daemon script to make it work for my use case, but I'm not sure whether the argument-order problem has other impacts.
It is a problem. The argument order should not have any side effects.
That makes sense. AFAIK click will instantiate args using the `type` option passed in immediately upon processing, so having a partial command-line context at that point isn't too surprising - annoying though. I think what we might need is to defer app loading until we actually reach the `celery()` function when we know that the command line options context has been fully parsed,
I think we could potentially make workdir an eager option with a chdir callback per [0] to fix this misbehaviour properly.
[0] https://click.palletsprojects.com/en/7.x/options/?highlight=callback#callbacks-and-eager-options | 2020-11-03T00:29:05 |
|
celery/celery | 6,462 | celery__celery-6462 | [
"6450"
] | 42361bdd2cb858d24a896d447448b2a6bb47307d | diff --git a/celery/backends/mongodb.py b/celery/backends/mongodb.py
--- a/celery/backends/mongodb.py
+++ b/celery/backends/mongodb.py
@@ -248,6 +248,9 @@ def _forget(self, task_id):
def cleanup(self):
"""Delete expired meta-data."""
+ if not self.expires:
+ return
+
self.collection.delete_many(
{'date_done': {'$lt': self.app.now() - self.expires_delta}},
)
| diff --git a/t/unit/backends/test_mongodb.py b/t/unit/backends/test_mongodb.py
--- a/t/unit/backends/test_mongodb.py
+++ b/t/unit/backends/test_mongodb.py
@@ -485,6 +485,12 @@ def test_cleanup(self, mock_get_database):
mock_get_database.assert_called_once_with()
mock_collection.delete_many.assert_called()
+ self.backend.collections = mock_collection = Mock()
+ self.backend.expires = None
+
+ self.backend.cleanup()
+ mock_collection.delete_many.assert_not_called()
+
def test_get_database_authfailure(self):
x = MongoBackend(app=self.app)
x._get_connection = Mock()
| MongoDB backend TypeError: unsupported type for timedelta seconds component: NoneType
<!--
Please fill this template entirely and do not erase parts of it.
We reserve the right to close without a response
bug reports which are incomplete.
-->
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
- [ ] I have verified that the issue exists against the `master` branch of Celery.
- [ ] This has already been asked to the [discussion group](https://groups.google.com/forum/#!forum/celery-users) first.
- [x] I have read the relevant section in the
[contribution guide](http://docs.celeryproject.org/en/latest/contributing.html#other-bugs)
on reporting bugs.
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the bug was already fixed in the master branch.
- [x] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
## Mandatory Debugging Information
- [ ] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
- [ ] I have verified that the issue exists against the `master` branch of Celery.
- [x] I have included the contents of ``pip freeze`` in the issue.
- [x] I have included all the versions of all the external dependencies required
to reproduce this bug.
## Optional Debugging Information
<!--
Try some of the below if you think they are relevant.
It will help us figure out the scope of the bug and how many users it affects.
-->
- [ ] I have tried reproducing the issue on more than one Python version
and/or implementation.
- [ ] I have tried reproducing the issue on more than one message broker and/or
result backend.
- [ ] I have tried reproducing the issue on more than one version of the message
broker and/or result backend.
- [ ] I have tried reproducing the issue on more than one operating system.
- [ ] I have tried reproducing the issue on more than one workers pool.
- [ ] I have tried reproducing the issue with autoscaling, retries,
ETA/Countdown & rate limits disabled.
- [ ] I have tried reproducing the issue after downgrading
and/or upgrading Celery and its dependencies.
## Related Issues and Possible Duplicates
<!--
Please make sure to search and mention any related issues
or possible duplicates to this issue as requested by the checklist above.
This may or may not include issues in other repositories that the Celery project
maintains or other repositories that are dependencies of Celery.
If you don't know how to mention issues, please refer to Github's documentation
on the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests
-->
#### Related Issues
- None
#### Possible Duplicates
- None
## Environment & Settings
<!-- Include the contents of celery --version below -->
**Celery version**: 4.4.6 (cliffs)
<!-- Include the output of celery -A proj report below -->
<details>
<summary><b><code>celery report</code> Output:</b></summary>
<p>
```
can't disclose that
```
</p>
</details>
# Steps to Reproduce
## Required Dependencies
<!-- Please fill the required dependencies to reproduce this issue -->
* **Minimal Python Version**: Python 3.8.5
* **Minimal Celery Version**: 4.4.6 (cliffs)
* **Minimal Kombu Version**: 4.6.11
* **Minimal Broker Version**: N/A or Unknown
* **Minimal Result Backend Version**: N/A or Unknown
* **Minimal OS and/or Kernel Version**: N/A or Unknown
* **Minimal Broker Client Version**: N/A or Unknown
* **Minimal Result Backend Client Version**: N/A or Unknown
### Python Packages
<!-- Please fill the contents of pip freeze below -->
<details>
<summary><b><code>pip freeze</code> Output:</b></summary>
<p>
```
alembic==1.4.3
amqp==2.6.1
apache-airflow==1.10.12
apispec==1.3.3
appdirs==1.4.4
argcomplete==1.12.0
asgiref==3.2.10
astroid==2.4.2
attrs==19.3.0
Babel==2.8.0
billiard==3.6.3.0
cached-property==1.5.1
cachetools==4.1.1
cattrs==1.0.0
celery==4.4.6
certifi==2020.6.20
cffi==1.14.2
chardet==3.0.4
click==7.1.2
colorama==0.4.3
colorlog==4.0.2
configparser==3.5.3
coreapi==2.3.3
coreschema==0.0.4
croniter==0.3.34
cryptography==3.1
dataclasses==0.6
defusedxml==0.7.0rc1
dill==0.3.2
distlib==0.3.1
Django==3.0.5
django-braces==1.14.0
django-celery-beat==2.0.0
django-celery-results==1.2.1
django-cors-headers==3.5.0
django-debug-toolbar==2.2
django-extensions==3.0.9
django-oauth-toolkit==1.3.2
django-rest-framework-social-oauth2==1.1.0
django-rest-swagger==2.2.0
django-tenant-schemas==1.10.0
django-timezone-field==4.0
djangorestframework==3.11.1
djangorestframework-csv==2.1.0
djongo==1.3.3
dnspython==1.16.0
docutils==0.16
email-validator==1.1.1
eventlet==0.29.0
filelock==3.0.12
Flask==1.1.2
Flask-Admin==1.5.4
Flask-AppBuilder==2.3.4
Flask-Babel==1.0.0
Flask-Caching==1.3.3
Flask-JWT-Extended==3.24.1
Flask-Login==0.4.1
Flask-OpenID==1.2.5
Flask-SQLAlchemy==2.4.4
flask-swagger==0.2.14
Flask-WTF==0.14.3
funcsigs==1.0.2
future==0.18.2
gevent==20.6.2
google-api-core==1.22.2
google-api-python-client==1.11.0
google-auth==1.21.1
google-auth-httplib2==0.0.4
google-auth-oauthlib==0.4.1
google-cloud-core==1.4.1
google-cloud-logging==1.15.1
google-cloud-tasks==2.0.0
googleapis-common-protos==1.52.0
graphviz==0.14.1
greenlet==0.4.16
grpc-google-iam-v1==0.12.3
grpcio==1.32.0
gunicorn==20.0.4
httplib2==0.18.1
hvac==0.10.5
idna==2.10
iso8601==0.1.13
isort==5.5.2
itsdangerous==1.1.0
itypes==1.2.0
Jinja2==2.11.2
json-merge-patch==0.2
jsonschema==3.2.0
kombu==4.6.11
lazy-object-proxy==1.4.3
libcst==0.3.10
lockfile==0.12.2
Mako==1.1.3
Markdown==2.6.11
MarkupSafe==1.1.1
marshmallow==2.21.0
marshmallow-enum==1.5.1
marshmallow-sqlalchemy==0.23.1
mccabe==0.6.1
monotonic==1.5
mypy-extensions==0.4.3
mysqlclient==2.0.1
natsort==7.0.1
numpy==1.19.2
oauthlib==3.1.0
openapi-codec==1.3.2
ordered-set==4.0.2
pandas==1.1.2
pbr==5.5.0
pendulum==1.4.4
prison==0.1.3
proto-plus==1.9.1
protobuf==3.13.0
psutil==5.7.2
psycopg2==2.8.6
psycopg2-binary==2.8.6
pyasn1==0.4.8
pyasn1-modules==0.2.8
pycparser==2.20
pydot==1.4.1
Pygments==2.7.1
pygraphviz==1.6
PyJWT==1.7.1
pylint==2.6.0
pymongo==3.11.0
pyOpenSSL==19.1.0
pyparsing==2.4.7
pyrsistent==0.17.3
python-crontab==2.5.1
python-daemon==2.2.4
python-dateutil==2.8.1
python-editor==1.0.4
python-nvd3==0.15.0
python-slugify==4.0.1
python3-openid==3.2.0
pytz==2020.1
pytzdata==2020.1
PyYAML==5.3.1
redis==3.5.3
requests==2.24.0
requests-oauthlib==1.3.0
rsa==4.6
scout-apm==2.16.1
sentry-sdk==0.17.4
setproctitle==1.1.10
simplejson==3.17.2
six==1.15.0
social-auth-app-django==3.4.0
social-auth-core==3.3.3
SQLAlchemy==1.3.19
SQLAlchemy-JSONField==0.9.0
SQLAlchemy-Utils==0.36.8
sqlparse==0.2.4
stevedore==3.2.1
tabulate==0.8.7
tenacity==4.12.0
text-unidecode==1.3
thrift==0.13.0
toml==0.10.1
typing-extensions==3.7.4.3
typing-inspect==0.6.0
tzlocal==1.5.1
unicodecsv==0.14.1
uritemplate==3.0.1
urllib3==1.25.10
vine==1.3.0
virtualenv==20.0.31
virtualenv-clone==0.5.4
virtualenvwrapper==4.8.4
Werkzeug==0.16.1
wrapt==1.12.1
WTForms==2.3.3
zope.deprecation==4.4.0
zope.event==4.4
zope.interface==5.1.0
```
</p>
</details>
### Other Dependencies
<!--
Please provide system dependencies, configuration files
and other dependency information if applicable
-->
<details>
<p>
N/A
</p>
</details>
## Minimally Reproducible Test Case
<!--
Please provide a reproducible test case.
Refer to the Reporting Bugs section in our contribution guide.
We prefer submitting test cases in the form of a PR to our integration test suite.
If you can provide one, please mention the PR number below.
If not, please attach the most minimal code example required to reproduce the issue below.
If the test case is too large, please include a link to a gist or a repository below.
-->
<details>
<p>
```python
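# Hypothetical minimal reproducer, reconstructed from the traceback below;
# the broker/backend URLs are placeholders.
from celery import Celery

app = Celery(
    'repro',
    broker='redis://localhost:6379/0',
    backend='mongodb://localhost:27017/celery',
)
app.conf.result_expires = None  # disable result expiry

# The periodic celery.backend_cleanup task (or a direct call) then raises:
#   TypeError: unsupported type for timedelta seconds component: NoneType
app.backend.cleanup()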
```
</p>
</details>
# Expected Behavior
<!-- Describe in detail what you expect to happen -->
Celery task is executed successfully
# Actual Behavior
<!--
Describe in detail what actually happened.
Please include a backtrace and surround it with triple backticks (```).
In addition, include the Celery daemon logs, the broker logs,
the result backend logs and system logs below if they will help us debug
the issue.
-->
Fails with TypeError:
```
Task celery.backend_cleanup[75aa0973-534f-4812-a992-1ead4086f58c] raised unexpected: TypeError('unsupported type for timedelta seconds component: NoneType')
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/kombu/utils/objects.py", line 42, in __get__
return obj.__dict__[self.__name__]
KeyError: 'expires_delta'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/celery/app/trace.py", line 412, in trace_task
R = retval = fun(*args, **kwargs)
File "/usr/local/lib/python3.9/site-packages/celery/app/trace.py", line 704, in __protected_call__
return self.run(*args, **kwargs)
File "/usr/local/lib/python3.9/site-packages/celery/app/builtins.py", line 25, in backend_cleanup
app.backend.cleanup()
File "/usr/local/lib/python3.9/site-packages/celery/backends/mongodb.py", line 258, in cleanup
{'date_done': {'$lt': self.app.now() - self.expires_delta}},
File "/usr/local/lib/python3.9/site-packages/kombu/utils/objects.py", line 44, in __get__
value = obj.__dict__[self.__name__] = self.__get(obj)
File "/usr/local/lib/python3.9/site-packages/celery/backends/mongodb.py", line 312, in expires_delta
return timedelta(seconds=self.expires)
TypeError: unsupported type for timedelta seconds component: NoneType
```
| Please provide a reproducible test case.
I'll reopen this issue once you do.
When `expires` is None, it means that no results expire, right?
I have a fix anyway.
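For reference, a minimal sketch of the kind of guard that implies (hypothetical; the actual fix may differ):
```python
from datetime import timedelta

def expires_delta(expires):
    # Hypothetical guard, not necessarily the shipped fix: no TTL configured
    # means results never expire, so backend_cleanup has nothing to delete.
    return None if expires is None else timedelta(seconds=expires)
```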
Yes, that makes sense to me. | 2020-11-03T14:36:19 |
celery/celery | 6,481 | celery__celery-6481 | [
"6404"
] | 2a6c7cfe3b1283961887bf1cb3f5aa6c8aa70820 | diff --git a/celery/app/base.py b/celery/app/base.py
--- a/celery/app/base.py
+++ b/celery/app/base.py
@@ -1,12 +1,14 @@
"""Actual App instance implementation."""
import inspect
import os
+import sys
import threading
import warnings
from collections import UserDict, defaultdict, deque
from datetime import datetime
from operator import attrgetter
+from click.exceptions import Exit
from kombu import pools
from kombu.clocks import LamportClock
from kombu.common import oid_from
@@ -342,6 +344,30 @@ def close(self):
self._pool = None
_deregister_app(self)
+ def start(self, argv=None):
+ from celery.bin.celery import celery
+
+ celery.params[0].default = self
+
+ try:
+ celery.main(args=argv, standalone_mode=False)
+ except Exit as e:
+ return e.exit_code
+ finally:
+ celery.params[0].default = None
+
+ def worker_main(self, argv=None):
+ if argv is None:
+ argv = sys.argv
+
+ if 'worker' not in argv:
+ raise ValueError(
+ "The worker sub-command must be specified in argv.\n"
+ "Use app.start() to programmatically start other commands."
+ )
+
+ self.start(argv=argv)
+
def task(self, *args, **opts):
"""Decorator to create a task class out of any callable.
| Following Application examples lead to Instance of 'Celery' has no 'worker_main' member
<!--
Please fill this template entirely and do not erase parts of it.
We reserve the right to close without a response
bug reports which are incomplete.
-->
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?utf8=%E2%9C%93&q=is%3Aissue+label%3A%22Category%3A+Documentation%22+)
for similar or identical bug reports.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the bug was already fixed in the master branch.
- [x] I have included all related issues and possible duplicate issues in this issue
(If there are none, check this box anyway).
## Related Issues and Possible Duplicates
<!--
Please make sure to search and mention any related issues
or possible duplicates to this issue as requested by the checklist above.
This may or may not include issues in other repositories that the Celery project
maintains or other repositories that are dependencies of Celery.
If you don't know how to mention issues, please refer to Github's documentation
on the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests
-->
#### Related Issues
- None
#### Possible Duplicates
- None
# Description
I'm running celery inside a django project, from a python script. Until 5.0, I was able to run celery using worker_main() method :
```
from celery import Celery
app = Celery()
app.worker_main()
```
From the [application related documentation page](https://docs.celeryproject.org/en/stable/userguide/application.html), nothing points out that the worker_main() method is gone.
I suppose I'm missing something somewhere, but maybe that's due to some lack of clarity in the documentation.
| It's the same as
```
app.start()
```
@thedrow - Looks like some documentation changes didn't get captured in the Click CLI refactor work you did prior to 5.0 (01651d2).
@NullYing
`app.start()
AttributeError: 'Celery' object has no attribute 'start'`
It seems that this worker_main issue is not the only one causing problems.
I've followed the Django-related docs step by step, and I can't get any working process. It just stops without any error message.
Uppercase config options beginning with a defined namespace (as explained in the docs) lead to regression messages, and it seems impossible to define any specific config variables this way.
After 4 hours of fighting, I'm giving up. Celery 5.0 is unusable for me, and not having an up-to-date configuration doesn't help. Unfortunately, downgrading to 4.4 is the only way to have something working.
I appreciate the efforts, though, but it seems to me that some pieces are missing right now.
I have the same issue with `celery==5.0.1` (i.e. no `worker_main` or `start`; no idea how to programmatically start a worker).
Hi, any updates?
> Uppercase config options beginning with a defined namespace (as explained in the docs) lead to regression messages, and it seems impossible to define any specific config variables this way.
This is fixed in 5.0.2.
@maybe-sybr is correct. This seems like an oversight on my part.
I'll address this shortly.
Now I remember why I removed those methods.
It's harder to implement the same with Click.
I also don't understand why you'd need to pass the command line arguments as a list here.
Still looking into this.
Using the `CliRunner` from `click.testing` fails to run `celery worker`.
I think I can provide the `start` method without any overrides, which is not enough.
Hey,
I ran into the same issue on Python 3.8.
```
Traceback (most recent call last):
File "celery_worker.py", line 90, in <module>
app.worker_main(argv)
AttributeError: 'Celery' object has no attribute 'worker_main'
```
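For reference, with the methods restored by the patch above, the 4.x-style programmatic startup works again. A minimal sketch (note the patch requires the `worker` sub-command to appear in `argv`):
```python
from celery import Celery

app = Celery()

# Equivalent to running `celery worker --loglevel=INFO` from the command
# line; app.start() remains the generic entry point for other sub-commands.
app.worker_main(argv=['worker', '--loglevel=INFO'])
```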
| 2020-11-15T14:32:39 |
|
celery/celery | 6,488 | celery__celery-6488 | [
"6426"
] | 28ebcce5d277839011f7782755ac8452b37d6afe | diff --git a/celery/bin/worker.py b/celery/bin/worker.py
--- a/celery/bin/worker.py
+++ b/celery/bin/worker.py
@@ -94,6 +94,11 @@ def detach(path, argv, logfile=None, pidfile=None, uid=None,
executable=None, hostname=None):
"""Detach program by argv."""
fake = 1 if C_FAKEFORK else fake
+ # `detached()` will attempt to touch the logfile to confirm that error
+ # messages won't be lost after detaching stdout/err, but this means we need
+ # to pre-format it rather than relying on `setup_logging_subsystem()` like
+ # we can elsewhere.
+ logfile = node_format(logfile, hostname)
with detached(logfile, pidfile, uid, gid, umask, workdir, fake,
after_forkers=False):
try:
 | `celery multi` creates xxx_%I.log files
<!--
Please fill this template entirely and do not erase parts of it.
We reserve the right to close without a response
bug reports which are incomplete.
-->
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [x] I have read the relevant section in the
[contribution guide](http://docs.celeryproject.org/en/latest/contributing.html#other-bugs)
on reporting bugs.
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the bug was already fixed in the master branch.
- [x] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
## Mandatory Debugging Information
- [x] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [ ] I have included the contents of ``pip freeze`` in the issue.
- [x] I have included all the versions of all the external dependencies required
to reproduce this bug.
## Optional Debugging Information
<!--
Try some of the below if you think they are relevant.
It will help us figure out the scope of the bug and how many users it affects.
-->
- [ ] I have tried reproducing the issue on more than one Python version
and/or implementation.
- [ ] I have tried reproducing the issue on more than one message broker and/or
result backend.
- [ ] I have tried reproducing the issue on more than one version of the message
broker and/or result backend.
- [ ] I have tried reproducing the issue on more than one operating system.
- [ ] I have tried reproducing the issue on more than one workers pool.
- [ ] I have tried reproducing the issue with autoscaling, retries,
ETA/Countdown & rate limits disabled.
- [ ] I have tried reproducing the issue after downgrading
and/or upgrading Celery and its dependencies.
## Related Issues and Possible Duplicates
<!--
Please make sure to search and mention any related issues
or possible duplicates to this issue as requested by the checklist above.
This may or may not include issues in other repositories that the Celery project
maintains or other repositories that are dependencies of Celery.
If you don't know how to mention issues, please refer to Github's documentation
on the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests
-->
#### Related Issues
- None
#### Possible Duplicates
- None
## Environment & Settings
<!-- Include the contents of celery --version below -->
**Celery version**:
<!-- Include the output of celery -A proj report below -->
<details>
<summary><b><code>celery report</code> Output:</b></summary>
<p>
```
software -> celery:5.0.1 (singularity) kombu:5.0.2 py:3.8.3
billiard:3.6.3.0 redis:3.5.3
platform -> system:Linux arch:64bit, ELF
kernel version:5.4.0-48-generic imp:CPython
loader -> celery.loaders.app.AppLoader
settings -> transport:redis results:disabled
```
</p>
</details>
# Steps to Reproduce
## Required Dependencies
<!-- Please fill the required dependencies to reproduce this issue -->
* **Minimal Python Version**: N/A or Unknown
* **Minimal Celery Version**: 5.0.1
* **Minimal Kombu Version**: N/A or Unknown
* **Minimal Broker Version**: N/A or Unknown
* **Minimal Result Backend Version**: N/A or Unknown
* **Minimal OS and/or Kernel Version**: N/A or Unknown
* **Minimal Broker Client Version**: N/A or Unknown
* **Minimal Result Backend Client Version**: N/A or Unknown
### Python Packages
<!-- Please fill the contents of pip freeze below -->
<details>
<summary><b><code>pip freeze</code> Output:</b></summary>
<p>
```
```
</p>
</details>
### Other Dependencies
<!--
Please provide system dependencies, configuration files
and other dependency information if applicable
-->
<details>
<p>
N/A
</p>
</details>
## Minimally Reproducible Test Case
<!--
Please provide a reproducible test case.
Refer to the Reporting Bugs section in our contribution guide.
We prefer submitting test cases in the form of a PR to our integration test suite.
If you can provide one, please mention the PR number below.
If not, please attach the most minimal code example required to reproduce the issue below.
If the test case is too large, please include a link to a gist or a repository below.
-->
<details>
<p>
```shell
$ celery multi start normal low --app=service.celery:app --logfile=/tmp/celery/celery-%n%I.log --queues:normal=normal --queues:low=low --concurrency:normal=2 --concurrency:low=1 --pidfile=/tmp/celery/celeryd-%n.pid
celery multi v5.0.1 (singularity)
> Starting nodes...
> normal@dell: OK
> low@dell: OK
```
</p>
</details>
# Expected Behavior
<!-- Describe in detail what you expect to happen -->
Celery 4.4.7 behavior
```shell
$ ls -l /tmp/celery
-rw-r--r-- 1 www-data www-data 6 Oct 19 14:39 celeryd-low.pid
-rw-r--r-- 1 www-data www-data 6 Oct 19 14:39 celeryd-normal.pid
-rw-rw-r-- 1 www-data www-data 80 Oct 19 14:39 celery-low-1.log
-rw-rw-r-- 1 www-data www-data 316 Oct 19 14:39 celery-low.log
-rw-rw-r-- 1 www-data www-data 80 Oct 19 14:39 celery-normal-1.log
-rw-rw-r-- 1 www-data www-data 80 Oct 19 14:39 celery-normal-2.log
-rw-rw-r-- 1 www-data www-data 316 Oct 19 14:39 celery-normal.log
```
# Actual Behavior
<!--
Describe in detail what actually happened.
Please include a backtrace and surround it with triple backticks (```).
In addition, include the Celery daemon logs, the broker logs,
the result backend logs and system logs below if they will help us debug
the issue.
-->
```shell
$ ls -l /tmp/celery
-rw-r--r-- 1 www-data www-data 6 Oct 19 14:39 celeryd-low.pid
-rw-r--r-- 1 www-data www-data 6 Oct 19 14:39 celeryd-normal.pid
-rw-rw-r-- 1 www-data www-data 80 Oct 19 14:39 celery-low-1.log
-rw-rw-r-- 1 www-data www-data 0 Oct 19 14:39 celery-low%I.log
-rw-rw-r-- 1 www-data www-data 316 Oct 19 14:39 celery-low.log
-rw-rw-r-- 1 www-data www-data 80 Oct 19 14:39 celery-normal-1.log
-rw-rw-r-- 1 www-data www-data 80 Oct 19 14:39 celery-normal-2.log
-rw-rw-r-- 1 www-data www-data 0 Oct 19 14:39 celery-normal%I.log
-rw-rw-r-- 1 www-data www-data 316 Oct 19 14:39 celery-normal.log
```
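For context, a hedged sketch of the placeholder expansion involved: in the parent process the pool index behind `%I` has no value yet, so pre-formatting the template with `node_format` (as the fix above does) collapses it, and `detached()` then touches a real path instead of the literal `...%I.log` file. Placeholder semantics here are assumed from the node name documentation:
```python
from celery.utils.nodenames import node_format

# %n -> the node part of 'normal@dell'; %I -> the pool process index,
# which is empty in the parent process (assumed behaviour).
print(node_format('/tmp/celery/celery-%n%I.log', 'normal@dell'))
# expected: /tmp/celery/celery-normal.log
```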
| I can confirm this issue too on 5.0.1
I am aware of that problem.
I saw it myself but figured that other bugs need to be prioritized.
I'm adding this to the next bugfix release milestone.
If any of you have a solution, I'd gladly accept a PR.
I haven't checked the code for it yet (and I'm not that familiar with the Celery code base) but gave it some thought:
Could it be that the main command (`celery multi`) also tries to process the `--logfile` parameter, and because it doesn't do any further parsing on `%I` it creates that logfile? (While it also passes the parameter on successfully to its child worker commands.)
It's the direction I had in mind as well.
@kwist-sgr - can you confirm for me that `/tmp/celery` was clean prior to running both commands? I assume that's the case and the extra empty log files are created by some confusion along the lines of what @mfonville hypothesised above, but it'd be nice to get a sanity check before I check this out myself. Just hard to tell since the timestamps are the same in both `ls` outputs!
*Edit:* never mind, I confirmed the above myself since I had some free time. Looks like the culprit is:
https://github.com/celery/celery/blob/2a6c7cfe3b1283961887bf1cb3f5aa6c8aa70820/celery/platforms.py#L411-L414
which gets passed an unexpanded logfile value from `celery.bin.worker.detach()` | 2020-11-18T06:48:13 |
|
celery/celery | 6,501 | celery__celery-6501 | [
"6439",
"6439"
] | dea0bd1672cf8d0017f4dae3dfc216278637f90a | diff --git a/celery/bin/celery.py b/celery/bin/celery.py
--- a/celery/bin/celery.py
+++ b/celery/bin/celery.py
@@ -7,6 +7,8 @@
import click.exceptions
from click.types import ParamType
from click_didyoumean import DYMGroup
+from click_plugins import with_plugins
+from pkg_resources import iter_entry_points
from celery import VERSION_BANNER
from celery.app.utils import find_app
@@ -69,6 +71,7 @@ def convert(self, value, param, ctx):
APP = App()
+@with_plugins(iter_entry_points('celery.commands'))
@click.group(cls=DYMGroup, invoke_without_command=True)
@click.option('-A',
'--app',
| Adding new command-line options no longer works
<!--
Please fill this template entirely and do not erase parts of it.
We reserve the right to close without a response
bug reports which are incomplete.
-->
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?utf8=%E2%9C%93&q=is%3Aissue+label%3A%22Category%3A+Documentation%22+)
for similar or identical bug reports.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the bug was already fixed in the master branch.
- [x] I have included all related issues and possible duplicate issues in this issue
(If there are none, check this box anyway).
## Related Issues and Possible Duplicates
<!--
Please make sure to search and mention any related issues
or possible duplicates to this issue as requested by the checklist above.
This may or may not include issues in other repositories that the Celery project
maintains or other repositories that are dependencies of Celery.
If you don't know how to mention issues, please refer to Github's documentation
on the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests
-->
# Description
https://docs.celeryproject.org/en/stable/userguide/extending.html#extending-commandoptions
The described code no longer works in Celery 5+.
Looking at Celery's source code, `user_options` is now expected to hold sets; in fact, it is initialized to a `defaultdict(set)`.
I believe one should provide a `click.Option` instead, but when I tried that, things still fell apart, as Click later accesses `.opts` on whatever is provided.
# Suggestions
<!-- Please provide us suggestions for how to fix the documentation -->
Update the documentation to cover Click, or modify the code so that the above example works.
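For illustration, a sketch of the Click-style registration that the set-typed `user_options` implies (the option name is illustrative; the exact documented form may differ):
```python
from click import Option
from celery import Celery

app = Celery()

# user_options is a defaultdict(set), so Click parameter objects are
# added to the set registered for the target command:
app.user_options['worker'].add(
    Option(('--enable-my-option',), is_flag=True,
           help='Enable a custom worker option.'))
```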
| 2020-11-25T15:00:31 |
||
celery/celery | 6,524 | celery__celery-6524 | [
"6521"
] | a192f9cbf546e36b590166426d5e26a90964eeb1 | diff --git a/celery/backends/cache.py b/celery/backends/cache.py
--- a/celery/backends/cache.py
+++ b/celery/backends/cache.py
@@ -20,6 +20,10 @@
Please use one of the following backends instead: {1}\
"""
+# Global shared in-memory cache for in-memory cache client
+# This is to share cache between threads
+_DUMMY_CLIENT_CACHE = LRUCache(limit=5000)
+
def import_best_memcache():
if _imp[0] is None:
@@ -53,7 +57,7 @@ def Client(*args, **kwargs): # noqa
class DummyClient:
def __init__(self, *args, **kwargs):
- self.cache = LRUCache(limit=5000)
+ self.cache = _DUMMY_CLIENT_CACHE
def get(self, key, *args, **kwargs):
return self.cache.get(key)
| diff --git a/t/unit/backends/test_cache.py b/t/unit/backends/test_cache.py
--- a/t/unit/backends/test_cache.py
+++ b/t/unit/backends/test_cache.py
@@ -35,6 +35,16 @@ def test_no_backend(self):
with pytest.raises(ImproperlyConfigured):
CacheBackend(backend=None, app=self.app)
+ def test_memory_client_is_shared(self):
+ """This test verifies that memory:// backend state is shared over multiple threads"""
+ from threading import Thread
+ t = Thread(
+ target=lambda: CacheBackend(backend='memory://', app=self.app).set('test', 12345)
+ )
+ t.start()
+ t.join()
+ assert self.tb.client.get('test') == 12345
+
def test_mark_as_done(self):
assert self.tb.get_state(self.tid) == states.PENDING
assert self.tb.get_result(self.tid) is None
 | celery_worker pytest fixture times out since celery 5.0.3
Since the 5.0.3 release of Celery, the `celery_worker` pytest fixture leads to a timeout when performing the ping check.
The issue can be reproduced using this simple test file:
```python
pytest_plugins = ["celery.contrib.pytest"]
def test_create_task(celery_app, celery_worker):
@celery_app.task
def mul(x, y):
return x * y
assert mul.delay(4, 4).get(timeout=10) == 16
```
Below is the pytest output:
```
$ pytest -sv test_celery_worker.py
============================================================================================== test session starts ===============================================================================================
platform linux -- Python 3.7.3, pytest-6.1.2, py-1.9.0, pluggy-0.13.1 -- /home/anlambert/.virtualenvs/swh/bin/python3
cachedir: .pytest_cache
hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('/home/anlambert/tmp/.hypothesis/examples')
rootdir: /home/anlambert/tmp
plugins: postgresql-2.5.2, asyncio-0.14.0, mock-3.3.1, cov-2.10.1, django-4.1.0, requests-mock-1.8.0, hypothesis-5.41.3, forked-1.3.0, swh.core-0.9.2.dev4+g6f9779f, flask-1.1.0, xdist-2.1.0, dash-1.17.0, swh.journal-0.5.2.dev1+g12b31a2
collected 1 item
test_celery_worker.py::test_create_task ERROR
===================================================================================================== ERRORS =====================================================================================================
_______________________________________________________________________________________ ERROR at setup of test_create_task _______________________________________________________________________________________
request = <SubRequest 'celery_worker' for <Function test_create_task>>, celery_app = <Celery celery.tests at 0x7f99b4b91d30>, celery_includes = (), celery_worker_pool = 'solo', celery_worker_parameters = {}
@pytest.fixture()
def celery_worker(request,
celery_app,
celery_includes,
celery_worker_pool,
celery_worker_parameters):
# type: (Any, Celery, Sequence[str], str, Any) -> WorkController
"""Fixture: Start worker in a thread, stop it when the test returns."""
if not NO_WORKER:
for module in celery_includes:
celery_app.loader.import_task_module(module)
with worker.start_worker(celery_app,
pool=celery_worker_pool,
> **celery_worker_parameters) as w:
../dev/celery/celery/contrib/pytest.py:196:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/lib/python3.7/contextlib.py:112: in __enter__
return next(self.gen)
../dev/celery/celery/contrib/testing/worker.py:82: in start_worker
assert ping.delay().get(timeout=ping_task_timeout) == 'pong'
../dev/celery/celery/result.py:230: in get
on_message=on_message,
../dev/celery/celery/backends/base.py:655: in wait_for_pending
no_ack=no_ack,
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <celery.backends.cache.CacheBackend object at 0x7f99b411fb00>, task_id = '98b047a2-2027-453c-a317-eb31f44a2547', timeout = 10.0, interval = 0.5, no_ack = True, on_interval = <promise@0x7f99b4a2adf0>
def wait_for(self, task_id,
timeout=None, interval=0.5, no_ack=True, on_interval=None):
"""Wait for task and return its result.
If the task raises an exception, this exception
will be re-raised by :func:`wait_for`.
Raises:
celery.exceptions.TimeoutError:
If `timeout` is not :const:`None`, and the operation
takes longer than `timeout` seconds.
"""
self._ensure_not_eager()
time_elapsed = 0.0
while 1:
meta = self.get_task_meta(task_id)
if meta['status'] in states.READY_STATES:
return meta
if on_interval:
on_interval()
# avoid hammering the CPU checking status.
time.sleep(interval)
time_elapsed += interval
if timeout and time_elapsed >= timeout:
> raise TimeoutError('The operation timed out.')
E celery.exceptions.TimeoutError: The operation timed out.
../dev/celery/celery/backends/base.py:687: TimeoutError
============================================================================================ short test summary info =============================================================================================
ERROR test_celery_worker.py::test_create_task - celery.exceptions.TimeoutError: The operation timed out.
=============================================================================================== 1 error in 10.41s ================================================================================================
```
After a quick `git bisect` session, I managed to identify the commit that introduced the issue: https://github.com/celery/celery/commit/e2031688284484d5b5a57ba29cd9cae2d9a81e39
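For reference, a simplified sketch of the failure mode this points at: each `DummyClient` of the `memory://` backend used to build its own `LRUCache`, so a result stored by the worker thread was invisible to the thread polling for it (compare the module-level `_DUMMY_CLIENT_CACHE` introduced by the patch above):
```python
from kombu.utils.functional import LRUCache

class DummyClient:  # simplified from celery/backends/cache.py
    def __init__(self):
        self.cache = LRUCache(limit=5000)  # per-instance, not shared

worker_side, test_side = DummyClient(), DummyClient()
worker_side.cache['task-id'] = 'pong'
assert 'task-id' not in test_side.cache  # why the ping result never arrives
```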
| 2020-12-07T09:19:54 |
|
celery/celery | 6,578 | celery__celery-6578 | [
"6577"
] | 2dd6769d1f24d4af8a7edb66f8de9f0f6ee1c371 | diff --git a/celery/backends/base.py b/celery/backends/base.py
--- a/celery/backends/base.py
+++ b/celery/backends/base.py
@@ -928,7 +928,9 @@ def on_chord_part_return(self, request, state, result, **kwargs):
j = deps.join_native if deps.supports_native_join else deps.join
try:
with allow_join_result():
- ret = j(timeout=3.0, propagate=True)
+ ret = j(
+ timeout=app.conf.result_chord_join_timeout,
+ propagate=True)
except Exception as exc: # pylint: disable=broad-except
try:
culprit = next(deps._failed_join_report())
diff --git a/celery/backends/redis.py b/celery/backends/redis.py
--- a/celery/backends/redis.py
+++ b/celery/backends/redis.py
@@ -469,7 +469,10 @@ def on_chord_part_return(self, request, state, result,
else header_result.join
)
with allow_join_result():
- resl = join_func(timeout=3.0, propagate=True)
+ resl = join_func(
+ timeout=app.conf.result_chord_join_timeout,
+ propagate=True
+ )
else:
# Otherwise simply extract and decode the results we
# stashed along the way, which should be faster for large
| diff --git a/t/unit/backends/test_base.py b/t/unit/backends/test_base.py
--- a/t/unit/backends/test_base.py
+++ b/t/unit/backends/test_base.py
@@ -786,6 +786,18 @@ def callback(result):
callback.backend.fail_from_current_stack = Mock()
yield task, deps, cb
+ def test_chord_part_return_timeout(self):
+ with self._chord_part_context(self.b) as (task, deps, _):
+ try:
+ self.app.conf.result_chord_join_timeout += 1.0
+ self.b.on_chord_part_return(task.request, 'SUCCESS', 10)
+ finally:
+ self.app.conf.result_chord_join_timeout -= 1.0
+
+ self.b.expire.assert_not_called()
+ deps.delete.assert_called_with()
+ deps.join_native.assert_called_with(propagate=True, timeout=4.0)
+
def test_chord_part_return_propagate_set(self):
with self._chord_part_context(self.b) as (task, deps, _):
self.b.on_chord_part_return(task.request, 'SUCCESS', 10)
diff --git a/t/unit/backends/test_redis.py b/t/unit/backends/test_redis.py
--- a/t/unit/backends/test_redis.py
+++ b/t/unit/backends/test_redis.py
@@ -1012,6 +1012,21 @@ def test_apply_chord_complex_header(self):
mock_header_result.save.assert_called_once_with(backend=self.b)
mock_header_result.save.reset_mock()
+ def test_on_chord_part_return_timeout(self, complex_header_result):
+ tasks = [self.create_task(i) for i in range(10)]
+ random.shuffle(tasks)
+ try:
+ self.app.conf.result_chord_join_timeout += 1.0
+ for task, result_val in zip(tasks, itertools.cycle((42, ))):
+ self.b.on_chord_part_return(
+ task.request, states.SUCCESS, result_val,
+ )
+ finally:
+ self.app.conf.result_chord_join_timeout -= 1.0
+
+ join_func = complex_header_result.return_value.join_native
+ join_func.assert_called_once_with(timeout=4.0, propagate=True)
+
@pytest.mark.parametrize("supports_native_join", (True, False))
def test_on_chord_part_return(
self, complex_header_result, supports_native_join,
| Chord join timeout is still hardcoded somewhere
<!--
Please fill this template entirely and do not erase parts of it.
We reserve the right to close without a response
bug reports which are incomplete.
-->
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [ ] This has already been asked to the [discussion group](https://groups.google.com/forum/#!forum/celery-users) first.
- [x] I have read the relevant section in the
[contribution guide](http://docs.celeryproject.org/en/latest/contributing.html#other-bugs)
on reporting bugs.
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the bug was already fixed in the master branch.
- [x] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
## Mandatory Debugging Information
- [ ] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [ ] I have included the contents of ``pip freeze`` in the issue.
- [ ] I have included all the versions of all the external dependencies required
to reproduce this bug.
## Optional Debugging Information
<!--
Try some of the below if you think they are relevant.
It will help us figure out the scope of the bug and how many users it affects.
-->
- [ ] I have tried reproducing the issue on more than one Python version
and/or implementation.
- [ ] I have tried reproducing the issue on more than one message broker and/or
result backend.
- [ ] I have tried reproducing the issue on more than one version of the message
broker and/or result backend.
- [ ] I have tried reproducing the issue on more than one operating system.
- [ ] I have tried reproducing the issue on more than one workers pool.
- [ ] I have tried reproducing the issue with autoscaling, retries,
ETA/Countdown & rate limits disabled.
- [x] I have tried reproducing the issue after downgrading
and/or upgrading Celery and its dependencies.
## Related Issues and Possible Duplicates
<!--
Please make sure to search and mention any related issues
or possible duplicates to this issue as requested by the checklist above.
This may or may not include issues in other repositories that the Celery project
maintains or other repositories that are dependencies of Celery.
If you don't know how to mention issues, please refer to Github's documentation
on the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests
-->
#### Related Issues
- Allow GroupResult.join timeout to be configurable in celery.chord_unlock (#5349)
#### Possible Duplicates
- None
## Environment & Settings
<!-- Include the contents of celery --version below -->
**Celery version**: master
<!-- Include the output of celery -A proj report below -->
<details>
<summary><b><code>celery report</code> Output:</b></summary>
<p>
```
```
</p>
</details>
# Steps to Reproduce
Look through the Celery source code to find that both the base and Redis result backends call the join function with a hardcoded timeout:
[base.py](https://github.com/celery/celery/blob/master/celery/backends/base.py#L930)
```
with allow_join_result():
ret = j(timeout=3.0, propagate=True)
```
[redis.py](https://github.com/celery/celery/blob/master/celery/backends/redis.py#L471)
```
with allow_join_result():
resl = join_func(timeout=3.0, propagate=True)
```
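For reference, the patch above replaces the literal with the configured value; a minimal usage sketch (the option and its default live in `celery/app/defaults.py`):
```python
from celery import Celery

app = Celery()
# Read by on_chord_part_return() instead of the hardcoded 3.0 seconds,
# so very large chords can be given a realistic join window:
app.conf.result_chord_join_timeout = 30.0
```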
## Required Dependencies
<!-- Please fill the required dependencies to reproduce this issue -->
* **Minimal Python Version**: N/A or Unknown
* **Minimal Celery Version**: N/A or Unknown
* **Minimal Kombu Version**: N/A or Unknown
* **Minimal Broker Version**: N/A or Unknown
* **Minimal Result Backend Version**: N/A or Unknown
* **Minimal OS and/or Kernel Version**: N/A or Unknown
* **Minimal Broker Client Version**: N/A or Unknown
* **Minimal Result Backend Client Version**: N/A or Unknown
### Python Packages
<!-- Please fill the contents of pip freeze below -->
<details>
<summary><b><code>pip freeze</code> Output:</b></summary>
<p>
```
```
</p>
</details>
### Other Dependencies
<!--
Please provide system dependencies, configuration files
and other dependency information if applicable
-->
<details>
<p>
N/A
</p>
</details>
## Minimally Reproducible Test Case
<!--
Please provide a reproducible test case.
Refer to the Reporting Bugs section in our contribution guide.
We prefer submitting test cases in the form of a PR to our integration test suite.
If you can provide one, please mention the PR number below.
If not, please attach the most minimal code example required to reproduce the issue below.
If the test case is too large, please include a link to a gist or a repository below.
-->
<details>
<p>
```python
```
</p>
</details>
# Expected Behavior
<!-- Describe in detail what you expect to happen -->
I expect that `result_chord_join_timeout` will affect the timeout used when joining chord parts.
# Actual Behavior
<!--
Describe in detail what actually happened.
Please include a backtrace and surround it with triple backticks (```).
In addition, include the Celery daemon logs, the broker logs,
the result backend logs and system logs below if they will help us debug
the issue.
-->
With very large chords there are sudden timeouts which can't be eliminated by changing the appropriate configuration option.
| 2021-01-05T07:56:26 |
|
celery/celery | 6,589 | celery__celery-6589 | [
"4298"
] | eac0c12a502e742082155561eae50db1b0fad967 | diff --git a/celery/utils/functional.py b/celery/utils/functional.py
--- a/celery/utils/functional.py
+++ b/celery/utils/functional.py
@@ -3,7 +3,7 @@
import sys
from collections import UserList
from functools import partial
-from itertools import chain, islice
+from itertools import islice
from kombu.utils.functional import (LRUCache, dictfilter, is_list, lazy,
maybe_evaluate, maybe_list, memoize)
@@ -182,6 +182,7 @@ def __init__(self, it):
self.__it = it
self.__index = 0
self.__consumed = []
+ self.__done = False
def __reduce__(self):
return list, (self.data,)
@@ -190,7 +191,13 @@ def __length_hint__(self):
return self.__it.__length_hint__()
def __iter__(self):
- return chain(self.__consumed, self.__it)
+ for x in self.__consumed:
+ yield x
+ if not self.__done:
+ for x in self.__it:
+ self.__consumed.append(x)
+ yield x
+ self.__done = True
def __getitem__(self, index):
if index < 0:
@@ -198,14 +205,26 @@ def __getitem__(self, index):
try:
return self.__consumed[index]
except IndexError:
+ it = iter(self)
try:
for _ in range(self.__index, index + 1):
- self.__consumed.append(next(self.__it))
+ next(it)
except StopIteration:
raise IndexError(index)
else:
return self.__consumed[index]
+ def __bool__(self):
+ if len(self.__consumed):
+ return True
+
+ try:
+ next(iter(self))
+ except StopIteration:
+ return False
+ else:
+ return True
+
@property
def data(self):
try:
| diff --git a/t/unit/utils/test_functional.py b/t/unit/utils/test_functional.py
--- a/t/unit/utils/test_functional.py
+++ b/t/unit/utils/test_functional.py
@@ -1,11 +1,10 @@
import pytest
-from kombu.utils.functional import lazy
-
from celery.utils.functional import (DummyContext, first, firstmethod,
fun_accepts_kwargs, fun_takes_argument,
head_from_fun, maybe_list, mlazy,
padlist, regen, seq_concat_item,
seq_concat_seq)
+from kombu.utils.functional import lazy
def test_DummyContext():
@@ -94,8 +93,11 @@ def test_list(self):
fun, args = r.__reduce__()
assert fun(*args) == l
- def test_gen(self):
- g = regen(iter(list(range(10))))
+ @pytest.fixture
+ def g(self):
+ return regen(iter(list(range(10))))
+
+ def test_gen(self, g):
assert g[7] == 7
assert g[6] == 6
assert g[5] == 5
@@ -107,17 +109,19 @@ def test_gen(self):
assert g.data, list(range(10))
assert g[8] == 8
assert g[0] == 0
- g = regen(iter(list(range(10))))
+
+ def test_gen__index_2(self, g):
assert g[0] == 0
assert g[1] == 1
assert g.data == list(range(10))
- g = regen(iter([1]))
- assert g[0] == 1
+
+ def test_gen__index_error(self, g):
+ assert g[0] == 0
with pytest.raises(IndexError):
- g[1]
- assert g.data == [1]
+ g[11]
+ assert list(iter(g)) == list(range(10))
- g = regen(iter(list(range(10))))
+ def test_gen__negative_index(self, g):
assert g[-1] == 9
assert g[-2] == 8
assert g[-3] == 7
@@ -128,6 +132,21 @@ def test_gen(self):
assert list(iter(g)) == list(range(10))
+ def test_nonzero__does_not_consume_more_than_first_item(self):
+ def build_generator():
+ yield 1
+ self.consumed_second_item = True
+ yield 2
+
+ self.consumed_second_item = False
+ g = regen(build_generator())
+ assert bool(g)
+ assert g[0] == 1
+ assert not self.consumed_second_item
+
+ def test_nonzero__empty_iter(self):
+ assert not regen(iter([]))
+
class test_head_from_fun:
| group.skew() removes all tasks if group input is generator
## Checklist
- [X] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
python 3.6.2
celery==4.1.0
amqp==2.2.1
billiard==3.5.0.3
- [X] I have verified that the issue exists against the `master` branch of Celery.
As of `be55de6`
## Steps to reproduce
```
@celery.task(bind=True)
def noop(task):
pass
```
1. `group = celery.group(noop.si() for i in range(10))`
2. `group.skew()` -> `group(<empty>)`
3. `group.apply_async()` or `group()` -> `<GroupResult: 66fd517e-123b-4299-8cbe-9183b3f02626 []>`
## Expected behavior
```
<GroupResult: d610c923-e939-4199-9bb7-7bc89daa2ccb [9152bd60-f815-4bbe-a407-a003db34b19d, 7fc2d0ac-72ab-48b1-9083-be0f3b50b00d, d7c0a00a-85c3-4780-88f9-23be8da6fb73, 3737944f-91a4-4886-95c6-4960ef4764a7, a8da96ea-d2c4-4876-8026-e0c24c7d508a, 83ee5316-fb2f-472b-8e94-284144f92438, 5715a751-f28d-4deb-ac15-114854570fde, bc1646bf-cec9-4740-8e74-411d7b66dcca, 337aa1f3-5606-45b2-af68-5c5527289019, f5fffb98-f2f5-4423-a656-8c1d7e93291a]>
```
## Actual behavior
```
<GroupResult: 66fd517e-123b-4299-8cbe-9183b3f02626 []>
```
### Notes
If you `print(group)` (or something that evaluates `self.tasks`) before calling `group.skew()`, it works normally.
| could you please check whether it still exists on master?
Yes. Still happening as of 2547666a1ea13b27bc13ef296ae43a163ecd4ab3
any workaround?
I believe this is a bug in `celery.utils.functional.regen`. Compare the outputs of `regen` on 4.1.0 (which has this bug) and 3.1.25 (which doesn't):
On Celery 4.1.0:
```
In [1]: from celery.utils.functional import regen
In [2]: gen = regen(i for i in range(10))
In [3]: [i for i in gen]
Out[3]: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
In [4]: [i for i in gen]
Out[4]: []
```
On Celery 3.1.25:
```
In [1]: from celery.utils.functional import regen
In [2]: gen = regen(i for i in range(10))
In [3]: [i for i in gen]
Out[3]: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
In [4]: [i for i in gen]
Out[4]: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
In [5]: [i for i in gen]
Out[5]: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
```
Looking at https://github.com/celery/celery/commit/9982773022d3de2d41ca59509220763da527b20e it would appear that `__consumed` doesn't get updated inside `__iter__`.
Doing `print(group)` first makes things work as expected because `__getitem__` is called and populates `__consumed` correctly. Then subsequent calls to `__iter__` work as expected too.
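The essence of the fix that eventually landed (see the patch above) is a caching `__iter__`; a standalone sketch:
```python
def caching_iter(it, consumed):
    # Replay anything seen before, then keep consuming and recording,
    # so a second iteration does not find the generator exhausted.
    yield from consumed
    for x in it:
        consumed.append(x)
        yield x

gen, cache = iter(range(3)), []
assert list(caching_iter(gen, cache)) == [0, 1, 2]
assert list(caching_iter(gen, cache)) == [0, 1, 2]  # replayed from the cache
```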
A more proper workaround is to either only pass lists and tuples to `group`, or if you're using something like `Task.chunks`, reassign `tasks` with a list, e.g.:
```
g = mytask.chunks(range(10), 2).group()
g.tasks = list(g.tasks)  # concretise the generator before skewing
g.skew(step=15)
```
Finally, https://github.com/celery/celery/pull/4459 will (probably) fix this issue too. @auvipy would you mind adding tests for this bug into your PR? Or if you prefer, I can open a separate PR that addresses this issue only.
please proceed with the tests
Same issue:
celery==5.0.2
@maybe-sybr it would be great if you could also take a look here when possible
I traced this back and got a repro on the originally reported versions as well as the current master (420e3931a). Worth noting that I made the mistake of thinking it was fixed because I was printing the tasks in the group between steps in the MRTC in this issue's summary to see what was going on. Doing so concretises the generator underlying the `_regen` instance. Here's my new MRTC, which needs a backend running for the task submission if the expected behaviour is to be observed:
```python
import celery
app = celery.Celery(name="app")
@app.task(bind=True)
def noop(self):
pass
def gen():
#assert False
yield from (noop.si() for i in range(10))
g = celery.group(gen())
# Uncomment this to see the correct behaviour - but you'll need a backend
# running since that'll mean we actually have tasks to submit.
#print(g.tasks)
gs = g.skew()
r = g()
print(repr(r))
```
https://github.com/celery/celery/issues/4298#issuecomment-377276099 is correct in that this is breakage in `_regen` which has already been fixed in #4459 and #6112, but neither has been merged. I'd like to see the targeted fix pulled out of those PRs and merged to close this issue rather than waiting on the more complex PR to be done. I'll do that when I have some time in the next day or so. | 2021-01-11T20:25:48 |
celery/celery | 6,599 | celery__celery-6599 | [
"6517"
] | 8a4056087aeac6a5be79a2db4d6f06975f754609 | diff --git a/celery/bin/worker.py b/celery/bin/worker.py
--- a/celery/bin/worker.py
+++ b/celery/bin/worker.py
@@ -325,6 +325,7 @@ def worker(ctx, hostname=None, pool_cls=None, app=None, uid=None, gid=None,
pidfile=node_format(pidfile, hostname),
statedb=node_format(statedb, hostname),
no_color=ctx.obj.no_color,
+ quiet=ctx.obj.quiet,
**kwargs)
worker.start()
return worker.exitcode
 | celery worker -q --quiet seems not to work with 4.4.7
Running celery 4.4.7 [not working]
Running celery 4.4.6 [ok]
Running celery 4.4.1 [ok]
commands:
> celery -A proj worker --loglevel=info -P eventlet --quiet
> celery -A proj worker --loglevel=info -P eventlet -q
| did you try 5.0.x versions?
I've recently tried this with the latest celery version (5.0.5), and the issue remains.
The reason seems to be that in [`celery.bin.worker.worker`](https://github.com/celery/celery/blob/master/celery/bin/worker.py#L326) the `ctx.obj.quiet` option is not passed.
The fixed code would look something like this:
```python
worker = app.Worker(
hostname=hostname, pool_cls=pool_cls, loglevel=loglevel,
logfile=logfile, # node format handled by celery.app.log.setup
pidfile=node_format(pidfile, hostname),
statedb=node_format(statedb, hostname),
no_color=ctx.obj.no_color,
quiet=ctx.obj.quiet,
**kwargs)
``` | 2021-01-17T06:01:50 |
|
celery/celery | 6,614 | celery__celery-6614 | [
"6476"
] | 29eda054555fa95c83210e5e6bc3e839c80bcd3b | diff --git a/celery/app/defaults.py b/celery/app/defaults.py
--- a/celery/app/defaults.py
+++ b/celery/app/defaults.py
@@ -255,6 +255,7 @@ def __repr__(self):
False, type='bool', old={'celery_eager_propagates_exceptions'},
),
ignore_result=Option(False, type='bool'),
+ store_eager_result=Option(False, type='bool'),
protocol=Option(2, type='int', old={'celery_task_protocol'}),
publish_retry=Option(
True, type='bool', old={'celery_task_publish_retry'},
diff --git a/celery/app/task.py b/celery/app/task.py
--- a/celery/app/task.py
+++ b/celery/app/task.py
@@ -309,6 +309,7 @@ class Task:
('acks_on_failure_or_timeout', 'task_acks_on_failure_or_timeout'),
('reject_on_worker_lost', 'task_reject_on_worker_lost'),
('ignore_result', 'task_ignore_result'),
+ ('store_eager_result', 'task_store_eager_result'),
('store_errors_even_if_ignored', 'task_store_errors_even_if_ignored'),
)
diff --git a/celery/app/trace.py b/celery/app/trace.py
--- a/celery/app/trace.py
+++ b/celery/app/trace.py
@@ -159,9 +159,13 @@ def __init__(self, state, retval=None):
def handle_error_state(self, task, req,
eager=False, call_errbacks=True):
- store_errors = not eager
if task.ignore_result:
store_errors = task.store_errors_even_if_ignored
+ elif eager and task.store_eager_result:
+ store_errors = True
+ else:
+ store_errors = not eager
+
return {
RETRY: self.handle_retry,
FAILURE: self.handle_failure,
@@ -316,7 +320,13 @@ def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
ignore_result = task.ignore_result
track_started = task.track_started
track_started = not eager and (task.track_started and not ignore_result)
- publish_result = not eager and not ignore_result
+
+ # #6476
+ if eager and not ignore_result and task.store_eager_result:
+ publish_result = True
+ else:
+ publish_result = not eager and not ignore_result
+
hostname = hostname or gethostname()
inherit_parent_priority = app.conf.task_inherit_parent_priority
| diff --git a/docs/userguide/testing.rst b/docs/userguide/testing.rst
--- a/docs/userguide/testing.rst
+++ b/docs/userguide/testing.rst
@@ -18,6 +18,9 @@ To test task behavior in unit tests the preferred method is mocking.
of what happens in a worker, and there are many discrepancies
between the emulation and what happens in reality.
+ Note that eagerly executed tasks don't write results to backend by default.
+ If you want to enable this functionality, have a look at :setting:`task_store_eager_result`.
+
A Celery task is much like a web view, in that it should only
define how to perform the action in the context of being called as a task.
diff --git a/t/unit/tasks/test_trace.py b/t/unit/tasks/test_trace.py
--- a/t/unit/tasks/test_trace.py
+++ b/t/unit/tasks/test_trace.py
@@ -1,4 +1,4 @@
-from unittest.mock import Mock, patch
+from unittest.mock import ANY, Mock, patch
import pytest
from billiard.einfo import ExceptionInfo
@@ -148,6 +148,75 @@ def add(x, y):
with pytest.raises(MemoryError):
self.trace(add, (2, 2), {}, eager=False)
+ def test_eager_task_does_not_store_result_even_if_not_ignore_result(self):
+ @self.app.task(shared=False)
+ def add(x, y):
+ return x + y
+
+ add.backend = Mock(name='backend')
+ add.ignore_result = False
+
+ self.trace(add, (2, 2), {}, eager=True)
+
+ add.backend.mark_as_done.assert_called_once_with(
+ 'id-1', # task_id
+ 4, # result
+ ANY, # request
+ False # store_result
+ )
+
+ def test_eager_task_does_not_call_store_result(self):
+ @self.app.task(shared=False)
+ def add(x, y):
+ return x + y
+
+ backend = BaseDictBackend(app=self.app)
+ backend.store_result = Mock()
+ add.backend = backend
+ add.ignore_result = False
+
+ self.trace(add, (2, 2), {}, eager=True)
+
+ add.backend.store_result.assert_not_called()
+
+ def test_eager_task_will_store_result_if_proper_setting_is_set(self):
+ @self.app.task(shared=False)
+ def add(x, y):
+ return x + y
+
+ add.backend = Mock(name='backend')
+ add.store_eager_result = True
+ add.ignore_result = False
+
+ self.trace(add, (2, 2), {}, eager=True)
+
+ add.backend.mark_as_done.assert_called_once_with(
+ 'id-1', # task_id
+ 4, # result
+ ANY, # request
+ True # store_result
+ )
+
+ def test_eager_task_with_setting_will_call_store_result(self):
+ @self.app.task(shared=False)
+ def add(x, y):
+ return x + y
+
+ backend = BaseDictBackend(app=self.app)
+ backend.store_result = Mock()
+ add.backend = backend
+ add.store_eager_result = True
+ add.ignore_result = False
+
+ self.trace(add, (2, 2), {}, eager=True)
+
+ add.backend.store_result.assert_called_once_with(
+ 'id-1',
+ 4,
+ states.SUCCESS,
+ request=ANY
+ )
+
def test_when_backend_raises_exception(self):
@self.app.task(shared=False)
def add(x, y):
@@ -413,6 +482,32 @@ def test_handle_error_state(self):
call_errbacks=True,
)
+ def test_handle_error_state_for_eager_task(self):
+ x = self.TI(states.FAILURE)
+ x.handle_failure = Mock()
+
+ x.handle_error_state(self.add, self.add.request, eager=True)
+ x.handle_failure.assert_called_once_with(
+ self.add,
+ self.add.request,
+ store_errors=False,
+ call_errbacks=True,
+ )
+
+ def test_handle_error_for_eager_saved_to_backend(self):
+ x = self.TI(states.FAILURE)
+ x.handle_failure = Mock()
+
+ self.add.store_eager_result = True
+
+ x.handle_error_state(self.add, self.add.request, eager=True)
+ x.handle_failure.assert_called_with(
+ self.add,
+ self.add.request,
+ store_errors=True,
+ call_errbacks=True,
+ )
+
@patch('celery.app.trace.ExceptionInfo')
def test_handle_reject(self, ExceptionInfo):
x = self.TI(states.FAILURE)
 | `publish_result` is False when tasks are eager and we DON'T want to ignore results
#### Mandatory Debugging Information
- [ ] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [ ] I have included the contents of ``pip freeze`` in the issue.
- [x] I have included all the versions of all the external dependencies required
to reproduce this bug.
#### Related Issues
- #5398
- https://github.com/celery/django-celery-results/issues/49
#### Possible Duplicates
- None
### Python Packages
Latest, not released django-celery-results
https://github.com/celery/django-celery-results/pull/172/commits/b4acbf24ea3521ecfdbf821ece7ca62e49ff2765
Currently published version doesn't support Celery 5.x, but all versions are affected
## Minimally Reproducible Test Case
Source of the issue
https://github.com/celery/celery/blob/2a6c7cfe3b1283961887bf1cb3f5aa6c8aa70820/celery/app/trace.py#L323
```python
import pytest
@pytest.mark.parametrize(
"eager,ignore_result,publish_result",
[
(True, True, False),
(True, False, True), # fails
(False, True, False),
(False, False, True),
]
)
def test_publish_result_current(eager, ignore_result, publish_result):
actual_publish_result = not eager and not ignore_result
assert publish_result == actual_publish_result
@pytest.mark.parametrize(
"eager,ignore_result,publish_result",
[
(True, True, False),
(True, False, True),
(False, True, False),
(False, False, True),
]
)
def test_publish_result_desired(eager, ignore_result, publish_result):
actual_publish_result = not ignore_result
assert publish_result == actual_publish_result
```
# Expected Behavior
``always_eager`` exists so we can use Celery in unit tests. I want to execute a task eagerly and later check the result from the backend.
# Actual Behavior
When a task is eager and it is set NOT to ignore the result, the result is still ignored.
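For completeness, a hedged usage sketch of the opt-in this PR adds (setting name per the docs diff above; the backend URL is illustrative):
```python
from celery import Celery

app = Celery(backend='cache+memory://')
app.conf.task_always_eager = True
app.conf.task_store_eager_result = True  # opt-in: eager results hit the backend

@app.task
def add(x, y):
    return x + y

res = add.delay(2, 2)  # runs eagerly, in-process
# With the setting enabled, the result is also written to the backend,
# so it can be fetched later by id:
assert app.AsyncResult(res.id).get() == 4
```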
| did you try the latest release, and could you share your feedback?
it would be great if you could share a possible solution
I tried the latest release.
It appears to me that someone made the decision that whenever a task is eager, nothing should be saved to the backend. But not only do I struggle to understand what the purpose of this feature would be, it's also not documented anywhere (AFAIK). So could you please confirm that the current behavior is not the desired behavior?
Once you confirm I'll be happy to fix it.
I think we do need to save the result. However, if this was never done before in any version of Celery then this is not a bug but a feature request.
I'd like to have some input from @celery/core-developers.
> I tried the latest release.
>
> It appears to me that someone made the decision that whenever a task is eager, nothing should be saved to the backend. But not only do I struggle to understand what the purpose of this feature would be, it's also not documented anywhere (AFAIK). So could you please confirm that the current behavior is not the desired behavior?
>
> Once you confirm I'll be happy to fix it.
based on the issues raised https://github.com/celery/django-celery-results/issues/49 I believe we should save the results, however, we should carefully check the possible side effects.
@tomwojcik still up for this?
Yes @auvipy. I already tried to fix this, but some tests started to fail due to side effects, though I think some of those are to be expected. I will give it another try over the weekend and let you know if I don't manage to fix it myself.
Personally, I haven't used eager execution of tasks. But if I understand correctly that it executes the task locally without sending it to the queue, it makes sense to me that the result is not stored in the result backend (since it is executed directly by the caller). Still, I haven't used it, so maybe there is a need for storing it in the result store.
> Yes @auvipy. I already tried to fix this, but some tests started to fail due to side effects, though I think some of those are to be expected. I will give it another try over the weekend and let you know if I don't manage to fix it myself.
don't hesitate to open a draft PR as well, so that if you are not sure about something, we can share our insights with you. | 2021-01-24T19:53:14 |
celery/celery | 6,624 | celery__celery-6624 | [
"6623"
] | 29eda054555fa95c83210e5e6bc3e839c80bcd3b | diff --git a/celery/app/task.py b/celery/app/task.py
--- a/celery/app/task.py
+++ b/celery/app/task.py
@@ -219,9 +219,6 @@ class Task:
#: The result store backend used for this task.
backend = None
- #: If disabled this task won't be registered automatically.
- autoregister = True
-
#: If enabled the task will report its status as 'started' when the task
#: is executed by a worker. Disabled by default as the normal behavior
#: is to not report that level of granularity. Tasks are either pending,
| remove misleading Task.autoregister documentation (and the unused field)
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?utf8=%E2%9C%93&q=is%3Aissue+label%3A%22Category%3A+Documentation%22+)
for similar or identical bug reports.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the bug was already fixed in the master branch.
- [x] I have included all related issues and possible duplicate issues in this issue
(If there are none, check this box anyway).
## Related Issues and Possible Duplicates
<!--
Please make sure to search and mention any related issues
or possible duplicates to this issue as requested by the checklist above.
This may or may not include issues in other repositories that the Celery project
maintains or other repositories that are dependencies of Celery.
If you don't know how to mention issues, please refer to Github's documentation
on the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests
-->
#### Related Issues
- None
#### Possible Duplicates
- None
# Description
<!--
Please describe what's missing or incorrect about our documentation.
Include links and/or screenshots which will aid us to resolve the issue.
-->
The documentation for celery.app.Task describes an attribute `autoregister` and implies that, if set to true, the task will be registered automatically ("If disabled this task won't be registered automatically."). However, this feature was removed in the Celery 4.0 release.
https://docs.celeryproject.org/en/latest/reference/celery.app.task.html#autoregister
The attribute doesn't appear to be used anywhere, a quick search of the code base found only one other occurrence in the changelog for v1.0.
# Suggestions
Either re-add the automatic registration for class based tasks, remove the field or clarify the documentation.
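For anyone who relied on the removed behaviour, class-based tasks have to be registered explicitly since Celery 4.0. A minimal sketch (`MyTask` and the `proj` names are made up for illustration; `register_task` is the actual API):
```python
import celery

app = celery.Celery('proj')

class MyTask(celery.Task):
    name = 'proj.my_task'

    def run(self):
        return 42

# Explicit registration replaces the old autoregister behaviour
my_task = app.register_task(MyTask())
```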
| would you mind sending a PR to clean this up? | 2021-02-03T11:08:30 |
|
celery/celery | 6,629 | celery__celery-6629 | [
"6533"
] | c7f2f141627de69645d1885b000b12def97152ec | diff --git a/celery/backends/base.py b/celery/backends/base.py
--- a/celery/backends/base.py
+++ b/celery/backends/base.py
@@ -853,7 +853,11 @@ def _store_result(self, task_id, result, state,
if current_meta['status'] == states.SUCCESS:
return result
- self._set_with_state(self.get_key_for_task(task_id), self.encode(meta), state)
+ try:
+ self._set_with_state(self.get_key_for_task(task_id), self.encode(meta), state)
+ except BackendStoreError as ex:
+ raise BackendStoreError(str(ex), state=state, task_id=task_id) from ex
+
return result
def _save_group(self, group_id, result):
diff --git a/celery/backends/redis.py b/celery/backends/redis.py
--- a/celery/backends/redis.py
+++ b/celery/backends/redis.py
@@ -12,7 +12,7 @@
from celery import states
from celery._state import task_join_will_block
from celery.canvas import maybe_signature
-from celery.exceptions import ChordError, ImproperlyConfigured
+from celery.exceptions import BackendStoreError, ChordError, ImproperlyConfigured
from celery.result import GroupResult, allow_join_result
from celery.utils.functional import dictfilter
from celery.utils.log import get_logger
@@ -192,6 +192,10 @@ class RedisBackend(BaseKeyValueStoreBackend, AsyncBackendMixin):
supports_autoexpire = True
supports_native_join = True
+ #: Maximal length of string value in Redis.
+ #: 512 MB - https://redis.io/topics/data-types
+ _MAX_STR_VALUE_SIZE = 536870912
+
def __init__(self, host=None, port=None, db=None, password=None,
max_connections=None, url=None,
connection_pool=None, **kwargs):
@@ -364,6 +368,9 @@ def on_connection_error(self, max_retries, exc, intervals, retries):
return tts
def set(self, key, value, **retry_policy):
+ if len(value) > self._MAX_STR_VALUE_SIZE:
+ raise BackendStoreError('value too large for Redis backend')
+
return self.ensure(self._set, (key, value), **retry_policy)
def _set(self, key, value):
diff --git a/celery/exceptions.py b/celery/exceptions.py
--- a/celery/exceptions.py
+++ b/celery/exceptions.py
@@ -288,7 +288,7 @@ def __repr__(self):
class BackendStoreError(BackendError):
- """An issue writing from the backend."""
+ """An issue writing to the backend."""
def __init__(self, *args, **kwargs):
self.state = kwargs.get('state', "")
| diff --git a/t/unit/backends/test_redis.py b/t/unit/backends/test_redis.py
--- a/t/unit/backends/test_redis.py
+++ b/t/unit/backends/test_redis.py
@@ -12,7 +12,7 @@
from celery import signature, states, uuid
from celery.canvas import Signature
-from celery.exceptions import ChordError, ImproperlyConfigured
+from celery.exceptions import BackendStoreError, ChordError, ImproperlyConfigured
from celery.utils.collections import AttributeDict
@@ -675,6 +675,10 @@ def test_set_expires(self):
key, 512,
)
+ def test_set_raises_error_on_large_value(self):
+ with pytest.raises(BackendStoreError):
+ self.b.set('key', 'x' * (self.b._MAX_STR_VALUE_SIZE + 1))
+
class test_RedisBackend_chords_simple(basetest_RedisBackend):
@pytest.fixture(scope="class", autouse=True)
| Workers retry Redis connection when task result is larger than 512MB - retry is useless as it never fits to Redis
# Checklist
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [ ] This has already been asked to the [discussion group](https://groups.google.com/forum/#!forum/celery-users) first.
- [x] I have read the relevant section in the
[contribution guide](http://docs.celeryproject.org/en/latest/contributing.html#other-bugs)
on reporting bugs.
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the bug was already fixed in the master branch.
- [x] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
## Mandatory Debugging Information
- [x] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [x] I have included the contents of ``pip freeze`` in the issue.
- [x] I have included all the versions of all the external dependencies required
to reproduce this bug.
## Optional Debugging Information
- [ ] I have tried reproducing the issue on more than one Python version
and/or implementation.
- [ ] I have tried reproducing the issue on more than one message broker and/or
result backend.
- [ ] I have tried reproducing the issue on more than one version of the message
broker and/or result backend.
- [ ] I have tried reproducing the issue on more than one operating system.
- [ ] I have tried reproducing the issue on more than one workers pool.
- [ ] I have tried reproducing the issue with autoscaling, retries,
ETA/Countdown & rate limits disabled.
- [ ] I have tried reproducing the issue after downgrading
and/or upgrading Celery and its dependencies.
## Related Issues and Possible Duplicates
#### Related Issues
- None
#### Possible Duplicates
- None
## Environment & Settings
**Celery version**: 5.0.4 (singularity)
<details>
<summary><b><code>celery report</code> Output:</b></summary>
<p>
```
software -> celery:5.0.4 (singularity) kombu:5.0.2 py:3.9.0
billiard:3.6.3.0 py-amqp:5.0.2
platform -> system:Linux arch:64bit, ELF
kernel version:5.9.12-arch1-1 imp:CPython
loader -> celery.loaders.app.AppLoader
settings -> transport:amqp results:redis://:**@****:6379/10
broker_url: 'amqp://**:********@*****:5672/**'
result_backend: 'redis://:********@*****:6379/10'
deprecated_settings: None
```
</p>
</details>
# Steps to Reproduce
## Required Dependencies
* **Minimal Python Version**: N/A or Unknown
* **Minimal Celery Version**: N/A or Unknown
* **Minimal Kombu Version**: N/A or Unknown
* **Minimal Broker Version**: N/A or Unknown
* **Minimal Result Backend Version**: N/A or Unknown
* **Minimal OS and/or Kernel Version**: N/A or Unknown
* **Minimal Broker Client Version**: N/A or Unknown
* **Minimal Result Backend Client Version**: N/A or Unknown
### Python Packages
<details>
<summary><b><code>pip freeze</code> Output:</b></summary>
<p>
```
amqp==5.0.2
billiard==3.6.3.0
celery @ git+https://github.com/celery/celery.git@420e3931a63538bd225ef57916deccf53cbcb57a // == master, tried also 5.0.4
click==7.1.2
click-didyoumean==0.0.3
click-plugins==1.1.1
click-repl==0.1.6
kombu==5.0.2
prompt-toolkit==3.0.8
pytz==2020.4
redis==3.5.3
six==1.15.0
vine==5.0.0
wcwidth==0.2.5
```
</p>
</details>
### Other Dependencies
<details>
<p>
N/A
</p>
</details>
## Minimally Reproducible Test Case
<details>
<p>
```python
#!/usr/bin/env python3
from celery import Celery
app = Celery(
    'tasks',
    broker='amqp://user:***@**:5672/**',
    backend='redis://:**@**:6379/1',
)

@app.task(ignore_result=False)
def test(*args, **kwargs):
    return 'x' * 536870911  # one byte under 512 MB; with result metadata the encoded value exceeds Redis's limit
```
</p>
</details>
# Expected Behavior
I'd expect an exception or error. There is no point in retrying to store a result in Redis when it simply never fits there: the string limit is 512 MB.
I could check the size of the data I'm returning from the task. However, Celery adds additional metadata to my result, so I do not know what size I should limit my result to.
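A minimal sketch of the kind of guard this suggests, mirroring the check the patch above adds to the Redis backend (`BackendStoreError` and the size constant come from the diff; `backend_set` stands in for the backend's real `_set` method):
```python
from celery.exceptions import BackendStoreError

MAX_STR_VALUE_SIZE = 536_870_912  # 512 MB, Redis's maximum string value size

def guarded_set(backend_set, key, value):
    # Fail fast instead of letting the client retry a write that can never succeed
    if len(value) > MAX_STR_VALUE_SIZE:
        raise BackendStoreError('value too large for Redis backend')
    return backend_set(key, value)
```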
# Actual Behavior
```
[2020-12-09 08:38:52,786: ERROR/ForkPoolWorker-8] Connection to Redis lost: Retry (0/20) now.
[2020-12-09 08:38:53,328: ERROR/ForkPoolWorker-8] Connection to Redis lost: Retry (1/20) in 1.00 second.
[2020-12-09 08:38:54,940: ERROR/ForkPoolWorker-8] Connection to Redis lost: Retry (2/20) in 1.00 second.
[2020-12-09 08:38:56,547: ERROR/ForkPoolWorker-8] Connection to Redis lost: Retry (3/20) in 1.00 second.
.... and so on
```
Redis fails with the following error when you try to set a string larger than 512 MB:
```
raise ConnectionError("Error %s while writing to socket. %s." %
redis.exceptions.ConnectionError: Error 32 while writing to socket. Broken pipe.
```
I tried this via another script with a direct Redis connection. The error says nothing about the input size, so I think the value size should be checked before sending data to Redis.
| Pull requests are welcome. | 2021-02-07T09:06:11 |
celery/celery | 6,668 | celery__celery-6668 | [
"5646"
] | f091bab758cf430640678a16a759892bdd352800 | diff --git a/celery/app/routes.py b/celery/app/routes.py
--- a/celery/app/routes.py
+++ b/celery/app/routes.py
@@ -2,8 +2,8 @@
Contains utilities for working with task routers, (:setting:`task_routes`).
"""
+import fnmatch
import re
-import string
from collections import OrderedDict
from collections.abc import Mapping
@@ -23,11 +23,6 @@
__all__ = ('MapRoute', 'Router', 'prepare')
-def glob_to_re(glob, quote=string.punctuation.replace('*', '')):
- glob = ''.join('\\' + c if c in quote else c for c in glob)
- return glob.replace('*', '.+?')
-
-
class MapRoute:
"""Creates a router out of a :class:`dict`."""
@@ -39,7 +34,7 @@ def __init__(self, map):
if isinstance(k, Pattern):
self.patterns[k] = v
elif '*' in k:
- self.patterns[re.compile(glob_to_re(k))] = v
+ self.patterns[re.compile(fnmatch.translate(k))] = v
else:
self.map[k] = v
@@ -126,6 +121,7 @@ def expand_router_string(router):
def prepare(routes):
"""Expand the :setting:`task_routes` setting."""
+
def expand_route(route):
if isinstance(route, (Mapping, list, tuple)):
return MapRoute(route)
| diff --git a/t/unit/app/test_routes.py b/t/unit/app/test_routes.py
--- a/t/unit/app/test_routes.py
+++ b/t/unit/app/test_routes.py
@@ -16,6 +16,7 @@ def Router(app, *args, **kwargs):
def E(app, queues):
def expand(answer):
return Router(app, [], queues).expand_destination(answer)
+
return expand
@@ -46,6 +47,7 @@ def setup(self):
@self.app.task(shared=False)
def mytask(*args, **kwargs):
pass
+
self.mytask = mytask
def assert_routes_to_queue(self, queue, router, name,
@@ -56,7 +58,8 @@ def assert_routes_to_queue(self, queue, router, name,
kwargs = {}
if args is None:
args = []
- assert router.route(options, name, args, kwargs)['queue'].name == queue
+ assert router.route(options, name, args, kwargs)[
+ 'queue'].name == queue
def assert_routes_to_default_queue(self, router, name, *args, **kwargs):
self.assert_routes_to_queue(
@@ -85,10 +88,13 @@ def test_route_for_task__glob(self):
from re import compile
route = routes.MapRoute([
+ ('proj.tasks.bar*', {'queue': 'routeC'}),
('proj.tasks.*', 'routeA'),
('demoapp.tasks.bar.*', {'exchange': 'routeB'}),
(compile(r'(video|image)\.tasks\..*'), {'queue': 'media'}),
])
+ assert route('proj.tasks.bar') == {'queue': 'routeC'}
+ assert route('proj.tasks.bar.baz') == {'queue': 'routeC'}
assert route('proj.tasks.foo') == {'queue': 'routeA'}
assert route('demoapp.tasks.bar.moo') == {'exchange': 'routeB'}
assert route('video.tasks.foo') == {'queue': 'media'}
@@ -97,7 +103,7 @@ def test_route_for_task__glob(self):
def test_expand_route_not_found(self):
expand = E(self.app, self.app.amqp.Queues(
- self.app.conf.task_queues, False))
+ self.app.conf.task_queues, False))
route = routes.MapRoute({'a': {'queue': 'x'}})
with pytest.raises(QueueNotFound):
expand(route('a'))
| glob behavior in task_routes interprets * as one or more characters, not zero or more
# Checklist
- [x] I have read the relevant section in the
[contribution guide](http://docs.celeryproject.org/en/latest/contributing.html#other-bugs)
on reporting bugs.
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the bug was already fixed in the master branch.
- [x] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
## Mandatory Debugging Information
- [x] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [x] I have included the contents of ``pip freeze`` in the issue.
- [x] I have included all the versions of all the external dependencies required
to reproduce this bug.
## Optional Debugging Information
- [ ] I have tried reproducing the issue on more than one Python version
and/or implementation.
- [ ] I have tried reproducing the issue on more than one message broker and/or
result backend.
- [ ] I have tried reproducing the issue on more than one version of the message
broker and/or result backend.
- [ ] I have tried reproducing the issue on more than one operating system.
- [ ] I have tried reproducing the issue on more than one workers pool.
- [ ] I have tried reproducing the issue with autoscaling, retries,
ETA/Countdown & rate limits disabled.
- [ ] I have tried reproducing the issue after downgrading
and/or upgrading Celery and its dependencies.
## Related Issues and Possible Duplicates
#### Related Issues
- None
#### Possible Duplicates
- None
## Environment & Settings
**Celery version**:
<details>
<summary><b><code>celery report</code> Output:</b></summary>
<p>
```
Commit 02f95470a781369d2b9c4fa7105d879fb0dae3b1 (March 4 2016) and later
```
</p>
</details>
# Steps to Reproduce
The [`task_routes`](http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-routes) configuration parameter allows defining a task route using a glob syntax, for example:
```
task_routes = {
'feed.tasks.*': 'feeds',
}
```
Under the hood, [celery's code calls `celery.app.routes.glob_to_re`](https://github.com/celery/celery/blob/2c029c4f33d48c1951cc97628655a04024d50a0d/celery/app/routes.py#L36-L38) to convert these globs to a regular expression. The replacement interprets `*` as the regular expression `.+?`, or, "one or more characters (non-greedy)".
This behavior differs from globbing in other systems, like bash, [python's glob module](https://docs.python.org/3.5/library/glob.html#glob.glob), [node glob](https://www.npmjs.com/package/glob), and more, which all treat `*` as "zero or more characters".
Given the "zero or more characters" standard, developers will probably expect this pattern:
```
task_routes = {
    'feed.tasks.foo*': 'feeds',
}
```
to match the task `feed.tasks.foo`. In celery right now, it does not.
## Required Dependencies
* **Minimal Python Version**: N/A
* **Minimal Celery Version**: N/A
* **Minimal Kombu Version**: N/A
* **Minimal Broker Version**: N/A
* **Minimal Result Backend Version**: N/A
* **Minimal OS and/or Kernel Version**: N/A
* **Minimal Broker Client Version**: N/A
* **Minimal Result Backend Client Version**: N/A
### Python Packages
<details>
<summary><b><code>pip freeze</code> Output:</b></summary>
<p>
```
Not relevant for this ticket.
```
</p>
</details>
### Other Dependencies
<details>
<p>
N/A
</p>
</details>
## Minimally Reproducible Test Case
<details>
<p>
```python
import re

from celery.app.routes import glob_to_re

regex = re.compile(glob_to_re('feed.tasks.foo*'))
# Fails: glob_to_re turns '*' into '.+?', which demands at least one character
assert regex.match('feed.tasks.foo')
```
</p>
</details>
# Expected Behavior
This task route:
```
task_routes = {
    'feed.tasks.foo*': 'feeds',
}
```
should match a task named `feed.tasks.foo`, in addition to `feed.tasks.foox`.
# Actual Behavior
THe task `feed.tasks.foo` does not match, as celery expects an additional character to satisfy its interpretation of a glob `*` as "one or more characters (non-greedy)".
| Weird, the code seems to be: https://github.com/celery/celery/blob/2c029c4f33d48c1951cc97628655a04024d50a0d/celery/app/routes.py#L36-L38
It ends up as `.+?`, which, unlike `.*`, requires at least one character; wonder why `.*` wasn't used in the first place...
Looks like that function has no tests either. Was added in #1137, and a comment there suggests using `.*`...
Oops, looks like there are tests, but not specifically for that function, just for the `MapRoute` class.
What would be a possible right solution? Did you figure anything out?
distutils has an implementation of [glob_to_re](https://www.kite.com/python/docs/distutils.filelist.glob_to_re).
Since the distutils module is being deprecated I propose we copy the code (and attribute it correctly) and see what happens.
```python
import fnmatch
import os
import re

def glob_to_re(pattern):
    """Translate a shell-like glob pattern to a regular expression; return
    a string containing the regex. Differs from 'fnmatch.translate()' in
    that '*' does not match "special characters" (which are
    platform-specific).
    """
    pattern_re = fnmatch.translate(pattern)
    # '?' and '*' in the glob pattern become '.' and '.*' in the RE, which
    # IMHO is wrong -- '?' and '*' aren't supposed to match slash in Unix,
    # and by extension they shouldn't match such "special characters" under
    # any OS. So change all non-escaped dots in the RE to match any
    # character except the special characters (currently: just os.sep).
    sep = os.sep
    if os.sep == '\\':
        # we're using a regex to manipulate a regex, so we need
        # to escape the backslash twice
        sep = r'\\\\'
    escaped = r'\1[^%s]' % sep
    pattern_re = re.sub(r'((?<!\\)(\\\\)*)\.', escaped, pattern_re)
    return pattern_re
```
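Alternatively, plain `fnmatch.translate` may already give the semantics we want here. A minimal stdlib check (nothing beyond `fnmatch` and `re` is assumed):
```python
import fnmatch
import re

regex = re.compile(fnmatch.translate('feed.tasks.foo*'))
assert regex.match('feed.tasks.foo')   # zero extra characters now matches
assert regex.match('feed.tasks.foox')  # and so does one or more
```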
It turns out switching to `fnmatch.translate` alone is enough. | 2021-03-09T14:43:41 |
celery/celery | 6,699 | celery__celery-6699 | [
"5469"
] | a78f8cc56a0c1f1536028f17f5ea900240a00816 | diff --git a/celery/utils/functional.py b/celery/utils/functional.py
--- a/celery/utils/functional.py
+++ b/celery/utils/functional.py
@@ -259,11 +259,11 @@ def _argsfromspec(spec, replace_defaults=True):
varargs = spec.varargs
varkw = spec.varkw
if spec.kwonlydefaults:
- split = len(spec.kwonlydefaults)
- kwonlyargs = spec.kwonlyargs[:-split]
+ kwonlyargs = set(spec.kwonlyargs) - set(spec.kwonlydefaults.keys())
if replace_defaults:
kwonlyargs_optional = [
- (kw, i) for i, kw in enumerate(spec.kwonlyargs[-split:])]
+ (kw, i) for i, kw in enumerate(spec.kwonlydefaults.keys())
+ ]
else:
kwonlyargs_optional = list(spec.kwonlydefaults.items())
else:
| diff --git a/t/unit/utils/test_functional.py b/t/unit/utils/test_functional.py
--- a/t/unit/utils/test_functional.py
+++ b/t/unit/utils/test_functional.py
@@ -225,7 +225,6 @@ def f(cls, x):
fun = head_from_fun(A.f, bound=True)
assert fun(1) == 1
- @pytest.mark.xfail(reason="Issue #5469")
def test_kwonly_required_args(self):
local = {}
fun = ('def f_kwargs_required(*, a="a", b, c=None):'
| Error with default kwargs in __header__ attribute generation
# Checklist
- [ ] I have read the relevant section in the
[contribution guide](http://docs.celeryproject.org/en/latest/contributing.html#other-bugs)
on reporting bugs.
- [ ] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [ ] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the bug was already fixed in the master branch.
- [x] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
## Mandatory Debugging Information
- [ ] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
- [ ] I have verified that the issue exists against the `master` branch of Celery.
- [ ] I have included the contents of ``pip freeze`` in the issue.
- [ ] I have included all the versions of all the external dependencies required
to reproduce this bug.
## Optional Debugging Information
- [x] I have tried reproducing the issue on more than one Python version
and/or implementation.
- [ ] I have tried reproducing the issue on more than one message broker and/or
result backend.
- [ ] I have tried reproducing the issue on more than one version of the message
broker and/or result backend.
- [ ] I have tried reproducing the issue on more than one operating system.
- [ ] I have tried reproducing the issue on more than one workers pool.
- [ ] I have tried reproducing the issue with autoscaling, retries,
ETA/Countdown & rate limits disabled.
- [x] I have tried reproducing the issue after downgrading
and/or upgrading Celery and its dependencies.
## Related Issues and Possible Duplicates
#### Related Issues
- None
#### Possible Duplicates
- None
## Environment & Settings
**Celery version**: 4.3.0
<details>
<summary><b><code>celery report</code> Output:</b></summary>
<p>
```
```
</p>
</details>
# Steps to Reproduce
## Required Dependencies
* **Minimal Python Version**: 3.7.1
* **Minimal Celery Version**: 4.3.0
* **Minimal Kombu Version**: 4.2.1
* **Minimal Broker Version**: redis==2.10.6
* **Minimal Result Backend Version**: N/A or Unknown
* **Minimal OS and/or Kernel Version**: N/A or Unknown
* **Minimal Broker Client Version**: redis==2.10.6
* **Minimal Result Backend Client Version**: N/A or Unknown
### Python Packages
<details>
<summary><b><code>pip freeze</code> Output:</b></summary>
<p>
```
amqp==2.2.2
appdirs==1.4.3
appnope==0.1.0
argh==0.26.2
attrs==17.4.0
autoflake==1.1
Babel==2.6.0
backports.shutil-get-terminal-size==1.0.0
billiard==3.5.0.3
bleach==3.0.2
boto3==1.7.22
botocore==1.10.22
cachetools==3.1.0
celery==4.3.0
certifi==2018.1.18
chardet==3.0.4
colorama==0.3.9
contextlib2==0.5.5
coverage==4.5.1
decorator==4.2.1
defusedxml==0.5.0
dj-database-url==0.5.0
Django==2.2
django-admin-rangefilter==0.3.12
django-allauth==0.35.0
django-celery-beat==1.1.1
django-cors-middleware==1.3.1
django-cursor-pagination==0.1.2
django-extensions==2.0.0
django-filter==1.1.0
django-formtools==2.1
django-otp==0.5.0
django-phonenumber-field==1.3.0
django-prometheus==1.0.15
django-two-factor-auth==1.8.0
django-viewflow-pro==1.4.2
djangorestframework==3.7.7
djangorestframework-simplejwt==3.2.2
djproxy==2.3.4
docopt==0.6.2
docutils==0.14
elasticsearch==6.0.0
enum34==1.1.6
ephem==3.7.6.0
factory-boy==2.11.1
Faker==0.8.11
flake8==3.5.0
flake8-isort==2.4
flake8-polyfill==1.0.2
freezegun==0.3.11
gevent==1.3.7
google-api-core==1.7.0
google-auth==1.6.2
googleapis-common-protos==1.5.6
graphene==2.0.1
graphene-django==2.0.0
graphql-core==2.0
graphql-relay==0.4.5
greenlet==0.4.15
grpcio==1.17.1
grpcio-tools==1.17.1
gunicorn==19.9.0
idna==2.6
img2pdf==0.3.2
ipdb==0.11
ipython==6.2.1
ipython-genutils==0.2.0
iso8601==0.1.12
isort==4.3.4
jedi==0.11.1
jmespath==0.9.3
jsonfield==2.0.2
kombu==4.2.1
mccabe==0.6.1
mock==2.0.0
mypy==0.641
mypy-extensions==0.4.1
mypy-protobuf==1.7
numpy==1.16.2
oauthlib==2.0.6
opencensus==0.1.10
packaging==17.1
parso==0.1.1
pathlib2==2.3.0
pathtools==0.1.2
pbr==3.1.1
pdfrw==0.4
permissive-search==0.1.7
pexpect==4.4.0
phonenumberslite==8.10.4
pickleshare==0.7.4
Pillow==5.4.1
pkginfo==1.4.2
pluggy==0.6.0
prometheus-client==0.5.0
promise==2.1
prompt-toolkit==1.0.15
protobuf==3.6.1
psycopg2-binary==2.7.6.1
ptyprocess==0.5.2
py==1.5.2
pyasn1==0.4.5
pyasn1-modules==0.2.4
pycodestyle==2.3.1
pycountry==18.5.26
pycurl==7.43.0.1
pyflakes==1.6.0
Pygments==2.2.0
PyJWT==1.6.1
pyparsing==2.2.0
pytest==3.4.1
pytest-cov==2.5.1
pytest-django==3.1.2
pytest-watch==4.1.0
pythia-python==1.5.2
python-dateutil==2.7.0
python-http-client==3.1.0
python3-openid==3.1.0
pytz==2018.3
PyYAML==3.12
qrcode==6.1
raven==6.6.0
readme-renderer==24.0
redis==2.10.6
reportlab==3.5.13
requests==2.20.0
requests_aws4auth==0.9
requests-mock==1.4.0
requests-oauthlib==0.8.0
requests-toolbelt==0.8.0
rsa==4.0
Rx==1.6.1
s3transfer==0.1.13
scandir==1.7
sendgrid==5.3.0
simplegeneric==0.8.1
singledispatch==3.4.0.3
six==1.11.0
snapshottest==0.5.0
termcolor==1.1.0
testfixtures==5.4.0
text-unidecode==1.2
tqdm==4.19.8
traitlets==4.3.2
twine==1.12.1
typed-ast==1.1.0
typing==3.6.4
ulid==1.1
Unidecode==1.0.22
urllib3==1.22
vine==1.1.4
watchdog==0.8.3
wcwidth==0.1.7
webencodings==0.5.1
whitenoise==3.3.1
wrapt==1.10.11
yapf==0.20.2
```
</p>
</details>
### Other Dependencies
<details>
<p>
N/A
</p>
</details>
## Minimally Reproducible Test Case
<details>
<p>
```python
@celery_app.task(name="some_name")
def some_function(
    *,
    first_kw: str = "default value 1",
    second_kw: str,
    third_kw: str,
    n_minus_one_kw: str,
    last_kw: str = "default value 2"
):
    service.make_service(
        kw_1=first_kw,
        kw_2=second_kw,
        kw_3=third_kw,
        kw_4=n_minus_one_kw,
        kw_5=last_kw,
    )
```
</p>
</details>
# Expected Behavior
I expect that my function's keyword arguments with default values will be optional. In the example, I assume that first_kw and last_kw will be optional, and the others will be required keyword arguments.
# Actual Behavior
```
kwonlyargs_optional = [(kw, i) for i, kw in enumerate(spec.kwonlyargs[-split:])]
```
builds the (kw, i) tuples from the last `split` entries of `spec.kwonlyargs`, i.e. it assumes all defaulted keyword-only arguments come last. But if I give default values to the first and last arguments and no defaults to the second, third, ..., (n-1)th arguments, then kwonlyargs_optional marks the trailing arguments as optional instead of the first and last ones. This then causes a type-checking error unless the first keyword argument is passed explicitly.
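A minimal stdlib sketch of the mismatch (the function `f` is made up; the spec fields are the same ones `celery.utils.functional` inspects):
```python
from inspect import getfullargspec

def f(*, a='x', b, c, d='y'):
    pass

spec = getfullargspec(f)
split = len(spec.kwonlydefaults)          # 2 defaults: a and d
print(spec.kwonlyargs[-split:])           # ['c', 'd'], wrongly treated as optional
print(list(spec.kwonlydefaults.keys()))   # ['a', 'd'], the actually defaulted args
```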
Possible solution:
```
kwonlyargs_optional = [(kw, i) for i, kw in enumerate(spec.kwonlydefaults.keys())]
```
| did you find any solution for this?
Well, I explicitly passed the kwarg's default value and that helped, because I don't want to patch celery.
So what would be the resolution of this issue in your opinion? Do you want someone to patch it, document it, or close the issue?
I sent a CR and we can resolve the issue after it is merged.
I added a test for this in master and marked it as `xfail`ed.
I'll try to come up with a better patch. | 2021-03-29T10:55:48 |
celery/celery | 6,710 | celery__celery-6710 | [
"6705"
] | 0953a4d9ecf7008d10236359864d334695cc530c | diff --git a/celery/bin/control.py b/celery/bin/control.py
--- a/celery/bin/control.py
+++ b/celery/bin/control.py
@@ -95,7 +95,8 @@ def status(ctx, timeout, destination, json, **kwargs):
nodecount, text.pluralize(nodecount, 'node')))
[email protected](cls=CeleryCommand)
[email protected](cls=CeleryCommand,
+ context_settings={'allow_extra_args': True})
@click.argument("action", type=click.Choice([
name for name, info in Panel.meta.items()
if info.type == 'inspect' and info.visible
@@ -128,9 +129,12 @@ def inspect(ctx, action, timeout, destination, json, **kwargs):
"""
callback = None if json else partial(_say_remote_command_reply, ctx,
show_reply=True)
- replies = ctx.obj.app.control.inspect(timeout=timeout,
+ arguments = _compile_arguments(action, ctx.args)
+ inspect = ctx.obj.app.control.inspect(timeout=timeout,
destination=destination,
- callback=callback)._request(action)
+ callback=callback)
+ replies = inspect._request(action,
+ **arguments)
if not replies:
raise CeleryCommandException(
| Celery inspect expected extra argument
I am trying to inspect a task which I can see in my redis broker as `SUCCESS`; however, it was never logged by the celery application.
I then tried inspecting it, but I am unable to do so, getting an error about an expected extra argument (screenshot of the error output omitted).
Output of `celery report`
```
software -> celery:5.0.2 (singularity) kombu:5.0.2 py:3.6.9
billiard:3.6.3.0 redis:3.5.3
platform -> system:Linux arch:64bit, ELF
kernel version:5.4.0-1038-aws imp:CPython
loader -> celery.loaders.app.AppLoader
settings -> transport:redis results:redis://redis.xyz.service:6379/0
```
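For reference, the patch above forwards any extra command-line arguments to the inspect request, so invocations that pass arguments to an action become possible, e.g. (a sketch; `query_task` is an existing inspect action that accepts task ids):
```
celery -A proj inspect query_task <task-id>
```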
This is indeed a bug.
Thank you! | 2021-04-05T12:46:53 |
|
celery/celery | 6,711 | celery__celery-6711 | [
"6701"
] | ae20f2fcc8553af25f15699fe41a07a3e5db19a8 | diff --git a/celery/app/base.py b/celery/app/base.py
--- a/celery/app/base.py
+++ b/celery/app/base.py
@@ -274,9 +274,10 @@ def __init__(self, main=None, loader=None, backend=None,
self.__autoset('broker_url', broker)
self.__autoset('result_backend', backend)
self.__autoset('include', include)
- self.__autoset('broker_use_ssl', kwargs.get('broker_use_ssl'))
- self.__autoset('redis_backend_use_ssl',
- kwargs.get('redis_backend_use_ssl'))
+
+ for key, value in kwargs.items():
+ self.__autoset(key, value)
+
self._conf = Settings(
PendingConfiguration(
self._preconf, self._finalize_pending_conf),
diff --git a/celery/bin/worker.py b/celery/bin/worker.py
--- a/celery/bin/worker.py
+++ b/celery/bin/worker.py
@@ -45,8 +45,20 @@ def __init__(self):
def convert(self, value, param, ctx):
# Pools like eventlet/gevent needs to patch libs as early
# as possible.
- return concurrency.get_implementation(
- value) or ctx.obj.app.conf.worker_pool
+ value = super().convert(value, param, ctx)
+ worker_pool = ctx.obj.app.conf.worker_pool
+ if value == 'prefork' and worker_pool:
+ # If we got the default pool through the CLI
+ # we need to check if the worker pool was configured.
+ # If the worker pool was configured, we shouldn't use the default.
+ value = concurrency.get_implementation(worker_pool)
+ else:
+ value = concurrency.get_implementation(value)
+
+ if not value:
+ value = concurrency.get_implementation(worker_pool)
+
+ return value
class Hostname(StringParamType):
@@ -140,7 +152,8 @@ def detach(path, argv, logfile=None, pidfile=None, uid=None,
'--statedb',
cls=CeleryOption,
type=click.Path(),
- callback=lambda ctx, _, value: value or ctx.obj.app.conf.worker_state_db,
+ callback=lambda ctx, _,
+ value: value or ctx.obj.app.conf.worker_state_db,
help_group="Worker Options",
help="Path to the state database. The extension '.db' may be "
"appended to the filename.")
@@ -161,7 +174,8 @@ def detach(path, argv, logfile=None, pidfile=None, uid=None,
@click.option('--prefetch-multiplier',
type=int,
metavar="<prefetch multiplier>",
- callback=lambda ctx, _, value: value or ctx.obj.app.conf.worker_prefetch_multiplier,
+ callback=lambda ctx, _,
+ value: value or ctx.obj.app.conf.worker_prefetch_multiplier,
cls=CeleryOption,
help_group="Worker Options",
help="Set custom prefetch multiplier value"
@@ -170,7 +184,8 @@ def detach(path, argv, logfile=None, pidfile=None, uid=None,
'--concurrency',
type=int,
metavar="<concurrency>",
- callback=lambda ctx, _, value: value or ctx.obj.app.conf.worker_concurrency,
+ callback=lambda ctx, _,
+ value: value or ctx.obj.app.conf.worker_concurrency,
cls=CeleryOption,
help_group="Pool Options",
help="Number of child processes processing the queue. "
@@ -268,7 +283,8 @@ def detach(path, argv, logfile=None, pidfile=None, uid=None,
@click.option('-s',
'--schedule-filename',
'--schedule',
- callback=lambda ctx, _, value: value or ctx.obj.app.conf.beat_schedule_filename,
+ callback=lambda ctx, _,
+ value: value or ctx.obj.app.conf.beat_schedule_filename,
cls=CeleryOption,
help_group="Embedded Beat Options")
@click.option('--scheduler',
| diff --git a/t/unit/app/test_app.py b/t/unit/app/test_app.py
--- a/t/unit/app/test_app.py
+++ b/t/unit/app/test_app.py
@@ -274,6 +274,10 @@ def test_with_broker(self, patching):
with self.Celery(broker='foo://baribaz') as app:
assert app.conf.broker_url == 'foo://baribaz'
+ def test_pending_confugration__kwargs(self):
+ with self.Celery(foo='bar') as app:
+ assert app.conf.foo == 'bar'
+
def test_pending_configuration__setattr(self):
with self.Celery(broker='foo://bar') as app:
app.conf.task_default_delivery_mode = 44
| app.conf.worker_pool is not honored
The option `app.conf.worker_pool` is not honored. As I've been asked in previous issues to follow the template, I will try to.
# Checklist
- [X] I have verified that the issue exists against the `master` branch of Celery.
- [ ] This has already been asked to the [discussion group](https://groups.google.com/forum/#!forum/celery-users) first: I didn't ask. This is a super easily reproducible bug, and I used `git bisect` to identify the commit responsible.
- [X] I have read the relevant section in the
[contribution guide](http://docs.celeryproject.org/en/latest/contributing.html#other-bugs)
on reporting bugs.
- [X] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [X] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [X] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the bug was already fixed in the master branch.
- [X] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
## Mandatory Debugging Information
- [X] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
- [X] I have verified that the issue exists against the `master` branch of Celery.
- [X] I have included the contents of ``pip freeze`` in the issue.
- [X] I have included all the versions of all the external dependencies required
to reproduce this bug.
#### Related Issues
- None
#### Possible Duplicates
- None
## Environment & Settings
5.0.5 (singularity)
<details>
<summary><b><code>celery report</code> Output:</b></summary>
<p>
```
software -> celery:5.0.5 (singularity) kombu:5.0.2 py:3.8.2
billiard:3.6.3.0 py-amqp:5.0.5
platform -> system:Linux arch:64bit, ELF
kernel version:4.19.121-linuxkit imp:CPython
loader -> celery.loaders.app.AppLoader
settings -> transport:amqp results:disabled
deprecated_settings: None
worker_concurrency: 1
worker_pool: 'solo'
```
</p>
</details>
# Steps to Reproduce
## Required Dependencies
* **Minimal Python Version**: N/A or Unknown
* **Minimal Celery Version**: N/A or Unknown
* **Minimal Kombu Version**: N/A or Unknown
* **Minimal Broker Version**: N/A or Unknown
* **Minimal Result Backend Version**: N/A or Unknown
* **Minimal OS and/or Kernel Version**: N/A or Unknown
* **Minimal Broker Client Version**: N/A or Unknown
* **Minimal Result Backend Client Version**: N/A or Unknown
### Python Packages
<details>
<summary><b><code>pip freeze</code> Output:</b></summary>
<p>
```
amqp 5.0.5
billiard 3.6.3.0
celery 5.0.5 /tmp/celery
click 7.1.2
click-didyoumean 0.0.3
click-plugins 1.1.1
click-repl 0.1.6
kombu 5.0.2
pip 21.0.1
prompt-toolkit 3.0.18
pytz 2021.1
setuptools 54.1.2
six 1.15.0
vine 5.0.0
wcwidth 0.2.5
wheel 0.36.2
```
</p>
</details>
### Other Dependencies
<details>
<p>
N/A
</p>
</details>
## Minimally Reproducible Test Case
<details>
<p>
```python
from celery import Celery
app = Celery(
    'worker',
)
app.conf.update(
    worker_concurrency=1,
    worker_pool='solo',
)
```
</p>
</details>
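A quick way to confirm which pool implementation this configuration resolves to (a sketch building on the snippet above and using `celery.concurrency.get_implementation`, the same helper the patch above calls):
```python
from celery import concurrency

pool_cls = concurrency.get_implementation(app.conf.worker_pool)
print(pool_cls)  # <class 'celery.concurrency.solo.TaskPool'>, yet the banner shows prefork
```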
# Expected Behavior
The banner displayed by `celery -A worker worker` should contain `concurrency: 1 (solo)`
# Actual Behavior
The banner contains `concurrency: 1 (prefork)`. The parameter `worker_concurrency` is honored, but `worker_pool` is ignored.
The problem apparently comes from 01651d2f5d9ad20dfb9812d92831510147974b23
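Until a release with the fix is available, the pool can be forced on the command line, which bypasses the broken configuration lookup (a workaround sketch using the standard worker flags):
```
celery -A worker worker --pool solo --concurrency 1
```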
| 2021-04-05T14:00:28 |
celery/celery | 6,733 | celery__celery-6733 | [
"6721"
] | 230c9acd951dddad0a73ddc5b735f630acdfc12a | diff --git a/celery/canvas.py b/celery/canvas.py
--- a/celery/canvas.py
+++ b/celery/canvas.py
@@ -1170,21 +1170,25 @@ def _apply_tasks(self, tasks, producer=None, app=None, p=None,
# we are able to tell when we are at the end by checking if
# next_task is None. This enables us to set the chord size
# without burning through the entire generator. See #3021.
+ chord_size = 0
for task_index, (current_task, next_task) in enumerate(
lookahead(tasks)
):
+ # We expect that each task must be part of the same group which
+ # seems sensible enough. If that's somehow not the case we'll
+ # end up messing up chord counts and there are all sorts of
+ # awful race conditions to think about. We'll hope it's not!
sig, res, group_id = current_task
- _chord = sig.options.get("chord") or chord
- if _chord is not None and next_task is None:
- chord_size = task_index + 1
- if isinstance(sig, _chain):
- if sig.tasks[-1].subtask_type == 'chord':
- chord_size = sig.tasks[-1].__length_hint__()
- else:
- chord_size = task_index + len(sig.tasks[-1])
+ chord_obj = sig.options.get("chord") or chord
+ # We need to check the chord size of each contributing task so
+ # that when we get to the final one, we can correctly set the
+ # size in the backend and the chord can be sensible completed.
+ chord_size += _chord._descend(sig)
+ if chord_obj is not None and next_task is None:
+ # Per above, sanity check that we only saw one group
app.backend.set_chord_size(group_id, chord_size)
sig.apply_async(producer=producer, add_to_parent=False,
- chord=_chord, args=args, kwargs=kwargs,
+ chord=chord_obj, args=args, kwargs=kwargs,
**options)
# adding callback to result, such that it will gradually
# fulfill the barrier.
@@ -1296,8 +1300,8 @@ def app(self):
return app if app is not None else current_app
[email protected]_type()
-class chord(Signature):
[email protected]_type(name="chord")
+class _chord(Signature):
r"""Barrier synchronization primitive.
A chord consists of a header and a body.
@@ -1415,20 +1419,27 @@ def apply(self, args=None, kwargs=None,
)
@classmethod
- def __descend(cls, sig_obj):
+ def _descend(cls, sig_obj):
# Sometimes serialized signatures might make their way here
if not isinstance(sig_obj, Signature) and isinstance(sig_obj, dict):
sig_obj = Signature.from_dict(sig_obj)
if isinstance(sig_obj, group):
# Each task in a group counts toward this chord
subtasks = getattr(sig_obj.tasks, "tasks", sig_obj.tasks)
- return sum(cls.__descend(task) for task in subtasks)
+ return sum(cls._descend(task) for task in subtasks)
elif isinstance(sig_obj, _chain):
- # The last element in a chain counts toward this chord
- return cls.__descend(sig_obj.tasks[-1])
+ # The last non-empty element in a chain counts toward this chord
+ for child_sig in sig_obj.tasks[-1::-1]:
+ child_size = cls._descend(child_sig)
+ if child_size > 0:
+ return child_size
+ else:
+ # We have to just hope this chain is part of some encapsulating
+ # signature which is valid and can fire the chord body
+ return 0
elif isinstance(sig_obj, chord):
# The child chord's body counts toward this chord
- return cls.__descend(sig_obj.body)
+ return cls._descend(sig_obj.body)
elif isinstance(sig_obj, Signature):
# Each simple signature counts as 1 completion for this chord
return 1
@@ -1437,7 +1448,7 @@ def __descend(cls, sig_obj):
def __length_hint__(self):
tasks = getattr(self.tasks, "tasks", self.tasks)
- return sum(self.__descend(task) for task in tasks)
+ return sum(self._descend(task) for task in tasks)
def run(self, header, body, partial_args, app=None, interval=None,
countdown=1, max_retries=None, eager=False,
@@ -1537,6 +1548,11 @@ def _get_app(self, body=None):
body = getitem_property('kwargs.body', 'Body task of chord.')
+# Add a back-compat alias for the previous `chord` class name which conflicts
+# with keyword arguments elsewhere in this file
+chord = _chord
+
+
def signature(varies, *args, **kwargs):
"""Create new signature.
| diff --git a/t/integration/test_canvas.py b/t/integration/test_canvas.py
--- a/t/integration/test_canvas.py
+++ b/t/integration/test_canvas.py
@@ -704,6 +704,112 @@ def test_nested_group_group(self, manager):
res = sig.delay()
assert res.get(timeout=TIMEOUT) == [42, 42]
+ def test_nested_group_chord_counting_simple(self, manager):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ gchild_sig = identity.si(42)
+ child_chord = chord((gchild_sig, ), identity.s())
+ group_sig = group((child_chord, ))
+ res = group_sig.delay()
+ # Wait for the result to land and confirm its value is as expected
+ assert res.get(timeout=TIMEOUT) == [[42]]
+
+ def test_nested_group_chord_counting_chain(self, manager):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ gchild_count = 42
+ gchild_sig = chain((identity.si(1337), ) * gchild_count)
+ child_chord = chord((gchild_sig, ), identity.s())
+ group_sig = group((child_chord, ))
+ res = group_sig.delay()
+ # Wait for the result to land and confirm its value is as expected
+ assert res.get(timeout=TIMEOUT) == [[1337]]
+
+ def test_nested_group_chord_counting_group(self, manager):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ gchild_count = 42
+ gchild_sig = group((identity.si(1337), ) * gchild_count)
+ child_chord = chord((gchild_sig, ), identity.s())
+ group_sig = group((child_chord, ))
+ res = group_sig.delay()
+ # Wait for the result to land and confirm its value is as expected
+ assert res.get(timeout=TIMEOUT) == [[1337] * gchild_count]
+
+ def test_nested_group_chord_counting_chord(self, manager):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ gchild_count = 42
+ gchild_sig = chord(
+ (identity.si(1337), ) * gchild_count, identity.si(31337),
+ )
+ child_chord = chord((gchild_sig, ), identity.s())
+ group_sig = group((child_chord, ))
+ res = group_sig.delay()
+ # Wait for the result to land and confirm its value is as expected
+ assert res.get(timeout=TIMEOUT) == [[31337]]
+
+ def test_nested_group_chord_counting_mixed(self, manager):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ gchild_count = 42
+ child_chord = chord(
+ (
+ identity.si(42),
+ chain((identity.si(42), ) * gchild_count),
+ group((identity.si(42), ) * gchild_count),
+ chord((identity.si(42), ) * gchild_count, identity.si(1337)),
+ ),
+ identity.s(),
+ )
+ group_sig = group((child_chord, ))
+ res = group_sig.delay()
+ # Wait for the result to land and confirm its value is as expected. The
+ # group result gets unrolled into the encapsulating chord, hence the
+ # weird unpacking below
+ assert res.get(timeout=TIMEOUT) == [
+ [42, 42, *((42, ) * gchild_count), 1337]
+ ]
+
+ @pytest.mark.xfail(raises=TimeoutError, reason="#6734")
+ def test_nested_group_chord_body_chain(self, manager):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ child_chord = chord(identity.si(42), chain((identity.s(), )))
+ group_sig = group((child_chord, ))
+ res = group_sig.delay()
+ # The result can be expected to timeout since it seems like its
+ # underlying promise might not be getting fulfilled (ref #6734). Pick a
+ # short timeout since we don't want to block for ages and this is a
+ # fairly simple signature which should run pretty quickly.
+ expected_result = [[42]]
+ with pytest.raises(TimeoutError) as expected_excinfo:
+ res.get(timeout=TIMEOUT / 10)
+ # Get the child `AsyncResult` manually so that we don't have to wait
+ # again for the `GroupResult`
+ assert res.children[0].get(timeout=TIMEOUT) == expected_result[0]
+ assert res.get(timeout=TIMEOUT) == expected_result
+ # Re-raise the expected exception so this test will XFAIL
+ raise expected_excinfo.value
+
def assert_ids(r, expected_value, expected_root_id, expected_parent_id):
root_id, parent_id, value = r.get(timeout=TIMEOUT)
diff --git a/t/unit/tasks/test_canvas.py b/t/unit/tasks/test_canvas.py
--- a/t/unit/tasks/test_canvas.py
+++ b/t/unit/tasks/test_canvas.py
@@ -1,5 +1,5 @@
import json
-from unittest.mock import MagicMock, Mock, call, patch, sentinel
+from unittest.mock import MagicMock, Mock, call, patch, sentinel, ANY
import pytest
import pytest_subtests # noqa: F401
@@ -782,6 +782,194 @@ def test_kwargs_delay_partial(self):
res = self.helper_test_get_delay(x.delay(y=1))
assert res == [2, 2]
+ def test_apply_from_generator(self):
+ child_count = 42
+ child_sig = self.add.si(0, 0)
+ child_sigs_gen = (child_sig for _ in range(child_count))
+ group_sig = group(child_sigs_gen)
+ with patch("celery.canvas.Signature.apply_async") as mock_apply_async:
+ res_obj = group_sig.apply_async()
+ assert mock_apply_async.call_count == child_count
+ assert len(res_obj.children) == child_count
+
+ # This needs the current app for some reason not worth digging into
+ @pytest.mark.usefixtures('depends_on_current_app')
+ def test_apply_from_generator_empty(self):
+ empty_gen = (False for _ in range(0))
+ group_sig = group(empty_gen)
+ with patch("celery.canvas.Signature.apply_async") as mock_apply_async:
+ res_obj = group_sig.apply_async()
+ assert mock_apply_async.call_count == 0
+ assert len(res_obj.children) == 0
+
+ # In the following tests, getting the group ID is a pain so we just use
+ # `ANY` to wildcard it when we're checking on calls made to our mocks
+ def test_apply_contains_chord(self):
+ gchild_count = 42
+ gchild_sig = self.add.si(0, 0)
+ gchild_sigs = (gchild_sig, ) * gchild_count
+ child_chord = chord(gchild_sigs, gchild_sig)
+ group_sig = group((child_chord, ))
+ with patch.object(
+ self.app.backend, "set_chord_size",
+ ) as mock_set_chord_size, patch(
+ "celery.canvas.Signature.apply_async",
+ ) as mock_apply_async:
+ res_obj = group_sig.apply_async()
+ # We only see applies for the header grandchildren because the tasks
+ # are never actually run due to our mocking of `apply_async()`
+ assert mock_apply_async.call_count == gchild_count
+ assert len(res_obj.children) == len(group_sig.tasks)
+ # We must have set the chord size for the group of tasks which makes up
+ # the header of the `child_chord`, just before we apply the last task.
+ mock_set_chord_size.assert_called_once_with(ANY, gchild_count)
+
+ def test_apply_contains_chords_containing_chain(self):
+ ggchild_count = 42
+ ggchild_sig = self.add.si(0, 0)
+ gchild_sig = chain((ggchild_sig, ) * ggchild_count)
+ child_count = 24
+ child_chord = chord((gchild_sig, ), ggchild_sig)
+ group_sig = group((child_chord, ) * child_count)
+ with patch.object(
+ self.app.backend, "set_chord_size",
+ ) as mock_set_chord_size, patch(
+ "celery.canvas.Signature.apply_async",
+ ) as mock_apply_async:
+ res_obj = group_sig.apply_async()
+ # We only see applies for the header grandchildren because the tasks
+ # are never actually run due to our mocking of `apply_async()`
+ assert mock_apply_async.call_count == child_count
+ assert len(res_obj.children) == child_count
+ # We must have set the chord sizes based on the number of tail tasks of
+ # the encapsulated chains - in this case 1 for each child chord
+ mock_set_chord_size.assert_has_calls((call(ANY, 1), ) * child_count)
+
+ @pytest.mark.xfail(reason="Invalid canvas setup with bad exception")
+ def test_apply_contains_chords_containing_empty_chain(self):
+ gchild_sig = chain(tuple())
+ child_count = 24
+ child_chord = chord((gchild_sig, ), self.add.si(0, 0))
+ group_sig = group((child_chord, ) * child_count)
+ # This is an invalid setup because we can't complete a chord header if
+ # there are no actual tasks which will run in it. However, the current
+ # behaviour of an `IndexError` isn't particularly helpful to a user.
+ res_obj = group_sig.apply_async()
+
+ def test_apply_contains_chords_containing_chain_with_empty_tail(self):
+ ggchild_count = 42
+ ggchild_sig = self.add.si(0, 0)
+ tail_count = 24
+ gchild_sig = chain(
+ (ggchild_sig, ) * ggchild_count +
+ (group((ggchild_sig, ) * tail_count), group(tuple()), ),
+ )
+ child_chord = chord((gchild_sig, ), ggchild_sig)
+ group_sig = group((child_chord, ))
+ with patch.object(
+ self.app.backend, "set_chord_size",
+ ) as mock_set_chord_size, patch(
+ "celery.canvas.Signature.apply_async",
+ ) as mock_apply_async:
+ res_obj = group_sig.apply_async()
+ # We only see applies for the header grandchildren because the tasks
+ # are never actually run due to our mocking of `apply_async()`
+ assert mock_apply_async.call_count == 1
+ assert len(res_obj.children) == 1
+ # We must have set the chord sizes based on the size of the last
+ # non-empty task in the encapsulated chains - in this case `tail_count`
+ # for the group preceding the empty one in each grandchild chain
+ mock_set_chord_size.assert_called_once_with(ANY, tail_count)
+
+ def test_apply_contains_chords_containing_group(self):
+ ggchild_count = 42
+ ggchild_sig = self.add.si(0, 0)
+ gchild_sig = group((ggchild_sig, ) * ggchild_count)
+ child_count = 24
+ child_chord = chord((gchild_sig, ), ggchild_sig)
+ group_sig = group((child_chord, ) * child_count)
+ with patch.object(
+ self.app.backend, "set_chord_size",
+ ) as mock_set_chord_size, patch(
+ "celery.canvas.Signature.apply_async",
+ ) as mock_apply_async:
+ res_obj = group_sig.apply_async()
+ # We see applies for all of the header grandchildren because the tasks
+ # are never actually run due to our mocking of `apply_async()`
+ assert mock_apply_async.call_count == child_count * ggchild_count
+ assert len(res_obj.children) == child_count
+ # We must have set the chord sizes based on the number of tail tasks of
+ # the encapsulated groups - in this case `ggchild_count`
+ mock_set_chord_size.assert_has_calls(
+ (call(ANY, ggchild_count), ) * child_count,
+ )
+
+ @pytest.mark.xfail(reason="Invalid canvas setup but poor behaviour")
+ def test_apply_contains_chords_containing_empty_group(self):
+ gchild_sig = group(tuple())
+ child_count = 24
+ child_chord = chord((gchild_sig, ), self.add.si(0, 0))
+ group_sig = group((child_chord, ) * child_count)
+ with patch.object(
+ self.app.backend, "set_chord_size",
+ ) as mock_set_chord_size, patch(
+ "celery.canvas.Signature.apply_async",
+ ) as mock_apply_async:
+ res_obj = group_sig.apply_async()
+ # We only see applies for the header grandchildren because the tasks
+ # are never actually run due to our mocking of `apply_async()`
+ assert mock_apply_async.call_count == child_count
+ assert len(res_obj.children) == child_count
+ # This is actually kind of meaningless because, similar to the empty
+ # chain test, this is an invalid setup. However, we should probably
+        # expect that the chords are dealt with in some other way, probably
+ # being left incomplete forever...
+ mock_set_chord_size.assert_has_calls((call(ANY, 0), ) * child_count)
+
+ def test_apply_contains_chords_containing_chord(self):
+ ggchild_count = 42
+ ggchild_sig = self.add.si(0, 0)
+ gchild_sig = chord((ggchild_sig, ) * ggchild_count, ggchild_sig)
+ child_count = 24
+ child_chord = chord((gchild_sig, ), ggchild_sig)
+ group_sig = group((child_chord, ) * child_count)
+ with patch.object(
+ self.app.backend, "set_chord_size",
+ ) as mock_set_chord_size, patch(
+ "celery.canvas.Signature.apply_async",
+ ) as mock_apply_async:
+ res_obj = group_sig.apply_async()
+ # We see applies for all of the header great-grandchildren because the
+ # tasks are never actually run due to our mocking of `apply_async()`
+ assert mock_apply_async.call_count == child_count * ggchild_count
+ assert len(res_obj.children) == child_count
+ # We must have set the chord sizes based on the number of tail tasks of
+ # the deeply encapsulated chords' header tasks, as well as for each
+ # child chord. This means we have `child_count` interleaved calls to
+ # set chord sizes of 1 and `ggchild_count`.
+ mock_set_chord_size.assert_has_calls(
+ (call(ANY, 1), call(ANY, ggchild_count), ) * child_count,
+ )
+
+ def test_apply_contains_chords_containing_empty_chord(self):
+ gchild_sig = chord(tuple(), self.add.si(0, 0))
+ child_count = 24
+ child_chord = chord((gchild_sig, ), self.add.si(0, 0))
+ group_sig = group((child_chord, ) * child_count)
+ with patch.object(
+ self.app.backend, "set_chord_size",
+ ) as mock_set_chord_size, patch(
+ "celery.canvas.Signature.apply_async",
+ ) as mock_apply_async:
+ res_obj = group_sig.apply_async()
+ # We only see applies for the header grandchildren because the tasks
+ # are never actually run due to our mocking of `apply_async()`
+ assert mock_apply_async.call_count == child_count
+ assert len(res_obj.children) == child_count
+ # We must have set the chord sizes based on the number of tail tasks of
+ # the encapsulated chains - in this case 1 for each child chord
+ mock_set_chord_size.assert_has_calls((call(ANY, 1), ) * child_count)
+
class test_chord(CanvasCase):
 | 5.1.0b1 chord of chains don't work
<!--
Please fill this template entirely and do not erase parts of it.
We reserve the right to close without a response
bug reports which are incomplete.
-->
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [ ] This has already been asked to the [discussion group](https://groups.google.com/forum/#!forum/celery-users) first.
- [ ] I have read the relevant section in the
[contribution guide](http://docs.celeryproject.org/en/latest/contributing.html#other-bugs)
on reporting bugs.
- [ ] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [ ] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [ ] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the bug was already fixed in the master branch.
- [ ] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
## Mandatory Debugging Information
- [x] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [ ] I have included the contents of ``pip freeze`` in the issue.
- [ ] I have included all the versions of all the external dependencies required
to reproduce this bug.
## Optional Debugging Information
<!--
Try some of the below if you think they are relevant.
It will help us figure out the scope of the bug and how many users it affects.
-->
- [ ] I have tried reproducing the issue on more than one Python version
and/or implementation.
- [ ] I have tried reproducing the issue on more than one message broker and/or
result backend.
- [ ] I have tried reproducing the issue on more than one version of the message
broker and/or result backend.
- [ ] I have tried reproducing the issue on more than one operating system.
- [ ] I have tried reproducing the issue on more than one workers pool.
- [ ] I have tried reproducing the issue with autoscaling, retries,
ETA/Countdown & rate limits disabled.
- [x] I have tried reproducing the issue after downgrading
and/or upgrading Celery and its dependencies.
## Related Issues and Possible Duplicates
<!--
Please make sure to search and mention any related issues
or possible duplicates to this issue as requested by the checklist above.
This may or may not include issues in other repositories that the Celery project
maintains or other repositories that are dependencies of Celery.
If you don't know how to mention issues, please refer to Github's documentation
on the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests
-->
#### Related Issues
- None
#### Possible Duplicates
- None
## Environment & Settings
<!-- Include the contents of celery --version below -->
**Celery version**: ```5.1.0b1 (singularity)```
<!-- Include the output of celery -A proj report below -->
<details>
<summary><b><code>celery report</code> Output:</b></summary>
<p>
```
software -> celery:5.1.0b1 (singularity) kombu:5.1.0b1 py:3.8.5
billiard:3.6.4.0 py-amqp:5.0.6
platform -> system:Linux arch:64bit, ELF
kernel version:5.8.0-48-generic imp:CPython
loader -> celery.loaders.app.AppLoader
settings -> transport:amqp results:redis://localhost/
broker_url: 'amqp://guest:********@localhost:5672//'
result_backend: 'redis://localhost/'
deprecated_settings: None
```
</p>
</details>
# Steps to Reproduce
## Required Dependencies
<!-- Please fill the required dependencies to reproduce this issue -->
* **Minimal Python Version**: N/A or Unknown
* **Minimal Celery Version**: N/A or Unknown
* **Minimal Kombu Version**: N/A or Unknown
* **Minimal Broker Version**: N/A or Unknown
* **Minimal Result Backend Version**: N/A or Unknown
* **Minimal OS and/or Kernel Version**: N/A or Unknown
* **Minimal Broker Client Version**: N/A or Unknown
* **Minimal Result Backend Client Version**: N/A or Unknown
### Python Packages
<!-- Please fill the contents of pip freeze below -->
<details>
<summary><b><code>pip freeze</code> Output:</b></summary>
<p>
```
```
</p>
</details>
### Other Dependencies
<!--
Please provide system dependencies, configuration files
and other dependency information if applicable
-->
<details>
<p>
N/A
</p>
</details>
## Minimally Reproducible Test Case
<!--
Please provide a reproducible test case.
Refer to the Reporting Bugs section in our contribution guide.
We prefer submitting test cases in the form of a PR to our integration test suite.
If you can provide one, please mention the PR number below.
If not, please attach the most minimal code example required to reproduce the issue below.
If the test case is too large, please include a link to a gist or a repository below.
-->
<details>
<p>
```python
import celery
app = celery.Celery(
broker='amqp://localhost',
backend='redis://localhost',
)
@app.task
def task_a(number):
print(f'Task A: {number}')
return number
@app.task
def task_b(number):
print(f'Task B: {number}')
return number
@app.task
def task_c(number):
print(f'Task C: {number}')
if __name__ == '__main__':
celery.chord(
(celery.chain(task_a.s(i), task_b.s()))
for i in range(3)
)(task_c.s()) # yapf: disable
app.start()
```
</p>
</details>
# Expected Behavior
<!-- Describe in detail what you expect to happen -->
Expected output:
```
[2021-04-11 16:44:51,364: WARNING/ForkPoolWorker-15] Task A: 0
[2021-04-11 16:44:51,364: WARNING/ForkPoolWorker-16] Task A: 1
[2021-04-11 16:44:51,364: WARNING/ForkPoolWorker-1] Task A: 2
[2021-04-11 16:44:51,387: WARNING/ForkPoolWorker-4] Task B: 2
[2021-04-11 16:44:51,387: WARNING/ForkPoolWorker-3] Task B: 0
[2021-04-11 16:44:51,387: WARNING/ForkPoolWorker-2] Task B: 1
[2021-04-11 16:44:51,413: WARNING/ForkPoolWorker-15] Task C: [0, 1, 2]
```
On `celery==5.0.5` it works fine
# Actual Behavior
<!--
Describe in detail what actually happened.
Please include a backtrace and surround it with triple backticks (```).
In addition, include the Celery daemon logs, the broker logs,
the result backend logs and system logs below if they will help us debug
the issue.
-->
```
[2021-04-11 16:33:24,583: WARNING/ForkPoolWorker-15] Task A: 0
[2021-04-11 16:33:24,583: WARNING/ForkPoolWorker-1] Task A: 2
[2021-04-11 16:33:24,583: WARNING/ForkPoolWorker-16] Task A: 1
[2021-04-11 16:33:24,608: WARNING/ForkPoolWorker-4] Task B: 2
[2021-04-11 16:33:24,608: WARNING/ForkPoolWorker-2] Task B: 0
[2021-04-11 16:33:24,608: WARNING/ForkPoolWorker-3] Task B: 1
```
| Hey @Sobes76rus :wave:,
Thank you for opening an issue. We will get back to you as soon as we can.
Also, check out our [Open Collective](https://opencollective.com/celery) and consider backing us - every little helps!
We also offer priority support for our sponsors.
If you require immediate assistance please consider sponsoring us.
@maybe-sybr If you can't get to it, I will.
I marked it as a blocker for 5.1.0 so we have to do it.
Looks like a bug in chord size counting again. For some reason we've counted 8 chord parts to wait for, which is way off what it should be (3, one per chain):
```
[2021-04-13 09:52:54,712: INFO/MainProcess] Connected to amqp://guest:**@127.13.37.1:5672//
[2021-04-13 09:52:54,717: INFO/MainProcess] mingle: searching for neighbors
[2021-04-13 09:52:55,737: INFO/MainProcess] mingle: all alone
[2021-04-13 09:52:55,752: INFO/MainProcess] celery@karma ready.
[2021-04-13 09:52:55,752: INFO/MainProcess] Received task: __main__.task_a[8c3bd499-cf53-4086-8c9d-73cc6bd981f4]
[2021-04-13 09:52:55,753: INFO/MainProcess] Received task: __main__.task_a[664d91de-4430-49ff-8965-476c2b4c80e2]
[2021-04-13 09:52:55,753: INFO/MainProcess] Received task: __main__.task_a[86ae66bb-4f43-46a8-8851-b62674cc3ad7]
[2021-04-13 09:52:55,855: WARNING/ForkPoolWorker-7] Task A: 0
[2021-04-13 09:52:55,855: WARNING/ForkPoolWorker-8] Task A: 1
[2021-04-13 09:52:55,856: WARNING/ForkPoolWorker-1] Task A: 2
[2021-04-13 09:52:55,868: INFO/MainProcess] Received task: __main__.task_b[d3161d3c-ab8f-4610-83ff-24fd3d12f538]
[2021-04-13 09:52:55,868: INFO/ForkPoolWorker-8] Task __main__.task_a[664d91de-4430-49ff-8965-476c2b4c80e2] succeeded in 0.013156214000446198s: 1
[2021-04-13 09:52:55,868: INFO/MainProcess] Received task: __main__.task_b[44dec7e8-152e-4e11-bd70-ce4e7cb8c84b]
[2021-04-13 09:52:55,868: INFO/ForkPoolWorker-7] Task __main__.task_a[8c3bd499-cf53-4086-8c9d-73cc6bd981f4] succeeded in 0.013253123000140477s: 0
[2021-04-13 09:52:55,868: INFO/ForkPoolWorker-1] Task __main__.task_a[86ae66bb-4f43-46a8-8851-b62674cc3ad7] succeeded in 0.01293888700001844s: 2
[2021-04-13 09:52:55,868: INFO/MainProcess] Received task: __main__.task_b[6da81e65-af87-4770-92fd-4ce3ce9e5937]
[2021-04-13 09:52:55,869: WARNING/ForkPoolWorker-2] Task B: 0
[2021-04-13 09:52:55,869: WARNING/ForkPoolWorker-3] Task B: 1
[2021-04-13 09:52:55,869: WARNING/ForkPoolWorker-4] Task B: 2
[2021-04-13 09:52:55,871: WARNING/ForkPoolWorker-2] OCPR: chord_size_bytes=b'8' ready=1
[2021-04-13 09:52:55,871: WARNING/ForkPoolWorker-4] OCPR: chord_size_bytes=b'8' ready=2
[2021-04-13 09:52:55,871: WARNING/ForkPoolWorker-3] OCPR: chord_size_bytes=b'8' ready=3
[2021-04-13 09:52:55,871: INFO/ForkPoolWorker-2] Task __main__.task_b[d3161d3c-ab8f-4610-83ff-24fd3d12f538] succeeded in 0.001984218999496079s: 0
[2021-04-13 09:52:55,871: INFO/ForkPoolWorker-4] Task __main__.task_b[6da81e65-af87-4770-92fd-4ce3ce9e5937] succeeded in 0.0020933550003974233s: 2
[2021-04-13 09:52:55,871: INFO/ForkPoolWorker-3] Task __main__.task_b[44dec7e8-152e-4e11-bd70-ce4e7cb8c84b] succeeded in 0.002132425999661791s: 1
```
Counting of chord sizes for the children of groups is busted.
https://github.com/celery/celery/blob/1901ea8594185c015d1518d89f3b90180275c0b9/celery/canvas.py#L1178-L1185
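For reference, the block behind that permalink (quoted as it appears pre-fix in the diff further down this thread) is:
```python
if isinstance(sig, _chain):
    if sig.tasks[-1].subtask_type == 'chord':
        chord_size = sig.tasks[-1].__length_hint__()
    else:
        chord_size = task_index + len(sig.tasks[-1])
app.backend.set_chord_size(group_id, chord_size)
```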
The else case here is counting the length of a simple signature which is the length of its top level attributes (args, kwargs, and friends). Running the MRTC with the following diff on celery master:
```diff
diff --git a/celery/backends/redis.py b/celery/backends/redis.py
index 74a2e18b5..c642fe88e 100644
--- a/celery/backends/redis.py
+++ b/celery/backends/redis.py
@@ -415,6 +415,7 @@ class RedisBackend(BaseKeyValueStoreBackend, AsyncBackendMixin):
return retval
def set_chord_size(self, group_id, chord_size):
+ raise Exception(chord_size)
self.set(self.get_key_for_group(group_id, '.s'), chord_size)
def apply_chord(self, header_result_args, body, **kwargs):
@@ -467,6 +468,7 @@ class RedisBackend(BaseKeyValueStoreBackend, AsyncBackendMixin):
_, readycount, totaldiff, chord_size_bytes = pipeline.execute()[:4]
totaldiff = int(totaldiff or 0)
+ print(f"OCPR: chord_size_bytes={chord_size_bytes} ready={readycount}")
if chord_size_bytes:
try:
diff --git a/celery/canvas.py b/celery/canvas.py
index 57b0aea06..8aa533f43 100644
--- a/celery/canvas.py
+++ b/celery/canvas.py
@@ -1182,6 +1182,9 @@ class group(Signature):
chord_size = sig.tasks[-1].__length_hint__()
else:
chord_size = task_index + len(sig.tasks[-1])
+ print(f"subtask is not chord -> {chord_size=}")
+ print(f"{len(sig.tasks[-1])=}")
+ print(f"{list(sig.tasks[-1])=}")
app.backend.set_chord_size(group_id, chord_size)
sig.apply_async(producer=producer, add_to_parent=False,
chord=_chord, args=args, kwargs=kwargs,
```
->
```
subtask is not chord -> chord_size=8
len(sig.tasks[-1])=6
list(sig.tasks[-1])=['task', 'args', 'kwargs', 'options', 'subtask_type', 'immutable']
Traceback (most recent call last):
File "/home/maybe/dev/foss/github/celery/scratch/6721/app.py", line 27, in <module>
celery.chord(
File "/home/maybe/dev/foss/github/celery/celery/canvas.py", line 1354, in __call__
return self.apply_async((), {'body': body} if body else {}, **options)
File "/home/maybe/dev/foss/github/celery/celery/canvas.py", line 1407, in apply_async
return self.run(tasks, body, args, task_id=task_id, **merged_options)
File "/home/maybe/dev/foss/github/celery/celery/canvas.py", line 1474, in run
header_result = header(*partial_args, task_id=group_id, **options)
File "/home/maybe/dev/foss/github/celery/celery/canvas.py", line 1064, in __call__
return self.apply_async(partial_args, **options)
File "/home/maybe/dev/foss/github/celery/celery/canvas.py", line 1089, in apply_async
results = list(self._apply_tasks(tasks, producer, app, p,
File "/home/maybe/dev/foss/github/celery/celery/canvas.py", line 1188, in _apply_tasks
app.backend.set_chord_size(group_id, chord_size)
File "/home/maybe/dev/foss/github/celery/celery/backends/redis.py", line 418, in set_chord_size
raise Exception(chord_size)
Exception: 8
```
It looks like the else case on L1183 is assuming that the trailing task of the signature being checked (which is a chain since we passed the condition on L1180) is also a chain. In the MRTC the trailing task is a simple signature!
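That also explains the stray `8` in the logs: a bare `Signature` is a dict subclass, so `len()` returns its number of top-level keys (6 here), and with `task_index` at 2 for the third chain we get `2 + 6 = 8`. A minimal sketch (assumes a celery checkout at this commit; `tasks.add` is a placeholder task name):
```python
from celery import signature

# Signature subclasses dict, so len() counts its top-level keys -
# nothing to do with how many results the task will contribute.
sig = signature('tasks.add', args=(2, 2))
print(len(sig))   # 6 at this commit; the exact key set can vary by version
print(list(sig))  # ['task', 'args', 'kwargs', 'options', 'subtask_type', 'immutable']
```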
Couple of takeaways:
* Using string equality on `subtask_type` is highly suspicious first of all. We should probably be doing an isinstance check since I think the objects we're touching at this stage are well-formed `Signature` instances (otherwise `.subtask_type` would blow up).
* But the actual fix for this is likely to be to modify that conditional to correctly handle a tail task which is a simple signature in the else case, and to add a preceding elif to handle when the trailing task is a chain.
I'll try to find some time to get a patch up this evening. We'll also want to add a couple of regression tests. This should affect all backends with chords since it's a counting error so unit tests and a chord supporting backend integration test would be nice.
*Edit:* FTR the broken lines were added in #6576 but IDK if it functioned as expected before then
```diff
diff --git a/celery/canvas.py b/celery/canvas.py
index 57b0aea06..e3e8e3d45 100644
--- a/celery/canvas.py
+++ b/celery/canvas.py
@@ -1180,7 +1180,7 @@ class group(Signature):
if isinstance(sig, _chain):
if sig.tasks[-1].subtask_type == 'chord':
chord_size = sig.tasks[-1].__length_hint__()
- else:
+ elif sig.tasks[-1].subtask_type == "group":
chord_size = task_index + len(sig.tasks[-1])
app.backend.set_chord_size(group_id, chord_size)
sig.apply_async(producer=producer, add_to_parent=False,
```
This diff does appear to run the MRTC as expected ->
```
[2021-04-13 16:11:47,828: INFO/MainProcess] celery@karma ready.
[2021-04-13 16:11:47,828: INFO/MainProcess] Received task: __main__.task_a[65331ad0-7ace-4939-b451-f3eda9ec157c]
[2021-04-13 16:11:47,829: INFO/MainProcess] Received task: __main__.task_a[c3524b98-ae11-441d-b68d-fa895253dea0]
[2021-04-13 16:11:47,829: INFO/MainProcess] Received task: __main__.task_a[5c6fe9ee-5a28-4571-8c09-57540503aeb0]
[2021-04-13 16:11:47,931: WARNING/ForkPoolWorker-8] Task A: 1
[2021-04-13 16:11:47,931: WARNING/ForkPoolWorker-7] Task A: 0
[2021-04-13 16:11:47,932: WARNING/ForkPoolWorker-1] Task A: 2
[2021-04-13 16:11:47,942: INFO/MainProcess] Received task: __main__.task_b[228c3d19-4a26-4e9f-9613-57c095cb0078]
[2021-04-13 16:11:47,943: INFO/ForkPoolWorker-7] Task __main__.task_a[65331ad0-7ace-4939-b451-f3eda9ec157c] succeeded in 0.011826527999801328s: 0
[2021-04-13 16:11:47,943: INFO/MainProcess] Received task: __main__.task_b[3fe44fe4-47a2-4b41-adac-cf0ea7e7cf44]
[2021-04-13 16:11:47,943: INFO/ForkPoolWorker-8] Task __main__.task_a[c3524b98-ae11-441d-b68d-fa895253dea0] succeeded in 0.011867351000546478s: 1
[2021-04-13 16:11:47,944: WARNING/ForkPoolWorker-2] Task B: 0
[2021-04-13 16:11:47,944: WARNING/ForkPoolWorker-3] Task B: 1
[2021-04-13 16:11:47,945: INFO/MainProcess] Received task: __main__.task_b[abe8305a-7c95-43f2-a519-074b0e36959b]
[2021-04-13 16:11:47,945: WARNING/ForkPoolWorker-3] OCPR: chord_size_bytes=b'3' ready=2
[2021-04-13 16:11:47,945: WARNING/ForkPoolWorker-2] OCPR: chord_size_bytes=b'3' ready=1
[2021-04-13 16:11:47,945: INFO/ForkPoolWorker-1] Task __main__.task_a[5c6fe9ee-5a28-4571-8c09-57540503aeb0] succeeded in 0.014302862000477035s: 2
[2021-04-13 16:11:47,946: INFO/ForkPoolWorker-3] Task __main__.task_b[3fe44fe4-47a2-4b41-adac-cf0ea7e7cf44] succeeded in 0.0021919120008533355s: 1
[2021-04-13 16:11:47,946: WARNING/ForkPoolWorker-7] Task B: 2
[2021-04-13 16:11:47,946: INFO/ForkPoolWorker-2] Task __main__.task_b[228c3d19-4a26-4e9f-9613-57c095cb0078] succeeded in 0.002304372999788029s: 0
[2021-04-13 16:11:47,946: WARNING/ForkPoolWorker-7] OCPR: chord_size_bytes=b'3' ready=3
[2021-04-13 16:11:47,948: INFO/ForkPoolWorker-7] Task __main__.task_b[abe8305a-7c95-43f2-a519-074b0e36959b] succeeded in 0.0020840869983658195s: 2
[2021-04-13 16:11:47,948: INFO/MainProcess] Received task: __main__.task_c[28abaab2-7c2a-48c3-a83a-d099d678cbbf]
[2021-04-13 16:11:47,948: WARNING/ForkPoolWorker-7] Task C: [0, 1, 2]
[2021-04-13 16:11:47,949: INFO/ForkPoolWorker-7] Task __main__.task_c[28abaab2-7c2a-48c3-a83a-d099d678cbbf] succeeded in 0.0005527139983314555s: None
^C
worker: Hitting Ctrl+C again will terminate all running tasks!
```
The logic in this block is actually a little more tortured than I thought. It's re-writing the chord size each time it loops, but I think it needs to do this in an additive way, which it doesn't appear to be doing, as well as possibly handling tail groups and chords but not chains. We'll have to think about this pretty carefully, as usual with these chord counting issues :/
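To make the intent concrete, here's a rough, purely illustrative sketch of the additive idea - not a proposed patch, and the per-type contributions (`__length_hint__()` for chords, `len(tail.tasks)` for groups, 1 for plain signatures) are assumptions that still need checking:
```python
# Illustrative only: accumulate each header child's contribution to the
# chord size instead of overwriting chord_size on every loop iteration.
chord_size = 0
for sig, res, group_id in current_tasks:  # hypothetical loop variables
    tail = sig.tasks[-1] if isinstance(sig, _chain) else sig
    if tail.subtask_type == 'chord':
        chord_size += tail.__length_hint__()
    elif tail.subtask_type == 'group':
        chord_size += len(tail.tasks)
    else:
        chord_size += 1  # a plain signature contributes one result
app.backend.set_chord_size(group_id, chord_size)
```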
Thank you for the analysis, @maybe-sybr. ❤️
Would you be able to come up with a fix?
> Thank you for the analysis, @maybe-sybr. heart
> Would you be able to come up with a fix?
I should have time to sit down and work through this is in the next day or two, yeah. I've actually found another edge case which might be related to this code and is affecting my own work so I'll be able to spend some time digging into it and hopefully get a PR up before this weekend.
This is on my list for today. | 2021-04-19T02:13:51 |
celery/celery | 6,741 | celery__celery-6741 | [
"6726"
] | 25f6c139e11edd32f5c36542c737ee7c7de2e9cc | diff --git a/celery/bin/amqp.py b/celery/bin/amqp.py
--- a/celery/bin/amqp.py
+++ b/celery/bin/amqp.py
@@ -25,6 +25,10 @@ def __init__(self, cli_context):
self.connection = self.cli_context.app.connection()
self.channel = None
self.reconnect()
+
+ @property
+ def app(self):
+ return self.cli_context.app
def respond(self, retval):
if isinstance(retval, str):
| celery amqp repl broken in celery 5.0.3+
<!--
Please fill this template entirely and do not erase parts of it.
We reserve the right to close without a response
bug reports which are incomplete.
-->
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [x] This has already been asked to the [discussion group](https://groups.google.com/forum/#!forum/celery-users) first.
- [x] I have read the relevant section in the
[contribution guide](http://docs.celeryproject.org/en/latest/contributing.html#other-bugs)
on reporting bugs.
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the bug was already fixed in the master branch.
- [x] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
## Mandatory Debugging Information
- [x] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [x] I have included the contents of ``pip freeze`` in the issue.
- [x] I have included all the versions of all the external dependencies required
to reproduce this bug.
## Optional Debugging Information
<!--
Try some of the below if you think they are relevant.
It will help us figure out the scope of the bug and how many users it affects.
-->
- [ ] I have tried reproducing the issue on more than one Python version
and/or implementation.
- [ ] I have tried reproducing the issue on more than one message broker and/or
result backend.
- [ ] I have tried reproducing the issue on more than one version of the message
broker and/or result backend.
- [ ] I have tried reproducing the issue on more than one operating system.
- [ ] I have tried reproducing the issue on more than one workers pool.
- [ ] I have tried reproducing the issue with autoscaling, retries,
ETA/Countdown & rate limits disabled.
- [x] I have tried reproducing the issue after downgrading
and/or upgrading Celery and its dependencies.
## Related Issues and Possible Duplicates
<!--
Please make sure to search and mention any related issues
or possible duplicates to this issue as requested by the checklist above.
This may or may not include issues in other repositories that the Celery project
maintains or other repositories that are dependencies of Celery.
If you don't know how to mention issues, please refer to Github's documentation
on the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests
-->
#### Related Issues
- None
#### Possible Duplicates
- None
## Environment & Settings
<!-- Include the contents of celery --version below -->
**Celery version**:
<!-- Include the output of celery -A proj report below -->
<details>
<summary><b><code>celery report</code> Output:</b></summary>
<p>
```
software -> celery:5.0.5 (singularity) kombu:5.0.2 py:3.6.9
billiard:3.6.4.0 py-amqp:5.0.6
platform -> system:Linux arch:64bit, ELF
kernel version:4.15.0-140-generic imp:CPython
loader -> celery.loaders.default.Loader
settings -> transport:amqp results:mongodb+srv://md_app_user:**@md-mongo.privatecircle.co/master_docs
accept_content: ['json']
broker_url: 'amqp://md_app_user:********@****************:5672//master_docs'
default_timezone: 'Asia/Kolkata'
imports: ['tasks']
result_backend: 'mongodb+srv://md_app_user:********@******************/master_docs'
result_serializer: 'json'
task_serializer: 'json'
timezone: 'Asia/Kolkata'
deprecated_settings: None
```
</p>
</details>
# Steps to Reproduce
## Required Dependencies
<!-- Please fill the required dependencies to reproduce this issue -->
* **Minimal Python Version**: 3.6.9
* **Minimal Celery Version**: 5.0.3
* **Minimal Kombu Version**: 5.0.2
* **Minimal Broker Version**: RabbitMQ 3.8.11
* **Minimal Result Backend Version**: Mongo 4.4
* **Minimal OS and/or Kernel Version**: Ubuntu 18.04.5 (Linux kernel 4.15.0-140)
* **Minimal Broker Client Version**: N/A or Unknown
* **Minimal Result Backend Client Version**: N/A or Unknown
### Python Packages
<!-- Please fill the contents of pip freeze below -->
<details>
<summary><b><code>pip freeze</code> Output:</b></summary>
<p>
```
amqp==5.0.6
backcall==0.2.0
billiard==3.6.4.0
boto3==1.17.51
botocore==1.20.51
cached-property==1.5.2
cchardet==2.1.7
celery==5.0.5
certifi==2020.12.5
chardet==4.0.0
click==7.1.2
click-didyoumean==0.0.3
click-plugins==1.1.1
click-repl==0.1.6
decorator==5.0.6
dnspython==2.1.0
idna==2.10
importlib-metadata==3.10.0
ipython==7.16.1
ipython-genutils==0.2.0
jedi==0.17.2
jmespath==0.10.0
kombu==5.0.2
lxml==4.6.3
parso==0.7.1
pexpect==4.8.0
pickleshare==0.7.5
prompt-toolkit==3.0.18
ptyprocess==0.7.0
Pygments==2.8.1
pymongo==3.11.3
python-dateutil==2.8.1
python-magic==0.4.22
pytz==2021.1
requests==2.25.1
s3transfer==0.3.6
six==1.15.0
traitlets==4.3.3
typing-extensions==3.7.4.3
urllib3==1.26.4
vine==5.0.0
wcwidth==0.2.5
zipp==3.4.1
```
</p>
</details>
### Other Dependencies
<!--
Please provide system dependencies, configuration files
and other dependency information if applicable
-->
<details>
<p>
N/A
</p>
</details>
## Minimally Reproducible Test Case
<!--
Please provide a reproducible test case.
Refer to the Reporting Bugs section in our contribution guide.
We prefer submitting test cases in the form of a PR to our integration test suite.
If you can provide one, please mention the PR number below.
If not, please attach the most minimal code example required to reproduce the issue below.
If the test case is too large, please include a link to a gist or a repository below.
-->
<details>
<p>
```python
# test.py
import celery
app = celery.Celery('proj')
```
```shell
$ celery -A test amqp repl
> exchange.declare
```
</p>
</details>
# Expected Behavior
<!-- Describe in detail what you expect to happen -->
The AMQP interactive shell should accept this command and execute it and then prompt for the next command.
# Actual Behavior
<!--
Describe in detail what actually happened.
Please include a backtrace and surround it with triple backticks (```).
In addition, include the Celery daemon logs, the broker logs,
the result backend logs and system logs below if they will help us debug
the issue.
-->
```
Traceback (most recent call last):
File "/home/privatecircle/.virtualenvs/mca_document_manager/bin/celery", line 8, in <module>
sys.exit(main())
File "/home/privatecircle/.virtualenvs/mca_document_manager/lib/python3.6/site-packages/celery/__main__.py", line 15, in main
sys.exit(_main())
File "/home/privatecircle/.virtualenvs/mca_document_manager/lib/python3.6/site-packages/celery/bin/celery.py", line 213, in main
return celery(auto_envvar_prefix="CELERY")
File "/home/privatecircle/.virtualenvs/mca_document_manager/lib/python3.6/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/privatecircle/.virtualenvs/mca_document_manager/lib/python3.6/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/privatecircle/.virtualenvs/mca_document_manager/lib/python3.6/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/privatecircle/.virtualenvs/mca_document_manager/lib/python3.6/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/privatecircle/.virtualenvs/mca_document_manager/lib/python3.6/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/privatecircle/.virtualenvs/mca_document_manager/lib/python3.6/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/privatecircle/.virtualenvs/mca_document_manager/lib/python3.6/site-packages/click/decorators.py", line 21, in new_func
return f(get_current_context(), *args, **kwargs)
File "/home/privatecircle/.virtualenvs/mca_document_manager/lib/python3.6/site-packages/click_repl/__init__.py", line 248, in repl
group.invoke(ctx)
File "/home/privatecircle/.virtualenvs/mca_document_manager/lib/python3.6/site-packages/click/core.py", line 1256, in invoke
Command.invoke(self, ctx)
File "/home/privatecircle/.virtualenvs/mca_document_manager/lib/python3.6/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/privatecircle/.virtualenvs/mca_document_manager/lib/python3.6/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/privatecircle/.virtualenvs/mca_document_manager/lib/python3.6/site-packages/click/decorators.py", line 21, in new_func
return f(get_current_context(), *args, **kwargs)
File "/home/privatecircle/.virtualenvs/mca_document_manager/lib/python3.6/site-packages/celery/bin/base.py", line 120, in caller
app = ctx.obj.app
AttributeError: 'AMQPContext' object has no attribute 'app'
```
| Hey @parthjoshi2007 :wave:,
Thank you for opening an issue. We will get back to you as soon as we can.
Also, check out our [Open Collective](https://opencollective.com/celery) and consider backing us - every little helps!
We also offer priority support for our sponsors.
If you require immediate assistance please consider sponsoring us.
you should try a more recent release on PyPI as many CLI bugs were fixed in newer releases
or celery 5.1.0b1
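e.g. (assuming pip):
```shell
pip install "celery==5.1.0b1"
```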
I tried it on the latest code in the `master` branch as required for filing the bug report and the error is still there. The error was introduced in celery 5.0.3. For now, to get the shell working I've downgraded to 5.0.2 | 2021-04-24T08:59:32 |
|
celery/celery | 6,746 | celery__celery-6746 | [
"6220"
] | 426a8f97e9f7dd19905ec624182b6d4a61bc245e | diff --git a/celery/backends/base.py b/celery/backends/base.py
--- a/celery/backends/base.py
+++ b/celery/backends/base.py
@@ -22,6 +22,7 @@
import celery.exceptions
from celery import current_app, group, maybe_signature, states
from celery._state import get_current_task
+from celery.app.task import Context
from celery.exceptions import (BackendGetMetaError, BackendStoreError,
ChordError, ImproperlyConfigured,
NotRegistered, TaskRevokedError, TimeoutError)
@@ -170,8 +171,44 @@ def mark_as_failure(self, task_id, exc,
self.store_result(task_id, exc, state,
traceback=traceback, request=request)
if request:
+ # This task may be part of a chord
if request.chord:
self.on_chord_part_return(request, state, exc)
+ # It might also have chained tasks which need to be propagated to,
+ # this is most likely to be exclusive with being a direct part of a
+ # chord but we'll handle both cases separately.
+ #
+ # The `chain_data` try block here is a bit tortured since we might
+ # have non-iterable objects here in tests and it's easier this way.
+ try:
+ chain_data = iter(request.chain)
+ except (AttributeError, TypeError):
+ chain_data = tuple()
+ for chain_elem in chain_data:
+ chain_elem_opts = chain_elem['options']
+ # If the state should be propagated, we'll do so for all
+ # elements of the chain. This is only truly important so
+ # that the last chain element which controls completion of
+ # the chain itself is marked as completed to avoid stalls.
+ if self.store_result and state in states.PROPAGATE_STATES:
+ try:
+ chained_task_id = chain_elem_opts['task_id']
+ except KeyError:
+ pass
+ else:
+ self.store_result(
+ chained_task_id, exc, state,
+ traceback=traceback, request=chain_elem
+ )
+ # If the chain element is a member of a chord, we also need
+ # to call `on_chord_part_return()` as well to avoid stalls.
+ if 'chord' in chain_elem_opts:
+ failed_ctx = Context(chain_elem)
+ failed_ctx.update(failed_ctx.options)
+ failed_ctx.id = failed_ctx.options['task_id']
+ failed_ctx.group = failed_ctx.options['group_id']
+ self.on_chord_part_return(failed_ctx, state, exc)
+ # And finally we'll fire any errbacks
if call_errbacks and request.errbacks:
self._call_task_errbacks(request, exc, traceback)
diff --git a/celery/canvas.py b/celery/canvas.py
--- a/celery/canvas.py
+++ b/celery/canvas.py
@@ -1134,7 +1134,14 @@ def link_error(self, sig):
# pass a Mock object as argument.
sig['immutable'] = True
sig = Signature.from_dict(sig)
- return self.tasks[0].link_error(sig)
+ # Any child task might error so we need to ensure that they are all
+ # capable of calling the linked error signature. This opens the
+ # possibility that the task is called more than once but that's better
+ # than it not being called at all.
+ #
+ # We return a concretised tuple of the signatures actually applied to
+ # each child task signature, of which there might be none!
+ return tuple(child_task.link_error(sig) for child_task in self.tasks)
def _prepared(self, tasks, partial_args, group_id, root_id, app,
CallableSignature=abstract.CallableSignature,
@@ -1179,7 +1186,7 @@ def _apply_tasks(self, tasks, producer=None, app=None, p=None,
# end up messing up chord counts and there are all sorts of
# awful race conditions to think about. We'll hope it's not!
sig, res, group_id = current_task
- chord_obj = sig.options.get("chord") or chord
+ chord_obj = chord if chord is not None else sig.options.get("chord")
# We need to check the chord size of each contributing task so
# that when we get to the final one, we can correctly set the
# size in the backend and the chord can be sensible completed.
diff --git a/t/integration/tasks.py b/t/integration/tasks.py
--- a/t/integration/tasks.py
+++ b/t/integration/tasks.py
@@ -223,6 +223,13 @@ def redis_echo(message):
redis_connection.rpush('redis-echo', message)
+@shared_task
+def redis_count():
+ """Task that increments a well-known redis key."""
+ redis_connection = get_redis_connection()
+ redis_connection.incr('redis-count')
+
+
@shared_task(bind=True)
def second_order_replace1(self, state=False):
redis_connection = get_redis_connection()
| diff --git a/t/integration/test_canvas.py b/t/integration/test_canvas.py
--- a/t/integration/test_canvas.py
+++ b/t/integration/test_canvas.py
@@ -1,9 +1,11 @@
import re
import tempfile
+import uuid
from datetime import datetime, timedelta
from time import sleep
import pytest
+import pytest_subtests # noqa: F401
from celery import chain, chord, group, signature
from celery.backends.base import BaseKeyValueStoreBackend
@@ -17,7 +19,7 @@
add_to_all, add_to_all_to_chord, build_chain_inside_task,
chord_error, collect_ids, delayed_sum,
delayed_sum_with_soft_guard, fail, identity, ids,
- print_unicode, raise_error, redis_echo,
+ print_unicode, raise_error, redis_count, redis_echo,
replace_with_chain, replace_with_chain_which_raises,
replace_with_empty_chain, retry_once, return_exception,
return_priority, second_order_replace1, tsum,
@@ -810,6 +812,109 @@ def test_nested_group_chord_body_chain(self, manager):
# Re-raise the expected exception so this test will XFAIL
raise expected_excinfo.value
+ def test_callback_called_by_group(self, manager, subtests):
+ if not manager.app.conf.result_backend.startswith("redis"):
+ raise pytest.skip("Requires redis result backend.")
+ redis_connection = get_redis_connection()
+
+ callback_msg = str(uuid.uuid4()).encode()
+ callback = redis_echo.si(callback_msg)
+
+ group_sig = group(identity.si(42), identity.si(1337))
+ group_sig.link(callback)
+ redis_connection.delete("redis-echo")
+ with subtests.test(msg="Group result is returned"):
+ res = group_sig.delay()
+ assert res.get(timeout=TIMEOUT) == [42, 1337]
+ with subtests.test(msg="Callback is called after group is completed"):
+ maybe_key_msg = redis_connection.blpop("redis-echo", TIMEOUT)
+ if maybe_key_msg is None:
+ raise TimeoutError("Callback was not called in time")
+ _, msg = maybe_key_msg
+ assert msg == callback_msg
+
+ def test_errback_called_by_group_fail_first(self, manager, subtests):
+ if not manager.app.conf.result_backend.startswith("redis"):
+ raise pytest.skip("Requires redis result backend.")
+ redis_connection = get_redis_connection()
+
+ errback_msg = str(uuid.uuid4()).encode()
+ errback = redis_echo.si(errback_msg)
+
+ group_sig = group(fail.s(), identity.si(42))
+ group_sig.link_error(errback)
+ redis_connection.delete("redis-echo")
+ with subtests.test(msg="Error propagates from group"):
+ res = group_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+ with subtests.test(msg="Errback is called after group task fails"):
+ maybe_key_msg = redis_connection.blpop("redis-echo", TIMEOUT)
+ if maybe_key_msg is None:
+ raise TimeoutError("Errback was not called in time")
+ _, msg = maybe_key_msg
+ assert msg == errback_msg
+
+ def test_errback_called_by_group_fail_last(self, manager, subtests):
+ if not manager.app.conf.result_backend.startswith("redis"):
+ raise pytest.skip("Requires redis result backend.")
+ redis_connection = get_redis_connection()
+
+ errback_msg = str(uuid.uuid4()).encode()
+ errback = redis_echo.si(errback_msg)
+
+ group_sig = group(identity.si(42), fail.s())
+ group_sig.link_error(errback)
+ redis_connection.delete("redis-echo")
+ with subtests.test(msg="Error propagates from group"):
+ res = group_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+ with subtests.test(msg="Errback is called after group task fails"):
+ maybe_key_msg = redis_connection.blpop("redis-echo", TIMEOUT)
+ if maybe_key_msg is None:
+ raise TimeoutError("Errback was not called in time")
+ _, msg = maybe_key_msg
+ assert msg == errback_msg
+
+ def test_errback_called_by_group_fail_multiple(self, manager, subtests):
+ if not manager.app.conf.result_backend.startswith("redis"):
+ raise pytest.skip("Requires redis result backend.")
+ redis_connection = get_redis_connection()
+
+ expected_errback_count = 42
+ errback = redis_count.si()
+
+ # Include a mix of passing and failing tasks
+ group_sig = group(
+ *(identity.si(42) for _ in range(24)), # arbitrary task count
+ *(fail.s() for _ in range(expected_errback_count)),
+ )
+ group_sig.link_error(errback)
+ redis_connection.delete("redis-count")
+ with subtests.test(msg="Error propagates from group"):
+ res = group_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+ with subtests.test(msg="Errback is called after group task fails"):
+ check_interval = 0.1
+ check_max = int(TIMEOUT * check_interval)
+ for i in range(check_max + 1):
+ maybe_count = redis_connection.get("redis-count")
+ # It's either `None` or a base-10 integer
+ count = int(maybe_count or b"0")
+ if count == expected_errback_count:
+ # escape and pass
+ break
+ elif i < check_max:
+ # try again later
+ sleep(check_interval)
+ else:
+ # fail
+ assert count == expected_errback_count
+ else:
+ raise TimeoutError("Errbacks were not called in time")
+
def assert_ids(r, expected_value, expected_root_id, expected_parent_id):
root_id, parent_id, value = r.get(timeout=TIMEOUT)
@@ -1406,6 +1511,335 @@ def test_error_propagates_from_chord2(self, manager):
with pytest.raises(ExpectedException):
res.get(timeout=TIMEOUT)
+ def test_error_propagates_to_chord_from_simple(self, manager, subtests):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ child_sig = fail.s()
+
+ chord_sig = chord((child_sig, ), identity.s())
+ with subtests.test(msg="Error propagates from simple header task"):
+ res = chord_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+
+ chord_sig = chord((identity.si(42), ), child_sig)
+ with subtests.test(msg="Error propagates from simple body task"):
+ res = chord_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+
+ def test_errback_called_by_chord_from_simple(self, manager, subtests):
+ if not manager.app.conf.result_backend.startswith("redis"):
+ raise pytest.skip("Requires redis result backend.")
+ redis_connection = get_redis_connection()
+
+ errback_msg = str(uuid.uuid4()).encode()
+ errback = redis_echo.si(errback_msg)
+ child_sig = fail.s()
+
+ chord_sig = chord((child_sig, ), identity.s())
+ chord_sig.link_error(errback)
+ with subtests.test(msg="Error propagates from simple header task"):
+ redis_connection.delete("redis-echo")
+ res = chord_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+ with subtests.test(
+ msg="Errback is called after simple header task fails"
+ ):
+ maybe_key_msg = redis_connection.blpop("redis-echo", TIMEOUT)
+ if maybe_key_msg is None:
+ raise TimeoutError("Errback was not called in time")
+ _, msg = maybe_key_msg
+ assert msg == errback_msg
+
+ chord_sig = chord((identity.si(42), ), child_sig)
+ chord_sig.link_error(errback)
+ with subtests.test(msg="Error propagates from simple body task"):
+ redis_connection.delete("redis-echo")
+ res = chord_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+ with subtests.test(
+ msg="Errback is called after simple body task fails"
+ ):
+ maybe_key_msg = redis_connection.blpop("redis-echo", TIMEOUT)
+ if maybe_key_msg is None:
+ raise TimeoutError("Errback was not called in time")
+ _, msg = maybe_key_msg
+ assert msg == errback_msg
+
+ def test_error_propagates_to_chord_from_chain(self, manager, subtests):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ child_sig = chain(identity.si(42), fail.s(), identity.si(42))
+
+ chord_sig = chord((child_sig, ), identity.s())
+ with subtests.test(
+ msg="Error propagates from header chain which fails before the end"
+ ):
+ res = chord_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+
+ chord_sig = chord((identity.si(42), ), child_sig)
+ with subtests.test(
+ msg="Error propagates from body chain which fails before the end"
+ ):
+ res = chord_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+
+ def test_errback_called_by_chord_from_chain(self, manager, subtests):
+ if not manager.app.conf.result_backend.startswith("redis"):
+ raise pytest.skip("Requires redis result backend.")
+ redis_connection = get_redis_connection()
+
+ errback_msg = str(uuid.uuid4()).encode()
+ errback = redis_echo.si(errback_msg)
+ child_sig = chain(identity.si(42), fail.s(), identity.si(42))
+
+ chord_sig = chord((child_sig, ), identity.s())
+ chord_sig.link_error(errback)
+ with subtests.test(
+ msg="Error propagates from header chain which fails before the end"
+ ):
+ redis_connection.delete("redis-echo")
+ res = chord_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+ with subtests.test(
+ msg="Errback is called after header chain which fails before the end"
+ ):
+ maybe_key_msg = redis_connection.blpop("redis-echo", TIMEOUT)
+ if maybe_key_msg is None:
+ raise TimeoutError("Errback was not called in time")
+ _, msg = maybe_key_msg
+ assert msg == errback_msg
+
+ chord_sig = chord((identity.si(42), ), child_sig)
+ chord_sig.link_error(errback)
+ with subtests.test(
+ msg="Error propagates from body chain which fails before the end"
+ ):
+ redis_connection.delete("redis-echo")
+ res = chord_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+ with subtests.test(
+ msg="Errback is called after body chain which fails before the end"
+ ):
+ maybe_key_msg = redis_connection.blpop("redis-echo", TIMEOUT)
+ if maybe_key_msg is None:
+ raise TimeoutError("Errback was not called in time")
+ _, msg = maybe_key_msg
+ assert msg == errback_msg
+
+ def test_error_propagates_to_chord_from_chain_tail(self, manager, subtests):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ child_sig = chain(identity.si(42), fail.s())
+
+ chord_sig = chord((child_sig, ), identity.s())
+ with subtests.test(
+ msg="Error propagates from header chain which fails at the end"
+ ):
+ res = chord_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+
+ chord_sig = chord((identity.si(42), ), child_sig)
+ with subtests.test(
+ msg="Error propagates from body chain which fails at the end"
+ ):
+ res = chord_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+
+ def test_errback_called_by_chord_from_chain_tail(self, manager, subtests):
+ if not manager.app.conf.result_backend.startswith("redis"):
+ raise pytest.skip("Requires redis result backend.")
+ redis_connection = get_redis_connection()
+
+ errback_msg = str(uuid.uuid4()).encode()
+ errback = redis_echo.si(errback_msg)
+ child_sig = chain(identity.si(42), fail.s())
+
+ chord_sig = chord((child_sig, ), identity.s())
+ chord_sig.link_error(errback)
+ with subtests.test(
+ msg="Error propagates from header chain which fails at the end"
+ ):
+ redis_connection.delete("redis-echo")
+ res = chord_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+ with subtests.test(
+ msg="Errback is called after header chain which fails at the end"
+ ):
+ maybe_key_msg = redis_connection.blpop("redis-echo", TIMEOUT)
+ if maybe_key_msg is None:
+ raise TimeoutError("Errback was not called in time")
+ _, msg = maybe_key_msg
+ assert msg == errback_msg
+
+ chord_sig = chord((identity.si(42), ), child_sig)
+ chord_sig.link_error(errback)
+ with subtests.test(
+ msg="Error propagates from body chain which fails at the end"
+ ):
+ redis_connection.delete("redis-echo")
+ res = chord_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+ with subtests.test(
+ msg="Errback is called after body chain which fails at the end"
+ ):
+ maybe_key_msg = redis_connection.blpop("redis-echo", TIMEOUT)
+ if maybe_key_msg is None:
+ raise TimeoutError("Errback was not called in time")
+ _, msg = maybe_key_msg
+ assert msg == errback_msg
+
+ def test_error_propagates_to_chord_from_group(self, manager, subtests):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ child_sig = group(identity.si(42), fail.s())
+
+ chord_sig = chord((child_sig, ), identity.s())
+ with subtests.test(msg="Error propagates from header group"):
+ res = chord_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+
+ chord_sig = chord((identity.si(42), ), child_sig)
+ with subtests.test(msg="Error propagates from body group"):
+ res = chord_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+
+ def test_errback_called_by_chord_from_group(self, manager, subtests):
+ if not manager.app.conf.result_backend.startswith("redis"):
+ raise pytest.skip("Requires redis result backend.")
+ redis_connection = get_redis_connection()
+
+ errback_msg = str(uuid.uuid4()).encode()
+ errback = redis_echo.si(errback_msg)
+ child_sig = group(identity.si(42), fail.s())
+
+ chord_sig = chord((child_sig, ), identity.s())
+ chord_sig.link_error(errback)
+ with subtests.test(msg="Error propagates from header group"):
+ redis_connection.delete("redis-echo")
+ res = chord_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+ with subtests.test(msg="Errback is called after header group fails"):
+ maybe_key_msg = redis_connection.blpop("redis-echo", TIMEOUT)
+ if maybe_key_msg is None:
+ raise TimeoutError("Errback was not called in time")
+ _, msg = maybe_key_msg
+ assert msg == errback_msg
+
+ chord_sig = chord((identity.si(42), ), child_sig)
+ chord_sig.link_error(errback)
+ with subtests.test(msg="Error propagates from body group"):
+ redis_connection.delete("redis-echo")
+ res = chord_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+ with subtests.test(msg="Errback is called after body group fails"):
+ maybe_key_msg = redis_connection.blpop("redis-echo", TIMEOUT)
+ if maybe_key_msg is None:
+ raise TimeoutError("Errback was not called in time")
+ _, msg = maybe_key_msg
+ assert msg == errback_msg
+
+ def test_errback_called_by_chord_from_group_fail_multiple(
+ self, manager, subtests
+ ):
+ if not manager.app.conf.result_backend.startswith("redis"):
+ raise pytest.skip("Requires redis result backend.")
+ redis_connection = get_redis_connection()
+
+ fail_task_count = 42
+ errback = redis_count.si()
+ # Include a mix of passing and failing tasks
+ child_sig = group(
+ *(identity.si(42) for _ in range(24)), # arbitrary task count
+ *(fail.s() for _ in range(fail_task_count)),
+ )
+
+ chord_sig = chord((child_sig, ), identity.s())
+ chord_sig.link_error(errback)
+ with subtests.test(msg="Error propagates from header group"):
+ redis_connection.delete("redis-count")
+ res = chord_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+ with subtests.test(msg="Errback is called after header group fails"):
+ # NOTE: Here we only expect the errback to be called once since it
+ # is attached to the chord body which is a single task!
+ expected_errback_count = 1
+ check_interval = 0.1
+ check_max = int(TIMEOUT * check_interval)
+ for i in range(check_max + 1):
+ maybe_count = redis_connection.get("redis-count")
+ # It's either `None` or a base-10 integer
+ count = int(maybe_count or b"0")
+ if count == expected_errback_count:
+ # escape and pass
+ break
+ elif i < check_max:
+ # try again later
+ sleep(check_interval)
+ else:
+ # fail
+ assert count == expected_errback_count
+ else:
+ raise TimeoutError("Errbacks were not called in time")
+
+ chord_sig = chord((identity.si(42), ), child_sig)
+ chord_sig.link_error(errback)
+ with subtests.test(msg="Error propagates from body group"):
+ redis_connection.delete("redis-count")
+ res = chord_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+ with subtests.test(msg="Errback is called after body group fails"):
+ # NOTE: Here we expect the errback to be called once per failing
+ # task in the chord body since it is a group
+ expected_errback_count = fail_task_count
+ check_interval = 0.1
+ check_max = int(TIMEOUT * check_interval)
+ for i in range(check_max + 1):
+ maybe_count = redis_connection.get("redis-count")
+ # It's either `None` or a base-10 integer
+ count = int(maybe_count or b"0")
+ if count == expected_errback_count:
+ # escape and pass
+ break
+ elif i < check_max:
+ # try again later
+ sleep(check_interval)
+ else:
+ # fail
+ assert count == expected_errback_count
+ else:
+ raise TimeoutError("Errbacks were not called in time")
+
class test_signature_serialization:
"""
diff --git a/t/unit/tasks/test_canvas.py b/t/unit/tasks/test_canvas.py
--- a/t/unit/tasks/test_canvas.py
+++ b/t/unit/tasks/test_canvas.py
@@ -1,5 +1,5 @@
import json
-from unittest.mock import MagicMock, Mock, call, patch, sentinel, ANY
+from unittest.mock import ANY, MagicMock, Mock, call, patch, sentinel
import pytest
import pytest_subtests # noqa: F401
| Chain-in-chord that fails before the last task in the chain doesn't trigger chord error handling
<!--
Please fill this template entirely and do not erase parts of it.
We reserve the right to close without a response
bug reports which are incomplete.
-->
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [ ] This has already been asked to the [discussion group](https://groups.google.com/forum/#!forum/celery-users) first.
- [ ] I have read the relevant section in the
[contribution guide](http://docs.celeryproject.org/en/latest/contributing.html#other-bugs)
on reporting bugs.
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the bug was already fixed in the master branch.
- [x] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
## Mandatory Debugging Information
- [x] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [x] I have included the contents of ``pip freeze`` in the issue.
- [x] I have included all the versions of all the external dependencies required
to reproduce this bug.
## Optional Debugging Information
<!--
Try some of the below if you think they are relevant.
It will help us figure out the scope of the bug and how many users it affects.
-->
- [ ] I have tried reproducing the issue on more than one Python version
and/or implementation.
- [ ] I have tried reproducing the issue on more than one message broker and/or
result backend.
- [ ] I have tried reproducing the issue on more than one version of the message
broker and/or result backend.
- [ ] I have tried reproducing the issue on more than one operating system.
- [x] I have tried reproducing the issue on more than one workers pool.
- [x] I have tried reproducing the issue with autoscaling, retries,
ETA/Countdown & rate limits disabled.
- [x] I have tried reproducing the issue after downgrading
and/or upgrading Celery and its dependencies.
Confirmed to exist on at least celery 4.3.0 as well.
## Related Issues and Possible Duplicates
<!--
Please make sure to search and mention any related issues
or possible duplicates to this issue as requested by the checklist above.
This may or may not include issues in other repositories that the Celery project
maintains or other repositories that are dependencies of Celery.
If you don't know how to mention issues, please refer to Github's documentation
on the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests
-->
#### Related Issues
- #3709 is related to error handling for chords, though this is more of an edge case in which the behavior is problematic, rather than just misalignment of docs and actual behavior.
#### Possible Duplicates
- There are several other issues that deal with chains-in-chords, e.g. #5229 seems similar, but nothing that addresses this exact problem. I don't think this duplicates them, but I haven't dug into the celery code enough to be sure that they aren't caused by the same underlying misbehavior in the redis backend.
## Environment & Settings
<!-- Include the contents of celery --version below -->
**Celery version**: 4.4.6 (cliffs)
<!-- Include the output of celery -A proj report below -->
<details>
<summary><b><code>celery report</code> Output:</b></summary>
<p>
```
software -> celery:4.4.6 (cliffs) kombu:4.6.11 py:3.6.9
billiard:3.6.3.0 redis:3.5.3
platform -> system:Linux arch:64bit, ELF
kernel version:4.4.0-17763-Microsoft imp:CPython
loader -> celery.loaders.app.AppLoader
settings -> transport:redis results:redis://localhost:6379/
broker_url: 'redis://localhost:6379//'
result_backend: 'redis://localhost:6379/'
```
</p>
</details>
# Steps to Reproduce
## Required Dependencies
<!-- Please fill the required dependencies to reproduce this issue -->
Not sure that any of these are minimal, but I'll include what I can test locally:
* **Minimal Python Version**: 3.6
* **Minimal Celery Version**: 4.3
* **Minimal Kombu Version**: N/A or Unknown
* **Minimal Broker Version**: N/A or Unknown
* **Minimal Result Backend Version**: 6.0.4
* **Minimal OS and/or Kernel Version**: N/A or Unknown
* **Minimal Broker Client Version**: N/A or Unknown
* **Minimal Result Backend Client Version**: N/A or Unknown
### Python Packages
<!-- Please fill the contents of pip freeze below -->
<details>
<summary><b><code>pip freeze</code> Output:</b></summary>
<p>
```
amqp==2.6.0
billiard==3.6.3.0
celery==4.4.6
future==0.18.2
importlib-metadata==1.7.0
kombu==4.6.11
pkg-resources==0.0.0
pytz==2020.1
redis==3.5.3
vine==1.3.0
zipp==3.1.0
```
</p>
</details>
### Other Dependencies
<!--
Please provide system dependencies, configuration files
and other dependency information if applicable
-->
<details>
<p>
Output of `redis-server --version`, in case it's useful:
```
Redis server v=6.0.4 sha=00000000:0 malloc=jemalloc-5.1.0 bits=64 build=d5d470262d47f1a0
```
</p>
</details>
## Minimally Reproducible Test Case
<!--
Please provide a reproducible test case.
Refer to the Reporting Bugs section in our contribution guide.
We prefer submitting test cases in the form of a PR to our integration test suite.
If you can provide one, please mention the PR number below.
If not, please attach the most minimal code example required to reproduce the issue below.
If the test case is too large, please include a link to a gist or a repository below.
-->
<details>
<p>
Basic test case below. I'm still working on converting this to an integration test for your test suite.
```python
from celery import Celery
app = Celery('task_fail', backend='redis://localhost:6379', broker='redis://localhost:6379')
@app.task
def simple_pass():
print("Passing")
pass
@app.task
def simple_fail():
print("Failing!")
raise Exception('Fail!')
@app.task
def record_failure(*args, **kwargs):
print(f'Fail with args={args}, kwargs={kwargs}')
if __name__ == "__main__":
from celery import chord
# Test 1: Passes - When the chain fails at the end, the failure logic on
# the chord is executed.
try:
chain = simple_pass.si() | simple_fail.si()
~chord([simple_pass.si(), chain],
simple_pass.si().on_error(record_failure.s(test="test_1")))
except Exception:
print("Test 1 worked")
# Test 2: Fails - When the chain has an intermediate exception, the failure
# logic on the chord is not executed & the client simply hangs.
try:
chain = simple_pass.si() | simple_fail.si() | simple_pass.si()
~chord([simple_pass.si(), chain],
simple_pass.si().on_error(record_failure.s(test="test_2")))
except Exception:
print("Test 2 worked")
```
Note that the only difference between test 1 and test 2 is that test 2 adds the `simple_pass.si()` call to the end of the chain. That seems to be enough to trigger this error.
</p>
</details>
# Expected Behavior
Test 2 should behave the same way that Test 1 does. Output of Test 1, with some simplification:
```
[2020-07-10 15:28:52,940: INFO/MainProcess] Received task: task_fail.simple_pass[65241973-d598-4bbe-a0c8-1f86694c9f33]
[2020-07-10 15:28:52,945: WARNING/ForkPoolWorker-8] Passing
[2020-07-10 15:28:52,954: INFO/ForkPoolWorker-8] Task task_fail.simple_pass[65241973-d598-4bbe-a0c8-1f86694c9f33] succeeded in 0.008173699956387281s: None
[2020-07-10 15:28:52,967: INFO/MainProcess] Received task: task_fail.simple_pass[fc15c163-0616-4b87-94c3-2c1b225f2e0e]
[2020-07-10 15:28:52,972: WARNING/ForkPoolWorker-8] Passing
[2020-07-10 15:28:52,985: INFO/ForkPoolWorker-8] Task task_fail.simple_pass[fc15c163-0616-4b87-94c3-2c1b225f2e0e] succeeded in 0.013192899990826845s: None
[2020-07-10 15:28:52,985: INFO/MainProcess] Received task: task_fail.simple_fail[0b0a3925-7ea8-4423-8e87-3a9f00d5c967]
[2020-07-10 15:28:52,992: WARNING/ForkPoolWorker-8] Failing!
[2020-07-10 15:28:53,010: ERROR/ForkPoolWorker-8] Chord callback for '76840678-80a3-4915-96b8-04c135ecb237' raised: ChordError("Dependency 0b0a3925-7ea8-4423-8e87-3a9f00d5c967 raised Exception('Fail!',)",)
Traceback (most recent call last):
File "~/celery/celery.venv/lib/python3.6/site-packages/celery/app/trace.py", line 412, in trace_task
R = retval = fun(*args, **kwargs)
File "~/celery/celery.venv/lib/python3.6/site-packages/celery/app/trace.py", line 704, in __protected_call__
return self.run(*args, **kwargs)
File "~/celery/task_fail.py", line 13, in simple_fail
raise Exception('Fail!')
Exception: Fail!
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "~/celery/celery.venv/lib/python3.6/site-packages/celery/backends/redis.py", line 449, in on_chord_part_return
callback.delay([unpack(tup, decode) for tup in resl])
File "~/celery/celery.venv/lib/python3.6/site-packages/celery/backends/redis.py", line 449, in <listcomp>
callback.delay([unpack(tup, decode) for tup in resl])
File "~/celery/celery.venv/lib/python3.6/site-packages/celery/backends/redis.py", line 402, in _unpack_chord_result
raise ChordError('Dependency {0} raised {1!r}'.format(tid, retval))
celery.exceptions.ChordError: Dependency 0b0a3925-7ea8-4423-8e87-3a9f00d5c967 raised Exception('Fail!',)
[2020-07-10 15:28:53,022: INFO/MainProcess] Received task: task_fail.record_failure[5f264f5d-a330-4f01-9274-662474d31a8f]
[2020-07-10 15:28:53,026: ERROR/ForkPoolWorker-8] Task task_fail.simple_fail[0b0a3925-7ea8-4423-8e87-3a9f00d5c967] raised unexpected: Exception('Fail!',)
Traceback (most recent call last):
File "~/celery/celery.venv/lib/python3.6/site-packages/celery/app/trace.py", line 412, in trace_task
R = retval = fun(*args, **kwargs)
File "~/celery/celery.venv/lib/python3.6/site-packages/celery/app/trace.py", line 704, in __protected_call__
return self.run(*args, **kwargs)
File "~/celery/task_fail.py", line 13, in simple_fail
raise Exception('Fail!')
Exception: Fail!
[2020-07-10 15:28:53,028: WARNING/ForkPoolWorker-1] Fail with args=('bc3aeb74-68c1-40e9-9c55-be9fb7860786',), kwargs={'test': 'test_1'}
[2020-07-10 15:28:53,036: INFO/ForkPoolWorker-1] Task task_fail.record_failure[5f264f5d-a330-4f01-9274-662474d31a8f] succeeded in 0.007674199994653463s: None
```
The most important thing to notice here is that the `record_failure` task is called & logs the failure, including the ID of the failing task. I expect this behavior to occur in both cases.
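For illustration, an errback in this position can use the id it receives to pull details from the result backend; a minimal sketch, reusing the `app` from the test case above and assuming (as the output shows) that the errback receives the failing task's id as its first positional argument:
```python
@app.task
def record_failure(task_id, *args, **kwargs):
    # task_id belongs to the task that raised, so its state and
    # traceback can be fetched from the result backend
    result = app.AsyncResult(task_id)
    print(f'Task {task_id} failed with traceback:\n{result.traceback}')
```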
# Actual Behavior
<!--
Describe in detail what actually happened.
Please include a backtrace and surround it with triple backticks (```).
In addition, include the Celery daemon logs, the broker logs,
the result backend logs and system logs below if they will help us debug
the issue.
-->
The output of the worker on Test 2 is:
```
[2020-07-10 15:28:55,039: INFO/MainProcess] Received task: task_fail.simple_pass[5779ce6a-64af-40ea-9b26-8ff3bdad348d]
[2020-07-10 15:28:55,050: INFO/MainProcess] Received task: task_fail.simple_pass[39469dfa-401c-45f0-9314-5849f6e7ca5f]
[2020-07-10 15:28:55,051: WARNING/ForkPoolWorker-8] Passing
[2020-07-10 15:28:55,051: WARNING/ForkPoolWorker-1] Passing
[2020-07-10 15:28:55,058: INFO/ForkPoolWorker-8] Task task_fail.simple_pass[5779ce6a-64af-40ea-9b26-8ff3bdad348d] succeeded in 0.007474799989722669s: None
[2020-07-10 15:28:55,062: INFO/ForkPoolWorker-1] Task task_fail.simple_pass[39469dfa-401c-45f0-9314-5849f6e7ca5f] succeeded in 0.010612400015816092s: None
[2020-07-10 15:28:55,069: INFO/MainProcess] Received task: task_fail.simple_fail[2fead196-cfe2-4ce5-896f-303c2f9970e2]
[2020-07-10 15:28:55,073: WARNING/ForkPoolWorker-8] Failing!
[2020-07-10 15:28:55,081: ERROR/ForkPoolWorker-8] Task task_fail.simple_fail[2fead196-cfe2-4ce5-896f-303c2f9970e2] raised unexpected: Exception('Fail!',)
Traceback (most recent call last):
File "/home/crawjor/test/celery/celery.venv/lib/python3.6/site-packages/celery/app/trace.py", line 412, in trace_task
R = retval = fun(*args, **kwargs)
File "/home/crawjor/test/celery/celery.venv/lib/python3.6/site-packages/celery/app/trace.py", line 704, in __protected_call__
return self.run(*args, **kwargs)
File "/home/crawjor/test/celery/task_fail.py", line 13, in simple_fail
raise Exception('Fail!')
Exception: Fail!
```
Then no more progress is made. Note that neither the error handler nor the callback is ever invoked, despite the chain failing (which should trigger failure handlers in the chord).
The client side also hangs waiting on redis:
```
File "task_fail.py", line 40, in <module>
simple_pass.si().on_error(record_failure.s(test="test_2")))
File "~/celery/celery.venv/lib/python3.6/site-packages/celery/canvas.py", line 475, in __invert__
return self.apply_async().get()
File "~/celery/celery.venv/lib/python3.6/site-packages/celery/result.py", line 237, in get
on_message=on_message,
File "~/celery/celery.venv/lib/python3.6/site-packages/celery/backends/asynchronous.py", line 200, in wait_for_pending
for _ in self._wait_for_pending(result, **kwargs):
File "~/celery/celery.venv/lib/python3.6/site-packages/celery/backends/asynchronous.py", line 268, in _wait_for_pending
on_interval=on_interval):
File "~/celery/celery.venv/lib/python3.6/site-packages/celery/backends/asynchronous.py", line 55, in drain_events_until
yield self.wait_for(p, wait, timeout=interval)
File "~/celery/celery.venv/lib/python3.6/site-packages/celery/backends/asynchronous.py", line 64, in wait_for
wait(timeout=timeout)
File "~/celery/celery.venv/lib/python3.6/site-packages/celery/backends/redis.py", line 161, in drain_events
message = self._pubsub.get_message(timeout=timeout)
File "~/celery/celery.venv/lib/python3.6/site-packages/redis/client.py", line 3617, in get_message
response = self.parse_response(block=False, timeout=timeout)
File "~/celery/celery.venv/lib/python3.6/site-packages/redis/client.py", line 3503, in parse_response
if not block and not conn.can_read(timeout=timeout):
File "~/celery/celery.venv/lib/python3.6/site-packages/redis/connection.py", line 734, in can_read
return self._parser.can_read(timeout)
File "~/celery/celery.venv/lib/python3.6/site-packages/redis/connection.py", line 321, in can_read
return self._buffer and self._buffer.can_read(timeout)
File "~/celery/celery.venv/lib/python3.6/site-packages/redis/connection.py", line 231, in can_read
raise_on_timeout=False)
File "~/celery/celery.venv/lib/python3.6/site-packages/redis/connection.py", line 198, in _read_from_socket
data = recv(self._sock, socket_read_size)
File "~/celery/celery.venv/lib/python3.6/site-packages/redis/_compat.py", line 72, in recv
```
There are no active, revoked, scheduled, or reserved tasks, and the queue is empty:
```
~/celery$ celery -A task_fail inspect active
-> celery: OK
- empty -
~/celery$ celery -A task_fail inspect scheduled
-> celery: OK
- empty -
~/celery$ celery -A task_fail inspect revoked
-> celery: OK
- empty -
~/celery$ celery -A task_fail inspect reserved
-> celery: OK
- empty -
```
| Test case added in this PR: https://github.com/celery/celery/pull/6226 | 2021-04-27T02:15:45 |
celery/celery | 6,749 | celery__celery-6749 | [
"6748"
] | 8d6778810c5153c9e4667eed618de2d0bf72663e | diff --git a/celery/exceptions.py b/celery/exceptions.py
--- a/celery/exceptions.py
+++ b/celery/exceptions.py
@@ -180,7 +180,7 @@ def __str__(self):
return f'Retry {self.humanize()}'
def __reduce__(self):
- return self.__class__, (self.message, self.excs, self.when)
+ return self.__class__, (self.message, self.exc, self.when)
RetryTaskError = Retry # noqa: E305 XXX compat
| diff --git a/t/unit/app/test_exceptions.py b/t/unit/app/test_exceptions.py
--- a/t/unit/app/test_exceptions.py
+++ b/t/unit/app/test_exceptions.py
@@ -12,7 +12,10 @@ def test_when_datetime(self):
def test_pickleable(self):
x = Retry('foo', KeyError(), when=datetime.utcnow())
- assert pickle.loads(pickle.dumps(x))
+ y = pickle.loads(pickle.dumps(x))
+ assert x.message == y.message
+ assert repr(x.exc) == repr(y.exc)
+ assert x.when == y.when
class test_Reject:
| Pickling Retry instance is incorrect
# Checklist
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [ ] This has already been asked to the [discussion group](https://groups.google.com/forum/#!forum/celery-users) first.
- [x] I have read the relevant section in the
[contribution guide](http://docs.celeryproject.org/en/latest/contributing.html#other-bugs)
on reporting bugs.
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the bug was already fixed in the master branch.
- [x] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
## Mandatory Debugging Information
- [ ] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [ ] I have included the contents of ``pip freeze`` in the issue.
- [x] I have included all the versions of all the external dependencies required
to reproduce this bug.
## Optional Debugging Information
<!--
Try some of the below if you think they are relevant.
It will help us figure out the scope of the bug and how many users it affects.
-->
- [ ] I have tried reproducing the issue on more than one Python version
and/or implementation.
- [ ] I have tried reproducing the issue on more than one message broker and/or
result backend.
- [ ] I have tried reproducing the issue on more than one version of the message
broker and/or result backend.
- [ ] I have tried reproducing the issue on more than one operating system.
- [x] I have tried reproducing the issue on more than one workers pool.
- [ ] I have tried reproducing the issue with autoscaling, retries,
ETA/Countdown & rate limits disabled.
- [x] I have tried reproducing the issue after downgrading
and/or upgrading Celery and its dependencies.
## Related Issues and Possible Duplicates
<!--
Please make sure to search and mention any related issues
or possible duplicates to this issue as requested by the checklist above.
This may or may not include issues in other repositories that the Celery project
maintains or other repositories that are dependencies of Celery.
If you don't know how to mention issues, please refer to Github's documentation
on the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests
-->
#### Related Issues
- None
#### Possible Duplicates
- None
## Environment & Settings
<!-- Include the contents of celery --version below -->
**Celery version**: 4.4.7, 5.0.5, master
<!-- Include the output of celery -A proj report below -->
<details>
<summary><b><code>celery report</code> Output:</b></summary>
<p>
```
```
</p>
</details>
# Steps to Reproduce
1. Run celery with default pool
2. Add `autoretry_for` or `raise Retry(exc=ValueError())`
3. Intercept the `task-retried` event (i.e. for RabbitMQ it's published to the `celeryev` exchange with routing key `task.retried`)
4. Expecting the "exception" argument to be not empty, getting "None" in it.
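For step 4, a sketch of such an event consumer (illustrative only; assumes a worker started with `-E` so task events are emitted, and uses the standard `app.events.Receiver` pattern):
```python
from celery import Celery

app = Celery(broker='amqp://')

def on_task_retried(event):
    # Before the fix, 'exception' arrives empty because Retry loses
    # its wrapped exc during pickling (__reduce__ referenced self.excs)
    print(event['uuid'], event.get('exception'))

with app.connection() as connection:
    receiver = app.events.Receiver(
        connection, handlers={'task-retried': on_task_retried})
    receiver.capture(limit=None, timeout=None)
```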
## Required Dependencies
<!-- Please fill the required dependencies to reproduce this issue -->
* **Minimal Python Version**: N/A or Unknown
* **Minimal Celery Version**: N/A or Unknown
* **Minimal Kombu Version**: N/A or Unknown
* **Minimal Broker Version**: N/A or Unknown
* **Minimal Result Backend Version**: N/A or Unknown
* **Minimal OS and/or Kernel Version**: N/A or Unknown
* **Minimal Broker Client Version**: N/A or Unknown
* **Minimal Result Backend Client Version**: N/A or Unknown
### Python Packages
<!-- Please fill the contents of pip freeze below -->
<details>
<summary><b><code>pip freeze</code> Output:</b></summary>
<p>
```
```
</p>
</details>
### Other Dependencies
<!--
Please provide system dependencies, configuration files
and other dependency information if applicable
-->
<details>
<p>
N/A
</p>
</details>
## Minimally Reproducible Test Case
<!--
Please provide a reproducible test case.
Refer to the Reporting Bugs section in our contribution guide.
We prefer submitting test cases in the form of a PR to our integration test suite.
If you can provide one, please mention the PR number below.
If not, please attach the most minimal code example required to reproduce the issue below.
If the test case is too large, please include a link to a gist or a repository below.
-->
<details>
<p>
```python
import pickle
from datetime import datetime
from celery.exceptions import Retry

x = Retry('foo', KeyError(), when=datetime.utcnow())
y = pickle.loads(pickle.dumps(x))
assert repr(x) == repr(y)
```
</p>
</details>
# Expected Behavior
<!-- Describe in detail what you expect to happen -->
The Retry instance after a pickle/unpickle round-trip is the same as the initial one.
# Actual Behavior
<!--
Describe in detail what actually happened.
Please include a backtrace and surround it with triple backticks (```).
In addition, include the Celery daemon logs, the broker logs,
the result backend logs and system logs below if they will help us debug
the issue.
-->
After unpickling, Retry loses `self.exc`.
| Hey @tumb1er :wave:,
Thank you for opening an issue. We will get back to you as soon as we can.
Also, check out our [Open Collective](https://opencollective.com/celery) and consider backing us - every little helps!
We also offer priority support for our sponsors.
If you require immediate assistance please consider sponsoring us.
| 2021-04-29T07:55:51 |
celery/celery | 6,750 | celery__celery-6750 | [
"6422"
] | b0326ab0e249288e8e551e78fcb88ab2c2b84bcb | diff --git a/celery/app/defaults.py b/celery/app/defaults.py
--- a/celery/app/defaults.py
+++ b/celery/app/defaults.py
@@ -176,6 +176,7 @@ def __repr__(self):
db=Option(type='int'),
host=Option(type='string'),
max_connections=Option(type='int'),
+ username=Option(type='string'),
password=Option(type='string'),
port=Option(type='int'),
socket_timeout=Option(120.0, type='float'),
diff --git a/celery/backends/redis.py b/celery/backends/redis.py
--- a/celery/backends/redis.py
+++ b/celery/backends/redis.py
@@ -233,6 +233,17 @@ def __init__(self, host=None, port=None, db=None, password=None,
socket_connect_timeout and float(socket_connect_timeout),
}
+ username = _get('redis_username')
+ if username:
+ # We're extra careful to avoid including this configuration value
+ # if it wasn't specified since older versions of py-redis
+ # don't support specifying a username.
+ # Only Redis>6.0 supports username/password authentication.
+
+ # TODO: Include this in connparams' definition once we drop
+ # support for py-redis<3.4.0.
+ self.connparams['username'] = username
+
if health_check_interval:
self.connparams["health_check_interval"] = health_check_interval
@@ -285,11 +296,11 @@ def __init__(self, host=None, port=None, db=None, password=None,
)
def _params_from_url(self, url, defaults):
- scheme, host, port, _, password, path, query = _parse_url(url)
+ scheme, host, port, username, password, path, query = _parse_url(url)
connparams = dict(
defaults, **dictfilter({
- 'host': host, 'port': port, 'password': password,
- 'db': query.pop('virtual_host', None)})
+ 'host': host, 'port': port, 'username': username,
+ 'password': password, 'db': query.pop('virtual_host', None)})
)
if scheme == 'socket':
| diff --git a/t/unit/backends/test_redis.py b/t/unit/backends/test_redis.py
--- a/t/unit/backends/test_redis.py
+++ b/t/unit/backends/test_redis.py
@@ -340,6 +340,20 @@ def test_no_redis(self):
with pytest.raises(ImproperlyConfigured):
self.Backend(app=self.app)
+ def test_username_password_from_redis_conf(self):
+ self.app.conf.redis_password = 'password'
+ x = self.Backend(app=self.app)
+
+ assert x.connparams
+ assert 'username' not in x.connparams
+ assert x.connparams['password'] == 'password'
+ self.app.conf.redis_username = 'username'
+ x = self.Backend(app=self.app)
+
+ assert x.connparams
+ assert x.connparams['username'] == 'username'
+ assert x.connparams['password'] == 'password'
+
def test_url(self):
self.app.conf.redis_socket_timeout = 30.0
self.app.conf.redis_socket_connect_timeout = 100.0
@@ -353,6 +367,19 @@ def test_url(self):
assert x.connparams['password'] == 'bosco'
assert x.connparams['socket_timeout'] == 30.0
assert x.connparams['socket_connect_timeout'] == 100.0
+ assert 'username' not in x.connparams
+
+ x = self.Backend(
+ 'redis://username:[email protected]:123//1', app=self.app,
+ )
+ assert x.connparams
+ assert x.connparams['host'] == 'vandelay.com'
+ assert x.connparams['db'] == 1
+ assert x.connparams['port'] == 123
+ assert x.connparams['username'] == 'username'
+ assert x.connparams['password'] == 'bosco'
+ assert x.connparams['socket_timeout'] == 30.0
+ assert x.connparams['socket_connect_timeout'] == 100.0
def test_timeouts_in_url_coerced(self):
pytest.importorskip('redis')
| Celery cannot connect to redis >6.0 using ACL user:password broker url
<!--
Please fill this template entirely and do not erase parts of it.
We reserve the right to close without a response
bug reports which are incomplete.
-->
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [ ] This has already been asked to the [discussion group](https://groups.google.com/forum/#!forum/celery-users) first.
- [x] I have read the relevant section in the
[contribution guide](http://docs.celeryproject.org/en/latest/contributing.html#other-bugs)
on reporting bugs.
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the bug was already fixed in the master branch.
- [x] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
## Mandatory Debugging Information
- [ ] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [ ] I have included the contents of ``pip freeze`` in the issue.
- [ ] I have included all the versions of all the external dependencies required
to reproduce this bug.
## Related Issues and Possible Duplicates
<!--
Please make sure to search and mention any related issues
or possible duplicates to this issue as requested by the checklist above.
This may or may not include issues in other repositories that the Celery project
maintains or other repositories that are dependencies of Celery.
If you don't know how to mention issues, please refer to Github's documentation
on the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests
-->
#### Related Issues
- None
#### Possible Duplicates
- None
## Environment & Settings
<!-- Include the contents of celery --version below -->
**5.0.0**:
<!-- Include the output of celery -A proj report below -->
# Expected Behavior
<!-- Describe in detail what you expect to happen -->
I'm a new celery user, not quite sure if I should report this as a bug or feature request and I'm not quite sure how to best fix this myself, so I hope you'll bear with me :-)
Redis 6.0 or greater implements users in the form of ACLs (see https://redis.io/topics/acl); however, celery throws a WRONGPASS error when using the following broker URL:
```redis://user:password@localhost:6379/0```
We would expect celery to connect to redis using the correct user:password combination and not throw a password error. Older redis versions did not implement ACLs; looking at the `_params_from_url` function on line 277 in celery/backends/redis.py, it seems like celery currently only implements a broker URL containing a password and no username, as indicated by line 278:
```scheme, host, port, _, password, path, query = _parse_url(url)```
I would expect celery to implement the user:password@host scheme, as well as the current password@host scheme in order to correctly handle the ACL introduced in redis >6.0
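A minimal sketch of that change to the top of `_params_from_url` (a fragment only; it keeps the username that `_parse_url` already extracts instead of discarding it, and note that only py-redis >= 3.4.0 accepts a `username` connection parameter):
```python
scheme, host, port, username, password, path, query = _parse_url(url)
connparams = dict(
    defaults, **dictfilter({
        'host': host, 'port': port, 'username': username,
        'password': password, 'db': query.pop('virtual_host', None)})
)
```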
# Actual Behavior
<!--
Describe in detail what actually happened.
Please include a backtrace and surround it with triple backticks (```).
In addition, include the Celery daemon logs, the broker logs,
the result backend logs and system logs below if they will help us debug
the issue.
-->
Celery throws this error:
```
[2020-10-18 02:35:54,748: ERROR/MainProcess] consumer: Cannot connect to redis://user:**@localhost:6379/0: WRONGPASS invalid username-password pair.
Trying again in 18.00 seconds... (9/100)
```
| Hi @Insension. I am not sure if I understand. the URL is passed as `broker_url` or as `result_backend`?
> Hi @Insension. I am not sure if I understand. the URL is passed as `broker_url` or as `result_backend`?
Run into the same issue when using the string as broker_url.
> Hi @Insension. I am not sure if I understand. the URL is passed as `broker_url` or as `result_backend`?
Hi @matusvalo , sorry for the late reply; yes this issue arose when attempting to connect to redis as a message broker using 'user:password@localhost:6379/0' `broker_url` format, similar to what @gainskills is experiencing. When using this URL as `result_backend` string, the same issue also occurs.
I can see that my suggestion that this can possibly be traced back to /celery/backend/redis.py could lead to confusion. Is this not the script that handles connecting to redis for both `broker_url` and `result_backend` though?
> > Hi @Insension. I am not sure if I understand. the URL is passed as `broker_url` or as `result_backend`?
>
> Hi @matusvalo , sorry for the late reply; yes this issue arose when attempting to connect to redis as a message broker using 'user:password@localhost:6379/0' `broker_url` format, similar to what @gainskills is experiencing. When using this URL as `result_backend` string, the same issue also occurs.
>
> I can see that my suggestion that this can possibly be traced back to /celery/backend/redis.py could lead to confusion. Is this not the script that handles connecting to redis for both `broker_url` and `result_backend` though?
figured my issue out: it was caused by a special character in the password string. It went back to working after I removed it.
> > > Hi @Insension. I am not sure if I understand. the URL is passed as `broker_url` or as `result_backend`?
> >
> >
> > Hi @matusvalo , sorry for the late reply; yes this issue arose when attempting to connect to redis as a message broker using 'user:password@localhost:6379/0' `broker_url` format, similar to what @gainskills is experiencing. When using this URL as `result_backend` string, the same issue also occurs.
> > I can see that my suggestion that this can possibly be traced back to /celery/backend/redis.py could lead to confusion. Is this not the script that handles connecting to redis for both `broker_url` and `result_backend` though?
>
> figured my issue out: it was caused by a special character in the password string. It went back to working after I removed it.
Care to share a test case with us?
I have absolutely the same issue as the OP; I am trying to use Redis as both broker and backend. I am sure that the login, password, host, and port are correct, since a Flask app (redis-py, run in another terminal window) connects to Redis without any problems.
Should I open my own issue, or can I follow up in the OP's issue?
```
R_LOGIN = "myuserlogin"
R_PSWD = "xfjhdsfhsjdfhk123123sddsfdsf24fdsfr43"
R_HOST = "127.0.0.1"
celery_app = Celery(
'tasks',
broker=f"redis://{R_LOGIN}:{R_PSWD}@{R_HOST}:6379/0",
backend=f"redis://{R_LOGIN}:{R_PSWD}@{R_HOST}:6379/1"
)
```
Command `celery -A celery_app worker --loglevel=INFO`
Error:
```
- *** --- * ---
- ** ---------- [config]
- ** ---------- .> app: tasks:0x2b48d8dd088
- ** ---------- .> transport: redis://myuserlogin:**@127.0.0.1:6379/0
- ** ---------- .> results: redis://myuserlogin:**@127.0.0.1:6379/1
- *** --- * --- .> concurrency: 12 (prefork)
-- ******* ---- .> task events: OFF (enable -E to monitor tasks in this worker)
--- ***** -----
-------------- [queues]
.> celery exchange=celery(direct) key=celery
[tasks]
. celery_app.add
[2020-11-02 17:09:56,047: ERROR/MainProcess] consumer: Cannot connect to redis://myuserlogin:**@127.0.0.1:6379/0: WRONGPASS invalid username-password pair.
Trying again in 2.00 seconds... (1/100)
[2020-11-02 17:09:56,368: INFO/SpawnPoolWorker-11] child process 80676 calling self.run()
```
Unfortunately, without investigation I cannot help with this issue. In any case, if we are talking about `broker_url`, the problem is connected to kombu because the connection string is passed there.
> Unfortunately, without investigation I cannot help with this issue. In any case, if we are talking about `broker_url`, the problem is connected to kombu because the connection string is passed there.
Actually it seems that the handler just drops the login for Redis:
https://github.com/celery/celery/blob/master/celery/backends/redis.py
```
def _params_from_url(self, url, defaults):
# no login - _ it is not used:
scheme, host, port, _, password, path, query = _parse_url(url)
connparams = dict(
defaults, **dictfilter({
'host': host, 'port': port, 'password': password,
'db': query.pop('virtual_host', None)})
)
```
Because I think Redis used to not have a username. I might be wrong.
@ArtyomKozyrev8 Care to provide a fix?
> Because I think Redis used to not have a username. I might be wrong.
> @ArtyomKozyrev8 Care to provide a fix?
Yes, it is a new feature available from Redis 6.0.0. Actually it is not really new; I have had Redis 6 in production for at least 6 months already.
[https://redis.io/commands/acl-cat](https://redis.io/commands/acl-cat)
@thedrow yes I would like to have the feature, since Redis 6.0 makes you use a login if you want to use a password.
> > Because I think Redis used to not have a username. I might be wrong.
> > @ArtyomKozyrev8 Care to provide a fix?
>
> Yes, it is a new feature available from Redis 6.0.0. Actually it is not really new; I have had Redis 6 in production for at least 6 months already.
>
> https://redis.io/commands/acl-cat
>
> @thedrow yes I would like to have the feature, since Redis 6.0 makes you use a login if you want to use a password.
please feel free to come up with a PR so that we can also provide input
@ArtyomKozyrev8 Any chance you'll get to this soon?
FYI @ArtyomKozyrev8 Heroku is going to phase out Redis 4/5 and force-switch everybody to Redis 6 on June 30, 2021:
https://devcenter.heroku.com/changelog-items/2078 | 2021-04-29T14:22:35 |
celery/celery | 6,757 | celery__celery-6757 | [
"5890"
] | 9dee18bfbacffbc6f04d61745d20e917a304c1b5 | diff --git a/celery/app/control.py b/celery/app/control.py
--- a/celery/app/control.py
+++ b/celery/app/control.py
@@ -431,7 +431,8 @@ def __init__(self, app=None):
self.mailbox = self.Mailbox(
app.conf.control_exchange,
type='fanout',
- accept=['json'],
+ accept=app.conf.accept_content,
+ serializer=app.conf.task_serializer,
producer_pool=lazy(lambda: self.app.amqp.producer_pool),
queue_ttl=app.conf.control_queue_ttl,
reply_queue_ttl=app.conf.control_queue_ttl,
| diff --git a/t/unit/app/test_control.py b/t/unit/app/test_control.py
--- a/t/unit/app/test_control.py
+++ b/t/unit/app/test_control.py
@@ -241,6 +241,12 @@ def assert_control_called_with_args(self, name, destination=None,
self.app.control.broadcast.assert_called_with(
name, destination=destination, arguments=args, **_options or {})
+ def test_serializer(self):
+ self.app.conf['task_serializer'] = 'test'
+ self.app.conf['accept_content'] = ['test']
+ assert control.Control(self.app).mailbox.serializer == 'test'
+ assert control.Control(self.app).mailbox.accept == ['test']
+
def test_purge(self):
self.app.amqp.TaskConsumer = Mock(name='TaskConsumer')
self.app.control.purge()
| 4.4 silently changes the Inspect API
<!--
Please fill this template entirely and do not erase parts of it.
We reserve the right to close without a response
bug reports which are incomplete.
-->
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?utf8=%E2%9C%93&q=is%3Aissue+label%3A%22Category%3A+Documentation%22+)
for similar or identical bug reports.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the bug was already fixed in the master branch.
- [x] I have included all related issues and possible duplicate issues in this issue
(If there are none, check this box anyway).
## Related Issues and Possible Duplicates
<!--
Please make sure to search and mention any related issues
or possible duplicates to this issue as requested by the checklist above.
This may or may not include issues in other repositories that the Celery project
maintains or other repositories that are dependencies of Celery.
If you don't know how to mention issues, please refer to Github's documentation
on the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests
-->
#### Related Issues
- None
#### Possible Duplicates
- None
# Description
<!--
Please describe what's missing or incorrect about our documentation.
Include links and/or screenshots which will aid us to resolve the issue.
-->
Prior to Celery 4.4.0, the celery.app.control.Inspect.scheduled() API used to return "args" and "kwargs" as strings which could be parsed using ast.literal_eval. Now, they seem to be a list and dict respectively:
```
{'id': 'd82058c7-12b0-4914-8381-2fef9ad874a9', 'name': '...', 'args': ['time', 'sleep', 5], 'kwargs': {}, 'type': '...', }
```
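For client code that has to work across both versions, a small compatibility shim is possible (a sketch, assuming entries shaped like the dict shown above):
```python
import ast

def extract_args(entry):
    args, kwargs = entry['args'], entry['kwargs']
    # Celery < 4.4 returned repr strings; 4.4+ returns decoded objects
    if isinstance(args, str):
        args = ast.literal_eval(args)
    if isinstance(kwargs, str):
        kwargs = ast.literal_eval(kwargs)
    return args, kwargs
```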
# Suggestions
<!-- Please provide us suggestions for how to fix the documentation -->
I assume this is a deliberate design change; it would have been helpful to add this breaking change to the release notes!
I admit the release notes are not complete, as I released 4.4.0 quite hastily before the holiday season started. Thanks for the report.
I think this issue is related to: https://groups.google.com/g/celery-users/c/1F61iEj-YFI/m/N7d64AalDAAJ
https://github.com/celery/celery/blob/8492b75c579564c2af5c2be75fe4b2118ebd0cd1/celery/worker/request.py#L558-L559
args and kwargs are passed directly, so it works correctly when using JSON serialization.
But when using pickle serialization, it causes an error:
```
kombu.exceptions.EncodeError: Object of type **** is not JSON serializable
```
## Reproduction
`celery -A repro worker` and `python repro.py` in separated two shells.
```python
import time
from celery import Celery
app = Celery('example')
app.conf.update(
backend_url='redis://localhost:6379',
broker_url='redis://localhost:6379',
result_backend='redis://localhost:6379',
task_serializer='pickle',
accept_content=['pickle', 'json'],
)
@app.task(name='task1')
def task1(*args, **kwargs):
print('Start', args, kwargs)
time.sleep(30)
print('Finish', args, kwargs)
def main():
task1.delay({1, 2, 3}) # something not JSON serializable.
# inspect queue items
inspected = app.control.inspect()
active_tasks = inspected.active()
print(active_tasks)
if __name__ == '__main__':
main()
```
> I think this issue is related to: https://groups.google.com/g/celery-users/c/1F61iEj-YFI/m/N7d64AalDAAJ
>
> https://github.com/celery/celery/blob/8492b75c579564c2af5c2be75fe4b2118ebd0cd1/celery/worker/request.py#L558-L559
>
> args and kwargs are passed directly, so it works correctly when using JSON serialization.
> But when using pickle serialization, it causes an error:
>
> ```
> kombu.exceptions.EncodeError: Object of type **** is not JSON serializable
> ```
>
> ## Reproduction
>
> `celery -A repro worker` and `python repro.py` in separated two shells.
>
> ```python
> import time
>
> from celery import Celery
>
>
> app = Celery('example')
> app.conf.update(
> backend_url='redis://localhost:6379',
> broker_url='redis://localhost:6379',
> result_backend='redis://localhost:6379',
> task_serializer='pickle',
> accept_content=['pickle', 'json'],
> )
>
>
> @app.task(name='task1')
> def task1(*args, **kwargs):
> print('Start', args, kwargs)
> time.sleep(30)
> print('Finish', args, kwargs)
>
>
> def main():
> task1.delay({1, 2, 3}) # something not JSON serializable.
>
> # inspect queue items
> inspected = app.control.inspect()
> active_tasks = inspected.active()
> print(active_tasks)
>
>
> if __name__ == '__main__':
> main()
> ```
would you mind finding a possible fix?
I think there is a possible fix like this https://github.com/celery/celery/pull/6567
We need to understand how we should fix this issue either via PR #6567 or via my comment: https://github.com/celery/celery/pull/6567#issuecomment-788008337 | 2021-05-06T09:32:50 |
celery/celery | 6,758 | celery__celery-6758 | [
"6756"
] | 1cd6521344c95ca2ddaa8feffb51b4c6612d740c | diff --git a/celery/app/trace.py b/celery/app/trace.py
--- a/celery/app/trace.py
+++ b/celery/app/trace.py
@@ -52,6 +52,11 @@
logger = get_logger(__name__)
+#: Format string used to log task receipt.
+LOG_RECEIVED = """\
+Task %(name)s[%(id)s] received\
+"""
+
#: Format string used to log task success.
LOG_SUCCESS = """\
Task %(name)s[%(id)s] succeeded in %(runtime)ss: %(return_value)s\
diff --git a/celery/worker/strategy.py b/celery/worker/strategy.py
--- a/celery/worker/strategy.py
+++ b/celery/worker/strategy.py
@@ -4,6 +4,7 @@
from kombu.asynchronous.timer import to_timestamp
from celery import signals
+from celery.app import trace as _app_trace
from celery.exceptions import InvalidTaskError
from celery.utils.imports import symbol_by_name
from celery.utils.log import get_logger
@@ -148,7 +149,10 @@ def task_message_handler(message, body, ack, reject, callbacks,
body=body, headers=headers, decoded=decoded, utc=utc,
)
if _does_info:
- info('Received task: %s', req)
+ # Similar to `app.trace.info()`, we pass the formatting args as the
+ # `extra` kwarg for custom log handlers
+ context = {'id': req.id, 'name': req.name}
+ info(_app_trace.LOG_RECEIVED, context, extra={'data': context})
if (req.expires or req.id in revoked_tasks) and req.revoked():
return
| diff --git a/t/unit/worker/test_strategy.py b/t/unit/worker/test_strategy.py
--- a/t/unit/worker/test_strategy.py
+++ b/t/unit/worker/test_strategy.py
@@ -1,3 +1,4 @@
+import logging
from collections import defaultdict
from contextlib import contextmanager
from unittest.mock import ANY, Mock, patch
@@ -6,6 +7,7 @@
from kombu.utils.limits import TokenBucket
from celery import Task, signals
+from celery.app.trace import LOG_RECEIVED
from celery.exceptions import InvalidTaskError
from celery.utils.time import rate
from celery.worker import state
@@ -142,12 +144,14 @@ def _context(self, sig,
message = self.prepare_message(message)
yield self.Context(sig, s, reserved, consumer, message)
- def test_when_logging_disabled(self):
+ def test_when_logging_disabled(self, caplog):
+ # Capture logs at any level above `NOTSET`
+ caplog.set_level(logging.NOTSET + 1, logger="celery.worker.strategy")
with patch('celery.worker.strategy.logger') as logger:
logger.isEnabledFor.return_value = False
with self._context(self.add.s(2, 2)) as C:
C()
- logger.info.assert_not_called()
+ assert not caplog.records
def test_task_strategy(self):
with self._context(self.add.s(2, 2)) as C:
@@ -165,6 +169,33 @@ def test_callbacks(self):
for callback in callbacks:
callback.assert_called_with(req)
+ def test_log_task_received(self, caplog):
+ caplog.set_level(logging.INFO, logger="celery.worker.strategy")
+ with self._context(self.add.s(2, 2)) as C:
+ C()
+ for record in caplog.records:
+ if record.msg == LOG_RECEIVED:
+ assert record.levelno == logging.INFO
+ break
+ else:
+ raise ValueError("Expected message not in captured log records")
+
+ def test_log_task_received_custom(self, caplog):
+ caplog.set_level(logging.INFO, logger="celery.worker.strategy")
+ custom_fmt = "CUSTOM MESSAGE"
+ with self._context(
+ self.add.s(2, 2)
+ ) as C, patch(
+ "celery.app.trace.LOG_RECEIVED", new=custom_fmt,
+ ):
+ C()
+ for record in caplog.records:
+ if record.msg == custom_fmt:
+ assert set(record.args) == {"id", "name"}
+ break
+ else:
+ raise ValueError("Expected message not in captured log records")
+
def test_signal_task_received(self):
callback = Mock()
with self._context(self.add.s(2, 2)) as C:
| Ability to customize `Received task` message
- [X] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Enhancement%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical enhancement to an existing feature.
- [X] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22Issue+Type%3A+Enhancement%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed enhancements.
- [X] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the same enhancement was already implemented in the
master branch.
- [X] I have included all related issues and possible duplicate issues in this issue
(If there are none, check this box anyway).
## Related Issues and Possible Duplicates
#### Related Issues
- None
#### Possible Duplicates
- None
# Brief Summary
Currently `LOG_SUCCESS` and friends allow customizing the messages, but `Received task` is fixed. It would be good for this to be customizable and include the same values in `extra` (i.e. use `celery.app.trace.info` rather than `logging.info`). I was thinking something like this:
```
info('Received task: %s', {
'name': req.name,
'id': req.id,
})
```
# Design
## Architectural Considerations
Should be a non-breaking change that only adds a customization point
## Proposed Behavior
Add `LOG_RECEIVED` so it can be customized as well
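Assuming the new format string lands next to its siblings in `celery.app.trace`, customizing it would then be a simple module-level override, e.g.:
```python
from celery.app import trace

# hypothetical once LOG_RECEIVED exists; same pattern used for LOG_SUCCESS
trace.LOG_RECEIVED = 'Task %(name)s[%(id)s] received'
```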
## Proposed UI/UX
N/A
## Diagrams
N/A
## Alternatives
None that I can think of
| Hey @daveisfera :wave:,
Thank you for opening an issue. We will get back to you as soon as we can.
Also, check out our [Open Collective](https://opencollective.com/celery) and consider backing us - every little helps!
We also offer priority support for our sponsors.
If you require immediate assistance please consider sponsoring us.
Pull requests are welcome! | 2021-05-06T14:21:48 |
celery/celery | 6,765 | celery__celery-6765 | [
"6763"
] | 1cd6521344c95ca2ddaa8feffb51b4c6612d740c | diff --git a/celery/backends/redis.py b/celery/backends/redis.py
--- a/celery/backends/redis.py
+++ b/celery/backends/redis.py
@@ -7,7 +7,7 @@
from kombu.utils.functional import retry_over_time
from kombu.utils.objects import cached_property
-from kombu.utils.url import _parse_url
+from kombu.utils.url import _parse_url, maybe_sanitize_url
from celery import states
from celery._state import task_join_will_block
@@ -585,6 +585,8 @@ class SentinelManagedSSLConnection(
class SentinelBackend(RedisBackend):
"""Redis sentinel task result store."""
+ # URL looks like `sentinel://0.0.0.0:26347/3;sentinel://0.0.0.0:26348/3`
+ _SERVER_URI_SEPARATOR = ";"
sentinel = getattr(redis, "sentinel", None)
connection_class_ssl = SentinelManagedSSLConnection if sentinel else None
@@ -595,9 +597,30 @@ def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
+ def as_uri(self, include_password=False):
+ """
+ Return the server addresses as URIs, sanitizing the password or not.
+ """
+ # Allow superclass to do work if we don't need to force sanitization
+ if include_password:
+ return super(SentinelBackend, self).as_uri(
+ include_password=include_password,
+ )
+ # Otherwise we need to ensure that all components get sanitized rather
+ # by passing them one by one to the `kombu` helper
+ uri_chunks = (
+ maybe_sanitize_url(chunk)
+ for chunk in (self.url or "").split(self._SERVER_URI_SEPARATOR)
+ )
+ # Similar to the superclass, strip the trailing slash from URIs with
+ # all components empty other than the scheme
+ return self._SERVER_URI_SEPARATOR.join(
+ uri[:-1] if uri.endswith(":///") else uri
+ for uri in uri_chunks
+ )
+
def _params_from_url(self, url, defaults):
- # URL looks like sentinel://0.0.0.0:26347/3;sentinel://0.0.0.0:26348/3.
- chunks = url.split(";")
+ chunks = url.split(self._SERVER_URI_SEPARATOR)
connparams = dict(defaults, hosts=[])
for chunk in chunks:
data = super()._params_from_url(
| diff --git a/t/unit/backends/test_redis.py b/t/unit/backends/test_redis.py
--- a/t/unit/backends/test_redis.py
+++ b/t/unit/backends/test_redis.py
@@ -1203,6 +1203,16 @@ def test_url(self):
found_dbs = [cp['db'] for cp in x.connparams['hosts']]
assert found_dbs == expected_dbs
+ # By default passwords should be sanitized
+ display_url = x.as_uri()
+ assert "test" not in display_url
+ # We can choose not to sanitize with the `include_password` argument
+ unsanitized_display_url = x.as_uri(include_password=True)
+ assert unsanitized_display_url == x.url
+ # or to explicitly sanitize
+ forcibly_sanitized_display_url = x.as_uri(include_password=False)
+ assert forcibly_sanitized_display_url == display_url
+
def test_get_sentinel_instance(self):
x = self.Backend(
'sentinel://:[email protected]:123/1;'
| Celery Worker Redis Sentinel Password Exposed in STDOUT
# Checklist
- [X] I have verified that the issue exists against the `master` branch of Celery.
- [ ] This has already been asked to the [discussion group](https://groups.google.com/forum/#!forum/celery-users) first.
- [X] I have read the relevant section in the
[contribution guide](http://docs.celeryproject.org/en/latest/contributing.html#other-bugs)
on reporting bugs.
- [X] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [X] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [X] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the bug was already fixed in the master branch.
- [X] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
## Mandatory Debugging Information
- [ ] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
- [X] I have verified that the issue exists against the `master` branch of Celery.
- [ ] I have included the contents of ``pip freeze`` in the issue.
- [ ] I have included all the versions of all the external dependencies required
to reproduce this bug.
## Optional Debugging Information
- [ ] I have tried reproducing the issue on more than one Python version
and/or implementation.
- [ ] I have tried reproducing the issue on more than one message broker and/or
result backend.
- [ ] I have tried reproducing the issue on more than one version of the message
broker and/or result backend.
- [ ] I have tried reproducing the issue on more than one operating system.
- [ ] I have tried reproducing the issue on more than one workers pool.
- [ ] I have tried reproducing the issue with autoscaling, retries,
ETA/Countdown & rate limits disabled.
- [ ] I have tried reproducing the issue after downgrading
and/or upgrading Celery and its dependencies.
#### Related Issues
- None
#### Possible Duplicates
- None
## Environment & Settings
<!-- Include the contents of celery --version below -->
**Celery version**: v5.0.5
<!-- Include the output of celery -A proj report below -->
<details>
<summary><b><code>celery report</code> Output:</b></summary>
<p>
```
```
</p>
</details>
# Steps to Reproduce
## Required Dependencies
<!-- Please fill the required dependencies to reproduce this issue -->
* **Minimal Python Version**: N/A or Unknown
* **Minimal Celery Version**: N/A or Unknown
* **Minimal Kombu Version**: N/A or Unknown
* **Minimal Broker Version**: N/A or Unknown
* **Minimal Result Backend Version**: N/A or Unknown
* **Minimal OS and/or Kernel Version**: N/A or Unknown
* **Minimal Broker Client Version**: N/A or Unknown
* **Minimal Result Backend Client Version**: N/A or Unknown
### Python Packages
<!-- Please fill the contents of pip freeze below -->
<details>
<summary><b><code>pip freeze</code> Output:</b></summary>
<p>
```
```
</p>
</details>
### Other Dependencies
<!--
Please provide system dependencies, configuration files
and other dependency information if applicable
-->
<details>
<p>
N/A
</p>
</details>
## Minimally Reproducible Test Case
<!--
Please provide a reproducible test case.
Refer to the Reporting Bugs section in our contribution guide.
We prefer submitting test cases in the form of a PR to our integration test suite.
If you can provide one, please mention the PR number below.
If not, please attach the most minimal code example required to reproduce the issue below.
If the test case is too large, please include a link to a gist or a repository below.
-->
<details>
<p>
```
CELERY_BROKER_URL = 'sentinel://:[email protected]:26379/0;sentinel://:[email protected]:26379/0;sentinel://:[email protected]:26379/0'
CELERY_RESULT_BACKEND = 'sentinel://:[email protected]:26379/0;sentinel://:[email protected]:26379/0;sentinel://:[email protected]:26379/0'
```
</p>
</details>
# Expected Behavior
<!-- Describe in detail what you expect to happen -->
```
- ** ---------- .> transport: sentinel://:**@server1.com:26379/0
- ** ---------- .> results: sentinel://:**@server1.com:26379/0;sentinel://:**@server2.com:26379/0;sentinel://:**@server3.com:26379/0
- *** --- * --- .> concurrency: 8 (prefork)
```
# Actual Behavior
```
- ** ---------- .> transport: sentinel://:**@server1.com:26379/0
- ** ---------- .> results: sentinel://:**@server1.com:26379/0;sentinel://:[email protected]:26379/0;sentinel://:[email protected]:26379/0
- *** --- * --- .> concurrency: 8 (prefork)
```
The results output printed by celery at launch shows the redis sentinel password when a sentinel result backend is used.
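For reference, kombu already ships a helper that can sanitize each semicolon-separated sentinel chunk individually; a sketch (hostnames made up):
```python
from kombu.utils.url import maybe_sanitize_url

url = ('sentinel://:[email protected]:26379/0;'
       'sentinel://:[email protected]:26379/0')
print(';'.join(maybe_sanitize_url(chunk) for chunk in url.split(';')))
# sentinel://:**@server1.com:26379/0;sentinel://:**@server2.com:26379/0
```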
| Hey @K-MTG :wave:,
Thank you for opening an issue. We will get back to you as soon as we can.
Also, check out our [Open Collective](https://opencollective.com/celery) and consider backing us - every little helps!
We also offer priority support for our sponsors.
If you require immediate assistance please consider sponsoring us.
| 2021-05-11T03:00:18 |
celery/celery | 6,770 | celery__celery-6770 | [
"6441"
] | 536849c98ae3e75026ead822542b936e272d2b2b | diff --git a/celery/app/task.py b/celery/app/task.py
--- a/celery/app/task.py
+++ b/celery/app/task.py
@@ -6,9 +6,9 @@
from kombu.exceptions import OperationalError
from kombu.utils.uuid import uuid
-from celery import current_app, group, states
+from celery import current_app, states
from celery._state import _task_stack
-from celery.canvas import _chain, signature
+from celery.canvas import _chain, group, signature
from celery.exceptions import (Ignore, ImproperlyConfigured,
MaxRetriesExceededError, Reject, Retry)
from celery.local import class_property
@@ -893,41 +893,40 @@ def replace(self, sig):
raise ImproperlyConfigured(
"A signature replacing a task must not be part of a chord"
)
+ if isinstance(sig, _chain) and not getattr(sig, "tasks", True):
+ raise ImproperlyConfigured("Cannot replace with an empty chain")
+ # Ensure callbacks or errbacks from the replaced signature are retained
if isinstance(sig, group):
- sig |= self.app.tasks['celery.accumulate'].s(index=0).set(
- link=self.request.callbacks,
- link_error=self.request.errbacks,
- )
- elif isinstance(sig, _chain):
- if not sig.tasks:
- raise ImproperlyConfigured(
- "Cannot replace with an empty chain"
- )
-
- if self.request.chain:
- # We need to freeze the new signature with the current task's ID to
- # ensure that we don't disassociate the new chain from the existing
- # task IDs which would break previously constructed results
- # objects.
- sig.freeze(self.request.id)
- if "link" in sig.options:
- final_task_links = sig.tasks[-1].options.setdefault("link", [])
- final_task_links.extend(maybe_list(sig.options["link"]))
- # Construct the new remainder of the task by chaining the signature
- # we're being replaced by with signatures constructed from the
- # chain elements in the current request.
- for t in reversed(self.request.chain):
- sig |= signature(t, app=self.app)
-
+ # Groups get uplifted to a chord so that we can link onto the body
+ sig |= self.app.tasks['celery.accumulate'].s(index=0)
+ for callback in maybe_list(self.request.callbacks) or []:
+ sig.link(callback)
+ for errback in maybe_list(self.request.errbacks) or []:
+ sig.link_error(errback)
+ # If the replacement signature is a chain, we need to push callbacks
+ # down to the final task so they run at the right time even if we
+ # proceed to link further tasks from the original request below
+ if isinstance(sig, _chain) and "link" in sig.options:
+ final_task_links = sig.tasks[-1].options.setdefault("link", [])
+ final_task_links.extend(maybe_list(sig.options["link"]))
+ # We need to freeze the replacement signature with the current task's
+ # ID to ensure that we don't disassociate it from the existing task IDs
+ # which would break previously constructed results objects.
+ sig.freeze(self.request.id)
+ # Ensure the important options from the original signature are retained
sig.set(
chord=chord,
group_id=self.request.group,
group_index=self.request.group_index,
root_id=self.request.root_id,
)
- sig.freeze(self.request.id)
-
+ # If the task being replaced is part of a chain, we need to re-create
+ # it with the replacement signature - these subsequent tasks will
+ # retain their original task IDs as well
+ for t in reversed(self.request.chain or []):
+ sig |= signature(t, app=self.app)
+ # Finally, either apply or delay the new signature!
if self.request.is_eager:
return sig.apply().get()
else:
diff --git a/celery/canvas.py b/celery/canvas.py
--- a/celery/canvas.py
+++ b/celery/canvas.py
@@ -642,7 +642,8 @@ def apply_async(self, args=None, kwargs=None, **options):
def run(self, args=None, kwargs=None, group_id=None, chord=None,
task_id=None, link=None, link_error=None, publisher=None,
- producer=None, root_id=None, parent_id=None, app=None, **options):
+ producer=None, root_id=None, parent_id=None, app=None,
+ group_index=None, **options):
# pylint: disable=redefined-outer-name
# XXX chord is also a class in outer scope.
args = args if args else ()
@@ -656,7 +657,7 @@ def run(self, args=None, kwargs=None, group_id=None, chord=None,
tasks, results_from_prepare = self.prepare_steps(
args, kwargs, self.tasks, root_id, parent_id, link_error, app,
- task_id, group_id, chord,
+ task_id, group_id, chord, group_index=group_index,
)
if results_from_prepare:
diff --git a/t/integration/tasks.py b/t/integration/tasks.py
--- a/t/integration/tasks.py
+++ b/t/integration/tasks.py
@@ -217,17 +217,17 @@ def retry_once_priority(self, *args, expires=60.0, max_retries=1,
@shared_task
-def redis_echo(message):
+def redis_echo(message, redis_key="redis-echo"):
"""Task that appends the message to a redis list."""
redis_connection = get_redis_connection()
- redis_connection.rpush('redis-echo', message)
+ redis_connection.rpush(redis_key, message)
@shared_task
-def redis_count():
- """Task that increments a well-known redis key."""
+def redis_count(redis_key="redis-count"):
+ """Task that increments a specified or well-known redis key."""
redis_connection = get_redis_connection()
- redis_connection.incr('redis-count')
+ redis_connection.incr(redis_key)
@shared_task(bind=True)
@@ -295,6 +295,12 @@ def fail(*args):
raise ExpectedException(*args)
+@shared_task(bind=True)
+def fail_replaced(self, *args):
+ """Replace this task with one which raises ExpectedException."""
+ raise self.replace(fail.si(*args))
+
+
@shared_task
def chord_error(*args):
return args
| diff --git a/t/integration/test_canvas.py b/t/integration/test_canvas.py
--- a/t/integration/test_canvas.py
+++ b/t/integration/test_canvas.py
@@ -1,3 +1,4 @@
+import collections
import re
import tempfile
import uuid
@@ -18,12 +19,12 @@
from .tasks import (ExpectedException, add, add_chord_to_chord, add_replaced,
add_to_all, add_to_all_to_chord, build_chain_inside_task,
chord_error, collect_ids, delayed_sum,
- delayed_sum_with_soft_guard, fail, identity, ids,
- print_unicode, raise_error, redis_count, redis_echo,
- replace_with_chain, replace_with_chain_which_raises,
- replace_with_empty_chain, retry_once, return_exception,
- return_priority, second_order_replace1, tsum,
- write_to_file_and_return_int)
+ delayed_sum_with_soft_guard, fail, fail_replaced,
+ identity, ids, print_unicode, raise_error, redis_count,
+ redis_echo, replace_with_chain,
+ replace_with_chain_which_raises, replace_with_empty_chain,
+ retry_once, return_exception, return_priority,
+ second_order_replace1, tsum, write_to_file_and_return_int)
RETRYABLE_EXCEPTIONS = (OSError, ConnectionError, TimeoutError)
@@ -43,6 +44,62 @@ def flaky(fn):
return _timeout(_flaky(fn))
+def await_redis_echo(expected_msgs, redis_key="redis-echo", timeout=TIMEOUT):
+ """
+ Helper to wait for a specified or well-known redis key to contain a string.
+ """
+ redis_connection = get_redis_connection()
+
+ if isinstance(expected_msgs, (str, bytes, bytearray)):
+ expected_msgs = (expected_msgs, )
+ expected_msgs = collections.Counter(
+ e if not isinstance(e, str) else e.encode("utf-8")
+ for e in expected_msgs
+ )
+
+ # This can technically wait for `len(expected_msgs) * timeout` :/
+ while +expected_msgs:
+ maybe_key_msg = redis_connection.blpop(redis_key, timeout)
+ if maybe_key_msg is None:
+ raise TimeoutError(
+ "Fetching from {!r} timed out - still awaiting {!r}"
+ .format(redis_key, dict(+expected_msgs))
+ )
+ retrieved_key, msg = maybe_key_msg
+ assert retrieved_key.decode("utf-8") == redis_key
+ expected_msgs[msg] -= 1 # silently accepts unexpected messages
+
+ # There should be no more elements - block momentarily
+ assert redis_connection.blpop(redis_key, min(1, timeout)) is None
+
+
+def await_redis_count(expected_count, redis_key="redis-count", timeout=TIMEOUT):
+ """
+ Helper to wait for a specified or well-known redis key to count to a value.
+ """
+ redis_connection = get_redis_connection()
+
+ check_interval = 0.1
+ check_max = int(timeout / check_interval)
+ for i in range(check_max + 1):
+ maybe_count = redis_connection.get(redis_key)
+ # It's either `None` or a base-10 integer
+ if maybe_count is not None:
+ count = int(maybe_count)
+ if count == expected_count:
+ break
+ elif i >= check_max:
+ assert count == expected_count
+ # try again later
+ sleep(check_interval)
+ else:
+ raise TimeoutError("{!r} was never incremented".format(redis_key))
+
+ # There should be no more increments - block momentarily
+ sleep(min(1, timeout))
+ assert int(redis_connection.get(redis_key)) == expected_count
+
+
class test_link_error:
@flaky
def test_link_error_eager(self):
@@ -476,19 +533,7 @@ def test_chain_replaced_with_a_chain_and_a_callback(self, manager):
res = c.delay()
assert res.get(timeout=TIMEOUT) == 'Hello world'
-
- expected_msgs = {link_msg, }
- while expected_msgs:
- maybe_key_msg = redis_connection.blpop('redis-echo', TIMEOUT)
- if maybe_key_msg is None:
- raise TimeoutError('redis-echo')
- _, msg = maybe_key_msg
- msg = msg.decode()
- expected_msgs.remove(msg) # KeyError if `msg` is not in here
-
- # There should be no more elements - block momentarily
- assert redis_connection.blpop('redis-echo', min(1, TIMEOUT)) is None
- redis_connection.delete('redis-echo')
+ await_redis_echo({link_msg, })
def test_chain_replaced_with_a_chain_and_an_error_callback(self, manager):
if not manager.app.conf.result_backend.startswith('redis'):
@@ -507,19 +552,7 @@ def test_chain_replaced_with_a_chain_and_an_error_callback(self, manager):
with pytest.raises(ValueError):
res.get(timeout=TIMEOUT)
-
- expected_msgs = {link_msg, }
- while expected_msgs:
- maybe_key_msg = redis_connection.blpop('redis-echo', TIMEOUT)
- if maybe_key_msg is None:
- raise TimeoutError('redis-echo')
- _, msg = maybe_key_msg
- msg = msg.decode()
- expected_msgs.remove(msg) # KeyError if `msg` is not in here
-
- # There should be no more elements - block momentarily
- assert redis_connection.blpop('redis-echo', min(1, TIMEOUT)) is None
- redis_connection.delete('redis-echo')
+ await_redis_echo({link_msg, })
def test_chain_with_cb_replaced_with_chain_with_cb(self, manager):
if not manager.app.conf.result_backend.startswith('redis'):
@@ -539,22 +572,11 @@ def test_chain_with_cb_replaced_with_chain_with_cb(self, manager):
res = c.delay()
assert res.get(timeout=TIMEOUT) == 'Hello world'
+ await_redis_echo({link_msg, 'Hello world'})
- expected_msgs = {link_msg, 'Hello world'}
- while expected_msgs:
- maybe_key_msg = redis_connection.blpop('redis-echo', TIMEOUT)
- if maybe_key_msg is None:
- raise TimeoutError('redis-echo')
- _, msg = maybe_key_msg
- msg = msg.decode()
- expected_msgs.remove(msg) # KeyError if `msg` is not in here
-
- # There should be no more elements - block momentarily
- assert redis_connection.blpop('redis-echo', min(1, TIMEOUT)) is None
- redis_connection.delete('redis-echo')
-
- @pytest.mark.xfail(reason="#6441")
- def test_chain_with_eb_replaced_with_chain_with_eb(self, manager):
+ def test_chain_with_eb_replaced_with_chain_with_eb(
+ self, manager, subtests
+ ):
if not manager.app.conf.result_backend.startswith('redis'):
raise pytest.skip('Requires redis result backend.')
@@ -565,30 +587,18 @@ def test_chain_with_eb_replaced_with_chain_with_eb(self, manager):
outer_link_msg = 'External chain errback'
c = chain(
identity.s('Hello '),
- # The replacement chain will pass its args though
+ # The replacement chain will die and break the encapsulating chain
replace_with_chain_which_raises.s(link_msg=inner_link_msg),
add.s('world'),
)
- c.link_error(redis_echo.s(outer_link_msg))
+ c.link_error(redis_echo.si(outer_link_msg))
res = c.delay()
- with pytest.raises(ValueError):
- res.get(timeout=TIMEOUT)
-
- expected_msgs = {inner_link_msg, outer_link_msg}
- while expected_msgs:
- # Shorter timeout here because we expect failure
- timeout = min(5, TIMEOUT)
- maybe_key_msg = redis_connection.blpop('redis-echo', timeout)
- if maybe_key_msg is None:
- raise TimeoutError('redis-echo')
- _, msg = maybe_key_msg
- msg = msg.decode()
- expected_msgs.remove(msg) # KeyError if `msg` is not in here
-
- # There should be no more elements - block momentarily
- assert redis_connection.blpop('redis-echo', min(1, TIMEOUT)) is None
- redis_connection.delete('redis-echo')
+ with subtests.test(msg="Chain fails due to a child task dying"):
+ with pytest.raises(ValueError):
+ res.get(timeout=TIMEOUT)
+ with subtests.test(msg="Chain and child task callbacks are called"):
+ await_redis_echo({inner_link_msg, outer_link_msg})
def test_replace_chain_with_empty_chain(self, manager):
r = chain(identity.s(1), replace_with_empty_chain.s()).delay()
@@ -597,6 +607,152 @@ def test_replace_chain_with_empty_chain(self, manager):
match="Cannot replace with an empty chain"):
r.get(timeout=TIMEOUT)
+ def test_chain_children_with_callbacks(self, manager, subtests):
+ if not manager.app.conf.result_backend.startswith("redis"):
+ raise pytest.skip("Requires redis result backend.")
+ redis_connection = get_redis_connection()
+
+ redis_key = str(uuid.uuid4())
+ callback = redis_count.si(redis_key=redis_key)
+
+ child_task_count = 42
+ child_sig = identity.si(1337)
+ child_sig.link(callback)
+ chain_sig = chain(child_sig for _ in range(child_task_count))
+
+ redis_connection.delete(redis_key)
+ with subtests.test(msg="Chain executes as expected"):
+ res_obj = chain_sig()
+ assert res_obj.get(timeout=TIMEOUT) == 1337
+ with subtests.test(msg="Chain child task callbacks are called"):
+ await_redis_count(child_task_count, redis_key=redis_key)
+ redis_connection.delete(redis_key)
+
+ def test_chain_children_with_errbacks(self, manager, subtests):
+ if not manager.app.conf.result_backend.startswith("redis"):
+ raise pytest.skip("Requires redis result backend.")
+ redis_connection = get_redis_connection()
+
+ redis_key = str(uuid.uuid4())
+ errback = redis_count.si(redis_key=redis_key)
+
+ child_task_count = 42
+ child_sig = fail.si()
+ child_sig.link_error(errback)
+ chain_sig = chain(child_sig for _ in range(child_task_count))
+
+ redis_connection.delete(redis_key)
+ with subtests.test(msg="Chain fails due to a child task dying"):
+ res_obj = chain_sig()
+ with pytest.raises(ExpectedException):
+ res_obj.get(timeout=TIMEOUT)
+ with subtests.test(msg="Chain child task errbacks are called"):
+ # Only the first child task gets a chance to run and fail
+ await_redis_count(1, redis_key=redis_key)
+ redis_connection.delete(redis_key)
+
+ def test_chain_with_callback_child_replaced(self, manager, subtests):
+ if not manager.app.conf.result_backend.startswith("redis"):
+ raise pytest.skip("Requires redis result backend.")
+ redis_connection = get_redis_connection()
+
+ redis_key = str(uuid.uuid4())
+ callback = redis_count.si(redis_key=redis_key)
+
+ chain_sig = chain(add_replaced.si(42, 1337), identity.s())
+ chain_sig.link(callback)
+
+ redis_connection.delete(redis_key)
+ with subtests.test(msg="Chain executes as expected"):
+ res_obj = chain_sig()
+ assert res_obj.get(timeout=TIMEOUT) == 42 + 1337
+ with subtests.test(msg="Callback is called after chain finishes"):
+ await_redis_count(1, redis_key=redis_key)
+ redis_connection.delete(redis_key)
+
+ def test_chain_with_errback_child_replaced(self, manager, subtests):
+ if not manager.app.conf.result_backend.startswith("redis"):
+ raise pytest.skip("Requires redis result backend.")
+ redis_connection = get_redis_connection()
+
+ redis_key = str(uuid.uuid4())
+ errback = redis_count.si(redis_key=redis_key)
+
+ chain_sig = chain(add_replaced.si(42, 1337), fail.s())
+ chain_sig.link_error(errback)
+
+ redis_connection.delete(redis_key)
+ with subtests.test(msg="Chain executes as expected"):
+ res_obj = chain_sig()
+ with pytest.raises(ExpectedException):
+ res_obj.get(timeout=TIMEOUT)
+ with subtests.test(msg="Errback is called after chain finishes"):
+ await_redis_count(1, redis_key=redis_key)
+ redis_connection.delete(redis_key)
+
+ def test_chain_child_with_callback_replaced(self, manager, subtests):
+ if not manager.app.conf.result_backend.startswith("redis"):
+ raise pytest.skip("Requires redis result backend.")
+ redis_connection = get_redis_connection()
+
+ redis_key = str(uuid.uuid4())
+ callback = redis_count.si(redis_key=redis_key)
+
+ child_sig = add_replaced.si(42, 1337)
+ child_sig.link(callback)
+ chain_sig = chain(child_sig, identity.s())
+
+ redis_connection.delete(redis_key)
+ with subtests.test(msg="Chain executes as expected"):
+ res_obj = chain_sig()
+ assert res_obj.get(timeout=TIMEOUT) == 42 + 1337
+ with subtests.test(msg="Callback is called after chain finishes"):
+ await_redis_count(1, redis_key=redis_key)
+ redis_connection.delete(redis_key)
+
+ def test_chain_child_with_errback_replaced(self, manager, subtests):
+ if not manager.app.conf.result_backend.startswith("redis"):
+ raise pytest.skip("Requires redis result backend.")
+ redis_connection = get_redis_connection()
+
+ redis_key = str(uuid.uuid4())
+ errback = redis_count.si(redis_key=redis_key)
+
+ child_sig = fail_replaced.si()
+ child_sig.link_error(errback)
+ chain_sig = chain(child_sig, identity.si(42))
+
+ redis_connection.delete(redis_key)
+ with subtests.test(msg="Chain executes as expected"):
+ res_obj = chain_sig()
+ with pytest.raises(ExpectedException):
+ res_obj.get(timeout=TIMEOUT)
+ with subtests.test(msg="Errback is called after chain finishes"):
+ await_redis_count(1, redis_key=redis_key)
+ redis_connection.delete(redis_key)
+
+ def test_task_replaced_with_chain(self):
+ orig_sig = replace_with_chain.si(42)
+ res_obj = orig_sig.delay()
+ assert res_obj.get(timeout=TIMEOUT) == 42
+
+ def test_chain_child_replaced_with_chain_first(self):
+ orig_sig = chain(replace_with_chain.si(42), identity.s())
+ res_obj = orig_sig.delay()
+ assert res_obj.get(timeout=TIMEOUT) == 42
+
+ def test_chain_child_replaced_with_chain_middle(self):
+ orig_sig = chain(
+ identity.s(42), replace_with_chain.s(), identity.s()
+ )
+ res_obj = orig_sig.delay()
+ assert res_obj.get(timeout=TIMEOUT) == 42
+
+ def test_chain_child_replaced_with_chain_last(self):
+ orig_sig = chain(identity.s(42), replace_with_chain.s())
+ res_obj = orig_sig.delay()
+ assert res_obj.get(timeout=TIMEOUT) == 42
+
class test_result_set:
@@ -818,20 +974,18 @@ def test_callback_called_by_group(self, manager, subtests):
redis_connection = get_redis_connection()
callback_msg = str(uuid.uuid4()).encode()
- callback = redis_echo.si(callback_msg)
+ redis_key = str(uuid.uuid4())
+ callback = redis_echo.si(callback_msg, redis_key=redis_key)
group_sig = group(identity.si(42), identity.si(1337))
group_sig.link(callback)
- redis_connection.delete("redis-echo")
+ redis_connection.delete(redis_key)
with subtests.test(msg="Group result is returned"):
res = group_sig.delay()
assert res.get(timeout=TIMEOUT) == [42, 1337]
with subtests.test(msg="Callback is called after group is completed"):
- maybe_key_msg = redis_connection.blpop("redis-echo", TIMEOUT)
- if maybe_key_msg is None:
- raise TimeoutError("Callback was not called in time")
- _, msg = maybe_key_msg
- assert msg == callback_msg
+ await_redis_echo({callback_msg, }, redis_key=redis_key)
+ redis_connection.delete(redis_key)
def test_errback_called_by_group_fail_first(self, manager, subtests):
if not manager.app.conf.result_backend.startswith("redis"):
@@ -839,21 +993,19 @@ def test_errback_called_by_group_fail_first(self, manager, subtests):
redis_connection = get_redis_connection()
errback_msg = str(uuid.uuid4()).encode()
- errback = redis_echo.si(errback_msg)
+ redis_key = str(uuid.uuid4())
+ errback = redis_echo.si(errback_msg, redis_key=redis_key)
group_sig = group(fail.s(), identity.si(42))
group_sig.link_error(errback)
- redis_connection.delete("redis-echo")
+ redis_connection.delete(redis_key)
with subtests.test(msg="Error propagates from group"):
res = group_sig.delay()
with pytest.raises(ExpectedException):
res.get(timeout=TIMEOUT)
with subtests.test(msg="Errback is called after group task fails"):
- maybe_key_msg = redis_connection.blpop("redis-echo", TIMEOUT)
- if maybe_key_msg is None:
- raise TimeoutError("Errback was not called in time")
- _, msg = maybe_key_msg
- assert msg == errback_msg
+ await_redis_echo({errback_msg, }, redis_key=redis_key)
+ redis_connection.delete(redis_key)
def test_errback_called_by_group_fail_last(self, manager, subtests):
if not manager.app.conf.result_backend.startswith("redis"):
@@ -861,21 +1013,19 @@ def test_errback_called_by_group_fail_last(self, manager, subtests):
redis_connection = get_redis_connection()
errback_msg = str(uuid.uuid4()).encode()
- errback = redis_echo.si(errback_msg)
+ redis_key = str(uuid.uuid4())
+ errback = redis_echo.si(errback_msg, redis_key=redis_key)
group_sig = group(identity.si(42), fail.s())
group_sig.link_error(errback)
- redis_connection.delete("redis-echo")
+ redis_connection.delete(redis_key)
with subtests.test(msg="Error propagates from group"):
res = group_sig.delay()
with pytest.raises(ExpectedException):
res.get(timeout=TIMEOUT)
with subtests.test(msg="Errback is called after group task fails"):
- maybe_key_msg = redis_connection.blpop("redis-echo", TIMEOUT)
- if maybe_key_msg is None:
- raise TimeoutError("Errback was not called in time")
- _, msg = maybe_key_msg
- assert msg == errback_msg
+ await_redis_echo({errback_msg, }, redis_key=redis_key)
+ redis_connection.delete(redis_key)
def test_errback_called_by_group_fail_multiple(self, manager, subtests):
if not manager.app.conf.result_backend.startswith("redis"):
@@ -883,7 +1033,8 @@ def test_errback_called_by_group_fail_multiple(self, manager, subtests):
redis_connection = get_redis_connection()
expected_errback_count = 42
- errback = redis_count.si()
+ redis_key = str(uuid.uuid4())
+ errback = redis_count.si(redis_key=redis_key)
# Include a mix of passing and failing tasks
group_sig = group(
@@ -891,29 +1042,155 @@ def test_errback_called_by_group_fail_multiple(self, manager, subtests):
*(fail.s() for _ in range(expected_errback_count)),
)
group_sig.link_error(errback)
- redis_connection.delete("redis-count")
+
+ redis_connection.delete(redis_key)
with subtests.test(msg="Error propagates from group"):
res = group_sig.delay()
with pytest.raises(ExpectedException):
res.get(timeout=TIMEOUT)
with subtests.test(msg="Errback is called after group task fails"):
- check_interval = 0.1
- check_max = int(TIMEOUT * check_interval)
- for i in range(check_max + 1):
- maybe_count = redis_connection.get("redis-count")
- # It's either `None` or a base-10 integer
- count = int(maybe_count or b"0")
- if count == expected_errback_count:
- # escape and pass
- break
- elif i < check_max:
- # try again later
- sleep(check_interval)
- else:
- # fail
- assert count == expected_errback_count
- else:
- raise TimeoutError("Errbacks were not called in time")
+ await_redis_count(expected_errback_count, redis_key=redis_key)
+ redis_connection.delete(redis_key)
+
+ def test_group_children_with_callbacks(self, manager, subtests):
+ if not manager.app.conf.result_backend.startswith("redis"):
+ raise pytest.skip("Requires redis result backend.")
+ redis_connection = get_redis_connection()
+
+ redis_key = str(uuid.uuid4())
+ callback = redis_count.si(redis_key=redis_key)
+
+ child_task_count = 42
+ child_sig = identity.si(1337)
+ child_sig.link(callback)
+ group_sig = group(child_sig for _ in range(child_task_count))
+
+ redis_connection.delete(redis_key)
+ with subtests.test(msg="Chain executes as expected"):
+ res_obj = group_sig()
+ assert res_obj.get(timeout=TIMEOUT) == [1337] * child_task_count
+ with subtests.test(msg="Chain child task callbacks are called"):
+ await_redis_count(child_task_count, redis_key=redis_key)
+ redis_connection.delete(redis_key)
+
+ def test_group_children_with_errbacks(self, manager, subtests):
+ if not manager.app.conf.result_backend.startswith("redis"):
+ raise pytest.skip("Requires redis result backend.")
+ redis_connection = get_redis_connection()
+
+ redis_key = str(uuid.uuid4())
+ errback = redis_count.si(redis_key=redis_key)
+
+ child_task_count = 42
+ child_sig = fail.si()
+ child_sig.link_error(errback)
+ group_sig = group(child_sig for _ in range(child_task_count))
+
+ redis_connection.delete(redis_key)
+ with subtests.test(msg="Chain fails due to a child task dying"):
+ res_obj = group_sig()
+ with pytest.raises(ExpectedException):
+ res_obj.get(timeout=TIMEOUT)
+ with subtests.test(msg="Chain child task errbacks are called"):
+ await_redis_count(child_task_count, redis_key=redis_key)
+ redis_connection.delete(redis_key)
+
+ def test_group_with_callback_child_replaced(self, manager, subtests):
+ if not manager.app.conf.result_backend.startswith("redis"):
+ raise pytest.skip("Requires redis result backend.")
+ redis_connection = get_redis_connection()
+
+ redis_key = str(uuid.uuid4())
+ callback = redis_count.si(redis_key=redis_key)
+
+ group_sig = group(add_replaced.si(42, 1337), identity.si(31337))
+ group_sig.link(callback)
+
+ redis_connection.delete(redis_key)
+ with subtests.test(msg="Chain executes as expected"):
+ res_obj = group_sig()
+ assert res_obj.get(timeout=TIMEOUT) == [42 + 1337, 31337]
+ with subtests.test(msg="Callback is called after group finishes"):
+ await_redis_count(1, redis_key=redis_key)
+ redis_connection.delete(redis_key)
+
+ def test_group_with_errback_child_replaced(self, manager, subtests):
+ if not manager.app.conf.result_backend.startswith("redis"):
+ raise pytest.skip("Requires redis result backend.")
+ redis_connection = get_redis_connection()
+
+ redis_key = str(uuid.uuid4())
+ errback = redis_count.si(redis_key=redis_key)
+
+ group_sig = group(add_replaced.si(42, 1337), fail.s())
+ group_sig.link_error(errback)
+
+ redis_connection.delete(redis_key)
+ with subtests.test(msg="Chain executes as expected"):
+ res_obj = group_sig()
+ with pytest.raises(ExpectedException):
+ res_obj.get(timeout=TIMEOUT)
+ with subtests.test(msg="Errback is called after group finishes"):
+ await_redis_count(1, redis_key=redis_key)
+ redis_connection.delete(redis_key)
+
+ def test_group_child_with_callback_replaced(self, manager, subtests):
+ if not manager.app.conf.result_backend.startswith("redis"):
+ raise pytest.skip("Requires redis result backend.")
+ redis_connection = get_redis_connection()
+
+ redis_key = str(uuid.uuid4())
+ callback = redis_count.si(redis_key=redis_key)
+
+ child_sig = add_replaced.si(42, 1337)
+ child_sig.link(callback)
+ group_sig = group(child_sig, identity.si(31337))
+
+ redis_connection.delete(redis_key)
+ with subtests.test(msg="Chain executes as expected"):
+ res_obj = group_sig()
+ assert res_obj.get(timeout=TIMEOUT) == [42 + 1337, 31337]
+ with subtests.test(msg="Callback is called after group finishes"):
+ await_redis_count(1, redis_key=redis_key)
+ redis_connection.delete(redis_key)
+
+ def test_group_child_with_errback_replaced(self, manager, subtests):
+ if not manager.app.conf.result_backend.startswith("redis"):
+ raise pytest.skip("Requires redis result backend.")
+ redis_connection = get_redis_connection()
+
+ redis_key = str(uuid.uuid4())
+ errback = redis_count.si(redis_key=redis_key)
+
+ child_sig = fail_replaced.si()
+ child_sig.link_error(errback)
+ group_sig = group(child_sig, identity.si(42))
+
+ redis_connection.delete(redis_key)
+ with subtests.test(msg="Chain executes as expected"):
+ res_obj = group_sig()
+ with pytest.raises(ExpectedException):
+ res_obj.get(timeout=TIMEOUT)
+ with subtests.test(msg="Errback is called after group finishes"):
+ await_redis_count(1, redis_key=redis_key)
+ redis_connection.delete(redis_key)
+
+ def test_group_child_replaced_with_chain_first(self):
+ orig_sig = group(replace_with_chain.si(42), identity.s(1337))
+ res_obj = orig_sig.delay()
+ assert res_obj.get(timeout=TIMEOUT) == [42, 1337]
+
+ def test_group_child_replaced_with_chain_middle(self):
+ orig_sig = group(
+ identity.s(42), replace_with_chain.s(1337), identity.s(31337)
+ )
+ res_obj = orig_sig.delay()
+ assert res_obj.get(timeout=TIMEOUT) == [42, 1337, 31337]
+
+ def test_group_child_replaced_with_chain_last(self):
+ orig_sig = group(identity.s(42), replace_with_chain.s(1337))
+ res_obj = orig_sig.delay()
+ assert res_obj.get(timeout=TIMEOUT) == [42, 1337]
def assert_ids(r, expected_value, expected_root_id, expected_parent_id):
@@ -1537,40 +1814,34 @@ def test_errback_called_by_chord_from_simple(self, manager, subtests):
redis_connection = get_redis_connection()
errback_msg = str(uuid.uuid4()).encode()
- errback = redis_echo.si(errback_msg)
+ redis_key = str(uuid.uuid4())
+ errback = redis_echo.si(errback_msg, redis_key=redis_key)
child_sig = fail.s()
chord_sig = chord((child_sig, ), identity.s())
chord_sig.link_error(errback)
+ redis_connection.delete(redis_key)
with subtests.test(msg="Error propagates from simple header task"):
- redis_connection.delete("redis-echo")
res = chord_sig.delay()
with pytest.raises(ExpectedException):
res.get(timeout=TIMEOUT)
with subtests.test(
msg="Errback is called after simple header task fails"
):
- maybe_key_msg = redis_connection.blpop("redis-echo", TIMEOUT)
- if maybe_key_msg is None:
- raise TimeoutError("Errback was not called in time")
- _, msg = maybe_key_msg
- assert msg == errback_msg
+ await_redis_echo({errback_msg, }, redis_key=redis_key)
chord_sig = chord((identity.si(42), ), child_sig)
chord_sig.link_error(errback)
+ redis_connection.delete(redis_key)
with subtests.test(msg="Error propagates from simple body task"):
- redis_connection.delete("redis-echo")
res = chord_sig.delay()
with pytest.raises(ExpectedException):
res.get(timeout=TIMEOUT)
with subtests.test(
msg="Errback is called after simple body task fails"
):
- maybe_key_msg = redis_connection.blpop("redis-echo", TIMEOUT)
- if maybe_key_msg is None:
- raise TimeoutError("Errback was not called in time")
- _, msg = maybe_key_msg
- assert msg == errback_msg
+ await_redis_echo({errback_msg, }, redis_key=redis_key)
+ redis_connection.delete(redis_key)
def test_error_propagates_to_chord_from_chain(self, manager, subtests):
try:
@@ -1602,44 +1873,38 @@ def test_errback_called_by_chord_from_chain(self, manager, subtests):
redis_connection = get_redis_connection()
errback_msg = str(uuid.uuid4()).encode()
- errback = redis_echo.si(errback_msg)
+ redis_key = str(uuid.uuid4())
+ errback = redis_echo.si(errback_msg, redis_key=redis_key)
child_sig = chain(identity.si(42), fail.s(), identity.si(42))
chord_sig = chord((child_sig, ), identity.s())
chord_sig.link_error(errback)
+ redis_connection.delete(redis_key)
with subtests.test(
msg="Error propagates from header chain which fails before the end"
):
- redis_connection.delete("redis-echo")
res = chord_sig.delay()
with pytest.raises(ExpectedException):
res.get(timeout=TIMEOUT)
with subtests.test(
msg="Errback is called after header chain which fails before the end"
):
- maybe_key_msg = redis_connection.blpop("redis-echo", TIMEOUT)
- if maybe_key_msg is None:
- raise TimeoutError("Errback was not called in time")
- _, msg = maybe_key_msg
- assert msg == errback_msg
+ await_redis_echo({errback_msg, }, redis_key=redis_key)
chord_sig = chord((identity.si(42), ), child_sig)
chord_sig.link_error(errback)
+ redis_connection.delete(redis_key)
with subtests.test(
msg="Error propagates from body chain which fails before the end"
):
- redis_connection.delete("redis-echo")
res = chord_sig.delay()
with pytest.raises(ExpectedException):
res.get(timeout=TIMEOUT)
with subtests.test(
msg="Errback is called after body chain which fails before the end"
):
- maybe_key_msg = redis_connection.blpop("redis-echo", TIMEOUT)
- if maybe_key_msg is None:
- raise TimeoutError("Errback was not called in time")
- _, msg = maybe_key_msg
- assert msg == errback_msg
+ await_redis_echo({errback_msg, }, redis_key=redis_key)
+ redis_connection.delete(redis_key)
def test_error_propagates_to_chord_from_chain_tail(self, manager, subtests):
try:
@@ -1671,44 +1936,38 @@ def test_errback_called_by_chord_from_chain_tail(self, manager, subtests):
redis_connection = get_redis_connection()
errback_msg = str(uuid.uuid4()).encode()
- errback = redis_echo.si(errback_msg)
+ redis_key = str(uuid.uuid4())
+ errback = redis_echo.si(errback_msg, redis_key=redis_key)
child_sig = chain(identity.si(42), fail.s())
chord_sig = chord((child_sig, ), identity.s())
chord_sig.link_error(errback)
+ redis_connection.delete(redis_key)
with subtests.test(
msg="Error propagates from header chain which fails at the end"
):
- redis_connection.delete("redis-echo")
res = chord_sig.delay()
with pytest.raises(ExpectedException):
res.get(timeout=TIMEOUT)
with subtests.test(
msg="Errback is called after header chain which fails at the end"
):
- maybe_key_msg = redis_connection.blpop("redis-echo", TIMEOUT)
- if maybe_key_msg is None:
- raise TimeoutError("Errback was not called in time")
- _, msg = maybe_key_msg
- assert msg == errback_msg
+ await_redis_echo({errback_msg, }, redis_key=redis_key)
chord_sig = chord((identity.si(42), ), child_sig)
chord_sig.link_error(errback)
+ redis_connection.delete(redis_key)
with subtests.test(
msg="Error propagates from body chain which fails at the end"
):
- redis_connection.delete("redis-echo")
res = chord_sig.delay()
with pytest.raises(ExpectedException):
res.get(timeout=TIMEOUT)
with subtests.test(
msg="Errback is called after body chain which fails at the end"
):
- maybe_key_msg = redis_connection.blpop("redis-echo", TIMEOUT)
- if maybe_key_msg is None:
- raise TimeoutError("Errback was not called in time")
- _, msg = maybe_key_msg
- assert msg == errback_msg
+ await_redis_echo({errback_msg, }, redis_key=redis_key)
+ redis_connection.delete(redis_key)
def test_error_propagates_to_chord_from_group(self, manager, subtests):
try:
@@ -1736,36 +1995,30 @@ def test_errback_called_by_chord_from_group(self, manager, subtests):
redis_connection = get_redis_connection()
errback_msg = str(uuid.uuid4()).encode()
- errback = redis_echo.si(errback_msg)
+ redis_key = str(uuid.uuid4())
+ errback = redis_echo.si(errback_msg, redis_key=redis_key)
child_sig = group(identity.si(42), fail.s())
chord_sig = chord((child_sig, ), identity.s())
chord_sig.link_error(errback)
+ redis_connection.delete(redis_key)
with subtests.test(msg="Error propagates from header group"):
- redis_connection.delete("redis-echo")
res = chord_sig.delay()
with pytest.raises(ExpectedException):
res.get(timeout=TIMEOUT)
with subtests.test(msg="Errback is called after header group fails"):
- maybe_key_msg = redis_connection.blpop("redis-echo", TIMEOUT)
- if maybe_key_msg is None:
- raise TimeoutError("Errback was not called in time")
- _, msg = maybe_key_msg
- assert msg == errback_msg
+ await_redis_echo({errback_msg, }, redis_key=redis_key)
chord_sig = chord((identity.si(42), ), child_sig)
chord_sig.link_error(errback)
+ redis_connection.delete(redis_key)
with subtests.test(msg="Error propagates from body group"):
- redis_connection.delete("redis-echo")
res = chord_sig.delay()
with pytest.raises(ExpectedException):
res.get(timeout=TIMEOUT)
with subtests.test(msg="Errback is called after body group fails"):
- maybe_key_msg = redis_connection.blpop("redis-echo", TIMEOUT)
- if maybe_key_msg is None:
- raise TimeoutError("Errback was not called in time")
- _, msg = maybe_key_msg
- assert msg == errback_msg
+ await_redis_echo({errback_msg, }, redis_key=redis_key)
+ redis_connection.delete(redis_key)
def test_errback_called_by_chord_from_group_fail_multiple(
self, manager, subtests
@@ -1775,7 +2028,8 @@ def test_errback_called_by_chord_from_group_fail_multiple(
redis_connection = get_redis_connection()
fail_task_count = 42
- errback = redis_count.si()
+ redis_key = str(uuid.uuid4())
+ errback = redis_count.si(redis_key=redis_key)
# Include a mix of passing and failing tasks
child_sig = group(
*(identity.si(42) for _ in range(24)), # arbitrary task count
@@ -1784,61 +2038,133 @@ def test_errback_called_by_chord_from_group_fail_multiple(
chord_sig = chord((child_sig, ), identity.s())
chord_sig.link_error(errback)
+ redis_connection.delete(redis_key)
with subtests.test(msg="Error propagates from header group"):
- redis_connection.delete("redis-count")
+ redis_connection.delete(redis_key)
res = chord_sig.delay()
with pytest.raises(ExpectedException):
res.get(timeout=TIMEOUT)
with subtests.test(msg="Errback is called after header group fails"):
# NOTE: Here we only expect the errback to be called once since it
# is attached to the chord body which is a single task!
- expected_errback_count = 1
- check_interval = 0.1
- check_max = int(TIMEOUT * check_interval)
- for i in range(check_max + 1):
- maybe_count = redis_connection.get("redis-count")
- # It's either `None` or a base-10 integer
- count = int(maybe_count or b"0")
- if count == expected_errback_count:
- # escape and pass
- break
- elif i < check_max:
- # try again later
- sleep(check_interval)
- else:
- # fail
- assert count == expected_errback_count
- else:
- raise TimeoutError("Errbacks were not called in time")
+ await_redis_count(1, redis_key=redis_key)
chord_sig = chord((identity.si(42), ), child_sig)
chord_sig.link_error(errback)
+ redis_connection.delete(redis_key)
with subtests.test(msg="Error propagates from body group"):
- redis_connection.delete("redis-count")
res = chord_sig.delay()
with pytest.raises(ExpectedException):
res.get(timeout=TIMEOUT)
with subtests.test(msg="Errback is called after body group fails"):
# NOTE: Here we expect the errback to be called once per failing
# task in the chord body since it is a group
- expected_errback_count = fail_task_count
- check_interval = 0.1
- check_max = int(TIMEOUT * check_interval)
- for i in range(check_max + 1):
- maybe_count = redis_connection.get("redis-count")
- # It's either `None` or a base-10 integer
- count = int(maybe_count or b"0")
- if count == expected_errback_count:
- # escape and pass
- break
- elif i < check_max:
- # try again later
- sleep(check_interval)
- else:
- # fail
- assert count == expected_errback_count
- else:
- raise TimeoutError("Errbacks were not called in time")
+ await_redis_count(fail_task_count, redis_key=redis_key)
+ redis_connection.delete(redis_key)
+
+ def test_chord_header_task_replaced_with_chain(self, manager):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ orig_sig = chord(
+ replace_with_chain.si(42),
+ identity.s(),
+ )
+ res_obj = orig_sig.delay()
+ assert res_obj.get(timeout=TIMEOUT) == [42]
+
+ def test_chord_header_child_replaced_with_chain_first(self, manager):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ orig_sig = chord(
+ (replace_with_chain.si(42), identity.s(1337), ),
+ identity.s(),
+ )
+ res_obj = orig_sig.delay()
+ assert res_obj.get(timeout=TIMEOUT) == [42, 1337]
+
+ def test_chord_header_child_replaced_with_chain_middle(self, manager):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ orig_sig = chord(
+ (identity.s(42), replace_with_chain.s(1337), identity.s(31337), ),
+ identity.s(),
+ )
+ res_obj = orig_sig.delay()
+ assert res_obj.get(timeout=TIMEOUT) == [42, 1337, 31337]
+
+ def test_chord_header_child_replaced_with_chain_last(self, manager):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ orig_sig = chord(
+ (identity.s(42), replace_with_chain.s(1337), ),
+ identity.s(),
+ )
+ res_obj = orig_sig.delay()
+ assert res_obj.get(timeout=TIMEOUT) == [42, 1337]
+
+ def test_chord_body_task_replaced_with_chain(self, manager):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ orig_sig = chord(
+ identity.s(42),
+ replace_with_chain.s(),
+ )
+ res_obj = orig_sig.delay()
+ assert res_obj.get(timeout=TIMEOUT) == [42]
+
+ def test_chord_body_chain_child_replaced_with_chain_first(self, manager):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ orig_sig = chord(
+ identity.s(42),
+ chain(replace_with_chain.s(), identity.s(), ),
+ )
+ res_obj = orig_sig.delay()
+ assert res_obj.get(timeout=TIMEOUT) == [42]
+
+ def test_chord_body_chain_child_replaced_with_chain_middle(self, manager):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ orig_sig = chord(
+ identity.s(42),
+ chain(identity.s(), replace_with_chain.s(), identity.s(), ),
+ )
+ res_obj = orig_sig.delay()
+ assert res_obj.get(timeout=TIMEOUT) == [42]
+
+ def test_chord_body_chain_child_replaced_with_chain_last(self, manager):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ orig_sig = chord(
+ identity.s(42),
+ chain(identity.s(), replace_with_chain.s(), ),
+ )
+ res_obj = orig_sig.delay()
+ assert res_obj.get(timeout=TIMEOUT) == [42]
class test_signature_serialization:
diff --git a/t/unit/tasks/test_canvas.py b/t/unit/tasks/test_canvas.py
--- a/t/unit/tasks/test_canvas.py
+++ b/t/unit/tasks/test_canvas.py
@@ -854,7 +854,7 @@ def test_apply_contains_chords_containing_empty_chain(self):
# This is an invalid setup because we can't complete a chord header if
# there are no actual tasks which will run in it. However, the current
# behaviour of an `IndexError` isn't particularly helpful to a user.
- res_obj = group_sig.apply_async()
+ group_sig.apply_async()
def test_apply_contains_chords_containing_chain_with_empty_tail(self):
ggchild_count = 42
diff --git a/t/unit/tasks/test_tasks.py b/t/unit/tasks/test_tasks.py
--- a/t/unit/tasks/test_tasks.py
+++ b/t/unit/tasks/test_tasks.py
@@ -1,7 +1,7 @@
import socket
import tempfile
from datetime import datetime, timedelta
-from unittest.mock import ANY, MagicMock, Mock, patch
+from unittest.mock import ANY, MagicMock, Mock, call, patch, sentinel
import pytest
from case import ContextMock
@@ -992,10 +992,12 @@ def test_send_event(self):
retry=True, retry_policy=self.app.conf.task_publish_retry_policy)
def test_replace(self):
- sig1 = Mock(name='sig1')
+ sig1 = MagicMock(name='sig1')
sig1.options = {}
+ self.mytask.request.id = sentinel.request_id
with pytest.raises(Ignore):
self.mytask.replace(sig1)
+ sig1.freeze.assert_called_once_with(self.mytask.request.id)
def test_replace_with_chord(self):
sig1 = Mock(name='sig1')
@@ -1003,7 +1005,6 @@ def test_replace_with_chord(self):
with pytest.raises(ImproperlyConfigured):
self.mytask.replace(sig1)
- @pytest.mark.usefixtures('depends_on_current_app')
def test_replace_callback(self):
c = group([self.mytask.s()], app=self.app)
c.freeze = Mock(name='freeze')
@@ -1011,29 +1012,23 @@ def test_replace_callback(self):
self.mytask.request.id = 'id'
self.mytask.request.group = 'group'
self.mytask.request.root_id = 'root_id'
- self.mytask.request.callbacks = 'callbacks'
- self.mytask.request.errbacks = 'errbacks'
-
- class JsonMagicMock(MagicMock):
- parent = None
-
- def __json__(self):
- return 'whatever'
-
- def reprcall(self, *args, **kwargs):
- return 'whatever2'
-
- mocked_signature = JsonMagicMock(name='s')
- accumulate_mock = JsonMagicMock(name='accumulate', s=mocked_signature)
- self.mytask.app.tasks['celery.accumulate'] = accumulate_mock
-
- try:
- self.mytask.replace(c)
- except Ignore:
- mocked_signature.return_value.set.assert_called_with(
- link='callbacks',
- link_error='errbacks',
- )
+ self.mytask.request.callbacks = callbacks = 'callbacks'
+ self.mytask.request.errbacks = errbacks = 'errbacks'
+
+ # Replacement groups get uplifted to chords so that we can accumulate
+ # the results and link call/errbacks - patch the appropriate `chord`
+ # methods so we can validate this behaviour
+ with patch(
+ "celery.canvas.chord.link"
+ ) as mock_chord_link, patch(
+ "celery.canvas.chord.link_error"
+ ) as mock_chord_link_error:
+ with pytest.raises(Ignore):
+ self.mytask.replace(c)
+ # Confirm that the call/errbacks on the original signature are linked
+ # to the replacement signature as expected
+ mock_chord_link.assert_called_once_with(callbacks)
+ mock_chord_link_error.assert_called_once_with(errbacks)
def test_replace_group(self):
c = group([self.mytask.s()], app=self.app)
| Errback inherited from an encapsulating chain is dropped when replacing tasks
<!--
Please fill this template entirely and do not erase parts of it.
We reserve the right to close without a response
bug reports which are incomplete.
-->
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [ ] This has already been asked to the [discussion group](https://groups.google.com/forum/#!forum/celery-users) first.
- [x] I have read the relevant section in the
[contribution guide](http://docs.celeryproject.org/en/latest/contributing.html#other-bugs)
on reporting bugs.
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the bug was already fixed in the master branch.
- [x] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
## Mandatory Debugging Information
- [ ] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
- [ ] I have verified that the issue exists against the `master` branch of Celery.
- [ ] I have included the contents of ``pip freeze`` in the issue.
- [ ] I have included all the versions of all the external dependencies required
to reproduce this bug.
## Optional Debugging Information
<!--
Try some of the below if you think they are relevant.
It will help us figure out the scope of the bug and how many users it affects.
-->
- [ ] I have tried reproducing the issue on more than one Python version
and/or implementation.
- [ ] I have tried reproducing the issue on more than one message broker and/or
result backend.
- [ ] I have tried reproducing the issue on more than one version of the message
broker and/or result backend.
- [ ] I have tried reproducing the issue on more than one operating system.
- [ ] I have tried reproducing the issue on more than one workers pool.
- [ ] I have tried reproducing the issue with autoscaling, retries,
ETA/Countdown & rate limits disabled.
- [ ] I have tried reproducing the issue after downgrading
and/or upgrading Celery and its dependencies.
## Related Issues and Possible Duplicates
<!--
Please make sure to search and mention any related issues
or possible duplicates to this issue as requested by the checklist above.
This may or may not include issues in other repositories that the Celery project
maintains or other repositories that are dependencies of Celery.
If you don't know how to mention issues, please refer to Github's documentation
on the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests
-->
#### Related Issues
- None
#### Possible Duplicates
- None
## Environment & Settings
<!-- Include the contents of celery --version below -->
**Celery version**:
<!-- Include the output of celery -A proj report below -->
<details>
<summary><b><code>celery report</code> Output:</b></summary>
<p>
```
```
</p>
</details>
# Steps to Reproduce
## Required Dependencies
<!-- Please fill the required dependencies to reproduce this issue -->
* **Minimal Python Version**: N/A or Unknown
* **Minimal Celery Version**: N/A or Unknown
* **Minimal Kombu Version**: N/A or Unknown
* **Minimal Broker Version**: N/A or Unknown
* **Minimal Result Backend Version**: N/A or Unknown
* **Minimal OS and/or Kernel Version**: N/A or Unknown
* **Minimal Broker Client Version**: N/A or Unknown
* **Minimal Result Backend Client Version**: N/A or Unknown
### Python Packages
<!-- Please fill the contents of pip freeze below -->
<details>
<summary><b><code>pip freeze</code> Output:</b></summary>
<p>
```
```
</p>
</details>
### Other Dependencies
<!--
Please provide system dependencies, configuration files
and other dependency information if applicable
-->
<details>
<p>
N/A
</p>
</details>
## Minimally Reproducible Test Case
<!--
Please provide a reproducible test case.
Refer to the Reporting Bugs section in our contribution guide.
We prefer submitting test cases in the form of a PR to our integration test suite.
If you can provide one, please mention the PR number below.
If not, please attach the most minimal code example required to reproduce the issue below.
If the test case is too large, please include a link to a gist or a repository below.
-->
<details>
<p>
Swap the commented line setting `c` for expected behaviour.
```python
import celery
app = celery.Celery(broker="redis://", backend="redis://")
@app.task
def nop(*_):
pass
@app.task
def die(*_):
raise RuntimeError
@app.task(bind=True)
def replace(self, with_):
with_ = celery.Signature.from_dict(with_)
raise self.replace(with_)
@app.task
def cb(*args):
print("CALLBACK", *args)
#c = celery.chain(nop.s(), die.s())
c = celery.chain(nop.s(), replace.si(die.s()))
c.link_error(cb.s())
c.apply_async()
```
</p>
</details>
# Expected Behavior
`cb` should be called as a new-style errback because it accepts starargs
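For context, a sketch of my own (not part of the original report, hedged against the Celery docs) of the two errback conventions at play; `app` is the application from the repro above:

```python
@app.task
def old_style_cb(task_id):
    # Old-style errback: accepts a single argument and is invoked
    # with just the id of the failed task.
    print("FAILED", task_id)

@app.task
def new_style_cb(request, exc, traceback):
    # New-style errback: a signature that can take more arguments
    # (including a starargs one like `cb` above) receives the full
    # failure context instead.
    print("FAILED", request.id, exc)
```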
# Actual Behavior
`cb` is not called
| @auvipy "One test is marked with an `xfail` since errbacks of encapsulating
chains are not currently called as expected due to some ambiguity in
when an errback of a replaced task should be dropped or not".
This does not appear to be fixed.
I've not been able to do anything about this one. AFAIK it is still relevant and should be looked into if someone has the time. The test case which is marked with an xfail should still be a good canary for whether this misbehaviour remains and whether a change fixes it. Unassigning myself and pinging @thedrow and @auvipy for distribution. I'm going to put this in the 5.1 milestone since it's an edge-case bug IIRC, and it would be nice to fix it to avoid failing to close out canvas constructs when they error.
I've moved this to Future for now. We'll schedule this for a version soon but I want to handle what is immediately fixable first. | 2021-05-14T05:24:16 |
celery/celery | 6,774 | celery__celery-6774 | [
"6771"
] | 97457bc66116889c796d37965075474424bff3f7 | diff --git a/celery/events/snapshot.py b/celery/events/snapshot.py
--- a/celery/events/snapshot.py
+++ b/celery/events/snapshot.py
@@ -84,7 +84,8 @@ def __exit__(self, *exc_info):
def evcam(camera, freq=1.0, maxrate=None, loglevel=0,
- logfile=None, pidfile=None, timer=None, app=None):
+ logfile=None, pidfile=None, timer=None, app=None,
+ **kwargs):
"""Start snapshot recorder."""
app = app_or_default(app)
| Events command always fails when camera is specified
<!--
Please fill this template entirely and do not erase parts of it.
We reserve the right to close without a response
bug reports which are incomplete.
-->
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [ ] This has already been asked to the [discussion group](https://groups.google.com/forum/#!forum/celery-users) first.
- [x] I have read the relevant section in the
[contribution guide](http://docs.celeryproject.org/en/latest/contributing.html#other-bugs)
on reporting bugs.
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the bug was already fixed in the master branch.
- [x] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
## Mandatory Debugging Information
- [x] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [x] I have included the contents of ``pip freeze`` in the issue.
- [x] I have included all the versions of all the external dependencies required
to reproduce this bug.
## Optional Debugging Information
<!--
Try some of the below if you think they are relevant.
It will help us figure out the scope of the bug and how many users it affects.
-->
- [ ] I have tried reproducing the issue on more than one Python version
and/or implementation.
- [ ] I have tried reproducing the issue on more than one message broker and/or
result backend.
- [ ] I have tried reproducing the issue on more than one version of the message
broker and/or result backend.
- [ ] I have tried reproducing the issue on more than one operating system.
- [ ] I have tried reproducing the issue on more than one workers pool.
- [ ] I have tried reproducing the issue with autoscaling, retries,
ETA/Countdown & rate limits disabled.
- [ ] I have tried reproducing the issue after downgrading
and/or upgrading Celery and its dependencies.
## Related Issues and Possible Duplicates
<!--
Please make sure to search and mention any related issues
or possible duplicates to this issue as requested by the checklist above.
This may or may not include issues in other repositories that the Celery project
maintains or other repositories that are dependencies of Celery.
If you don't know how to mention issues, please refer to Github's documentation
on the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests
-->
#### Related Issues
- None
#### Possible Duplicates
- None
## Environment & Settings
<!-- Include the contents of celery --version below -->
**Celery version**: 5.05 and master (2411504f4164ac9acfa20007038d37591c6f57e5)
<!-- Include the output of celery -A proj report below -->
<details>
<summary><b><code>celery report</code> Output:</b></summary>
<p>
```
software -> celery:5.1.0b2 (singularity) kombu:5.1.0b1 py:3.9.5
billiard:3.6.4.0 py-amqp:5.0.6
platform -> system:Darwin arch:64bit
kernel version:20.4.0 imp:CPython
loader -> celery.loaders.app.AppLoader
settings -> transport:amqp results:disabled
deprecated_settings: None
```
</p>
</details>
# Steps to Reproduce
## Required Dependencies
<!-- Please fill the required dependencies to reproduce this issue -->
* **Minimal Python Version**: Unknown, tested on 3.9
* **Minimal Celery Version**: Unknown, tested on 5.05 and master
* **Minimal Kombu Version**: N/A
* **Minimal Broker Version**: N/A
* **Minimal Result Backend Version**: N/A
* **Minimal OS and/or Kernel Version**: N/A
* **Minimal Broker Client Version**: N/A
* **Minimal Result Backend Client Version**: N/A
### Python Packages
<!-- Please fill the contents of pip freeze below -->
<details>
<summary><b><code>pip freeze</code> Output:</b></summary>
<p>
```
amqp==5.0.6
billiard==3.6.4.0
celery @ git+https://github.com/celery/celery.git@2411504f4164ac9acfa20007038d37591c6f57e5
click==7.1.2
click-didyoumean==0.0.3
click-plugins==1.1.1
click-repl==0.1.6
kombu==5.1.0b1
prompt-toolkit==3.0.18
pytz==2021.1
six==1.16.0
vine==5.0.0
wcwidth==0.2.5
```
</p>
</details>
### Other Dependencies
<!--
Please provide system dependencies, configuration files
and other dependency information if applicable
-->
<details>
<p>
N/A
</p>
</details>
## Minimally Reproducible Test Case
<!--
Please provide a reproducible test case.
Refer to the Reporting Bugs section in our contribution guide.
We prefer submitting test cases in the form of a PR to our integration test suite.
If you can provide one, please mention the PR number below.
If not, please attach the most minimal code example required to reproduce the issue below.
If the test case is too large, please include a link to a gist or a repository below.
-->
<details>
<p>
`app.py`:
```python
import celery
app = celery.Celery()
```
`camera.py`:
```python
from celery.events.snapshot import Polaroid
class Camera(Polaroid):
def on_shutter(self, _):
print("Hello!")
```
</p>
</details>
# Expected Behavior
<!-- Describe in detail what you expect to happen -->
The following command should (attempt to) start the event camera:
```
$ celery -A app events -c camera
...
ModuleNotFoundError: No module named 'camera'
```
(The bug is independent of whether the module `camera` exists.)
# Actual Behavior
<!--
Describe in detail what actually happened.
Please include a backtrace and surround it with triple backticks (```).
In addition, include the Celery daemon logs, the broker logs,
the result backend logs and system logs below if they will help us debug
the issue.
-->
A backtrace is produced:
```
Traceback (most recent call last):
File "/Users/user/Desktop/tmp/venv/bin/celery", line 8, in <module>
sys.exit(main())
File "/Users/user/Desktop/tmp/venv/lib/python3.9/site-packages/celery/__main__.py", line 15, in main
sys.exit(_main())
File "/Users/user/Desktop/tmp/venv/lib/python3.9/site-packages/celery/bin/celery.py", line 213, in main
return celery(auto_envvar_prefix="CELERY")
File "/Users/user/Desktop/tmp/venv/lib/python3.9/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/Users/user/Desktop/tmp/venv/lib/python3.9/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/Users/user/Desktop/tmp/venv/lib/python3.9/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/Users/user/Desktop/tmp/venv/lib/python3.9/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/Users/user/Desktop/tmp/venv/lib/python3.9/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/Users/user/Desktop/tmp/venv/lib/python3.9/site-packages/click/decorators.py", line 21, in new_func
return f(get_current_context(), *args, **kwargs)
File "/Users/user/Desktop/tmp/venv/lib/python3.9/site-packages/celery/bin/base.py", line 132, in caller
return f(ctx, *args, **kwargs)
File "/Users/user/Desktop/tmp/venv/lib/python3.9/site-packages/celery/bin/events.py", line 90, in events
return _run_evcam(camera, app=app, freq=frequency, maxrate=maxrate,
File "/Users/user/Desktop/tmp/venv/lib/python3.9/site-packages/celery/bin/events.py", line 37, in _run_evcam
return cam()
TypeError: evcam() got an unexpected keyword argument 'executable'
```
| Hey @alexpearce :wave:,
Thank you for opening an issue. We will get back to you as soon as we can.
Also, check out our [Open Collective](https://opencollective.com/celery) and consider backing us - every little helps!
We also offer priority support for our sponsors.
If you require immediate assistance please consider sponsoring us.
The problem is that the [`events` command](https://github.com/celery/celery/blob/2411504f4164ac9acfa20007038d37591c6f57e5/celery/bin/events.py#L83) [derives its arguments](https://github.com/celery/celery/blob/2411504f4164ac9acfa20007038d37591c6f57e5/celery/bin/events.py#L51) from [`CeleryDaemonCommand`](https://github.com/celery/celery/blob/8ebcce1523d79039f23da748f00bec465951de2a/celery/bin/base.py#L171), and that includes an `--executable` flag. This gets propagated through `**kwargs` until it reaches [`evcam`](https://github.com/celery/celery/blob/2411504f4164ac9acfa20007038d37591c6f57e5/celery/events/snapshot.py#L86) which does not have an `executable` argument.
This patch allows the execution to continue:
```diff
diff --git a/celery/events/snapshot.py b/celery/events/snapshot.py
index 813b8db5c..d611cb280 100644
--- a/celery/events/snapshot.py
+++ b/celery/events/snapshot.py
@@ -84,7 +84,8 @@ class Polaroid:
def evcam(camera, freq=1.0, maxrate=None, loglevel=0,
- logfile=None, pidfile=None, timer=None, app=None):
+ logfile=None, pidfile=None, timer=None, app=None,
+ executable=None):
"""Start snapshot recorder."""
app = app_or_default(app)
```
I would open a PR straight away but I'm not sure if the value of `executable` should actually be used by `evcam` for something.
Just make the method accept `**kwargs`. I think that should work.
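A minimal sketch of that suggestion, mirroring the current signature of `evcam` in `celery/events/snapshot.py` — the extra keyword arguments would be accepted and simply ignored:

```python
def evcam(camera, freq=1.0, maxrate=None, loglevel=0,
          logfile=None, pidfile=None, timer=None, app=None,
          **kwargs):
    """Start snapshot recorder."""
    # `**kwargs` swallows pass-through CLI options (such as
    # `executable`) that evcam itself has no use for.
    ...
```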
Sounds reasonable, as long as you don't mind an unused argument.
Shall I open a PR with that?
Yeah. Go ahead. | 2021-05-19T13:46:01 |
|
celery/celery | 6,791 | celery__celery-6791 | [
"6790"
] | c93371d5c7899720d3d17fda1a265c229285ffc0 | diff --git a/celery/utils/log.py b/celery/utils/log.py
--- a/celery/utils/log.py
+++ b/celery/utils/log.py
@@ -214,19 +214,24 @@ def handleError(self, record):
return [wrap_handler(h) for h in self.logger.handlers]
def write(self, data):
+ # type: (AnyStr) -> int
"""Write message to logging object."""
if _in_sighandler:
- return print(safe_str(data), file=sys.__stderr__)
+ safe_data = safe_str(data)
+ print(safe_data, file=sys.__stderr__)
+ return len(safe_data)
if getattr(self._thread, 'recurse_protection', False):
# Logger is logging back to this file, so stop recursing.
- return
- data = data.strip()
+ return 0
if data and not self.closed:
self._thread.recurse_protection = True
try:
- self.logger.log(self.loglevel, safe_str(data))
+ safe_data = safe_str(data)
+ self.logger.log(self.loglevel, safe_data)
+ return len(safe_data)
finally:
self._thread.recurse_protection = False
+ return 0
def writelines(self, sequence):
# type: (Sequence[str]) -> None
| diff --git a/t/unit/app/test_log.py b/t/unit/app/test_log.py
--- a/t/unit/app/test_log.py
+++ b/t/unit/app/test_log.py
@@ -268,8 +268,9 @@ def test_logging_proxy(self):
p.write('foo')
assert 'foo' not in sio.getvalue()
p.closed = False
- p.write('foo')
- assert 'foo' in sio.getvalue()
+ write_res = p.write('foo ')
+ assert 'foo ' in sio.getvalue()
+ assert write_res == 4
lines = ['baz', 'xuzzy']
p.writelines(lines)
for line in lines:
@@ -290,7 +291,7 @@ def test_logging_proxy_recurse_protection(self):
p = LoggingProxy(logger, loglevel=logging.ERROR)
p._thread.recurse_protection = True
try:
- assert p.write('FOOFO') is None
+ assert p.write('FOOFO') == 0
finally:
p._thread.recurse_protection = False
| Celery breaks libs using `sys.stdout.write()`
`LoggingProxy.write()` currently always returns `None` (because in Python 2 that was the correct return type!).
From the [Python docs](https://docs.python.org/release/3.9.2/library/io.html#io.TextIOBase.write):
>`write(s)`
> Write the string s to the stream and return the number of characters written.
Therefore methods that rely on `sys.stdout.write()` expect `LoggingProxy` (which `celery` replaces `sys.stdout` with) to conform to the same `IO.write()` return type.
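For illustration, here is a common caller pattern (a hypothetical snippet, not taken from any specific library) that breaks when `write()` returns `None`:
```python
# A writer loop of the kind chunked-output libraries use; it relies on
# write() returning the number of characters actually written.
def write_all(stream, data: str) -> None:
    written = 0
    while written < len(data):
        # TypeError ("unsupported operand type(s) for +=") if the
        # stream's write() returns None instead of an int
        written += stream.write(data[written:])
```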
<!--
Please fill this template entirely and do not erase parts of it.
We reserve the right to close without a response
bug reports which are incomplete.
-->
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [ ] This has already been asked to the [discussion group](https://groups.google.com/forum/#!forum/celery-users) first.
- [x] I have read the relevant section in the
[contribution guide](http://docs.celeryproject.org/en/latest/contributing.html#other-bugs)
on reporting bugs.
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the bug was already fixed in the master branch.
- [x] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
## Mandatory Debugging Information
- [ ] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [ ] I have included the contents of ``pip freeze`` in the issue.
- [ ] I have included all the versions of all the external dependencies required
to reproduce this bug.
## Optional Debugging Information
<!--
Try some of the below if you think they are relevant.
It will help us figure out the scope of the bug and how many users it affects.
-->
- [x] I have tried reproducing the issue on more than one Python version
and/or implementation.
- [ ] I have tried reproducing the issue on more than one message broker and/or
result backend.
- [ ] I have tried reproducing the issue on more than one version of the message
broker and/or result backend.
- [x] I have tried reproducing the issue on more than one operating system.
- [ ] I have tried reproducing the issue on more than one workers pool.
- [ ] I have tried reproducing the issue with autoscaling, retries,
ETA/Countdown & rate limits disabled.
- [x] I have tried reproducing the issue after downgrading
and/or upgrading Celery and its dependencies.
## Related Issues and Possible Duplicates
<!--
Please make sure to search and mention any related issues
or possible duplicates to this issue as requested by the checklist above.
This may or may not include issues in other repositories that the Celery project
maintains or other repositories that are dependencies of Celery.
If you don't know how to mention issues, please refer to Github's documentation
on the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests
-->
#### Related Issues
- #928
#### Possible Duplicates
- None
## Environment & Settings
<!-- Include the contents of celery --version below -->
**Celery version**:
<!-- Include the output of celery -A proj report below -->
<details>
<summary><b><code>celery report</code> Output:</b></summary>
<p>
```
```
</p>
</details>
# Steps to Reproduce
## Required Dependencies
<!-- Please fill the required dependencies to reproduce this issue -->
* **Minimal Python Version**: N/A or Unknown
* **Minimal Celery Version**: N/A or Unknown
* **Minimal Kombu Version**: N/A or Unknown
* **Minimal Broker Version**: N/A or Unknown
* **Minimal Result Backend Version**: N/A or Unknown
* **Minimal OS and/or Kernel Version**: N/A or Unknown
* **Minimal Broker Client Version**: N/A or Unknown
* **Minimal Result Backend Client Version**: N/A or Unknown
### Python Packages
<!-- Please fill the contents of pip freeze below -->
<details>
<summary><b><code>pip freeze</code> Output:</b></summary>
<p>
```
```
</p>
</details>
### Other Dependencies
<!--
Please provide system dependencies, configuration files
and other dependency information if applicable
-->
<details>
<p>
N/A
</p>
</details>
## Minimally Reproducible Test Case
<!--
Please provide a reproducible test case.
Refer to the Reporting Bugs section in our contribution guide.
We prefer submitting test cases in the form of a PR to our integration test suite.
If you can provide one, please mention the PR number below.
If not, please attach the most minimal code example required to reproduce the issue below.
If the test case is too large, please include a link to a gist or a repository below.
-->
<details>
<p>
```python
import logging

from celery.utils.log import LoggingProxy

logger = logging.getLogger(__name__)

p = LoggingProxy(logger, loglevel=logging.ERROR)
p.closed = False
num_chars = p.write('foo')
assert num_chars == 3
```
</p>
</details>
# Expected Behavior
<!-- Describe in detail what you expect to happen -->
`LoggingProxy` returns the number of characters/bytes written
# Actual Behavior
<!--
Describe in detail what actually happened.
Please include a backtrace and surround it with triple backticks (```).
In addition, include the Celery daemon logs, the broker logs,
the result backend logs and system logs below if they will help us debug
the issue.
-->
`LoggingProxy` returns `None`
| 2021-05-28T15:00:55 |
|
celery/celery | 6,804 | celery__celery-6804 | [
"6797"
] | 51634c34a77f7f183a6af450c07e7aac91a045ce | diff --git a/celery/platforms.py b/celery/platforms.py
--- a/celery/platforms.py
+++ b/celery/platforms.py
@@ -6,7 +6,6 @@
import atexit
import errno
-import grp
import math
import numbers
import os
@@ -780,6 +779,8 @@ def ignore_errno(*errnos, **kwargs):
def check_privileges(accept_content):
+ if grp is None or pwd is None:
+ return
pickle_or_serialize = ('pickle' in accept_content
or 'application/group-python-serialize' in accept_content)
| diff --git a/t/unit/utils/test_platforms.py b/t/unit/utils/test_platforms.py
--- a/t/unit/utils/test_platforms.py
+++ b/t/unit/utils/test_platforms.py
@@ -911,8 +911,10 @@ def test_check_privileges_with_c_force_root(accept_content):
({'application/group-python-serialize'}, 'wheel'),
({'pickle', 'application/group-python-serialize'}, 'wheel'),
])
-def test_check_privileges_with_c_force_root_and_with_suspicious_group(accept_content, group_name):
- with patch('celery.platforms.os') as os_module, patch('celery.platforms.grp') as grp_module:
+def test_check_privileges_with_c_force_root_and_with_suspicious_group(
+ accept_content, group_name):
+ with patch('celery.platforms.os') as os_module, patch(
+ 'celery.platforms.grp') as grp_module:
os_module.environ = {'C_FORCE_ROOT': 'true'}
os_module.getuid.return_value = 60
os_module.getgid.return_value = 60
@@ -936,8 +938,10 @@ def test_check_privileges_with_c_force_root_and_with_suspicious_group(accept_con
({'application/group-python-serialize'}, 'wheel'),
({'pickle', 'application/group-python-serialize'}, 'wheel'),
])
-def test_check_privileges_without_c_force_root_and_with_suspicious_group(accept_content, group_name):
- with patch('celery.platforms.os') as os_module, patch('celery.platforms.grp') as grp_module:
+def test_check_privileges_without_c_force_root_and_with_suspicious_group(
+ accept_content, group_name):
+ with patch('celery.platforms.os') as os_module, patch(
+ 'celery.platforms.grp') as grp_module:
os_module.environ = {}
os_module.getuid.return_value = 60
os_module.getgid.return_value = 60
@@ -959,8 +963,10 @@ def test_check_privileges_without_c_force_root_and_with_suspicious_group(accept_
{'application/group-python-serialize'},
{'pickle', 'application/group-python-serialize'}
])
-def test_check_privileges_with_c_force_root_and_no_group_entry(accept_content, recwarn):
- with patch('celery.platforms.os') as os_module, patch('celery.platforms.grp') as grp_module:
+def test_check_privileges_with_c_force_root_and_no_group_entry(accept_content,
+ recwarn):
+ with patch('celery.platforms.os') as os_module, patch(
+ 'celery.platforms.grp') as grp_module:
os_module.environ = {'C_FORCE_ROOT': 'true'}
os_module.getuid.return_value = 60
os_module.getgid.return_value = 60
@@ -984,8 +990,10 @@ def test_check_privileges_with_c_force_root_and_no_group_entry(accept_content, r
{'application/group-python-serialize'},
{'pickle', 'application/group-python-serialize'}
])
-def test_check_privileges_with_c_force_root_and_no_group_entry(accept_content, recwarn):
- with patch('celery.platforms.os') as os_module, patch('celery.platforms.grp') as grp_module:
+def test_check_privileges_with_c_force_root_and_no_group_entry(accept_content,
+ recwarn):
+ with patch('celery.platforms.os') as os_module, patch(
+ 'celery.platforms.grp') as grp_module:
os_module.environ = {}
os_module.getuid.return_value = 60
os_module.getgid.return_value = 60
@@ -1001,3 +1009,17 @@ def test_check_privileges_with_c_force_root_and_no_group_entry(accept_content, r
check_privileges(accept_content)
assert recwarn[0].message.args[0] == ASSUMING_ROOT
+
+
+def test_skip_checking_privileges_when_grp_is_unavailable(recwarn):
+ with patch("celery.platforms.grp", new=None):
+ check_privileges({'pickle'})
+
+ assert len(recwarn) == 0
+
+
+def test_skip_checking_privileges_when_pwd_is_unavailable(recwarn):
+ with patch("celery.platforms.pwd", new=None):
+ check_privileges({'pickle'})
+
+ assert len(recwarn) == 0
| ModuleNotFoundError: No module named 'grp'
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [ ] This has already been asked to the [discussion group](https://groups.google.com/forum/#!forum/celery-users) first.
- [x] I have read the relevant section in the
[contribution guide](http://docs.celeryproject.org/en/latest/contributing.html#other-bugs)
on reporting bugs.
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the bug was already fixed in the master branch.
- [x] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
Hey there! I've read all the checkboxes etc., but it's such an obvious bug that I feel they are not necessary. I hope this nonconformity won't cause you to just ignore the actual issue:
# Steps to Reproduce
## Minimally Reproducible Test Case
Run Celery on Windows, on any Python version.
### Expected Behavior
It should start
### Actual Behavior
```
File ".venv\lib\site-packages\celery\platforms.py", line 9, in <module>
import grp
ModuleNotFoundError: No module named 'grp'
```
The module `grp` is only available on Linux, so this does not work on Windows, and it also does not make sense as a global import. It's already imported below via `try_import`.
### Possible fix
This line should simply be removed, because there is already a `try_import` for it below. Also, `check_privileges` should handle the non-existence of `grp`. Unfortunately, due to my company's restrictions I cannot submit an MR.
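A minimal sketch of that idea (the exact guard placement is an assumption; the names follow `celery/platforms.py`):
```python
# Sketch only: tolerate platforms without the POSIX user/group modules.
try:
    import grp
    import pwd
except ImportError:  # e.g. on Windows, neither module exists
    grp = pwd = None


def check_privileges(accept_content):
    if grp is None or pwd is None:
        return  # nothing to check without POSIX user/group info
    # ... original checks continue here ...
```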
Git blame shows @namloc2001 and @thedrow for [this line which is still on master](https://github.com/celery/celery/blob/ce567e31065e3361493ebb33a23e2f04c07cc371/celery/platforms.py#L9).
Thanks!
| Hey @voegtlel :wave:,
Thank you for opening an issue. We will get back to you as soon as we can.
Also, check out our [Open Collective](https://opencollective.com/celery) and consider backing us - every little helps!
We also offer priority support for our sponsors.
If you require immediate assistance please consider sponsoring us.
I'm working on a fix. | 2021-06-06T16:18:13 |
celery/celery | 6,818 | celery__celery-6818 | [
"6777"
] | 102eddd7fd9728f4217ef33f21ba604ff0e0addb | diff --git a/celery/app/task.py b/celery/app/task.py
--- a/celery/app/task.py
+++ b/celery/app/task.py
@@ -85,6 +85,7 @@ class Context:
loglevel = None
origin = None
parent_id = None
+ properties = None
retries = 0
reply_to = None
root_id = None
diff --git a/celery/worker/request.py b/celery/worker/request.py
--- a/celery/worker/request.py
+++ b/celery/worker/request.py
@@ -93,7 +93,8 @@ def __init__(self, message, on_ack=noop,
maybe_make_aware=maybe_make_aware,
maybe_iso8601=maybe_iso8601, **opts):
self._message = message
- self._request_dict = message.headers if headers is None else headers
+ self._request_dict = (message.headers.copy() if headers is None
+ else headers.copy())
self._body = message.body if body is None else body
self._app = app
self._utc = utc
@@ -157,6 +158,7 @@ def __init__(self, message, on_ack=noop,
'redelivered': delivery_info.get('redelivered'),
}
self._request_dict.update({
+ 'properties': properties,
'reply_to': properties.get('reply_to'),
'correlation_id': properties.get('correlation_id'),
'hostname': self._hostname,
diff --git a/t/integration/tasks.py b/t/integration/tasks.py
--- a/t/integration/tasks.py
+++ b/t/integration/tasks.py
@@ -305,6 +305,11 @@ def return_priority(self, *_args):
return "Priority: %s" % self.request.delivery_info['priority']
+@shared_task(bind=True)
+def return_properties(self):
+ return self.request.properties
+
+
class ClassBasedAutoRetryTask(Task):
name = 'auto_retry_class_task'
autoretry_for = (ValueError,)
| diff --git a/t/integration/test_tasks.py b/t/integration/test_tasks.py
--- a/t/integration/test_tasks.py
+++ b/t/integration/test_tasks.py
@@ -9,7 +9,8 @@
from .conftest import get_active_redis_channels
from .tasks import (ClassBasedAutoRetryTask, ExpectedException, add,
add_ignore_result, add_not_typed, fail, print_unicode,
- retry, retry_once, retry_once_priority, sleeping)
+ retry, retry_once, retry_once_priority, return_properties,
+ sleeping)
TIMEOUT = 10
@@ -270,6 +271,11 @@ def test_unicode_task(self, manager):
timeout=TIMEOUT, propagate=True,
)
+ @flaky
+ def test_properties(self, celery_session_worker):
+ res = return_properties.apply_async(app_id="1234")
+ assert res.get(timeout=TIMEOUT)["app_id"] == "1234"
+
class tests_task_redis_result_backend:
def setup(self, manager):
| Make AMQP message properties available in task
<!--
Please fill this template entirely and do not erase parts of it.
We reserve the right to close without a response
feature requests which are incomplete.
-->
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?utf8=%E2%9C%93&q=is%3Aissue+label%3A%22Issue+Type%3A+Feature+Request%22+)
for similar or identical feature requests.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?utf8=%E2%9C%93&q=is%3Apr+label%3A%22PR+Type%3A+Feature%22+)
for existing proposed implementations of this feature.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the if the same feature was already implemented in the
master branch.
- [x] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
## Related Issues and Possible Duplicates
<!--
Please make sure to search and mention any related issues
or possible duplicates to this issue as requested by the checklist above.
This may or may not include issues in other repositories that the Celery project
maintains or other repositories that are dependencies of Celery.
If you don't know how to mention issues, please refer to Github's documentation
on the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests
-->
#### Related Issues
- None
#### Possible Duplicates
- None
# Brief Summary
I would like to be able to access arbitrary AMQP message properties (e.g. `user-id`) within my tasks.
# Design
## Architectural Considerations
<!--
If more components other than Celery are involved,
describe them here and the effect it would have on Celery.
-->
None
## Proposed Behavior
If the message received from the transport has a `properties` attribute, a reference to the value should be available on the `app.task.Context` instance exposed as `self.request` in a bound task. If there is no `properties` attribute, the new `Context` attribute may be set to `None`.
## Proposed UI/UX
The new attribute in `app.task.Context` might be named `properties`.
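For illustration, a hypothetical bound task using the proposed attribute (the `user_id` key is an assumption about what the publisher set):
```python
from celery import shared_task


@shared_task(bind=True)
def who_sent_this(self):
    props = self.request.properties or {}
    # e.g. RabbitMQ's validated user-id, if the publisher supplied it
    return props.get('user_id')
```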
## Diagrams
<!--
Please include any diagrams that might be relevant
to the implementation of this feature such as:
* Class Diagrams
* Sequence Diagrams
* Activity Diagrams
You can drag and drop images into the text box to attach them to this issue.
-->
N/A
## Alternatives
<!--
If you have considered any alternative implementations
describe them in detail below.
-->
Application-defined headers are already available to the task, but some functionality may be provided through broker interactions which cannot be replicated using application-defined headers (e.g. RabbitMQ's validated User-ID).
Another option would be to expose each of the remaining AMQP 0-9-1 message properties on the `app.task.Context` object similar to `reply_to`. This would have the disadvantage of introducing broker-specific logic.
| Hey @InvalidInterrupt :wave:,
Thank you for opening an issue. We will get back to you as soon as we can.
Also, check out our [Open Collective](https://opencollective.com/celery) and consider backing us - every little helps!
We also offer priority support for our sponsors.
If you require immediate assistance please consider sponsoring us.
| 2021-06-19T23:23:28 |
celery/celery | 6,838 | celery__celery-6838 | [
"6834"
] | c33e9b2a6905a239c45e6f50437394db69fa41db | diff --git a/celery/utils/log.py b/celery/utils/log.py
--- a/celery/utils/log.py
+++ b/celery/utils/log.py
@@ -223,6 +223,7 @@ def write(self, data):
if getattr(self._thread, 'recurse_protection', False):
# Logger is logging back to this file, so stop recursing.
return 0
+ data = data.rstrip('\n')
if data and not self.closed:
self._thread.recurse_protection = True
try:
| diff --git a/t/unit/app/test_log.py b/t/unit/app/test_log.py
--- a/t/unit/app/test_log.py
+++ b/t/unit/app/test_log.py
@@ -268,8 +268,10 @@ def test_logging_proxy(self):
p.write('foo')
assert 'foo' not in sio.getvalue()
p.closed = False
+ p.write('\n')
+ assert sio.getvalue() == ''
write_res = p.write('foo ')
- assert 'foo ' in sio.getvalue()
+ assert sio.getvalue() == 'foo \n'
assert write_res == 4
lines = ['baz', 'xuzzy']
p.writelines(lines)
| empty line printed to stderr
<!--
Please fill this template entirely and do not erase parts of it.
We reserve the right to close without a response
bug reports which are incomplete.
-->
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [ ] This has already been asked to the [discussion group](https://groups.google.com/forum/#!forum/celery-users) first.
- [x] I have read the relevant section in the
[contribution guide](http://docs.celeryproject.org/en/latest/contributing.html#other-bugs)
on reporting bugs.
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the bug was already fixed in the master branch.
- [x] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
## Mandatory Debugging Information
- [x] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [x] I have included the contents of ``pip freeze`` in the issue.
- [x] I have included all the versions of all the external dependencies required
to reproduce this bug.
## Optional Debugging Information
<!--
Try some of the below if you think they are relevant.
It will help us figure out the scope of the bug and how many users it affects.
-->
- [ ] I have tried reproducing the issue on more than one Python version
and/or implementation.
- [ ] I have tried reproducing the issue on more than one message broker and/or
result backend.
- [ ] I have tried reproducing the issue on more than one version of the message
broker and/or result backend.
- [ ] I have tried reproducing the issue on more than one operating system.
- [ ] I have tried reproducing the issue on more than one workers pool.
- [ ] I have tried reproducing the issue with autoscaling, retries,
ETA/Countdown & rate limits disabled.
- [x] I have tried reproducing the issue after downgrading
and/or upgrading Celery and its dependencies.
## Related Issues and Possible Duplicates
<!--
Please make sure to search and mention any related issues
or possible duplicates to this issue as requested by the checklist above.
This may or may not include issues in other repositories that the Celery project
maintains or other repositories that are dependencies of Celery.
If you don't know how to mention issues, please refer to Github's documentation
on the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests
-->
#### Related Issues
- #6791
- #1997
#### Possible Duplicates
- None
## Environment & Settings
<!-- Include the contents of celery --version below -->
**Celery version**:
<!-- Include the output of celery -A proj report below -->
<details>
<summary><b><code>celery report</code> Output:</b></summary>
<p>
```
software -> celery:5.1.2 (sun-harmonics) kombu:5.1.0 py:3.8.5
billiard:3.6.4.0 redis:3.5.3
platform -> system:Linux arch:64bit, ELF
kernel version:5.8.0-59-generic imp:CPython
loader -> celery.loaders.app.AppLoader
settings -> transport:redis results:disabled
broker_url: 'redis://localhost:6379/0'
deprecated_settings: None
```
</p>
</details>
# Steps to Reproduce
## Required Dependencies
<!-- Please fill the required dependencies to reproduce this issue -->
* **Minimal Python Version**: N/A or Unknown
* **Minimal Celery Version**: N/A or Unknown
* **Minimal Kombu Version**: N/A or Unknown
* **Minimal Broker Version**: redis in docker
* **Minimal Result Backend Version**: N/A or Unknown
* **Minimal OS and/or Kernel Version**: Ubuntu 20.04
* **Minimal Broker Client Version**: N/A or Unknown
* **Minimal Result Backend Client Version**: N/A or Unknown
### Python Packages
<!-- Please fill the contents of pip freeze below -->
<details>
<summary><b><code>pip freeze</code> Output:</b></summary>
<p>
```
amqp==5.0.6
billiard==3.6.4.0
celery @ file:///tmp/x/celery
click==7.1.2
click-didyoumean==0.0.3
click-plugins==1.1.1
click-repl==0.2.0
kombu==5.1.0
prompt-toolkit==3.0.19
pytz==2021.1
redis==3.5.3
six==1.16.0
vine==5.0.0
wcwidth==0.2.5
```
</p>
</details>
### Other Dependencies
<!--
Please provide system dependencies, configuration files
and other dependency information if applicable
-->
<details>
<p>
N/A
</p>
</details>
## Minimally Reproducible Test Case
<!--
Please provide a reproducible test case.
Refer to the Reporting Bugs section in our contribution guide.
We prefer submitting test cases in the form of a PR to our integration test suite.
If you can provide one, please mention the PR number below.
If not, please attach the most minimal code example required to reproduce the issue below.
If the test case is too large, please include a link to a gist or a repository below.
-->
<details>
<p>
```python
from celery import Celery

app = Celery('tasks', broker='redis://localhost:6379/0')


@app.task
def f():
    print('hello hello world')


if __name__ == '__main__':
    f.apply_async()
```
</p>
</details>
# Expected Behavior
Only one line of output:
<!-- Describe in detail what you expect to happen -->
```console
-------------- [queues]
.> celery exchange=celery(direct) key=celery
[2021-06-30 19:28:02,606: WARNING/ForkPoolWorker-4] hello hello world
```
# Actual Behavior
An empty line is printed:
```console
-------------- [queues]
.> celery exchange=celery(direct) key=celery
[2021-06-30 19:28:02,606: WARNING/ForkPoolWorker-4] hello hello world
[2021-06-30 19:28:02,607: WARNING/ForkPoolWorker-4]
```
When printing from the task function, each output causes an extra newline to be printed to `stderr`. This was introduced in version `5.1.*` in #6791 by removing this line: https://github.com/celery/celery/pull/6791/commits/51f5b01df0434144521b23a35d16aebfee08c3ae#diff-116905b4c2ccc7e0172e26700ba1a0a0270b2fe1f830c47244dc1df07c805883L226
I think there are a few options for how to address this:
- just add the line back as it was -- but this would go against the initial intention in #6791 of removing it, which was to avoid any string manipulation in this function
- use something like `re.sub('\n|\r', '', data)` instead, to keep the other whitespace, or an `endswith` check, or a more complex regex (see the sketch after this list)
- address this somewhere else, so no newline characters are passed to `.write()` in the first place (I would need a hint where to do this though)
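A minimal sketch of a middle ground between the first two options, assuming only the trailing newline emitted by `print()` needs to go (whitespace inside the message is preserved):
```python
# Sketch only: strip just trailing newlines before the emptiness check.
def write(self, data):
    data = data.rstrip('\n')  # drop the bare '\n' that print() emits
    if data and not self.closed:
        ...  # log `data` as before
```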
I am happy to send a PR to fix this if needed.
| Hey @theendlessriver13 :wave:,
Thank you for opening an issue. We will get back to you as soon as we can.
Also, check out our [Open Collective](https://opencollective.com/celery) and consider backing us - every little helps!
We also offer priority support for our sponsors.
If you require immediate assistance please consider sponsoring us.
| 2021-07-02T16:42:11 |
celery/celery | 6,849 | celery__celery-6849 | [
"6844"
] | e885a47b0c73aef0112bf989a2642c125889a2ca | diff --git a/celery/app/control.py b/celery/app/control.py
--- a/celery/app/control.py
+++ b/celery/app/control.py
@@ -135,6 +135,8 @@ def clock(self):
def active(self, safe=None):
"""Return list of tasks currently executed by workers.
+ Arguments:
+ safe (Boolean): Set to True to disable deserialization.
Returns:
Dict: Dictionary ``{HOSTNAME: [TASK_INFO,...]}``.
@@ -142,11 +144,8 @@ def active(self, safe=None):
See Also:
For ``TASK_INFO`` details see :func:`query_task` return value.
- Note:
- ``safe`` is ignored since 4.0 as no objects will need
- serialization now that we have argsrepr/kwargsrepr.
"""
- return self._request('active')
+ return self._request('active', safe=safe)
def scheduled(self, safe=None):
"""Return list of scheduled tasks with details.
diff --git a/celery/worker/control.py b/celery/worker/control.py
--- a/celery/worker/control.py
+++ b/celery/worker/control.py
@@ -362,9 +362,9 @@ def reserved(state, **kwargs):
@inspect_command(alias='dump_active')
-def active(state, **kwargs):
+def active(state, safe=False, **kwargs):
"""List of tasks currently being executed."""
- return [request.info()
+ return [request.info(safe=safe)
for request in state.tset(worker_state.active_requests)]
diff --git a/celery/worker/request.py b/celery/worker/request.py
--- a/celery/worker/request.py
+++ b/celery/worker/request.py
@@ -600,8 +600,8 @@ def info(self, safe=False):
return {
'id': self.id,
'name': self.name,
- 'args': self._args,
- 'kwargs': self._kwargs,
+ 'args': self._args if not safe else self._argsrepr,
+ 'kwargs': self._kwargs if not safe else self._kwargsrepr,
'type': self._type,
'hostname': self._hostname,
'time_start': self.time_start,
| diff --git a/t/unit/app/test_control.py b/t/unit/app/test_control.py
--- a/t/unit/app/test_control.py
+++ b/t/unit/app/test_control.py
@@ -95,7 +95,11 @@ def assert_broadcast_called(self, command,
def test_active(self):
self.inspect.active()
- self.assert_broadcast_called('active')
+ self.assert_broadcast_called('active', safe=None)
+
+ def test_active_safe(self):
+ self.inspect.active(safe=True)
+ self.assert_broadcast_called('active', safe=True)
def test_clock(self):
self.inspect.clock()
diff --git a/t/unit/worker/test_control.py b/t/unit/worker/test_control.py
--- a/t/unit/worker/test_control.py
+++ b/t/unit/worker/test_control.py
@@ -298,6 +298,20 @@ def test_active(self):
finally:
worker_state.active_requests.discard(r)
+ def test_active_safe(self):
+ kwargsrepr = '<anything>'
+ r = Request(
+ self.TaskMessage(self.mytask.name, id='do re mi',
+ kwargsrepr=kwargsrepr),
+ app=self.app,
+ )
+ worker_state.active_requests.add(r)
+ try:
+ active_resp = self.panel.handle('dump_active', {'safe': True})
+ assert active_resp[0]['kwargs'] == kwargsrepr
+ finally:
+ worker_state.active_requests.discard(r)
+
def test_pool_grow(self):
class MockPool:
diff --git a/t/unit/worker/test_request.py b/t/unit/worker/test_request.py
--- a/t/unit/worker/test_request.py
+++ b/t/unit/worker/test_request.py
@@ -232,7 +232,7 @@ def test_info_function(self):
kwargs[str(i)] = ''.join(
random.choice(string.ascii_lowercase) for i in range(1000))
assert self.get_request(
- self.add.s(**kwargs)).info(safe=True).get('kwargs') == kwargs
+ self.add.s(**kwargs)).info(safe=True).get('kwargs') == '' # mock message doesn't populate kwargsrepr
assert self.get_request(
self.add.s(**kwargs)).info(safe=False).get('kwargs') == kwargs
args = []
@@ -240,7 +240,7 @@ def test_info_function(self):
args.append(''.join(
random.choice(string.ascii_lowercase) for i in range(1000)))
assert list(self.get_request(
- self.add.s(*args)).info(safe=True).get('args')) == args
+ self.add.s(*args)).info(safe=True).get('args')) == [] # mock message doesn't populate argsrepr
assert list(self.get_request(
self.add.s(*args)).info(safe=False).get('args')) == args
| app.control.inspect().active() requires deserialization
<!--
Please fill this template entirely and do not erase parts of it.
We reserve the right to close without a response
bug reports which are incomplete.
-->
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [x] This has already been asked to the [discussion group](https://groups.google.com/forum/#!forum/celery-users) first.
- [x] I have read the relevant section in the
[contribution guide](http://docs.celeryproject.org/en/latest/contributing.html#other-bugs)
on reporting bugs.
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the bug was already fixed in the master branch.
- [x] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
## Mandatory Debugging Information
- [x] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [x] I have included the contents of ``pip freeze`` in the issue.
- [x] I have included all the versions of all the external dependencies required
to reproduce this bug.
## Optional Debugging Information
<!--
Try some of the below if you think they are relevant.
It will help us figure out the scope of the bug and how many users it affects.
-->
- [ ] I have tried reproducing the issue on more than one Python version
and/or implementation.
- [ ] I have tried reproducing the issue on more than one message broker and/or
result backend.
- [ ] I have tried reproducing the issue on more than one version of the message
broker and/or result backend.
- [ ] I have tried reproducing the issue on more than one operating system.
- [ ] I have tried reproducing the issue on more than one workers pool.
- [ ] I have tried reproducing the issue with autoscaling, retries,
ETA/Countdown & rate limits disabled.
- [ ] I have tried reproducing the issue after downgrading
and/or upgrading Celery and its dependencies.
## Related Issues and Possible Duplicates
<!--
Please make sure to search and mention any related issues
or possible duplicates to this issue as requested by the checklist above.
This may or may not include issues in other repositories that the Celery project
maintains or other repositories that are dependencies of Celery.
If you don't know how to mention issues, please refer to Github's documentation
on the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests
-->
#### Related Issues
- #3667
- #6672
#### Possible Duplicates
- None
## Environment & Settings
<!-- Include the contents of celery --version below -->
**Celery version**:
<!-- Include the output of celery -A proj report below -->
<details>
<summary><b><code>celery report</code> Output:</b></summary>
<p>
```
software -> celery:5.1.2 (sun-harmonics) kombu:5.1.0 py:3.8.10
billiard:3.6.4.0 redis:3.5.3
platform -> system:Linux arch:64bit, ELF
kernel version:3.10.0-1160.11.1.el7.x86_64 imp:CPython
loader -> celery.loaders.app.AppLoader
settings -> transport:redis results:redis://localhost:6379/0
broker_url: 'redis://localhost:6379/0'
result_backend: 'redis://localhost:6379/0'
task_serializer: 'pickle'
accept_content: ['pickle']
deprecated_settings: None
worker_concurrency: 1
result_serializer: 'pickle'
event_serializer: 'pickle'
```
</p>
</details>
# Steps to Reproduce
The breakage was introduced by 40e2212208eafab819216, first released in Celery v4.4.0 (42a5befd07a94e35 works).
Prior to 40e221220, the result of active() had `kwargs` and `args` as strings and therefore never required deserialization:
```
{'celery@worker1': [{'id': 'f12b987c-a711-4a8f-8d3c-532e922ebeea', 'name': 'celery_revoke_retry.sleep', 'args': '()', 'kwargs': "{'sleep_arg': <__main__.SleepArg object at 0x7fec3c059e80>}", 'type': 'celery_revoke_retry.sleep', 'hostname': 'celery@worker1', 'time_start': 1625674850.914238, 'acknowledged': True, 'delivery_info': {'exchange': '', 'routing_key': 'celery', 'priority': 0, 'redelivered': None}, 'worker_pid': 29697}]}
```
40e2212208eafab819216 requires clients to be able to deserialize task arguments. This re-introduces bug #3667 and makes the docstring of `app.control.inspect().active()` no longer accurate:
```
no objects will need serialization now that we have argsrepr/kwargsrepr.
```
The fix for #3667 specifically states it's required for the payload to be JSON serializable (i.e. not require deserializing pickled task arguments):
https://github.com/smartlane/celery/commit/cbe89437fda38ee1a98376d694d9d3028e683b50.
A reasonable fix would be to add back support for the `safe` argument to `active()` to make the v4.3.1 behaviour possible again.
Currently (~v5.1.0), querying active tasks when some argument can't be deserialized causes `active()` to throw kombu.exceptions.DecodeError.
## Required Dependencies
<!-- Please fill the required dependencies to reproduce this issue -->
* **Minimal Celery Version**: 4.4.0
## Minimally Reproducible Test Case
<!--
Please provide a reproducible test case.
Refer to the Reporting Bugs section in our contribution guide.
We prefer submitting test cases in the form of a PR to our integration test suite.
If you can provide one, please mention the PR number below.
If not, please attach the most minimal code example required to reproduce the issue below.
If the test case is too large, please include a link to a gist or a repository below.
-->
<details>
<p>
Query app.control.inspect().active() from a context that cannot deserialize an active task's argument (SleepArg in the example below).
This program creates the task and sleeps:
```python
import threading
import time

from celery import Celery

app = Celery('celery_example', broker='redis://localhost:6379/0',
             backend='redis://localhost:6379/0')
app.conf.update(
    worker_concurrency=1,
    task_serializer='pickle',
    accept_content=['pickle'],
    result_serializer='pickle',
)


class SleepArg:
    def __init__(self, sleep_time) -> None:
        self.sleep_time = sleep_time


@app.task
def sleep(sleep_arg: SleepArg):
    print("running sleep")
    time.sleep(sleep_arg.sleep_time)


def main():
    argv1 = ['--app=celery_example', 'worker', '--loglevel=DEBUG',
             '--hostname', 'worker1']
    threading.Thread(target=app.worker_main, args=(argv1,)).start()
    time.sleep(5)
    print('Celery Worker1 should be up now')
    sleep.s(sleep_arg=SleepArg(60)).apply_async().get(
        disable_sync_subtasks=False)


if __name__ == '__main__':
    main()
```
This program attempts to query active tasks, which fails because it can't deserialize `SleepArg`:
```python
from celery import Celery

if __name__ == '__main__':
    app = Celery(broker='redis://localhost:6379/0',
                 backend='redis://localhost:6379/0')
    app.conf.update(
        worker_concurrency=1,
        task_serializer='pickle',
        accept_content=['pickle', 'json'],
    )
    print(app.control.inspect(timeout=4).active())
```
</p>
</details>
# Expected Behavior
It should be possible to query active tasks from contexts that can't necessarily de-serialize task arguments. There is lots of useful information in the active query beyond the task arguments.
Historically, Celery offered the `safe` argument to `active()`, but this was dropped in favour of preventing data from requiring
deserialization from being in the response. 40e2212208ea breaks this.
# Actual Behavior
The program that queries active tasks hits the following exception.
```
Traceback (most recent call last):
File "/ws/djungic-sjc/celery_venv/venv/lib/python3.8/site-packages/kombu/serialization.py", line 42, in _reraise_errors
yield
File "/ws/djungic-sjc/celery_venv/venv/lib/python3.8/site-packages/kombu/serialization.py", line 255, in loads
return decode(data)
File "/ws/djungic-sjc/celery_venv/venv/lib/python3.8/site-packages/kombu/serialization.py", line 327, in unpickle
return pickle_loads(str_to_bytes(s))
File "/ws/djungic-sjc/celery_venv/venv/lib/python3.8/site-packages/kombu/serialization.py", line 51, in pickle_loads
return load(BytesIO(s))
AttributeError: Can't get attribute 'SleepArg' on <module '__main__' from '../firex_ws/plugins/celery_list_active.py'>
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "../firex_ws/plugins/celery_list_active.py", line 10, in <module>
print(app.control.inspect(timeout=4).active())
File "/ws/djungic-sjc/celery_venv/venv/lib/python3.8/site-packages/celery/app/control.py", line 149, in active
return self._request('active')
File "/ws/djungic-sjc/celery_venv/venv/lib/python3.8/site-packages/celery/app/control.py", line 106, in _request
return self._prepare(self.app.control.broadcast(
File "/ws/djungic-sjc/celery_venv/venv/lib/python3.8/site-packages/celery/app/control.py", line 741, in broadcast
return self.mailbox(conn)._broadcast(
File "/ws/djungic-sjc/celery_venv/venv/lib/python3.8/site-packages/kombu/pidbox.py", line 347, in _broadcast
return self._collect(reply_ticket, limit=limit,
File "/ws/djungic-sjc/celery_venv/venv/lib/python3.8/site-packages/kombu/pidbox.py", line 389, in _collect
self.connection.drain_events(timeout=timeout)
File "/ws/djungic-sjc/celery_venv/venv/lib/python3.8/site-packages/kombu/connection.py", line 318, in drain_events
return self.transport.drain_events(self.connection, **kwargs)
File "/ws/djungic-sjc/celery_venv/venv/lib/python3.8/site-packages/kombu/transport/virtual/base.py", line 960, in drain_events
get(self._deliver, timeout=timeout)
File "/ws/djungic-sjc/celery_venv/venv/lib/python3.8/site-packages/kombu/transport/redis.py", line 420, in get
ret = self.handle_event(fileno, event)
File "/ws/djungic-sjc/celery_venv/venv/lib/python3.8/site-packages/kombu/transport/redis.py", line 402, in handle_event
return self.on_readable(fileno), self
File "/ws/djungic-sjc/celery_venv/venv/lib/python3.8/site-packages/kombu/transport/redis.py", line 398, in on_readable
chan.handlers[type]()
File "/ws/djungic-sjc/celery_venv/venv/lib/python3.8/site-packages/kombu/transport/redis.py", line 789, in _brpop_read
self.connection._deliver(loads(bytes_to_str(item)), dest)
File "/ws/djungic-sjc/celery_venv/venv/lib/python3.8/site-packages/kombu/transport/virtual/base.py", line 980, in _deliver
callback(message)
File "/ws/djungic-sjc/celery_venv/venv/lib/python3.8/site-packages/kombu/transport/virtual/base.py", line 630, in _callback
return callback(message)
File "/ws/djungic-sjc/celery_venv/venv/lib/python3.8/site-packages/kombu/messaging.py", line 620, in _receive_callback
decoded = None if on_m else message.decode()
File "/ws/djungic-sjc/celery_venv/venv/lib/python3.8/site-packages/kombu/message.py", line 190, in decode
self._decoded_cache = self._decode()
File "/ws/djungic-sjc/celery_venv/venv/lib/python3.8/site-packages/kombu/message.py", line 194, in _decode
return loads(self.body, self.content_type,
File "/ws/djungic-sjc/celery_venv/venv/lib/python3.8/site-packages/kombu/serialization.py", line 255, in loads
return decode(data)
File "/auto/firex/sw/python/3.8.10/lib/python3.8/contextlib.py", line 131, in __exit__
self.gen.throw(type, value, traceback)
File "/ws/djungic-sjc/celery_venv/venv/lib/python3.8/site-packages/kombu/serialization.py", line 46, in _reraise_errors
reraise(wrapper, wrapper(exc), sys.exc_info()[2])
File "/ws/djungic-sjc/celery_venv/venv/lib/python3.8/site-packages/kombu/exceptions.py", line 21, in reraise
raise value.with_traceback(tb)
File "/ws/djungic-sjc/celery_venv/venv/lib/python3.8/site-packages/kombu/serialization.py", line 42, in _reraise_errors
yield
File "/ws/djungic-sjc/celery_venv/venv/lib/python3.8/site-packages/kombu/serialization.py", line 255, in loads
return decode(data)
File "/ws/djungic-sjc/celery_venv/venv/lib/python3.8/site-packages/kombu/serialization.py", line 327, in unpickle
return pickle_loads(str_to_bytes(s))
File "/ws/djungic-sjc/celery_venv/venv/lib/python3.8/site-packages/kombu/serialization.py", line 51, in pickle_loads
return load(BytesIO(s))
kombu.exceptions.DecodeError: Can't get attribute 'SleepArg' on <module '__main__' from '../firex_ws/plugins/celery_list_active.py'>
```
| Hey @djungic :wave:,
Thank you for opening an issue. We will get back to you as soon as we can.
Also, check out our [Open Collective](https://opencollective.com/celery) and consider backing us - every little helps!
We also offer priority support for our sponsors.
If you require immediate assistance please consider sponsoring us.
| 2021-07-08T15:49:17 |
celery/celery | 6,853 | celery__celery-6853 | [
"6836"
] | 5fd182417d9a6cb1b5aebe29916814d7a725e62a | diff --git a/celery/app/defaults.py b/celery/app/defaults.py
--- a/celery/app/defaults.py
+++ b/celery/app/defaults.py
@@ -294,7 +294,7 @@ def __repr__(self):
cancel_long_running_tasks_on_connection_loss=Option(
False, type='bool'
),
- concurrency=Option(0, type='int'),
+ concurrency=Option(None, type='int'),
consumer=Option('celery.worker.consumer:Consumer', type='string'),
direct=Option(False, type='bool', old={'celery_worker_direct'}),
disable_rate_limits=Option(
| Process worker prefork behavior with `app.conf.update(config)`
<!--
Please fill this template entirely and do not erase parts of it.
We reserve the right to close without a response
bug reports which are incomplete.
-->
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [ ] This has already been asked to the [discussion group](https://groups.google.com/forum/#!forum/celery-users) first.
- [x] I have read the relevant section in the
[contribution guide](http://docs.celeryproject.org/en/latest/contributing.html#other-bugs)
on reporting bugs.
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the bug was already fixed in the master branch.
- [x] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
## Mandatory Debugging Information
- [ ] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [x] I have included the contents of ``pip freeze`` in the issue.
- [x] I have included all the versions of all the external dependencies required
to reproduce this bug.
## Optional Debugging Information
<!--
Try some of the below if you think they are relevant.
It will help us figure out the scope of the bug and how many users it affects.
-->
- [ ] I have tried reproducing the issue on more than one Python version
and/or implementation.
- [ ] I have tried reproducing the issue on more than one message broker and/or
result backend.
- [ ] I have tried reproducing the issue on more than one version of the message
broker and/or result backend.
- [x] I have tried reproducing the issue on more than one operating system.
- [ ] I have tried reproducing the issue on more than one workers pool.
- [ ] I have tried reproducing the issue with autoscaling, retries,
ETA/Countdown & rate limits disabled.
- [ ] I have tried reproducing the issue after downgrading
and/or upgrading Celery and its dependencies.
## Related Issues and Possible Duplicates
<!--
Please make sure to search and mention any related issues
or possible duplicates to this issue as requested by the checklist above.
This may or may not include issues in other repositories that the Celery project
maintains or other repositories that are dependencies of Celery.
If you don't know how to mention issues, please refer to Github's documentation
on the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests
-->
#### Related Issues
- None
#### Possible Duplicates
- https://github.com/celery/celery/issues/6307
## Environment & Settings
<!-- Include the contents of celery --version below -->
**Celery version**:
<!-- Include the output of celery -A proj report below -->
<details>
<summary><b><code>celery report</code> Output:</b></summary>
<p>
```
software -> celery:5.1.2 (sun-harmonics) kombu:5.1.0 py:3.9.1
billiard:3.6.4.0 py-amqp:5.0.6
platform -> system:Darwin arch:64bit
kernel version:20.5.0 imp:CPython
loader -> celery.loaders.default.Loader
settings -> transport:amqp results:disabled
deprecated_settings: None
```
</p>
</details>
# Steps to Reproduce
## Required Dependencies
<!-- Please fill the required dependencies to reproduce this issue -->
* **Minimal Python Version**: N/A or Unknown
* **Minimal Celery Version**: N/A or Unknown
* **Minimal Kombu Version**: N/A or Unknown
* **Minimal Broker Version**: N/A or Unknown
* **Minimal Result Backend Version**: N/A or Unknown
* **Minimal OS and/or Kernel Version**: N/A or Unknown
* **Minimal Broker Client Version**: N/A or Unknown
* **Minimal Result Backend Client Version**: N/A or Unknown
### Python Packages
<!-- Please fill the contents of pip freeze below -->
<details>
<summary><b><code>pip freeze</code> Output:</b></summary>
<p>
```
amqp==5.0.6
appdirs==1.4.4
billiard==3.6.4.0
black==20.8b1
celery==5.1.2
click==7.1.2
click-didyoumean==0.0.3
click-plugins==1.1.1
click-repl==0.2.0
flake8==3.9.2
Flask==2.0.1
isort==5.9.1
itsdangerous==2.0.1
Jinja2==3.0.1
kombu==5.1.0
MarkupSafe==2.0.1
mccabe==0.6.1
motor==2.4.0
mypy-extensions==0.4.3
numpy==1.21.0
pathspec==0.8.1
prompt-toolkit==3.0.19
pycodestyle==2.7.0
pyflakes==2.3.1
pymongo==3.11.4
pytz==2021.1
PyYAML==5.4.1
regex==2021.4.4
six==1.16.0
toml==0.10.2
typed-ast==1.4.3
typing-extensions==3.10.0.0
vine==5.0.0
wcwidth==0.2.5
Werkzeug==2.0.1
```
</p>
</details>
### Other Dependencies
<!--
Please provide system dependencies, configuration files
and other dependency information if applicable
-->
<details>
<p>
N/A
</p>
</details>
## Minimally Reproducible Test Case
<!--
Please provide a reproducible test case.
Refer to the Reporting Bugs section in our contribution guide.
We prefer submitting test cases in the form of a PR to our integration test suite.
If you can provide one, please mention the PR number below.
If not, please attach the most minimal code example required to reproduce the issue below.
If the test case is too large, please include a link to a gist or a repository below.
-->
<details>
<p>
`celery.py`
```python
from celery import Celery, signals
from click import Option
import codecs
import yaml


def setup(yml_path: str):
    global CONF
    with codecs.open(yml_path, "r", "utf-8") as config_file:
        CONF = yaml.load(config_file, Loader=yaml.FullLoader)
    return CONF  # return the parsed config so load_config can use it


def load_config(yml_path: str):
    conf = setup(yml_path)  # Load conf from the custom YAML file
    celery_config = conf.get("celery", {})
    app_config = celery_config.get("app", {})
    app.conf.update(app_config)
    return conf


def _create_celery():
    app = Celery(include=["ais.tasks"])
    app.user_options["preload"] = [
        Option(["--config", "-C"], default="config/config.yml")
    ]
    return app


"""
signal handlers
Warning: signal handlers are only executed on the server side
"""


@signals.user_preload_options.connect
def handle_preload_options(options, **kwargs):
    load_config(options["config"])


app = _create_celery()
```
`config.yml`
```yaml
celery:
  app:
    app_name: parser-worker
    worker_concurrency: 4
    broker_url: redis://localhost:6379/0
```
</p>
</details>
# Expected Behavior
<!-- Describe in detail what you expect to happen -->
The worker should prefork 4 processes, as set via `app.conf.update` from `config.yml`.
# Actual Behavior
<!--
Describe in detail what actually happened.
Please include a backtrace and surround it with triple backticks (```).
In addition, include the Celery daemon logs, the broker logs,
the result backend logs and system logs below if they will help us debug
the issue.
-->
The worker still preforks 8 processes.
# Trace Code
## The worker init flow with `click` is confusing
Module path: `.venv/lib/python3.9/site-packages/celery/bin/worker.py`
I found that the `click` lib is used instead of `argparse` in [bin/worker](https://github.com/celery/celery/blob/master/celery/bin/worker.py#L183).
It uses `value or ctx.obj.app.conf.worker_concurrency` as the lambda's value;
we expect `ctx.obj.app.conf.worker_concurrency` to be any int but `0`.
Unfortunately, that is not the case here.
`ctx.obj.app.conf.worker_concurrency` is set up after `@handle_preload_options`, according to the call stack.
The `concurrency` will also be `0` in [worker/worker.py](https://github.com/celery/celery/blob/master/celery/worker/worker.py#L376), even though it uses the `either` func in `either('worker_concurrency', concurrency)`.
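A simplified illustration of the falsy-zero trap (hypothetical values; the real flow goes through the click callback and `either`):
```python
# The old default of 0 is falsy, so it cannot be told apart from
# "nothing configured" in `value or conf`-style merges.
cli_value = None                  # -c/--concurrency not passed
conf_concurrency = 0              # old Option(0, type='int') default,
                                  # read before the preload handler runs
effective = cli_value or conf_concurrency   # -> 0, the autodetect path
# With the default changed to None, only a genuinely configured value
# (e.g. worker_concurrency: 4 from config.yml) is truthy here.
```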
| Hey @wangha81 :wave:,
Thank you for opening an issue. We will get back to you as soon as we can.
Also, check out our [Open Collective](https://opencollective.com/celery) and consider backing us - every little helps!
We also offer priority support for our sponsors.
If you require immediate assistance please consider sponsoring us.
can you work on providing the fix?
Sorry, I don't know which of the behaviors below is right:
- the value at the `click` step would be set if `worker_concurrency` was given
- `self.concurrency = either('worker_concurrency', concurrency)` would be set from the config, rather than from `concurrency`, if `worker_concurrency` was given

BTW, I haven't traced the `either` workflow yet, so I don't know how the `either` method works.
Maybe I'll trace it in my free time, but I've been a little busy recently.
Thanks for your response | 2021-07-12T13:46:48 |
|
celery/celery | 6,863 | celery__celery-6863 | [
"6855"
] | ca489c6f7767ed796bce10400321fe08b4820c0c | diff --git a/celery/concurrency/asynpool.py b/celery/concurrency/asynpool.py
--- a/celery/concurrency/asynpool.py
+++ b/celery/concurrency/asynpool.py
@@ -978,10 +978,14 @@ def _write_ack(fd, ack, callback=None):
def flush(self):
if self._state == TERMINATE:
return
- # cancel all tasks that haven't been accepted so that NACK is sent.
- for job in self._cache.values():
+ # cancel all tasks that haven't been accepted so that NACK is sent
+ # if synack is enabled.
+ for job in tuple(self._cache.values()):
if not job._accepted:
- job._cancel()
+ if self.synack:
+ job._cancel()
+ else:
+ job.discard()
# clear the outgoing buffer as the tasks will be redelivered by
# the broker anyway.
| Jobs in AsynPool remain in cache forever if the connection is dropped
<!--
Please fill this template entirely and do not erase parts of it.
We reserve the right to close without a response
bug reports which are incomplete.
-->
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [ ] This has already been asked to the [discussion group](https://groups.google.com/forum/#!forum/celery-users) first.
- [x] I have read the relevant section in the
[contribution guide](http://docs.celeryproject.org/en/latest/contributing.html#other-bugs)
on reporting bugs.
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the bug was already fixed in the master branch.
- [x] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
## Mandatory Debugging Information
- [x] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [x] I have included the contents of ``pip freeze`` in the issue.
- [x] I have included all the versions of all the external dependencies required
to reproduce this bug.
## Optional Debugging Information
<!--
Try some of the below if you think they are relevant.
It will help us figure out the scope of the bug and how many users it affects.
-->
- [ ] I have tried reproducing the issue on more than one Python version
and/or implementation.
- [ ] I have tried reproducing the issue on more than one message broker and/or
result backend.
- [ ] I have tried reproducing the issue on more than one version of the message
broker and/or result backend.
- [ ] I have tried reproducing the issue on more than one operating system.
- [ ] I have tried reproducing the issue on more than one workers pool.
- [ ] I have tried reproducing the issue with autoscaling, retries,
ETA/Countdown & rate limits disabled.
- [ ] I have tried reproducing the issue after downgrading
and/or upgrading Celery and its dependencies.
## Related Issues and Possible Duplicates
<!--
Please make sure to search and mention any related issues
or possible duplicates to this issue as requested by the checklist above.
This may or may not include issues in other repositories that the Celery project
maintains or other repositories that are dependencies of Celery.
If you don't know how to mention issues, please refer to Github's documentation
on the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests
-->
#### Related Issues
- None
#### Possible Duplicates
- None
## Environment & Settings
<!-- Include the contents of celery --version below -->
**Celery version**: 5.1.2
<!-- Include the output of celery -A proj report below -->
<details>
<summary><b><code>celery report</code> Output:</b></summary>
<p>
```
celery -A myapp report
software -> celery:5.1.2 (sun-harmonics) kombu:5.1.0 py:3.9.0
billiard:3.6.4.0 py-amqp:5.0.6
platform -> system:Linux arch:64bit, ELF
kernel version:5.11.0-22-generic imp:CPython
loader -> celery.loaders.app.AppLoader
settings -> transport:amqp results:disabled
broker_url: 'amqp://guest:********@localhost:5672//'
deprecated_settings: None
```
</p>
</details>
# Steps to Reproduce
## Required Dependencies
<!-- Please fill the required dependencies to reproduce this issue -->
* **Minimal Python Version**: N/A or Unknown
* **Minimal Celery Version**: 1.x
* **Minimal Kombu Version**: N/A or Unknown
* **Minimal Broker Version**: N/A or Unknown
* **Minimal Result Backend Version**: N/A or Unknown
* **Minimal OS and/or Kernel Version**: N/A or Unknown
* **Minimal Broker Client Version**: N/A or Unknown
* **Minimal Result Backend Client Version**: N/A or Unknown
### Python Packages
<!-- Please fill the contents of pip freeze below -->
<details>
<summary><b><code>pip freeze</code> Output:</b></summary>
<p>
```
aiohttp==3.7.4.post0
alabaster==0.7.12
amqp==5.0.6
appdirs==1.4.4
async-timeout==3.0.1
attrs==20.3.0
autoflake==1.4
autopep8==1.5.7
aws-xray-sdk==0.95
azure-common==1.1.5
azure-nspkg==3.0.2
azure-storage==0.36.0
azure-storage-common==1.1.0
azure-storage-nspkg==3.1.0
Babel==2.8.0
backcall==0.2.0
billiard==3.6.4.0
bleach==3.2.1
boto==2.49.0
boto3==1.16.9
botocore==1.19.9
bump2version==1.0.1
bumpversion==0.6.0
case==1.5.3
cassandra-driver==3.20.2
-e [email protected]:celery/celery.git@7b5a44d646f43288fb546da10a1141347b01543b#egg=celery
Cerberus==1.3.2
certifi==2020.12.5
cffi==1.14.5
cfgv==3.2.0
chardet==4.0.0
click==7.1.2
click-didyoumean==0.0.3
click-plugins==1.1.1
click-repl==0.1.6
codecov==2.1.11
colorama==0.4.4
couchbase==3.1.3
coverage==5.4
cryptography==3.4.6
DateTime==4.3
decorator==4.4.2
deepmerge==0.1.1
dephell-archive==0.1.7
dephell-argparse==0.1.3
dephell-changelogs==0.0.1
dephell-discover==0.2.10
dephell-licenses==0.1.7
dephell-links==0.1.5
dephell-markers==1.0.3
dephell-pythons==0.1.15
dephell-setuptools==0.2.4
dephell-shells==0.1.5
dephell-specifier==0.2.2
dephell-venvs==0.1.18
dephell-versioning==0.1.2
diff-cover==4.2.3
distlib==0.3.1
dnspython==1.16.0
docker==4.3.1
docutils==0.16
durationpy==0.5
ecdsa==0.16.0
elasticsearch==7.9.1
ephem==3.7.7.1
eventlet==0.29.1
filelock==3.0.12
flower==0.9.5
future==0.18.2
gevent==21.1.2
greenlet==0.4.17
humanize==3.1.0
identify==1.5.6
idna==2.10
imagesize==1.2.0
importlib-metadata==4.0.1
inflect==3.0.2
iniconfig==1.1.1
ipython==7.19.0
ipython-genutils==0.2.0
isort==5.9.1
jedi==0.17.2
jeepney==0.6.0
Jinja2==2.11.2
jinja2-pluralize==0.3.0
jmespath==0.10.0
jsondiff==1.1.1
jsonpickle==1.4.1
keyring==21.5.0
kombu==5.1.0
linecache2==1.0.0
m2r==0.2.1
MarkupSafe==1.1.1
mistune==0.8.4
mock==4.0.2
moto==1.3.7
msgpack==1.0.0
multidict==5.1.0
mypy-extensions==0.4.3
nodeenv==1.5.0
nose==1.3.7
packaging==20.9
parso==0.7.1
pbr==5.5.1
pexpect==4.8.0
pickleshare==0.7.5
pkginfo==1.6.1
pluggy==0.13.1
pre-commit==2.8.2
prometheus-client==0.8.0
prompt-toolkit @ file:///home/thedrow/.cache/pypoetry/artifacts/85/ed/40/229a34dee2081fd935c85f600bfcddb0974a96a88f099412c62b58d4fa/prompt_toolkit-3.0.3-py3-none-any.whl
ptyprocess==0.6.0
py==1.10.0
pyaml==20.4.0
pyArango==1.3.4
pycodestyle==2.7.0
pycouchdb==1.14.1
pycparser==2.20
pycryptodome==3.9.8
pycurl==7.43.0.5
pydocumentdb==2.3.2
pyflakes==2.2.0
Pygments==2.7.3
pylibmc==1.6.1
pymongo==3.11.0
pyparsing==2.4.7
pyrsistent==0.17.3
pytest==6.2.2
pytest-celery==0.0.0a1
pytest-cov==2.11.1
pytest-rerunfailures==9.1.1
pytest-subtests==0.3.2
pytest-timeout==1.4.2
pytest-travis-fold==1.3.0
python-consul==1.1.0
python-dateutil==2.8.1
python-jose==2.0.2
python-memcached==1.59
pytz @ file:///home/thedrow/.cache/pypoetry/artifacts/b0/a7/8d/54de3ab4d1ff29abbbca1e9ccbaefdc2a1b290138311b84f73bee16de1/pytz-2021.1-py2.py3-none-any.whl
pyupgrade==2.19.1
pyupgrade-directories==0.2.0
PyYAML==5.3.1
readme-renderer==28.0
redis==3.5.3
requests==2.25.1
requests-toolbelt==0.9.1
responses==0.12.0
rfc3986==1.4.0
ruamel.yaml==0.16.13
ruamel.yaml.clib==0.2.2
s3transfer==0.3.3
SecretStorage==3.3.0
shellingham==1.4.0
simplejson==3.17.2
six==1.15.0
snowballstemmer==2.0.0
softlayer-messaging==1.0.3
Sphinx==3.3.0
sphinx-celery==2.0.0
sphinx-click==2.5.0
sphinx-testing==0.7.2
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==1.0.3
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.4
SQLAlchemy==1.3.23
tblib==1.7.0
tokenize-rt==4.1.0
toml==0.10.2
toml-sort==0.18.0
tomlkit==0.7.0
tornado==6.1
tqdm==4.54.1
traceback2==1.4.0
traitlets==5.0.5
twine==3.4.1
typing-extensions==3.7.4.3
unittest2==1.1.0
urllib3==1.26.3
vine==5.0.0
virtualenv==20.1.0
wcwidth==0.2.5
webencodings==0.5.1
websocket-client==0.57.0
Werkzeug==1.0.1
wrapt==1.12.1
xmltodict==0.12.0
yarl==1.6.3
yaspin==1.4.1
zipp==3.4.0
zope.event==4.5.0
zope.interface==5.1.2
```
</p>
</details>
### Other Dependencies
<!--
Please provide system dependencies, configuration files
and other dependency information if applicable
-->
<details>
<p>
N/A
</p>
</details>
## Minimally Reproducible Test Case
<!--
Please provide a reproducible test case.
Refer to the Reporting Bugs section in our contribution guide.
We prefer submitting test cases in the form of a PR to our integration test suite.
If you can provide one, please mention the PR number below.
If not, please attach the most minimal code example required to reproduce the issue below.
If the test case is too large, please include a link to a gist or a repository below.
-->
To reproduce the issue you must force close the connection and cause AsynPool to be flushed.
Missing a heartbeat due to a breakpoint does the trick.
See https://github.com/celery/celery/blob/681e72edb918c8ff315665a6abbfc6dd99f303e2/celery/concurrency/asynpool.py#L978-L1038
<details>
<p>
```python
# Sending any task would do
some_task.delay()
```
</p>
</details>
# Expected Behavior
<!-- Describe in detail what you expect to happen -->
The pool's job cache should be completely empty after we flush it.
# Actual Behavior
<!--
Describe in detail what actually happened.
Please include a backtrace and surround it with triple backticks (```).
In addition, include the Celery daemon logs, the broker logs,
the result backend logs and system logs below if they will help us debug
the issue.
-->
Canceled jobs remain in the cache and are never cleaned up since the pool doesn't use the worker's synack capability (not sure what that is, actually).
This is a problem since we keep the job's payload (which may be arbitrarily large) forever.
Given enough uptime, whenever there are enough connection drops, we accumulate new jobs that are never garbage collected.
This seems to have been the case from the moment AsynPool was written.
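A minimal sketch of what the fix in this record's patch does (names mirror the AsynPool internals shown in the diff; `pool` stands in for the pool instance):

```python
def flush_unaccepted(pool):
    # Iterate over a snapshot: job.discard() removes the job from
    # pool._cache, so mutating it while looping would break iteration.
    for job in tuple(pool._cache.values()):
        if not job._accepted:
            if pool.synack:
                job._cancel()   # worker will reply with NACK
            else:
                job.discard()   # no synack: drop the job from the cache
```

With `synack` disabled (the default), discarded jobs leave the cache immediately, so their payloads no longer accumulate across connection drops.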
| 2021-07-19T11:52:04 |
||
celery/celery | 6,866 | celery__celery-6866 | [
"6857"
] | ef026ea44f59e5d234c195c3ce73927f8323f9ee | diff --git a/celery/bin/control.py b/celery/bin/control.py
--- a/celery/bin/control.py
+++ b/celery/bin/control.py
@@ -144,6 +144,8 @@ def inspect(ctx, action, timeout, destination, json, **kwargs):
if json:
ctx.obj.echo(dumps(replies))
+ return
+
nodecount = len(replies)
if not ctx.obj.quiet:
ctx.obj.echo('\n{} {} online.'.format(
| Celery inspect output JSON serialization is broken
# Checklist
- [ ] I have verified that the issue exists against the `master` branch of Celery.
- [ ] This has already been asked to the [discussion group](https://groups.google.com/forum/#!forum/celery-users) first.
- [x] I have read the relevant section in the
[contribution guide](http://docs.celeryproject.org/en/latest/contributing.html#other-bugs)
on reporting bugs.
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [ ] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the bug was already fixed in the master branch.
- [x] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
## Mandatory Debugging Information
- [ ] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
- [ ] I have verified that the issue exists against the `master` branch of Celery.
- [ ] I have included the contents of ``pip freeze`` in the issue.
- [ ] I have included all the versions of all the external dependencies required
to reproduce this bug.
## Optional Debugging Information
- [ ] I have tried reproducing the issue on more than one Python version
and/or implementation.
- [ ] I have tried reproducing the issue on more than one message broker and/or
result backend.
- [ ] I have tried reproducing the issue on more than one version of the message
broker and/or result backend.
- [ ] I have tried reproducing the issue on more than one operating system.
- [ ] I have tried reproducing the issue on more than one workers pool.
- [ ] I have tried reproducing the issue with autoscaling, retries,
ETA/Countdown & rate limits disabled.
- [ ] I have tried reproducing the issue after downgrading
and/or upgrading Celery and its dependencies.
## Environment & Settings
**Celery version**: 5.1.2 (sun-harmonics)
# Steps to Reproduce
Execute `celery -A proj inspect -j active`
# Expected Behavior
A valid JSON string is returned, like:
```
{"worker-1": [], "worker-2": [], "worker-3": []}
```
# Actual Behavior
The command returns valid JSON on the first line, followed by a double newline and a useless summary:
```
{"worker-1": [], "worker-2": [], "worker-3": []}
3 nodes online.
```
which makes the overall output an invalid JSON string.
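Until the CLI is fixed, a workaround that sidesteps output parsing entirely is to query the workers through the control API; `proj` is a hypothetical module holding the Celery app:

```python
import json

from proj import app  # hypothetical: your Celery application module

# active() returns a dict mapping worker names to their running tasks,
# e.g. {"worker-1": [], "worker-2": []}, with no trailing summary line.
replies = app.control.inspect(timeout=5.0).active() or {}
print(json.dumps(replies))
```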
| Hey @adampl :wave:,
Thank you for opening an issue. We will get back to you as soon as we can.
Also, check out our [Open Collective](https://opencollective.com/celery) and consider backing us - every little helps!
We also offer priority support for our sponsors.
If you require immediate assistance please consider sponsoring us.
you probably want to use `--quiet` here:
edit: docs: https://docs.celeryproject.org/en/stable/reference/cli.html#cmdoption-celery-q
```console
(venv) jonas@ububox:~/workspace/celery$ celery -A tasks --quiet inspect --json active | jq
{
"celery@ububox": []
}
```
if `--json` should imply the `--quiet` argument, we might want to change it this way. If the maintainers agree, I'd be happy to send this as a PR. Otherwise `--quiet` works well.
```diff
diff --git a/celery/bin/control.py b/celery/bin/control.py
index a13963a54..fbd3730c4 100644
--- a/celery/bin/control.py
+++ b/celery/bin/control.py
@@ -144,6 +144,8 @@ def inspect(ctx, action, timeout, destination, json, **kwargs):
if json:
ctx.obj.echo(dumps(replies))
+ return
+
nodecount = len(replies)
if not ctx.obj.quiet:
ctx.obj.echo('\n{} {} online.'.format(
```
I think you should send a PR :) | 2021-07-20T17:55:16 |
|
celery/celery | 6,898 | celery__celery-6898 | [
"6885",
"6885"
] | 846066a34413509695434ed5a661280d7db4f993 | diff --git a/celery/app/trace.py b/celery/app/trace.py
--- a/celery/app/trace.py
+++ b/celery/app/trace.py
@@ -527,6 +527,8 @@ def trace_task(uuid, args, kwargs, request=None):
'name': get_task_name(task_request, name),
'return_value': Rstr,
'runtime': T,
+ 'args': safe_repr(args),
+ 'kwargs': safe_repr(kwargs),
})
# -* POST *-
diff --git a/celery/worker/strategy.py b/celery/worker/strategy.py
--- a/celery/worker/strategy.py
+++ b/celery/worker/strategy.py
@@ -2,6 +2,7 @@
import logging
from kombu.asynchronous.timer import to_timestamp
+from kombu.utils.encoding import safe_repr
from celery import signals
from celery.app import trace as _app_trace
@@ -151,7 +152,12 @@ def task_message_handler(message, body, ack, reject, callbacks,
if _does_info:
# Similar to `app.trace.info()`, we pass the formatting args as the
# `extra` kwarg for custom log handlers
- context = {'id': req.id, 'name': req.name}
+ context = {
+ 'id': req.id,
+ 'name': req.name,
+ 'args': safe_repr(req.args),
+ 'kwargs': safe_repr(req.kwargs),
+ }
info(_app_trace.LOG_RECEIVED, context, extra={'data': context})
if (req.expires or req.id in revoked_tasks) and req.revoked():
return
| diff --git a/t/unit/worker/test_strategy.py b/t/unit/worker/test_strategy.py
--- a/t/unit/worker/test_strategy.py
+++ b/t/unit/worker/test_strategy.py
@@ -191,7 +191,7 @@ def test_log_task_received_custom(self, caplog):
C()
for record in caplog.records:
if record.msg == custom_fmt:
- assert set(record.args) == {"id", "name"}
+ assert set(record.args) == {"id", "name", "kwargs", "args"}
break
else:
raise ValueError("Expected message not in captured log records")
| Include args in Received (like Rejected)
- [X] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Enhancement%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical enhancement to an existing feature.
- [X] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22Issue+Type%3A+Enhancement%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed enhancements.
- [X] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the same enhancement was already implemented in the
master branch.
- [X] I have included all related issues and possible duplicate issues in this issue
(If there are none, check this box anyway).
## Related Issues and Possible Duplicates
None that I'm aware of
# Brief Summary
Logging `args` is really helpful, but it's currently only done with `Rejected` logs and it would be nice to also include in `Received` logs
See https://github.com/celery/celery/blob/f02d7c60051ce5202349fe7c795ebf5000d9526d/celery/app/trace.py#L259
# Design
## Architectural Considerations
If a task takes a lot of args or large args, then it could make logs large
## Proposed Behavior
Include `args` in `Received` logs
## Proposed UI/UX
On by default with a way to turn it off
## Diagrams
N/A
## Alternatives
Off by default with a way to turn it on
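For reference, a hedged sketch of how the extra context could be surfaced once it is available: `LOG_RECEIVED` in `celery.app.trace` is the format string used for the "received" line, so a custom format can include the new keys. Note this is an internal constant, not a documented API, and may change between versions:

```python
from celery.app import trace

# With args/kwargs in the formatting context, the received line can show
# the task's payload (possibly large, hence the on/off concern above).
trace.LOG_RECEIVED = (
    'Task %(name)s[%(id)s] received: args=%(args)s kwargs=%(kwargs)s'
)
```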
| Hey @daveisfera :wave:,
Thank you for opening an issue. We will get back to you as soon as we can.
Also, check out our [Open Collective](https://opencollective.com/celery) and consider backing us - every little helps!
We also offer priority support for our sponsors.
If you require immediate assistance please consider sponsoring us.
| 2021-08-05T22:14:55 |
celery/celery | 6,899 | celery__celery-6899 | [
"6882"
] | 9e435228cb106588f408ae71b9d703ff81a80531 | diff --git a/celery/backends/base.py b/celery/backends/base.py
--- a/celery/backends/base.py
+++ b/celery/backends/base.py
@@ -185,29 +185,35 @@ def mark_as_failure(self, task_id, exc,
except (AttributeError, TypeError):
chain_data = tuple()
for chain_elem in chain_data:
- chain_elem_opts = chain_elem['options']
+ # Reconstruct a `Context` object for the chained task which has
+ # enough information to for backends to work with
+ chain_elem_ctx = Context(chain_elem)
+ chain_elem_ctx.update(chain_elem_ctx.options)
+ chain_elem_ctx.id = chain_elem_ctx.options.get('task_id')
+ chain_elem_ctx.group = chain_elem_ctx.options.get('group_id')
# If the state should be propagated, we'll do so for all
# elements of the chain. This is only truly important so
# that the last chain element which controls completion of
# the chain itself is marked as completed to avoid stalls.
- if store_result and state in states.PROPAGATE_STATES:
- try:
- chained_task_id = chain_elem_opts['task_id']
- except KeyError:
- pass
- else:
- self.store_result(
- chained_task_id, exc, state,
- traceback=traceback, request=chain_elem
- )
+ #
+ # Some chained elements may be complex signatures and have no
+ # task ID of their own, so we skip them hoping that not
+ # descending through them is OK. If the last chain element is
+ # complex, we assume it must have been uplifted to a chord by
+ # the canvas code and therefore the condition below will ensure
+ # that we mark something as being complete as avoid stalling.
+ if (
+ store_result and state in states.PROPAGATE_STATES and
+ chain_elem_ctx.task_id is not None
+ ):
+ self.store_result(
+ chain_elem_ctx.task_id, exc, state,
+ traceback=traceback, request=chain_elem_ctx,
+ )
# If the chain element is a member of a chord, we also need
# to call `on_chord_part_return()` as well to avoid stalls.
- if 'chord' in chain_elem_opts:
- failed_ctx = Context(chain_elem)
- failed_ctx.update(failed_ctx.options)
- failed_ctx.id = failed_ctx.options['task_id']
- failed_ctx.group = failed_ctx.options['group_id']
- self.on_chord_part_return(failed_ctx, state, exc)
+ if 'chord' in chain_elem_ctx.options:
+ self.on_chord_part_return(chain_elem_ctx, state, exc)
# And finally we'll fire any errbacks
if call_errbacks and request.errbacks:
self._call_task_errbacks(request, exc, traceback)
| Error handler on a task chain not called (Celery 5.1.x with amqp broker and rpc backend)
<!--
Please fill this template entirely and do not erase parts of it.
We reserve the right to close without a response
bug reports which are incomplete.
-->
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
- [ ] I have verified that the issue exists against the `master` branch of Celery.
- [ ] This has already been asked to the [discussion group](https://groups.google.com/forum/#!forum/celery-users) first.
- [x] I have read the relevant section in the
[contribution guide](http://docs.celeryproject.org/en/latest/contributing.html#other-bugs)
on reporting bugs.
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [ ] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the bug was already fixed in the master branch.
- [x] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
## Mandatory Debugging Information
- [x] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
- [ ] I have verified that the issue exists against the `master` branch of Celery.
- [x] I have included the contents of ``pip freeze`` in the issue.
- [ ] I have included all the versions of all the external dependencies required
to reproduce this bug.
## Optional Debugging Information
<!--
Try some of the below if you think they are relevant.
It will help us figure out the scope of the bug and how many users it affects.
-->
- [ ] I have tried reproducing the issue on more than one Python version
and/or implementation.
- [ ] I have tried reproducing the issue on more than one message broker and/or
result backend.
- [ ] I have tried reproducing the issue on more than one version of the message
broker and/or result backend.
- [ ] I have tried reproducing the issue on more than one operating system.
- [ ] I have tried reproducing the issue on more than one workers pool.
- [ ] I have tried reproducing the issue with autoscaling, retries,
ETA/Countdown & rate limits disabled.
- [x] I have tried reproducing the issue after downgrading
and/or upgrading Celery and its dependencies.
## Related Issues and Possible Duplicates
<!--
Please make sure to search and mention any related issues
or possible duplicates to this issue as requested by the checklist above.
This may or may not include issues in other repositories that the Celery project
maintains or other repositories that are dependencies of Celery.
If you don't know how to mention issues, please refer to Github's documentation
on the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests
-->
#### Related Issues
- None
#### Possible Duplicates
- None
## Environment & Settings
<!-- Include the contents of celery --version below -->
**Celery version**:
<!-- Include the output of celery -A proj report below -->
<details>
<summary><b><code>celery report</code> Output:</b></summary>
<p>
```
software -> celery:5.1.2 (sun-harmonics) kombu:5.1.0 py:3.8.7
billiard:3.6.4.0 py-amqp:5.0.6
platform -> system:Darwin arch:64bit
kernel version:19.6.0 imp:CPython
loader -> celery.loaders.app.AppLoader
settings -> transport:amqp results:rpc:///
deprecated_settings: None
broker_url: 'amqp://****:********@localhost:5672//'
result_backend: 'rpc:///'
```
</p>
</details>
# Steps to Reproduce
## Required Dependencies
<!-- Please fill the required dependencies to reproduce this issue -->
* **Minimal Python Version**: N/A or Unknown
* **Minimal Celery Version**: 5.1.0
* **Minimal Kombu Version**: N/A or Unknown
* **Minimal Broker Version**: N/A or Unknown
* **Minimal Result Backend Version**: N/A or Unknown
* **Minimal OS and/or Kernel Version**: N/A or Unknown
* **Minimal Broker Client Version**: N/A or Unknown
* **Minimal Result Backend Client Version**: N/A or Unknown
### Python Packages
<!-- Please fill the contents of pip freeze below -->
<details>
<summary><b><code>pip freeze</code> Output:</b></summary>
<p>
```
amqp==5.0.6
billiard==3.6.4.0
celery==5.1.2
click==7.1.2
click-didyoumean==0.0.3
click-plugins==1.1.1
click-repl==0.2.0
kombu==5.1.0
prompt-toolkit==3.0.19
pytz==2021.1
six==1.16.0
vine==5.0.0
wcwidth==0.2.5
```
</p>
</details>
### Other Dependencies
<!--
Please provide system dependencies, configuration files
and other dependency information if applicable
-->
<details>
<p>
N/A
</p>
</details>
## Minimally Reproducible Test Case
<!--
Please provide a reproducible test case.
Refer to the Reporting Bugs section in our contribution guide.
We prefer submitting test cases in the form of a PR to our integration test suite.
If you can provide one, please mention the PR number below.
If not, please attach the most minimal code example required to reproduce the issue below.
If the test case is too large, please include a link to a gist or a repository below.
-->
We are chaining tasks together and then adding an errback to the chain.
The expectation is that if a task in the chain fails, then the errback will be called,
and the rest of the chain skipped. Our broker is `amqp` (RabbitMQ), and we are using the `rpc` backend.
This feature worked for 4.4.x and works in 5.0.5. Starting in 5.1.0, the Celery
worker will throw an internal exception and the errback is not called. Below is a simple test case:
<details>
<p>
*tasks.py*
```python
from celery import Celery
from celery.utils.log import get_task_logger
app = Celery()
app.conf.broker_url = "amqp://****:****@localhost:5672/"
app.conf.result_backend = "rpc://"
logger = get_task_logger(__name__)
@app.task
def task1():
logger.info("TASK1")
raise ValueError('foo')
@app.task
def task2():
logger.info("TASK2")
@app.task
def error_handler(request, exc, traceback):
logger.error("ERROR HANDLER")
logger.error('Task {0} raised exception: {1!r}'.format(
request.id, exc))
```
Then run `task1` and `task2` in a chain using `error_handler` as the errback:
```python
from tasks import error_handler, task1, task2
chain = (task1.s() | task2.s())
x = chain.apply_async(link_error=error_handler.s())
```
</p>
</details>
# Expected Behavior
<!-- Describe in detail what you expect to happen -->
`task1` will fail and `error_handler` should be called:
```
[INFO/MainProcess] Task tasks.task1[35e2f20e-fc5b-4889-8ae0-9c1a593a15ec] received
[INFO/ForkPoolWorker-7] tasks.task1[35e2f20e-fc5b-4889-8ae0-9c1a593a15ec]: TASK1
[ERROR/ForkPoolWorker-7] tasks.error_handler[None]: ERROR HANDLER
[ERROR/ForkPoolWorker-7] tasks.error_handler[None]: Task 35e2f20e-fc5b-4889-8ae0-9c1a593a15ec raised exception: ValueError('foo')
[ERROR/ForkPoolWorker-7] Task tasks.task1[35e2f20e-fc5b-4889-8ae0-9c1a593a15ec] raised unexpected: ValueError('foo')
```
# Actual Behavior
<!--
Describe in detail what actually happened.
Please include a backtrace and surround it with triple backticks (```).
In addition, include the Celery daemon logs, the broker logs,
the result backend logs and system logs below if they will help us debug
the issue.
-->
The Celery worker logs the following stack trace and `error_handler` is never called:
```
[ERROR/MainProcess] Pool callback raised exception: AttributeError("'dict' object has no attribute 'reply_to'")
Traceback (most recent call last):
File "/****/share/virtualenvs/celery5-reply-to-reL48Jin/lib/python3.8/site-packages/billiard/pool.py", line 1796, in safe_apply_callback
fun(*args, **kwargs)
File "/****/share/virtualenvs/celery5-reply-to-reL48Jin/lib/python3.8/site-packages/celery/worker/request.py", line 571, in on_failure
self.task.backend.mark_as_failure(
File "/****/share/virtualenvs/celery5-reply-to-reL48Jin/lib/python3.8/site-packages/celery/backends/base.py", line 199, in mark_as_failure
self.store_result(
File "/****/share/virtualenvs/celery5-reply-to-reL48Jin/lib/python3.8/site-packages/celery/backends/rpc.py", line 198, in store_result
routing_key, correlation_id = self.destination_for(task_id, request)
File "/****/share/virtualenvs/celery5-reply-to-reL48Jin/lib/python3.8/site-packages/celery/backends/rpc.py", line 179, in destination_for
return request.reply_to, request.correlation_id or task_id
AttributeError: 'dict' object has no attribute 'reply_to'
```
| Hey @ppiatko :wave:,
Thank you for opening an issue. We will get back to you as soon as we can.
Also, check out our [Open Collective](https://opencollective.com/celery) and consider backing us - every little helps!
We also offer priority support for our sponsors.
If you require immediate assistance please consider sponsoring us.
can you add the test script?
There's a small example to reproduce the error in the "Details" section of "Minimally Reproducible Test Case". Did you need something more? Thanks!
got it
I've got the same issue; after some investigation I realized it could be related to this piece of code in backends/base.py:173:
```python
if request:
# This task may be part of a chord
if request.chord:
self.on_chord_part_return(request, state, exc)
# It might also have chained tasks which need to be propagated to,
# this is most likely to be exclusive with being a direct part of a
# chord but we'll handle both cases separately.
#
# The `chain_data` try block here is a bit tortured since we might
# have non-iterable objects here in tests and it's easier this way.
try:
chain_data = iter(request.chain)
except (AttributeError, TypeError):
chain_data = tuple()
for chain_elem in chain_data:
chain_elem_opts = chain_elem['options']
# If the state should be propagated, we'll do so for all
# elements of the chain. This is only truly important so
# that the last chain element which controls completion of
# the chain itself is marked as completed to avoid stalls.
if self.store_result and state in states.PROPAGATE_STATES:
try:
chained_task_id = chain_elem_opts['task_id']
except KeyError:
pass
else:
self.store_result(
chained_task_id, exc, state,
traceback=traceback, request=chain_elem
)
```
Looks like after the exception is raised, the chain is iterated again and a dictionary is sent to the rpc backend instead of a request instance.
Ah, interestingly, I reconstructed a `Context` for the `chain_elem` to be failed a few lines below that busted call to `self.store_result()`, when the same `chain_elem` is passed to `on_chord_part_return()`. I must have assumed that backends would be fine with a dictionary, but didn't know that the redis OCPR implementation expects a `Context` object.
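A simplified sketch of that fix, mirroring the patch at the top of this record: rebuild a `Context` for each chained element so backends such as `rpc://` (which read `request.reply_to`) receive an object instead of the raw dict stored in `request.chain`:

```python
from celery.app.task import Context

def fail_chain_elements(backend, chain_data, exc, state, traceback=None):
    for chain_elem in chain_data:
        ctx = Context(chain_elem)
        ctx.update(ctx.options)  # lift signature options onto attributes
        ctx.id = ctx.options.get('task_id')
        ctx.group = ctx.options.get('group_id')
        if ctx.id is not None:
            backend.store_result(ctx.id, exc, state,
                                 traceback=traceback, request=ctx)
```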
I think this should be a fairly straightforward fix. | 2021-08-05T23:44:55 |
|
celery/celery | 6,917 | celery__celery-6917 | [
"5209"
] | ad994719bafe6747af6cf8251efb0925284a9260 | diff --git a/celery/concurrency/eventlet.py b/celery/concurrency/eventlet.py
--- a/celery/concurrency/eventlet.py
+++ b/celery/concurrency/eventlet.py
@@ -2,6 +2,7 @@
import sys
from time import monotonic
+from greenlet import GreenletExit
from kombu.asynchronous import timer as _timer
from celery import signals
@@ -93,6 +94,7 @@ class TaskPool(base.BasePool):
is_green = True
task_join_will_block = False
_pool = None
+ _pool_map = None
_quick_put = None
def __init__(self, *args, **kwargs):
@@ -107,8 +109,9 @@ def __init__(self, *args, **kwargs):
def on_start(self):
self._pool = self.Pool(self.limit)
+ self._pool_map = {}
signals.eventlet_pool_started.send(sender=self)
- self._quick_put = self._pool.spawn_n
+ self._quick_put = self._pool.spawn
self._quick_apply_sig = signals.eventlet_pool_apply.send
def on_stop(self):
@@ -119,12 +122,17 @@ def on_stop(self):
def on_apply(self, target, args=None, kwargs=None, callback=None,
accept_callback=None, **_):
- self._quick_apply_sig(
- sender=self, target=target, args=args, kwargs=kwargs,
+ target = TaskPool._make_killable_target(target)
+ self._quick_apply_sig(sender=self, target=target, args=args, kwargs=kwargs,)
+ greenlet = self._quick_put(
+ apply_target,
+ target, args,
+ kwargs,
+ callback,
+ accept_callback,
+ self.getpid
)
- self._quick_put(apply_target, target, args, kwargs,
- callback, accept_callback,
- self.getpid)
+ self._add_to_pool_map(id(greenlet), greenlet)
def grow(self, n=1):
limit = self.limit + n
@@ -136,6 +144,12 @@ def shrink(self, n=1):
self._pool.resize(limit)
self.limit = limit
+ def terminate_job(self, pid, signal=None):
+ if pid in self._pool_map.keys():
+ greenlet = self._pool_map[pid]
+ greenlet.kill()
+ greenlet.wait()
+
def _get_info(self):
info = super()._get_info()
info.update({
@@ -144,3 +158,24 @@ def _get_info(self):
'running-threads': self._pool.running(),
})
return info
+
+ @staticmethod
+ def _make_killable_target(target):
+ def killable_target(*args, **kwargs):
+ try:
+ return target(*args, **kwargs)
+ except GreenletExit:
+ return (False, None, None)
+ return killable_target
+
+ def _add_to_pool_map(self, pid, greenlet):
+ self._pool_map[pid] = greenlet
+ greenlet.link(
+ TaskPool._cleanup_after_job_finish,
+ self._pool_map,
+ pid
+ )
+
+ @staticmethod
+ def _cleanup_after_job_finish(greenlet, pool_map, pid):
+ del pool_map[pid]
| diff --git a/t/unit/concurrency/test_eventlet.py b/t/unit/concurrency/test_eventlet.py
--- a/t/unit/concurrency/test_eventlet.py
+++ b/t/unit/concurrency/test_eventlet.py
@@ -2,6 +2,7 @@
from unittest.mock import Mock, patch
import pytest
+from greenlet import GreenletExit
import t.skip
from celery.concurrency.eventlet import TaskPool, Timer, apply_target
@@ -101,6 +102,7 @@ def test_pool(self):
x.on_apply(Mock())
x._pool = None
x.on_stop()
+ assert len(x._pool_map.keys()) == 1
assert x.getpid()
@patch('celery.concurrency.eventlet.base')
@@ -130,3 +132,32 @@ def test_get_info(self):
'free-threads': x._pool.free(),
'running-threads': x._pool.running(),
}
+
+ def test_terminate_job(self):
+ func = Mock()
+ pool = TaskPool(10)
+ pool.on_start()
+ pool.on_apply(func)
+
+ assert len(pool._pool_map.keys()) == 1
+ pid = list(pool._pool_map.keys())[0]
+ greenlet = pool._pool_map[pid]
+
+ pool.terminate_job(pid)
+ greenlet.link.assert_called_once()
+ greenlet.kill.assert_called_once()
+
+ def test_make_killable_target(self):
+ def valid_target():
+ return "some result..."
+
+ def terminating_target():
+ raise GreenletExit()
+
+ assert TaskPool._make_killable_target(valid_target)() == "some result..."
+ assert TaskPool._make_killable_target(terminating_target)() == (False, None, None)
+
+ def test_cleanup_after_job_finish(self):
+ testMap = {'1': None}
+ TaskPool._cleanup_after_job_finish(None, testMap, '1')
+ assert len(testMap) == 0
| I can't stop a task by its task_id
```
[2018-12-02 23:53:58,955: INFO/MainProcess] Received task: tasks.add[bb1fe102-c1f9-4361-9370-1129900c0d52]
[2018-12-02 23:54:02,479: INFO/MainProcess] Terminating bb1fe102-c1f9-4361-9370-1129900c0d52 (Signals.SIGTERM)
[2018-12-02 23:54:02,490: ERROR/MainProcess] pidbox command error: NotImplementedError("<class 'celery.concurrency.eventlet.TaskPool'> does not implement kill_job",)
Traceback (most recent call last):
  File "d:\envs\aidcs\lib\site-packages\kombu\pidbox.py", line 101, in dispatch
    reply = handle(method, arguments)
  File "d:\envs\aidcs\lib\site-packages\kombu\pidbox.py", line 122, in handle_cast
    return self.handle(method, arguments)
  File "d:\envs\aidcs\lib\site-packages\kombu\pidbox.py", line 116, in handle
    return self.handlers[method](self.state, **arguments)
  File "d:\envs\aidcs\lib\site-packages\celery\worker\control.py", line 163, in revoke
    request.terminate(state.consumer.pool, signal=signum)
  File "d:\envs\aidcs\lib\site-packages\celery\worker\request.py", line 249, in terminate
    pool.terminate_job(self.worker_pid, signal)
  File "d:\envs\aidcs\lib\site-packages\celery\concurrency\base.py", line 115, in terminate_job
    '{0} does not implement kill_job'.format(type(self)))
NotImplementedError: <class 'celery.concurrency.eventlet.TaskPool'> does not implement kill_job
[2018-12-02 23:55:38,956: INFO/MainProcess] Task tasks.add[bb1fe102-c1f9-4361-9370-1129900c0d52] succeeded in 100.0s: 8
```
This is my main code:
```python
from celery.app.control import Control
from tasks import add, app

myControl = Control(app)
myControl.revoke(task_id="b11729b0-6272-4527-af9d-dc24c0ad492d", terminate=True)
```
Finally, if I want to look at the state of a task by its task_id alone (just like above), how can I do that?
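For the question above: state lookups by id go through `AsyncResult` and require a result backend to be configured; a minimal sketch:

```python
from celery.result import AsyncResult

from tasks import app  # the app from the snippet above

result = AsyncResult("b11729b0-6272-4527-af9d-dc24c0ad492d", app=app)
print(result.status)  # e.g. PENDING, STARTED, SUCCESS, FAILURE, REVOKED
```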
| Even though I use a different concurrency (prefork, using Redis as broker), I've seen the same problem - sometimes the worker can't kill some tasks...
I used the `signal='SIGKILL'` param and got `Task handler raised error: Terminated(9,)`.
Does that mean the task termination succeeded?
```python
celery_app.control.revoke(celery_task_id, terminate=True, signal='SIGKILL')
```
celery log:
```
[2018-12-05 16:07:32,872: DEBUG/MainProcess] pidbox received method revoke(signal='SIGKILL', task_id='970f8054-b4ee-4f70-9814-4899852342bd', terminate=True) [reply_to:None ticket:None]
[2018-12-05 16:07:32,873: INFO/MainProcess] Terminating 970f8054-b4ee-4f70-9814-4899852342bd (Signals.SIGKILL)
[2018-12-05 16:07:33,068: ERROR/MainProcess] Task handler raised error: Terminated(9,)
```
The traceback shows that the task pool component you are using in celery is implemented on top of eventlet, which has not implemented the terminate_job function yet. The default setting is the prefork task pool, which is the only one that has implemented that function.
And if you want to look at the state of a task by task_id alone, you can use:
`status = AsyncResult(task_id).status`. The query is blocking: it sends a request to your backend (e.g. a database) to get the status. | 2021-08-17T19:39:10 |
celery/celery | 6,923 | celery__celery-6923 | [
"4110",
"4110"
] | 3ef5b54bd5ff6d5b5e9184f348817a209e9111d6 | diff --git a/celery/app/task.py b/celery/app/task.py
--- a/celery/app/task.py
+++ b/celery/app/task.py
@@ -972,6 +972,20 @@ def update_state(self, task_id=None, state=None, meta=None, **kwargs):
self.backend.store_result(
task_id, meta, state, request=self.request, **kwargs)
+ def before_start(self, task_id, args, kwargs):
+ """Handler called before the task starts.
+
+ .. versionadded:: 5.2
+
+ Arguments:
+ task_id (str): Unique id of the task to execute.
+ args (Tuple): Original arguments for the task to execute.
+ kwargs (Dict): Original keyword arguments for the task to execute.
+
+ Returns:
+ None: The return value of this handler is ignored.
+ """
+
def on_success(self, retval, task_id, args, kwargs):
"""Success handler.
diff --git a/celery/app/trace.py b/celery/app/trace.py
--- a/celery/app/trace.py
+++ b/celery/app/trace.py
@@ -345,8 +345,11 @@ def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
loader_task_init = loader.on_task_init
loader_cleanup = loader.on_process_cleanup
+ task_before_start = None
task_on_success = None
task_after_return = None
+ if task_has_custom(task, 'before_start'):
+ task_before_start = task.before_start
if task_has_custom(task, 'on_success'):
task_on_success = task.on_success
if task_has_custom(task, 'after_return'):
@@ -442,6 +445,9 @@ def trace_task(uuid, args, kwargs, request=None):
# -*- TRACE -*-
try:
+ if task_before_start:
+ task_before_start(uuid, args, kwargs)
+
R = retval = fun(*args, **kwargs)
state = SUCCESS
except Reject as exc:
| diff --git a/t/unit/tasks/test_trace.py b/t/unit/tasks/test_trace.py
--- a/t/unit/tasks/test_trace.py
+++ b/t/unit/tasks/test_trace.py
@@ -61,6 +61,14 @@ def test_trace_successful(self):
assert info is None
assert retval == 4
+ def test_trace_before_start(self):
+ @self.app.task(shared=False, before_start=Mock())
+ def add_with_before_start(x, y):
+ return x + y
+
+ self.trace(add_with_before_start, (2, 2), {})
+ add_with_before_start.before_start.assert_called()
+
def test_trace_on_success(self):
@self.app.task(shared=False, on_success=Mock())
def add_with_success(x, y):
| Question: any chance to have a on_started handler in the Task class?
It would be very handy to have an on_started call back in the Task class. In addition to after_return, on_failure, on_success and on_retry.
Thoughts?
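The patch in this record adds exactly such a hook, `before_start`, in Celery 5.2; a hedged usage sketch (task and class names are illustrative):

```python
from celery import Celery, Task

app = Celery()

class CallbackTask(Task):
    def before_start(self, task_id, args, kwargs):
        # runs in the worker right before run(); return value is ignored
        print(f'task {task_id} started with {args!r} {kwargs!r}')

@app.task(base=CallbackTask)
def add(x, y):
    return x + y
```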
| @rhymes could you describe a use case?
Celery provides a [task_track_started](http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-task_track_started) configuration option, this allows us to open up flower and see which of the many long running tasks have been actually "STARTED" instead of received or pending.
We would like to show in the frontend that a task has been actually picked up and started in the queue but there's no callback for that.
As the documentation says:
> Having a ‘started’ state can be useful for when there are long running tasks and there’s a need to report what task is currently running.
But how can we report, or send notifications or tell the users in the frontend which of the many batch calculations in the queue is currently running, if there's no callback?
Hope this makes sense.
I don't know why an `on_started` handler doesn't exist while the others do.
The structure below can be a workaround.
```
class BaseTask(Task):
def __call__(self, *args, **kwargs):
self.before_run()
return super(BaseTask, self).__call__(*args, **kwargs)
def before_run(self):
#do stuff
pass
@app.task(base=BaseTask)
def task1():
pass
```
I too have a use case for `on_started`; if possible, I'm looking for an `on_progress` (custom state) hook as well.
The workaround specified by @aydin will work, but the task state will not update, right?
@pavankumarkatakam no, you need to update the state manually. See an example here: http://docs.celeryproject.org/en/latest/userguide/tasks.html#custom-states
To other people who find this issue & code suggestion from @aydin, so that you don't spend much time debugging.
As of recent-ish updates to Celery docs following #5652, it is recommended to call `self.run` instead of `super.__call__` when overriding the `__call__` method. We have hit a similar scenario during testing when using `__call__` as described in #5643 where tasks are not enqueued for retries.
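A hedged rewrite of @aydin's workaround along the lines recommended above, calling `self.run` instead of `super().__call__`:

```python
from celery import Task

class BaseTask(Task):
    def __call__(self, *args, **kwargs):
        self.before_run()
        return self.run(*args, **kwargs)  # keeps retries enqueueable

    def before_run(self):
        pass  # do stuff
```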
> To other people who find this issue & code suggestion from @aydin, so that you don't spend much time debugging.
>
> As of recent-ish updates to Celery docs following #5652, it is recommended to call `self.run` instead of `super.__call__` when overriding the `__call__` method. We have hit a similar scenario during testing when using `__call__` as described in #5643 where tasks are not enqueued for retries.
so should we close this issue?
I think we can add this to the documentation or add the hook if everyone wants it.
| 2021-08-22T17:59:03 |
celery/celery | 6,942 | celery__celery-6942 | [
"102"
] | 917088f6987d99b51364e43353c6ef1ce8e02e24 | diff --git a/celery/concurrency/prefork.py b/celery/concurrency/prefork.py
--- a/celery/concurrency/prefork.py
+++ b/celery/concurrency/prefork.py
@@ -41,6 +41,8 @@ def process_initializer(app, hostname):
Initialize the child pool process to ensure the correct
app instance is used and things like logging works.
"""
+ # Each running worker gets SIGKILL by OS when main process exits.
+ platforms.set_pdeathsig('SIGKILL')
_set_task_join_will_block(True)
platforms.signals.reset(*WORKER_SIGRESET)
platforms.signals.ignore(*WORKER_SIGIGNORE)
diff --git a/celery/platforms.py b/celery/platforms.py
--- a/celery/platforms.py
+++ b/celery/platforms.py
@@ -17,6 +17,7 @@
from contextlib import contextmanager
from billiard.compat import close_open_fds, get_fdmax
+from billiard.util import set_pdeathsig as _set_pdeathsig
# fileno used to be in this module
from kombu.utils.compat import maybe_fileno
from kombu.utils.encoding import safe_str
@@ -708,6 +709,16 @@ def strargv(argv):
return ''
+def set_pdeathsig(name):
+ """Sends signal ``name`` to process when parent process terminates."""
+ if signals.supported('SIGKILL'):
+ try:
+ _set_pdeathsig(signals.signum('SIGKILL'))
+ except OSError:
+ # We ignore when OS does not support set_pdeathsig
+ pass
+
+
def set_process_title(progname, info=None):
"""Set the :command:`ps` name for the currently running process.
| diff --git a/t/unit/concurrency/test_prefork.py b/t/unit/concurrency/test_prefork.py
--- a/t/unit/concurrency/test_prefork.py
+++ b/t/unit/concurrency/test_prefork.py
@@ -53,11 +53,18 @@ def get(self):
return self.value
+@patch('celery.platforms.set_mp_process_title')
class test_process_initializer:
+ @staticmethod
+ def Loader(*args, **kwargs):
+ loader = Mock(*args, **kwargs)
+ loader.conf = {}
+ loader.override_backends = {}
+ return loader
+
@patch('celery.platforms.signals')
- @patch('celery.platforms.set_mp_process_title')
- def test_process_initializer(self, set_mp_process_title, _signals):
+ def test_process_initializer(self, _signals, set_mp_process_title):
with mock.restore_logging():
from celery import signals
from celery._state import _tls
@@ -67,13 +74,7 @@ def test_process_initializer(self, set_mp_process_title, _signals):
on_worker_process_init = Mock()
signals.worker_process_init.connect(on_worker_process_init)
- def Loader(*args, **kwargs):
- loader = Mock(*args, **kwargs)
- loader.conf = {}
- loader.override_backends = {}
- return loader
-
- with self.Celery(loader=Loader) as app:
+ with self.Celery(loader=self.Loader) as app:
app.conf = AttributeDict(DEFAULTS)
process_initializer(app, 'awesome.worker.com')
_signals.ignore.assert_any_call(*WORKER_SIGIGNORE)
@@ -100,6 +101,19 @@ def Loader(*args, **kwargs):
finally:
os.environ.pop('CELERY_LOG_FILE', None)
+ @patch('celery.platforms.set_pdeathsig')
+ def test_pdeath_sig(self, _set_pdeathsig, set_mp_process_title):
+ with mock.restore_logging():
+ from celery import signals
+ on_worker_process_init = Mock()
+ signals.worker_process_init.connect(on_worker_process_init)
+ from celery.concurrency.prefork import process_initializer
+
+ with self.Celery(loader=self.Loader) as app:
+ app.conf = AttributeDict(DEFAULTS)
+ process_initializer(app, 'awesome.worker.com')
+ _set_pdeathsig.assert_called_once_with('SIGKILL')
+
class test_process_destructor:
diff --git a/t/unit/utils/test_platforms.py b/t/unit/utils/test_platforms.py
--- a/t/unit/utils/test_platforms.py
+++ b/t/unit/utils/test_platforms.py
@@ -18,7 +18,7 @@
close_open_fds, create_pidlock, detached,
fd_by_path, get_fdmax, ignore_errno, initgroups,
isatty, maybe_drop_privileges, parse_gid,
- parse_uid, set_mp_process_title,
+ parse_uid, set_mp_process_title, set_pdeathsig,
set_process_title, setgid, setgroups, setuid,
signals)
from celery.utils.text import WhateverIO
@@ -170,6 +170,18 @@ def test_setitem_raises(self, set):
signals['INT'] = lambda *a: a
+class test_set_pdeathsig:
+
+ def test_call(self):
+ set_pdeathsig('SIGKILL')
+
+ @t.skip.if_win32
+ def test_call_with_correct_parameter(self):
+ with patch('celery.platforms._set_pdeathsig') as _set_pdeathsig:
+ set_pdeathsig('SIGKILL')
+ _set_pdeathsig.assert_called_once_with(signal.SIGKILL)
+
+
@t.skip.if_win32
class test_get_fdmax:
| Restarting celery issues and better supervisord config file
I use a supervisord config file based on the example in the celery repository and have some issues when a celeryd restart occurs: sometimes task processing silently stops after restarting celery, without any error messages in the logs. The processes remain visible in the process list.
Finally I figured out that sometimes when the processes are restarted, celery spawns an additional process that is not managed by supervisord, and this leads to these bugs. So I started to watch the `ps` output carefully after each restart and kill the extra processes manually via kill <id>. After killing these processes, tasks begin to be executed properly. This is the sort of hack that solved the problem for a week or so.
And today I think the real reason has been found. The default supervisord value for the 'stopwaitsecs' option is 10s. This means that after 10s the celery process will be killed with the KILL signal instead of TERM. It seems that celery doesn't like being killed and tries to spawn an additional process in that case.
So I think it would be good to add something like 'stopwaitsecs=600' to all supervisord example config files (from the FAQ: "You should never stop celeryd with the KILL signal (-9), unless you've tried TERM a few times and waited a few minutes to let it get a chance to shut down.") and to investigate celeryd's behaviour on the KILL signal: it is mentioned in the docs that tasks will be lost (and that is tolerable in many cases), but the issue with the spawned process is a bit weird.
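A hedged sketch of the supervisord stanza this suggests (the program name, command, and paths are placeholders):

```
[program:celeryd]
command=/path/to/celeryd --loglevel=INFO
stopsignal=TERM
; give workers time to finish running tasks before supervisord
; escalates to SIGKILL
stopwaitsecs=600
```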
| Processes being spawned on receipt of the `KILL` signal is weird indeed. I don't see that behavior when celery is used outside of `supervisord`, so maybe this is something caused by it?
If you install the `setproctitle` module, celery should report the kind of process in `ps` listings, could you do that to investigate what kind of process is created?
(`easy_install setproctitle`)
Setting the timeout to `600` is probably good. Is there any setting for infinity (maybe with a warning if it takes too long)? When `celeryd` is killed via `TERM` (which is the preferred shutdown signal) it stops receiving messages and waits for the currently executing tasks to finish. And I guess for most applications, termination mid-execution is not acceptable.
As for process spawning: setproctitle and watching the process ids were helpful. It is not process spawning; worker processes remain alive when the parent process is killed.
This is a simulation of supervisord restart with manual killing and zero timeout:
```
4976 ? Ss 0:00 /usr/bin/python /usr/bin/supervisord --pidfile /var/run/supervisord.pid
5422 ? S 0:01 \_ [celerybeat] --schedule=/var/lib/celery/celerybeat-schedule-nadovmeste --loglevel=INFO
6101 ? Sl 0:00 \_ [celeryd.MainProcess] Running... (--loglevel=INFO)
6108 ? S 0:00 \_ [celeryd.PoolWorker-1]
nadovmeste:~# kill 6101 & kill -9 6101 &
```
ps -afx:
```
4976 ? Ss 0:00 /usr/bin/python /usr/bin/supervisord --pidfile /var/run/supervisord.pid
5422 ? S 0:01 \_ [celerybeat] --schedule=/var/lib/celery/celerybeat-schedule-nadovmeste --loglevel=INFO
6867 ? Sl 0:00 \_ [celeryd.MainProcess] Running... (--loglevel=INFO)
6875 ? S 0:00 \_ [celeryd.PoolWorker-1]
6108 ? S 0:00 [celeryd.PoolWorker-1]
```
I was able to reproduce this only with such an artificial race between `kill` and `kill -9`. Sometimes the worker gets killed properly. The issue seems to be supervisord-specific because when I start celeryd from the console I have no luck reproducing it.
I was able to reproduce this with console-started scripts after several attempts:
```
/home/nadovmeste/envs/nadovmeste/bin/python /home/nadovmeste/src/nadovmeste/manage.py celeryd -B --loglevel=INFO&
```
and then in another terminal session:
```
nadovmeste:~# ps -afx
6450 ? Ss 0:00 \_ sshd: root@pts/2
6452 pts/2 Ss+ 0:00 \_ -bash
9343 pts/2 Sl 0:00 \_ [celeryd.MainProcess] Running... (-B --loglevel=INFO)
9350 pts/2 S 0:00 \_ [celeryd.PoolWorker-2]
9355 pts/2 S 0:00 \_ [celerybeat]
nadovmeste:~# kill 9343 & kill -9 9343
nadovmeste:~# ps -afx
4526 ? Ss 0:00 \_ sshd: root@pts/1
4529 pts/1 Ss 0:00 | \_ -bash
9366 pts/1 R+ 0:00 | \_ ps -afx
6450 ? Ss 0:00 \_ sshd: root@pts/2
6452 pts/2 Ss+ 0:00 \_ -bash
...
9350 pts/2 S 0:00 [celeryd.PoolWorker-2]
9355 pts/2 S 0:00 [celerybeat]
```
I haven't found any special option for an infinite timeout with a warning in the supervisord docs. A very large number will probably suffice if that's what we want.
Maybe it's something related to celerybeat, because I was able to reproduce the issue for console-started celeryd only after using the `-B` option.
If I am testing some celery tasks locally and I use the -B option, sometimes the process is not killed when I use ctrl-c.
I can't reproduce this locally. Btw, are you running the master branch? I just fixed a bug that could hang shutdown. If you could test with this it would be nice.
Yes, I'm running the latest master branch. I saw your bug-fixing commit and hoped that it would help, but it seems that it doesn't help in my case: the latest celery seems to behave the same. But it is possible that the initial problem is solved - I checked this only with an immediate kill. Can't wrap my head around it now :) The ctrl-c issue is not reproducible with my setup.
So here is the bug report, simplified: http://gist.github.com/401028 . The results are always the same (not sometimes). I have some periodic tasks and some non-periodic ones. The tasks are simple and don't take much time to finish. Is it a bug that child processes stay alive after killing the main process? If so, and you can't reproduce it, then I'll try to provide a minimal project.
The celerybeat killing behaviour is interesting: when I kill the hanging(?) celerybeat process, the hanging(?) worker process also shuts down.
@kmike I still can't reproduce with the commands above. Maybe because I'm on OS X, or maybe you're running Python 2.5? (I'm running 2.6.1)
Could you run it with `--loglevel=DEBUG`? It could provide some info about where it stops.
The celerybeat process is started by the main process, so I'm assuming the main process is waiting
for the celerybeat to exit before it kills the remaining pool processes.
I thought that the main process was killed: it is not visible in the process list. I don't have much experience with process management though.
My setup was Debian Lenny + python 2.5.
I'll try to run celeryd with --loglevel=DEBUG and to reproduce it on my macbook.
hmm, you're right of course. It's almost like the beat process takes ownership of the pool processes.
I just tried to reproduce on Debian Lenny with python 2.5, and it works just right there.
Tried killing with both TERM and INT.
Ask, thank you for the help.
I think the initial problem was solved by the increased supervisord timeout and your bug-fixing commit. The simulation was incorrect because I used `kill -9` commands, and they send the KILL signal instead of TERM. With the TERM signal, processes get killed properly.
Supervisord uses the TERM signal, so all should be fine.
But the thing that scares me a bit is that the initial bug wasn't investigated. I'll try to reproduce it and let you know.
Ah! I'm so sorry. I didn't read the issue carefully enough. Yes! That's exactly what happens when you kill it with SIGKILL. The 9 signal can't be caught, so there's nothing we can do about this AFAIK.
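For readers following along, the TERM/KILL distinction is easy to demonstrate outside of celery; a minimal sketch in plain Python (no celery involved, POSIX only):
```python
import signal
import sys

def on_term(signum, frame):
    # TERM (and INT) can be trapped, so a worker gets a chance
    # to shut its children down cleanly before exiting.
    print('caught SIGTERM, shutting down cleanly')
    sys.exit(0)

signal.signal(signal.SIGTERM, on_term)

# KILL, by contrast, can never be trapped -- the interpreter
# refuses to install a handler for it at all.
try:
    signal.signal(signal.SIGKILL, on_term)
except (OSError, RuntimeError) as exc:
    print('cannot install a SIGKILL handler:', exc)
```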
In case you are still having issues terminating your Celery workers, you might try setting `stopasgroup=true` before increasing your `stopwaitsecs`.
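A sketch of how those options fit together in a supervisord program section (program name and paths are hypothetical):
```
[program:celeryd]
command=/path/to/python manage.py celeryd -B --loglevel=INFO
stopsignal=TERM    ; supervisord's default stop signal
stopwaitsecs=600   ; wait this long before escalating to SIGKILL
stopasgroup=true   ; signal the whole process group, not just the main PID
```
With `stopasgroup=true` the stop signal reaches the pool and beat processes directly instead of relying on the main process to forward it.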
| 2021-09-04T20:49:46 |
celery/celery | 7,057 | celery__celery-7057 | [
"7056"
] | 9c957547a77f581ad7742c2e4f5fb63643ded3e0 | diff --git a/celery/utils/log.py b/celery/utils/log.py
--- a/celery/utils/log.py
+++ b/celery/utils/log.py
@@ -224,13 +224,13 @@ def write(self, data):
if getattr(self._thread, 'recurse_protection', False):
# Logger is logging back to this file, so stop recursing.
return 0
- data = data.rstrip('\n')
if data and not self.closed:
self._thread.recurse_protection = True
try:
- safe_data = safe_str(data)
- self.logger.log(self.loglevel, safe_data)
- return len(safe_data)
+ safe_data = safe_str(data).rstrip('\n')
+ if safe_data:
+ self.logger.log(self.loglevel, safe_data)
+ return len(safe_data)
finally:
self._thread.recurse_protection = False
return 0
| diff --git a/t/unit/app/test_log.py b/t/unit/app/test_log.py
--- a/t/unit/app/test_log.py
+++ b/t/unit/app/test_log.py
@@ -286,6 +286,31 @@ def test_logging_proxy(self):
p.write('foo')
assert stderr.getvalue()
+ @mock.restore_logging()
+ def test_logging_proxy_bytes(self):
+ logger = self.setup_logger(loglevel=logging.ERROR, logfile=None,
+ root=False)
+
+ with mock.wrap_logger(logger) as sio:
+ p = LoggingProxy(logger, loglevel=logging.ERROR)
+ p.close()
+ p.write(b'foo')
+ assert 'foo' not in str(sio.getvalue())
+ p.closed = False
+ p.write(b'\n')
+ assert str(sio.getvalue()) == ''
+ write_res = p.write(b'foo ')
+ assert str(sio.getvalue()) == 'foo \n'
+ assert write_res == 4
+ p.flush()
+ p.close()
+ assert not p.isatty()
+
+ with mock.stdouts() as (stdout, stderr):
+ with in_sighandler():
+ p.write(b'foo')
+ assert stderr.getvalue()
+
@mock.restore_logging()
def test_logging_proxy_recurse_protection(self):
logger = self.setup_logger(loglevel=logging.ERROR, logfile=None,
| LoggingProxy.write raises TypeError due to rstrip on a bytes instance.
<!--
Please fill this template entirely and do not erase parts of it.
We reserve the right to close without a response
bug reports which are incomplete.
-->
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [ ] This has already been asked to the [discussions forum](https://github.com/celery/celery/discussions) first.
- [x] I have read the relevant section in the
[contribution guide](http://docs.celeryproject.org/en/latest/contributing.html#other-bugs)
on reporting bugs.
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the bug was already fixed in the master branch.
- [x] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
## Mandatory Debugging Information
- [x] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [ ] I have included the contents of ``pip freeze`` in the issue.
- [ ] I have included all the versions of all the external dependencies required
to reproduce this bug.
## Optional Debugging Information
<!--
Try some of the below if you think they are relevant.
It will help us figure out the scope of the bug and how many users it affects.
-->
- [ ] I have tried reproducing the issue on more than one Python version
and/or implementation.
- [ ] I have tried reproducing the issue on more than one message broker and/or
result backend.
- [ ] I have tried reproducing the issue on more than one version of the message
broker and/or result backend.
- [ ] I have tried reproducing the issue on more than one operating system.
- [ ] I have tried reproducing the issue on more than one workers pool.
- [ ] I have tried reproducing the issue with autoscaling, retries,
ETA/Countdown & rate limits disabled.
- [ ] I have tried reproducing the issue after downgrading
and/or upgrading Celery and its dependencies.
## Related Issues and Possible Duplicates
<!--
Please make sure to search and mention any related issues
or possible duplicates to this issue as requested by the checklist above.
This may or may not include issues in other repositories that the Celery project
maintains or other repositories that are dependencies of Celery.
If you don't know how to mention issues, please refer to Github's documentation
on the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests
-->
#### Related Issues
- [Created in PR #6838](https://github.com/celery/celery/pull/6838)
#### Possible Duplicates
- None
## Environment & Settings
<!-- Include the contents of celery --version below -->
**Celery version**: 5.2.0
<!-- Include the output of celery -A proj report below -->
<details>
<summary><b><code>celery report</code> Output:</b></summary>
<p>
```
TypeError: a bytes-like object is required, not 'str'
File "aspiredu/project/celery.py", line 43, in run_command
sys.stdout.write(line) # Write all output from the command
File "celery/utils/log.py", line 227, in write
data = data.rstrip('\n')
```
</p>
</details>
# Steps to Reproduce
## Required Dependencies
<!-- Please fill the required dependencies to reproduce this issue -->
* **Minimal Python Version**: 3.9.6
* **Minimal Celery Version**: 5.2
* **Minimal Kombu Version**: N/A or Unknown
* **Minimal Broker Version**: N/A or Unknown
* **Minimal Result Backend Version**: N/A or Unknown
* **Minimal OS and/or Kernel Version**: N/A or Unknown
* **Minimal Broker Client Version**: N/A or Unknown
* **Minimal Result Backend Client Version**: N/A or Unknown
### Python Packages
<!-- Please fill the contents of pip freeze below -->
<details>
<summary><b><code>pip freeze</code> Output:</b></summary>
<p>
```
scout_apm==2.23.3
```
</p>
</details>
### Other Dependencies
<!--
Please provide system dependencies, configuration files
and other dependency information if applicable
-->
<details>
<p>
N/A
</p>
</details>
## Minimally Reproducible Test Case
<!--
Please provide a reproducible test case.
Refer to the Reporting Bugs section in our contribution guide.
We prefer submitting test cases in the form of a PR to our integration test suite.
If you can provide one, please mention the PR number below.
If not, please attach the most minimal code example required to reproduce the issue below.
If the test case is too large, please include a link to a gist or a repository below.
-->
<details>
<p>
Add the following test to `t.unit.app.test_log.test_default_logger`.
What's happening is that the proxy's `write()` receives `data` of type bytes, but calls `rstrip` with a `str` argument, which breaks.
```python
@mock.restore_logging()
def test_logging_proxy_bytes(self):
logger = self.setup_logger(loglevel=logging.ERROR, logfile=None,
root=False)
with mock.wrap_logger(logger) as sio:
p = LoggingProxy(logger, loglevel=logging.ERROR)
p.close()
p.write(b'foo')
assert 'foo' not in str(sio.getvalue())
p.closed = False
p.write(b'\n')
assert str(sio.getvalue()) == ''
write_res = p.write(b'foo ')
assert str(sio.getvalue()) == 'foo \n'
assert write_res == 4
p.flush()
p.close()
assert not p.isatty()
with mock.stdouts() as (stdout, stderr):
with in_sighandler():
p.write(b'foo')
assert stderr.getvalue()
```
</p>
</details>
# Expected Behavior
<!-- Describe in detail what you expect to happen -->
Calling `LoggingProxy(logger).write(b'value')` should not break when a bytes instance is passed in.
# Actual Behavior
<!--
Describe in detail what actually happened.
Please include a backtrace and surround it with triple backticks (```).
In addition, include the Celery daemon logs, the broker logs,
the result backend logs and system logs below if they will help us debug
the issue.
-->
A `TypeError: a bytes-like object is required, not 'str'` is raised.
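The failure is reproducible without celery at all, since `bytes.rstrip()` in Python 3 refuses a `str` argument; a minimal REPL sketch of both the failure and the fixed ordering (coerce first, then strip):
```python
>>> b'foo\n'.rstrip('\n')        # what write() effectively did before the fix
Traceback (most recent call last):
  ...
TypeError: a bytes-like object is required, not 'str'
>>> from kombu.utils.encoding import safe_str
>>> safe_str(b'foo\n').rstrip('\n')  # coerce to text first, then strip
'foo'
```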
I will be creating a PR as a follow-up to this issue.
| Hey @tim-schilling :wave:,
Thank you for opening an issue. We will get back to you as soon as we can.
Also, check out our [Open Collective](https://opencollective.com/celery) and consider backing us - every little helps!
We also offer priority support for our sponsors.
If you require immediate assistance please consider sponsoring us.
| 2021-11-08T17:16:46 |
celery/celery | 7,077 | celery__celery-7077 | [
"6958"
] | 011dc063719c7bce9c105a8e86095a0ccbf7cb1e | diff --git a/t/unit/conftest.py b/t/unit/conftest.py
--- a/t/unit/conftest.py
+++ b/t/unit/conftest.py
@@ -1,13 +1,19 @@
+import builtins
+import inspect
+import io
import logging
import os
+import platform
import sys
import threading
+import types
import warnings
-from importlib import import_module
-from unittest.mock import Mock
+from contextlib import contextmanager
+from functools import wraps
+from importlib import import_module, reload
+from unittest.mock import MagicMock, Mock, patch
import pytest
-from case.utils import decorator
from kombu import Queue
from celery.backends.cache import CacheBackend, DummyClient
@@ -39,6 +45,24 @@ class WindowsError(Exception):
CASE_LOG_LEVEL_EFFECT = 'Test {0} modified the level of the root logger'
CASE_LOG_HANDLER_EFFECT = 'Test {0} modified handlers for the root logger'
+_SIO_write = io.StringIO.write
+_SIO_init = io.StringIO.__init__
+
+SENTINEL = object()
+
+
+def noop(*args, **kwargs):
+ pass
+
+
+class WhateverIO(io.StringIO):
+
+ def __init__(self, v=None, *a, **kw):
+ _SIO_init(self, v.decode() if isinstance(v, bytes) else v, *a, **kw)
+
+ def write(self, data):
+ _SIO_write(self, data.decode() if isinstance(data, bytes) else data)
+
@pytest.fixture(scope='session')
def celery_config():
@@ -88,7 +112,7 @@ def reset_cache_backend_state(celery_app):
backend._cache.clear()
-@decorator
+@contextmanager
def assert_signal_called(signal, **expected):
"""Context that verifes signal is called before exiting."""
handler = Mock()
@@ -113,7 +137,6 @@ def app(celery_app):
def AAA_disable_multiprocessing():
# pytest-cov breaks if a multiprocessing.Process is started,
# so disable them completely to make sure it doesn't happen.
- from unittest.mock import patch
stuff = [
'multiprocessing.Process',
'billiard.Process',
@@ -326,3 +349,447 @@ def import_all_modules(name=__name__, file=__file__,
'Ignored error importing module {}: {!r}'.format(
module, exc,
)))
+
+
[email protected]
+def sleepdeprived(request):
+ """Mock sleep method in patched module to do nothing.
+
+ Example:
+ >>> import time
+ >>> @pytest.mark.sleepdeprived_patched_module(time)
+ >>> def test_foo(self, sleepdeprived):
+ >>> pass
+ """
+ module = request.node.get_closest_marker(
+ "sleepdeprived_patched_module").args[0]
+ old_sleep, module.sleep = module.sleep, noop
+ try:
+ yield
+ finally:
+ module.sleep = old_sleep
+
+
+# Taken from
+# http://bitbucket.org/runeh/snippets/src/tip/missing_modules.py
[email protected]
+def mask_modules(request):
+ """Ban some modules from being importable inside the context
+ For example::
+ >>> @pytest.mark.masked_modules('sys')
+ >>> def test_foo(self, mask_modules):
+ ... try:
+ ... import sys
+ ... except ImportError:
+ ... print('sys not found')
+ sys not found
+ """
+ realimport = builtins.__import__
+ modnames = request.node.get_closest_marker("masked_modules").args
+
+ def myimp(name, *args, **kwargs):
+ if name in modnames:
+ raise ImportError('No module named %s' % name)
+ else:
+ return realimport(name, *args, **kwargs)
+
+ builtins.__import__ = myimp
+ try:
+ yield
+ finally:
+ builtins.__import__ = realimport
+
+
[email protected]
+def environ(request):
+ """Mock environment variable value.
+ Example::
+ >>> @pytest.mark.patched_environ('DJANGO_SETTINGS_MODULE', 'proj.settings')
+ >>> def test_other_settings(self, environ):
+ ... ...
+ """
+ env_name, env_value = request.node.get_closest_marker("patched_environ").args
+ prev_val = os.environ.get(env_name, SENTINEL)
+ os.environ[env_name] = env_value
+ try:
+ yield
+ finally:
+ if prev_val is SENTINEL:
+ os.environ.pop(env_name, None)
+ else:
+ os.environ[env_name] = prev_val
+
+
+def replace_module_value(module, name, value=None):
+ """Mock module value, given a module, attribute name and value.
+
+ Example::
+
+ >>> replace_module_value(module, 'CONSTANT', 3.03)
+ """
+ has_prev = hasattr(module, name)
+ prev = getattr(module, name, None)
+ if value:
+ setattr(module, name, value)
+ else:
+ try:
+ delattr(module, name)
+ except AttributeError:
+ pass
+ try:
+ yield
+ finally:
+ if prev is not None:
+ setattr(module, name, prev)
+ if not has_prev:
+ try:
+ delattr(module, name)
+ except AttributeError:
+ pass
+
+
+@contextmanager
+def platform_pyimp(value=None):
+ """Mock :data:`platform.python_implementation`
+ Example::
+ >>> with platform_pyimp('PyPy'):
+ ... ...
+ """
+ yield from replace_module_value(platform, 'python_implementation', value)
+
+
+@contextmanager
+def sys_platform(value=None):
+ """Mock :data:`sys.platform`
+
+ Example::
+ >>> with sys_platform('darwin'):
+ ... ...
+ """
+ prev, sys.platform = sys.platform, value
+ try:
+ yield
+ finally:
+ sys.platform = prev
+
+
+@contextmanager
+def pypy_version(value=None):
+ """Mock :data:`sys.pypy_version_info`
+
+ Example::
+ >>> with pypy_version((3, 6, 1)):
+ ... ...
+ """
+ yield from replace_module_value(sys, 'pypy_version_info', value)
+
+
+def _restore_logging():
+ outs = sys.stdout, sys.stderr, sys.__stdout__, sys.__stderr__
+ root = logging.getLogger()
+ level = root.level
+ handlers = root.handlers
+
+ try:
+ yield
+ finally:
+ sys.stdout, sys.stderr, sys.__stdout__, sys.__stderr__ = outs
+ root.level = level
+ root.handlers[:] = handlers
+
+
+@contextmanager
+def restore_logging_context_manager():
+ """Restore root logger handlers after test returns.
+ Example::
+ >>> with restore_logging_context_manager():
+ ... setup_logging()
+ """
+ yield from _restore_logging()
+
+
[email protected]
+def restore_logging(request):
+ """Restore root logger handlers after test returns.
+ Example::
+ >>> def test_foo(self, restore_logging):
+ ... setup_logging()
+ """
+ yield from _restore_logging()
+
+
[email protected]
+def module(request):
+ """Mock one or modules such that every attribute is a :class:`Mock`."""
+ yield from _module(*request.node.get_closest_marker("patched_module").args)
+
+
+@contextmanager
+def module_context_manager(*names):
+ """Mock one or modules such that every attribute is a :class:`Mock`."""
+ yield from _module(*names)
+
+
+def _module(*names):
+ prev = {}
+
+ class MockModule(types.ModuleType):
+
+ def __getattr__(self, attr):
+ setattr(self, attr, Mock())
+ return types.ModuleType.__getattribute__(self, attr)
+
+ mods = []
+ for name in names:
+ try:
+ prev[name] = sys.modules[name]
+ except KeyError:
+ pass
+ mod = sys.modules[name] = MockModule(name)
+ mods.append(mod)
+ try:
+ yield mods
+ finally:
+ for name in names:
+ try:
+ sys.modules[name] = prev[name]
+ except KeyError:
+ try:
+ del(sys.modules[name])
+ except KeyError:
+ pass
+
+
+class _patching:
+
+ def __init__(self, monkeypatch, request):
+ self.monkeypatch = monkeypatch
+ self.request = request
+
+ def __getattr__(self, name):
+ return getattr(self.monkeypatch, name)
+
+ def __call__(self, path, value=SENTINEL, name=None,
+ new=MagicMock, **kwargs):
+ value = self._value_or_mock(value, new, name, path, **kwargs)
+ self.monkeypatch.setattr(path, value)
+ return value
+
+ def object(self, target, attribute, *args, **kwargs):
+ return _wrap_context(
+ patch.object(target, attribute, *args, **kwargs),
+ self.request)
+
+ def _value_or_mock(self, value, new, name, path, **kwargs):
+ if value is SENTINEL:
+ value = new(name=name or path.rpartition('.')[2])
+ for k, v in kwargs.items():
+ setattr(value, k, v)
+ return value
+
+ def setattr(self, target, name=SENTINEL, value=SENTINEL, **kwargs):
+ # alias to __call__ with the interface of pytest.monkeypatch.setattr
+ if value is SENTINEL:
+ value, name = name, None
+ return self(target, value, name=name)
+
+ def setitem(self, dic, name, value=SENTINEL, new=MagicMock, **kwargs):
+ # same as pytest.monkeypatch.setattr but default value is MagicMock
+ value = self._value_or_mock(value, new, name, dic, **kwargs)
+ self.monkeypatch.setitem(dic, name, value)
+ return value
+
+ def modules(self, *mods):
+ modules = []
+ for mod in mods:
+ mod = mod.split('.')
+ modules.extend(reversed([
+ '.'.join(mod[:-i] if i else mod) for i in range(len(mod))
+ ]))
+ modules = sorted(set(modules))
+ return _wrap_context(module_context_manager(*modules), self.request)
+
+
+def _wrap_context(context, request):
+ ret = context.__enter__()
+
+ def fin():
+ context.__exit__(*sys.exc_info())
+ request.addfinalizer(fin)
+ return ret
+
+
[email protected]()
+def patching(monkeypatch, request):
+ """Monkeypath.setattr shortcut.
+ Example:
+ .. code-block:: python
+ >>> def test_foo(patching):
+ >>> # execv value here will be mock.MagicMock by default.
+ >>> execv = patching('os.execv')
+ >>> patching('sys.platform', 'darwin') # set concrete value
+ >>> patching.setenv('DJANGO_SETTINGS_MODULE', 'x.settings')
+ >>> # val will be of type mock.MagicMock by default
+ >>> val = patching.setitem('path.to.dict', 'KEY')
+ """
+ return _patching(monkeypatch, request)
+
+
+@contextmanager
+def stdouts():
+ """Override `sys.stdout` and `sys.stderr` with `StringIO`
+ instances.
+ >>> with conftest.stdouts() as (stdout, stderr):
+ ... something()
+ ... self.assertIn('foo', stdout.getvalue())
+ """
+ prev_out, prev_err = sys.stdout, sys.stderr
+ prev_rout, prev_rerr = sys.__stdout__, sys.__stderr__
+ mystdout, mystderr = WhateverIO(), WhateverIO()
+ sys.stdout = sys.__stdout__ = mystdout
+ sys.stderr = sys.__stderr__ = mystderr
+
+ try:
+ yield mystdout, mystderr
+ finally:
+ sys.stdout = prev_out
+ sys.stderr = prev_err
+ sys.__stdout__ = prev_rout
+ sys.__stderr__ = prev_rerr
+
+
+@contextmanager
+def reset_modules(*modules):
+ """Remove modules from :data:`sys.modules` by name,
+ and reset back again when the test/context returns.
+ Example::
+ >>> with conftest.reset_modules('celery.result', 'celery.app.base'):
+ ... pass
+ """
+ prev = {
+ k: sys.modules.pop(k) for k in modules if k in sys.modules
+ }
+
+ try:
+ for k in modules:
+ reload(import_module(k))
+ yield
+ finally:
+ sys.modules.update(prev)
+
+
+def get_logger_handlers(logger):
+ return [
+ h for h in logger.handlers
+ if not isinstance(h, logging.NullHandler)
+ ]
+
+
+@contextmanager
+def wrap_logger(logger, loglevel=logging.ERROR):
+ """Wrap :class:`logging.Logger` with a StringIO() handler.
+ yields a StringIO handle.
+ Example::
+ >>> with conftest.wrap_logger(logger, loglevel=logging.DEBUG) as sio:
+ ... ...
+ ... sio.getvalue()
+ """
+ old_handlers = get_logger_handlers(logger)
+ sio = WhateverIO()
+ siohandler = logging.StreamHandler(sio)
+ logger.handlers = [siohandler]
+
+ try:
+ yield sio
+ finally:
+ logger.handlers = old_handlers
+
+
+@contextmanager
+def _mock_context(mock):
+ context = mock.return_value = Mock()
+ context.__enter__ = Mock()
+ context.__exit__ = Mock()
+
+ def on_exit(*x):
+ if x[0]:
+ raise x[0] from x[1]
+ context.__exit__.side_effect = on_exit
+ context.__enter__.return_value = context
+ try:
+ yield context
+ finally:
+ context.reset()
+
+
+@contextmanager
+def open(side_effect=None):
+ """Patch builtins.open so that it returns StringIO object.
+ :param side_effect: Additional side effect for when the open context
+ is entered.
+ Example::
+ >>> with conftest.open(io.BytesIO) as open_fh:
+ ... something_opening_and_writing_bytes_to_a_file()
+ ... self.assertIn(b'foo', open_fh.getvalue())
+ """
+ with patch('builtins.open') as open_:
+ with _mock_context(open_) as context:
+ if side_effect is not None:
+ context.__enter__.side_effect = side_effect
+ val = context.__enter__.return_value = WhateverIO()
+ val.__exit__ = Mock()
+ yield val
+
+
+@contextmanager
+def module_exists(*modules):
+ """Patch one or more modules to ensure they exist.
+ A module name with multiple paths (e.g. gevent.monkey) will
+ ensure all parent modules are also patched (``gevent`` +
+ ``gevent.monkey``).
+ Example::
+ >>> with conftest.module_exists('gevent.monkey'):
+ ... gevent.monkey.patch_all = Mock(name='patch_all')
+ ... ...
+ """
+ gen = []
+ old_modules = []
+ for module in modules:
+ if isinstance(module, str):
+ module = types.ModuleType(module)
+ gen.append(module)
+ if module.__name__ in sys.modules:
+ old_modules.append(sys.modules[module.__name__])
+ sys.modules[module.__name__] = module
+ name = module.__name__
+ if '.' in name:
+ parent, _, attr = name.rpartition('.')
+ setattr(sys.modules[parent], attr, module)
+ try:
+ yield
+ finally:
+ for module in gen:
+ sys.modules.pop(module.__name__, None)
+ for module in old_modules:
+ sys.modules[module.__name__] = module
+
+
+def _bind(f, o):
+ @wraps(f)
+ def bound_meth(*fargs, **fkwargs):
+ return f(o, *fargs, **fkwargs)
+ return bound_meth
+
+
+class MockCallbacks:
+
+ def __new__(cls, *args, **kwargs):
+ r = Mock(name=cls.__name__)
+ cls.__init__(r, *args, **kwargs)
+ for key, value in vars(cls).items():
+ if key not in ('__dict__', '__weakref__', '__new__', '__init__'):
+ if inspect.ismethod(value) or inspect.isfunction(value):
+ r.__getattr__(key).side_effect = _bind(value, r)
+ else:
+ r.__setattr__(key, value)
+ return r
| diff --git a/celery/contrib/testing/mocks.py b/celery/contrib/testing/mocks.py
--- a/celery/contrib/testing/mocks.py
+++ b/celery/contrib/testing/mocks.py
@@ -2,15 +2,11 @@
import numbers
from datetime import datetime, timedelta
from typing import Any, Mapping, Sequence
+from unittest.mock import Mock
from celery import Celery
from celery.canvas import Signature
-try:
- from case import Mock
-except ImportError:
- from unittest.mock import Mock
-
def TaskMessage(
name, # type: str
@@ -113,3 +109,29 @@ def task_message_from_sig(app, sig, utc=True, TaskMessage=TaskMessage):
utc=utc,
**sig.options
)
+
+
+class _ContextMock(Mock):
+ """Dummy class implementing __enter__ and __exit__.
+
+ The :keyword:`with` statement requires these to be implemented
+ in the class, not just the instance.
+ """
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *exc_info):
+ pass
+
+
+def ContextMock(*args, **kwargs):
+ """Mock that mocks :keyword:`with` statement contexts."""
+ obj = _ContextMock(*args, **kwargs)
+ obj.attach_mock(_ContextMock(), '__enter__')
+ obj.attach_mock(_ContextMock(), '__exit__')
+ obj.__enter__.return_value = obj
+ # if __exit__ return a value the exception is ignored,
+ # so it must return None here.
+ obj.__exit__.return_value = None
+ return obj
diff --git a/requirements/test.txt b/requirements/test.txt
--- a/requirements/test.txt
+++ b/requirements/test.txt
@@ -1,4 +1,3 @@
-case>=1.3.1
pytest~=6.2
pytest-celery
pytest-subtests
diff --git a/t/unit/app/test_app.py b/t/unit/app/test_app.py
--- a/t/unit/app/test_app.py
+++ b/t/unit/app/test_app.py
@@ -9,7 +9,6 @@
from unittest.mock import Mock, patch
import pytest
-from case import ContextMock, mock
from vine import promise
from celery import Celery, _state
@@ -18,6 +17,7 @@
from celery.app import base as _appbase
from celery.app import defaults
from celery.backends.base import Backend
+from celery.contrib.testing.mocks import ContextMock
from celery.exceptions import ImproperlyConfigured
from celery.loaders.base import unconfigured
from celery.platforms import pyimplementation
@@ -25,6 +25,7 @@
from celery.utils.objects import Bunch
from celery.utils.serialization import pickle
from celery.utils.time import localize, timezone, to_utc
+from t.unit import conftest
THIS_IS_A_KEY = 'this is a value'
@@ -915,10 +916,10 @@ def add(x, y):
assert 'add1' in self.app.conf.beat_schedule
assert 'add2' in self.app.conf.beat_schedule
- def test_pool_no_multiprocessing(self):
- with mock.mask_modules('multiprocessing.util'):
- pool = self.app.pool
- assert pool is self.app._pool
+ @pytest.mark.masked_modules('multiprocessing.util')
+ def test_pool_no_multiprocessing(self, mask_modules):
+ pool = self.app.pool
+ assert pool is self.app._pool
def test_bugreport(self):
assert self.app.bugreport()
@@ -1078,26 +1079,26 @@ def test_enable_disable_trace(self):
class test_pyimplementation:
def test_platform_python_implementation(self):
- with mock.platform_pyimp(lambda: 'Xython'):
+ with conftest.platform_pyimp(lambda: 'Xython'):
assert pyimplementation() == 'Xython'
def test_platform_jython(self):
- with mock.platform_pyimp():
- with mock.sys_platform('java 1.6.51'):
+ with conftest.platform_pyimp():
+ with conftest.sys_platform('java 1.6.51'):
assert 'Jython' in pyimplementation()
def test_platform_pypy(self):
- with mock.platform_pyimp():
- with mock.sys_platform('darwin'):
- with mock.pypy_version((1, 4, 3)):
+ with conftest.platform_pyimp():
+ with conftest.sys_platform('darwin'):
+ with conftest.pypy_version((1, 4, 3)):
assert 'PyPy' in pyimplementation()
- with mock.pypy_version((1, 4, 3, 'a4')):
+ with conftest.pypy_version((1, 4, 3, 'a4')):
assert 'PyPy' in pyimplementation()
def test_platform_fallback(self):
- with mock.platform_pyimp():
- with mock.sys_platform('darwin'):
- with mock.pypy_version():
+ with conftest.platform_pyimp():
+ with conftest.sys_platform('darwin'):
+ with conftest.pypy_version():
assert 'CPython' == pyimplementation()
diff --git a/t/unit/app/test_builtins.py b/t/unit/app/test_builtins.py
--- a/t/unit/app/test_builtins.py
+++ b/t/unit/app/test_builtins.py
@@ -1,10 +1,10 @@
from unittest.mock import Mock, patch
import pytest
-from case import ContextMock
from celery import chord, group
from celery.app import builtins
+from celery.contrib.testing.mocks import ContextMock
from celery.utils.functional import pass1
diff --git a/t/unit/app/test_loaders.py b/t/unit/app/test_loaders.py
--- a/t/unit/app/test_loaders.py
+++ b/t/unit/app/test_loaders.py
@@ -4,7 +4,6 @@
from unittest.mock import Mock, patch
import pytest
-from case import mock
from celery import loaders
from celery.exceptions import NotConfigured
@@ -120,8 +119,8 @@ def test_read_configuration_not_a_package(self, find_module):
l.read_configuration(fail_silently=False)
@patch('celery.loaders.base.find_module')
- @mock.environ('CELERY_CONFIG_MODULE', 'celeryconfig.py')
- def test_read_configuration_py_in_name(self, find_module):
+ @pytest.mark.patched_environ('CELERY_CONFIG_MODULE', 'celeryconfig.py')
+ def test_read_configuration_py_in_name(self, find_module, environ):
find_module.side_effect = NotAPackage()
l = default.Loader(app=self.app)
with pytest.raises(NotAPackage):
diff --git a/t/unit/app/test_log.py b/t/unit/app/test_log.py
--- a/t/unit/app/test_log.py
+++ b/t/unit/app/test_log.py
@@ -6,8 +6,6 @@
from unittest.mock import Mock, patch
import pytest
-from case import mock
-from case.utils import get_logger_handlers
from celery import signals, uuid
from celery.app.log import TaskFormatter
@@ -15,6 +13,7 @@
get_task_logger, in_sighandler)
from celery.utils.log import logger as base_logger
from celery.utils.log import logger_isa, task_logger
+from t.unit import conftest
class test_TaskFormatter:
@@ -165,12 +164,10 @@ def test_get_logger_root(self):
logger = get_logger(base_logger.name)
assert logger.parent is logging.root
- @mock.restore_logging()
- def test_setup_logging_subsystem_misc(self):
+ def test_setup_logging_subsystem_misc(self, restore_logging):
self.app.log.setup_logging_subsystem(loglevel=None)
- @mock.restore_logging()
- def test_setup_logging_subsystem_misc2(self):
+ def test_setup_logging_subsystem_misc2(self, restore_logging):
self.app.conf.worker_hijack_root_logger = True
self.app.log.setup_logging_subsystem()
@@ -183,18 +180,15 @@ def test_configure_logger(self):
self.app.log._configure_logger(None, sys.stderr, None, '', False)
logger.handlers[:] = []
- @mock.restore_logging()
- def test_setup_logging_subsystem_colorize(self):
+ def test_setup_logging_subsystem_colorize(self, restore_logging):
self.app.log.setup_logging_subsystem(colorize=None)
self.app.log.setup_logging_subsystem(colorize=True)
- @mock.restore_logging()
- def test_setup_logging_subsystem_no_mputil(self):
- with mock.mask_modules('billiard.util'):
- self.app.log.setup_logging_subsystem()
+ @pytest.mark.masked_modules('billiard.util')
+ def test_setup_logging_subsystem_no_mputil(self, restore_logging, mask_modules):
+ self.app.log.setup_logging_subsystem()
- @mock.restore_logging()
- def test_setup_logger(self):
+ def test_setup_logger(self, restore_logging):
logger = self.setup_logger(loglevel=logging.ERROR, logfile=None,
root=False, colorize=True)
logger.handlers = []
@@ -202,16 +196,14 @@ def test_setup_logger(self):
logger = self.setup_logger(loglevel=logging.ERROR, logfile=None,
root=False, colorize=None)
# setup_logger logs to stderr without logfile argument.
- assert (get_logger_handlers(logger)[0].stream is
+ assert (conftest.get_logger_handlers(logger)[0].stream is
sys.__stderr__)
- @mock.restore_logging()
- def test_setup_logger_no_handlers_stream(self):
+ def test_setup_logger_no_handlers_stream(self, restore_logging):
l = self.get_logger()
l.handlers = []
- with mock.stdouts() as outs:
- stdout, stderr = outs
+ with conftest.stdouts() as (stdout, stderr):
l = self.setup_logger(logfile=sys.stderr,
loglevel=logging.INFO, root=False)
l.info('The quick brown fox...')
@@ -221,7 +213,7 @@ def test_setup_logger_no_handlers_stream(self):
def test_setup_logger_no_handlers_file(self, *args):
tempfile = mktemp(suffix='unittest', prefix='celery')
with patch('builtins.open') as osopen:
- with mock.restore_logging():
+ with conftest.restore_logging_context_manager():
files = defaultdict(StringIO)
def open_file(filename, *args, **kwargs):
@@ -236,16 +228,15 @@ def open_file(filename, *args, **kwargs):
l = self.setup_logger(
logfile=tempfile, loglevel=logging.INFO, root=False,
)
- assert isinstance(get_logger_handlers(l)[0],
+ assert isinstance(conftest.get_logger_handlers(l)[0],
logging.FileHandler)
assert tempfile in files
- @mock.restore_logging()
- def test_redirect_stdouts(self):
+ def test_redirect_stdouts(self, restore_logging):
logger = self.setup_logger(loglevel=logging.ERROR, logfile=None,
root=False)
try:
- with mock.wrap_logger(logger) as sio:
+ with conftest.wrap_logger(logger) as sio:
self.app.log.redirect_stdouts_to_logger(
logger, loglevel=logging.ERROR,
)
@@ -257,12 +248,11 @@ def test_redirect_stdouts(self):
finally:
sys.stdout, sys.stderr = sys.__stdout__, sys.__stderr__
- @mock.restore_logging()
- def test_logging_proxy(self):
+ def test_logging_proxy(self, restore_logging):
logger = self.setup_logger(loglevel=logging.ERROR, logfile=None,
root=False)
- with mock.wrap_logger(logger) as sio:
+ with conftest.wrap_logger(logger) as sio:
p = LoggingProxy(logger, loglevel=logging.ERROR)
p.close()
p.write('foo')
@@ -281,17 +271,16 @@ def test_logging_proxy(self):
p.close()
assert not p.isatty()
- with mock.stdouts() as (stdout, stderr):
+ with conftest.stdouts() as (stdout, stderr):
with in_sighandler():
p.write('foo')
assert stderr.getvalue()
- @mock.restore_logging()
- def test_logging_proxy_bytes(self):
+ def test_logging_proxy_bytes(self, restore_logging):
logger = self.setup_logger(loglevel=logging.ERROR, logfile=None,
root=False)
- with mock.wrap_logger(logger) as sio:
+ with conftest.wrap_logger(logger) as sio:
p = LoggingProxy(logger, loglevel=logging.ERROR)
p.close()
p.write(b'foo')
@@ -306,13 +295,12 @@ def test_logging_proxy_bytes(self):
p.close()
assert not p.isatty()
- with mock.stdouts() as (stdout, stderr):
+ with conftest.stdouts() as (stdout, stderr):
with in_sighandler():
p.write(b'foo')
assert stderr.getvalue()
- @mock.restore_logging()
- def test_logging_proxy_recurse_protection(self):
+ def test_logging_proxy_recurse_protection(self, restore_logging):
logger = self.setup_logger(loglevel=logging.ERROR, logfile=None,
root=False)
p = LoggingProxy(logger, loglevel=logging.ERROR)
diff --git a/t/unit/app/test_schedules.py b/t/unit/app/test_schedules.py
--- a/t/unit/app/test_schedules.py
+++ b/t/unit/app/test_schedules.py
@@ -2,16 +2,16 @@
from contextlib import contextmanager
from datetime import datetime, timedelta
from pickle import dumps, loads
+from unittest import TestCase
from unittest.mock import Mock
import pytest
import pytz
-from case import Case
from celery.schedules import (ParseException, crontab, crontab_parser,
schedule, solar)
-assertions = Case('__init__')
+assertions = TestCase('__init__')
@contextmanager
diff --git a/t/unit/backends/test_cache.py b/t/unit/backends/test_cache.py
--- a/t/unit/backends/test_cache.py
+++ b/t/unit/backends/test_cache.py
@@ -4,12 +4,12 @@
from unittest.mock import Mock, patch
import pytest
-from case import mock
from kombu.utils.encoding import ensure_bytes, str_to_bytes
from celery import signature, states, uuid
from celery.backends.cache import CacheBackend, DummyClient, backends
from celery.exceptions import ImproperlyConfigured
+from t.unit import conftest
class SomeClass:
@@ -148,7 +148,7 @@ def test_regression_worker_startup_info(self):
'cache+memcached://127.0.0.1:11211;127.0.0.2:11211;127.0.0.3/'
)
worker = self.app.Worker()
- with mock.stdouts():
+ with conftest.stdouts():
worker.on_start()
assert worker.startup_info()
@@ -201,31 +201,31 @@ class test_get_best_memcache(MockCacheMixin):
def test_pylibmc(self):
with self.mock_pylibmc():
- with mock.reset_modules('celery.backends.cache'):
+ with conftest.reset_modules('celery.backends.cache'):
from celery.backends import cache
cache._imp = [None]
assert cache.get_best_memcache()[0].__module__ == 'pylibmc'
- def test_memcache(self):
+ @pytest.mark.masked_modules('pylibmc')
+ def test_memcache(self, mask_modules):
with self.mock_memcache():
- with mock.reset_modules('celery.backends.cache'):
- with mock.mask_modules('pylibmc'):
- from celery.backends import cache
- cache._imp = [None]
- assert (cache.get_best_memcache()[0]().__module__ ==
- 'memcache')
-
- def test_no_implementations(self):
- with mock.mask_modules('pylibmc', 'memcache'):
- with mock.reset_modules('celery.backends.cache'):
+ with conftest.reset_modules('celery.backends.cache'):
from celery.backends import cache
cache._imp = [None]
- with pytest.raises(ImproperlyConfigured):
- cache.get_best_memcache()
+ assert (cache.get_best_memcache()[0]().__module__ ==
+ 'memcache')
+
+ @pytest.mark.masked_modules('pylibmc', 'memcache')
+ def test_no_implementations(self, mask_modules):
+ with conftest.reset_modules('celery.backends.cache'):
+ from celery.backends import cache
+ cache._imp = [None]
+ with pytest.raises(ImproperlyConfigured):
+ cache.get_best_memcache()
def test_cached(self):
with self.mock_pylibmc():
- with mock.reset_modules('celery.backends.cache'):
+ with conftest.reset_modules('celery.backends.cache'):
from celery.backends import cache
cache._imp = [None]
cache.get_best_memcache()[0](behaviors={'foo': 'bar'})
@@ -241,30 +241,30 @@ def test_backends(self):
class test_memcache_key(MockCacheMixin):
- def test_memcache_unicode_key(self):
+ @pytest.mark.masked_modules('pylibmc')
+ def test_memcache_unicode_key(self, mask_modules):
with self.mock_memcache():
- with mock.reset_modules('celery.backends.cache'):
- with mock.mask_modules('pylibmc'):
- from celery.backends import cache
- cache._imp = [None]
- task_id, result = str(uuid()), 42
- b = cache.CacheBackend(backend='memcache', app=self.app)
- b.store_result(task_id, result, state=states.SUCCESS)
- assert b.get_result(task_id) == result
-
- def test_memcache_bytes_key(self):
+ with conftest.reset_modules('celery.backends.cache'):
+ from celery.backends import cache
+ cache._imp = [None]
+ task_id, result = str(uuid()), 42
+ b = cache.CacheBackend(backend='memcache', app=self.app)
+ b.store_result(task_id, result, state=states.SUCCESS)
+ assert b.get_result(task_id) == result
+
+ @pytest.mark.masked_modules('pylibmc')
+ def test_memcache_bytes_key(self, mask_modules):
with self.mock_memcache():
- with mock.reset_modules('celery.backends.cache'):
- with mock.mask_modules('pylibmc'):
- from celery.backends import cache
- cache._imp = [None]
- task_id, result = str_to_bytes(uuid()), 42
- b = cache.CacheBackend(backend='memcache', app=self.app)
- b.store_result(task_id, result, state=states.SUCCESS)
- assert b.get_result(task_id) == result
+ with conftest.reset_modules('celery.backends.cache'):
+ from celery.backends import cache
+ cache._imp = [None]
+ task_id, result = str_to_bytes(uuid()), 42
+ b = cache.CacheBackend(backend='memcache', app=self.app)
+ b.store_result(task_id, result, state=states.SUCCESS)
+ assert b.get_result(task_id) == result
def test_pylibmc_unicode_key(self):
- with mock.reset_modules('celery.backends.cache'):
+ with conftest.reset_modules('celery.backends.cache'):
with self.mock_pylibmc():
from celery.backends import cache
cache._imp = [None]
@@ -274,7 +274,7 @@ def test_pylibmc_unicode_key(self):
assert b.get_result(task_id) == result
def test_pylibmc_bytes_key(self):
- with mock.reset_modules('celery.backends.cache'):
+ with conftest.reset_modules('celery.backends.cache'):
with self.mock_pylibmc():
from celery.backends import cache
cache._imp = [None]
diff --git a/t/unit/backends/test_cassandra.py b/t/unit/backends/test_cassandra.py
--- a/t/unit/backends/test_cassandra.py
+++ b/t/unit/backends/test_cassandra.py
@@ -3,7 +3,6 @@
from unittest.mock import Mock
import pytest
-from case import mock
from celery import states
from celery.exceptions import ImproperlyConfigured
@@ -17,7 +16,6 @@
]
[email protected](*CASSANDRA_MODULES)
class test_CassandraBackend:
def setup(self):
@@ -27,7 +25,8 @@ def setup(self):
cassandra_table='task_results',
)
- def test_init_no_cassandra(self, *modules):
+ @pytest.mark.patched_module(*CASSANDRA_MODULES)
+ def test_init_no_cassandra(self, module):
# should raise ImproperlyConfigured when no python-driver
# installed.
from celery.backends import cassandra as mod
@@ -38,7 +37,8 @@ def test_init_no_cassandra(self, *modules):
finally:
mod.cassandra = prev
- def test_init_with_and_without_LOCAL_QUROM(self, *modules):
+ @pytest.mark.patched_module(*CASSANDRA_MODULES)
+ def test_init_with_and_without_LOCAL_QUROM(self, module):
from celery.backends import cassandra as mod
mod.cassandra = Mock()
@@ -60,12 +60,14 @@ def test_init_with_and_without_LOCAL_QUROM(self, *modules):
app=self.app, keyspace='b', column_family='c',
)
+ @pytest.mark.patched_module(*CASSANDRA_MODULES)
@pytest.mark.usefixtures('depends_on_current_app')
- def test_reduce(self, *modules):
+ def test_reduce(self, module):
from celery.backends.cassandra import CassandraBackend
assert loads(dumps(CassandraBackend(app=self.app)))
- def test_get_task_meta_for(self, *modules):
+ @pytest.mark.patched_module(*CASSANDRA_MODULES)
+ def test_get_task_meta_for(self, module):
from celery.backends import cassandra as mod
mod.cassandra = Mock()
@@ -95,7 +97,8 @@ def test_as_uri(self):
x.as_uri()
x.as_uri(include_password=False)
- def test_store_result(self, *modules):
+ @pytest.mark.patched_module(*CASSANDRA_MODULES)
+ def test_store_result(self, module):
from celery.backends import cassandra as mod
mod.cassandra = Mock()
diff --git a/t/unit/backends/test_mongodb.py b/t/unit/backends/test_mongodb.py
--- a/t/unit/backends/test_mongodb.py
+++ b/t/unit/backends/test_mongodb.py
@@ -4,7 +4,6 @@
import pytest
import pytz
-from case import mock
from kombu.exceptions import EncodeError
try:
@@ -15,6 +14,7 @@
from celery import states, uuid
from celery.backends.mongodb import Binary, InvalidDocument, MongoBackend
from celery.exceptions import ImproperlyConfigured
+from t.unit import conftest
COLLECTION = 'taskmeta_celery'
TASK_ID = uuid()
@@ -529,7 +529,7 @@ def test_regression_worker_startup_info(self):
'/work4us?replicaSet=rs&ssl=true'
)
worker = self.app.Worker()
- with mock.stdouts():
+ with conftest.stdouts():
worker.on_start()
assert worker.startup_info()
diff --git a/t/unit/backends/test_redis.py b/t/unit/backends/test_redis.py
--- a/t/unit/backends/test_redis.py
+++ b/t/unit/backends/test_redis.py
@@ -8,14 +8,15 @@
from unittest.mock import ANY, Mock, call, patch
import pytest
-from case import ContextMock, mock
from celery import signature, states, uuid
from celery.canvas import Signature
+from celery.contrib.testing.mocks import ContextMock
from celery.exceptions import (BackendStoreError, ChordError,
ImproperlyConfigured)
from celery.result import AsyncResult, GroupResult
from celery.utils.collections import AttributeDict
+from t.unit import conftest
def raise_on_second_call(mock, exc, *retval):
@@ -61,7 +62,7 @@ def execute(self):
return [step(*a, **kw) for step, a, kw in self.steps]
-class PubSub(mock.MockCallbacks):
+class PubSub(conftest.MockCallbacks):
def __init__(self, ignore_subscribe_messages=False):
self._subscribed_to = set()
@@ -78,7 +79,7 @@ def get_message(self, timeout=None):
pass
-class Redis(mock.MockCallbacks):
+class Redis(conftest.MockCallbacks):
Connection = Connection
Pipeline = Pipeline
pubsub = PubSub
@@ -158,7 +159,7 @@ def zcount(self, key, min_, max_):
return len(self.zrangebyscore(key, min_, max_))
-class Sentinel(mock.MockCallbacks):
+class Sentinel(conftest.MockCallbacks):
def __init__(self, sentinels, min_other_sentinels=0, sentinel_kwargs=None,
**connection_kwargs):
self.sentinel_kwargs = sentinel_kwargs
diff --git a/t/unit/concurrency/test_prefork.py b/t/unit/concurrency/test_prefork.py
--- a/t/unit/concurrency/test_prefork.py
+++ b/t/unit/concurrency/test_prefork.py
@@ -5,7 +5,6 @@
from unittest.mock import Mock, patch
import pytest
-from case import mock
import t.skip
from celery.app.defaults import DEFAULTS
@@ -64,55 +63,53 @@ def Loader(*args, **kwargs):
return loader
@patch('celery.platforms.signals')
- def test_process_initializer(self, _signals, set_mp_process_title):
- with mock.restore_logging():
- from celery import signals
- from celery._state import _tls
- from celery.concurrency.prefork import (WORKER_SIGIGNORE,
- WORKER_SIGRESET,
- process_initializer)
- on_worker_process_init = Mock()
- signals.worker_process_init.connect(on_worker_process_init)
-
- with self.Celery(loader=self.Loader) as app:
- app.conf = AttributeDict(DEFAULTS)
- process_initializer(app, 'awesome.worker.com')
- _signals.ignore.assert_any_call(*WORKER_SIGIGNORE)
- _signals.reset.assert_any_call(*WORKER_SIGRESET)
- assert app.loader.init_worker.call_count
- on_worker_process_init.assert_called()
- assert _tls.current_app is app
- set_mp_process_title.assert_called_with(
- 'celeryd', hostname='awesome.worker.com',
- )
-
- with patch('celery.app.trace.setup_worker_optimizations') as S:
- os.environ['FORKED_BY_MULTIPROCESSING'] = '1'
- try:
- process_initializer(app, 'luke.worker.com')
- S.assert_called_with(app, 'luke.worker.com')
- finally:
- os.environ.pop('FORKED_BY_MULTIPROCESSING', None)
+ def test_process_initializer(self, _signals, set_mp_process_title, restore_logging):
+ from celery import signals
+ from celery._state import _tls
+ from celery.concurrency.prefork import (WORKER_SIGIGNORE,
+ WORKER_SIGRESET,
+ process_initializer)
+ on_worker_process_init = Mock()
+ signals.worker_process_init.connect(on_worker_process_init)
+
+ with self.Celery(loader=self.Loader) as app:
+ app.conf = AttributeDict(DEFAULTS)
+ process_initializer(app, 'awesome.worker.com')
+ _signals.ignore.assert_any_call(*WORKER_SIGIGNORE)
+ _signals.reset.assert_any_call(*WORKER_SIGRESET)
+ assert app.loader.init_worker.call_count
+ on_worker_process_init.assert_called()
+ assert _tls.current_app is app
+ set_mp_process_title.assert_called_with(
+ 'celeryd', hostname='awesome.worker.com',
+ )
- os.environ['CELERY_LOG_FILE'] = 'worker%I.log'
- app.log.setup = Mock(name='log_setup')
+ with patch('celery.app.trace.setup_worker_optimizations') as S:
+ os.environ['FORKED_BY_MULTIPROCESSING'] = '1'
try:
process_initializer(app, 'luke.worker.com')
+ S.assert_called_with(app, 'luke.worker.com')
finally:
- os.environ.pop('CELERY_LOG_FILE', None)
+ os.environ.pop('FORKED_BY_MULTIPROCESSING', None)
+
+ os.environ['CELERY_LOG_FILE'] = 'worker%I.log'
+ app.log.setup = Mock(name='log_setup')
+ try:
+ process_initializer(app, 'luke.worker.com')
+ finally:
+ os.environ.pop('CELERY_LOG_FILE', None)
@patch('celery.platforms.set_pdeathsig')
- def test_pdeath_sig(self, _set_pdeathsig, set_mp_process_title):
- with mock.restore_logging():
- from celery import signals
- on_worker_process_init = Mock()
- signals.worker_process_init.connect(on_worker_process_init)
- from celery.concurrency.prefork import process_initializer
-
- with self.Celery(loader=self.Loader) as app:
- app.conf = AttributeDict(DEFAULTS)
- process_initializer(app, 'awesome.worker.com')
- _set_pdeathsig.assert_called_once_with('SIGKILL')
+ def test_pdeath_sig(self, _set_pdeathsig, set_mp_process_title, restore_logging):
+ from celery import signals
+ on_worker_process_init = Mock()
+ signals.worker_process_init.connect(on_worker_process_init)
+ from celery.concurrency.prefork import process_initializer
+
+ with self.Celery(loader=self.Loader) as app:
+ app.conf = AttributeDict(DEFAULTS)
+ process_initializer(app, 'awesome.worker.com')
+ _set_pdeathsig.assert_called_once_with('SIGKILL')
class test_process_destructor:
diff --git a/t/unit/contrib/test_migrate.py b/t/unit/contrib/test_migrate.py
--- a/t/unit/contrib/test_migrate.py
+++ b/t/unit/contrib/test_migrate.py
@@ -3,7 +3,6 @@
import pytest
from amqp import ChannelError
-from case import mock
from kombu import Connection, Exchange, Producer, Queue
from kombu.transport.virtual import QoS
from kombu.utils.encoding import ensure_bytes
@@ -14,6 +13,7 @@
migrate_tasks, move, move_by_idmap,
move_by_taskmap, move_task_by_id,
start_filter, task_id_eq, task_id_in)
+from t.unit import conftest
# hack to ignore error at shutdown
QoS.restore_at_shutdown = False
@@ -203,7 +203,7 @@ def test_maybe_queue():
def test_filter_status():
- with mock.stdouts() as (stdout, stderr):
+ with conftest.stdouts() as (stdout, stderr):
filter_status(State(), {'id': '1', 'task': 'add'}, Mock())
assert stdout.getvalue()
diff --git a/t/unit/events/test_snapshot.py b/t/unit/events/test_snapshot.py
--- a/t/unit/events/test_snapshot.py
+++ b/t/unit/events/test_snapshot.py
@@ -1,7 +1,6 @@
from unittest.mock import Mock, patch
import pytest
-from case import mock
from celery.app.events import Events
from celery.events.snapshot import Polaroid, evcam
@@ -106,8 +105,7 @@ def setup(self):
self.app.events = self.MockEvents()
self.app.events.app = self.app
- @mock.restore_logging()
- def test_evcam(self):
+ def test_evcam(self, restore_logging):
evcam(Polaroid, timer=timer, app=self.app)
evcam(Polaroid, timer=timer, loglevel='CRITICAL', app=self.app)
self.MockReceiver.raise_keyboard_interrupt = True
diff --git a/t/unit/fixups/test_django.py b/t/unit/fixups/test_django.py
--- a/t/unit/fixups/test_django.py
+++ b/t/unit/fixups/test_django.py
@@ -2,10 +2,10 @@
from unittest.mock import Mock, patch
import pytest
-from case import mock
from celery.fixups.django import (DjangoFixup, DjangoWorkerFixup,
FixupWarning, _maybe_close_fd, fixup)
+from t.unit import conftest
class FixupCase:
@@ -54,6 +54,18 @@ def test_autodiscover_tasks(self, patching):
apps.get_app_configs.return_value = configs
assert f.autodiscover_tasks() == [c.name for c in configs]
+ @pytest.mark.masked_modules('django')
+ def test_fixup_no_django(self, patching, mask_modules):
+ with patch('celery.fixups.django.DjangoFixup') as Fixup:
+ patching.setenv('DJANGO_SETTINGS_MODULE', '')
+ fixup(self.app)
+ Fixup.assert_not_called()
+
+ patching.setenv('DJANGO_SETTINGS_MODULE', 'settings')
+ with pytest.warns(FixupWarning):
+ fixup(self.app)
+ Fixup.assert_not_called()
+
def test_fixup(self, patching):
with patch('celery.fixups.django.DjangoFixup') as Fixup:
patching.setenv('DJANGO_SETTINGS_MODULE', '')
@@ -61,11 +73,7 @@ def test_fixup(self, patching):
Fixup.assert_not_called()
patching.setenv('DJANGO_SETTINGS_MODULE', 'settings')
- with mock.mask_modules('django'):
- with pytest.warns(FixupWarning):
- fixup(self.app)
- Fixup.assert_not_called()
- with mock.module_exists('django'):
+ with conftest.module_exists('django'):
import django
django.VERSION = (1, 11, 1)
fixup(self.app)
@@ -257,17 +265,17 @@ def test_on_worker_ready(self):
f._settings.DEBUG = True
f.on_worker_ready()
- def test_validate_models(self, patching):
- with mock.module('django', 'django.db', 'django.core',
- 'django.core.cache', 'django.conf',
- 'django.db.utils'):
- f = self.Fixup(self.app)
- f.django_setup = Mock(name='django.setup')
- patching.modules('django.core.checks')
- from django.core.checks import run_checks
- f.validate_models()
- f.django_setup.assert_called_with()
- run_checks.assert_called_with()
+ @pytest.mark.patched_module('django', 'django.db', 'django.core',
+ 'django.core.cache', 'django.conf',
+ 'django.db.utils')
+ def test_validate_models(self, patching, module):
+ f = self.Fixup(self.app)
+ f.django_setup = Mock(name='django.setup')
+ patching.modules('django.core.checks')
+ from django.core.checks import run_checks
+ f.validate_models()
+ f.django_setup.assert_called_with()
+ run_checks.assert_called_with()
def test_django_setup(self, patching):
patching('celery.fixups.django.symbol_by_name')
diff --git a/t/unit/security/test_certificate.py b/t/unit/security/test_certificate.py
--- a/t/unit/security/test_certificate.py
+++ b/t/unit/security/test_certificate.py
@@ -3,10 +3,10 @@
from unittest.mock import Mock, patch
import pytest
-from case import mock
from celery.exceptions import SecurityError
from celery.security.certificate import Certificate, CertStore, FSCertStore
+from t.unit import conftest
from . import CERT1, CERT2, KEY1
from .case import SecurityCase
@@ -84,7 +84,7 @@ def test_init(self, Certificate, glob, isdir):
cert.has_expired.return_value = False
isdir.return_value = True
glob.return_value = ['foo.cert']
- with mock.open():
+ with conftest.open():
cert.get_id.return_value = 1
path = os.path.join('var', 'certs')
diff --git a/t/unit/security/test_security.py b/t/unit/security/test_security.py
--- a/t/unit/security/test_security.py
+++ b/t/unit/security/test_security.py
@@ -19,13 +19,13 @@
from unittest.mock import Mock, patch
import pytest
-from case import mock
from kombu.exceptions import SerializerNotInstalled
from kombu.serialization import disable_insecure_serializers, registry
from celery.exceptions import ImproperlyConfigured, SecurityError
from celery.security import disable_untrusted_serializers, setup_security
from celery.security.utils import reraise_errors
+from t.unit import conftest
from . import CERT1, KEY1
from .case import SecurityCase
@@ -120,7 +120,7 @@ def effect(*args):
self.app.conf.task_serializer = 'auth'
self.app.conf.accept_content = ['auth']
- with mock.open(side_effect=effect):
+ with conftest.open(side_effect=effect):
with patch('celery.security.registry') as registry:
store = Mock()
self.app.setup_security(['json'], key, cert, store)
diff --git a/t/unit/tasks/test_tasks.py b/t/unit/tasks/test_tasks.py
--- a/t/unit/tasks/test_tasks.py
+++ b/t/unit/tasks/test_tasks.py
@@ -4,12 +4,12 @@
from unittest.mock import ANY, MagicMock, Mock, patch, sentinel
import pytest
-from case import ContextMock
from kombu import Queue
from kombu.exceptions import EncodeError
from celery import Task, group, uuid
from celery.app.task import _reprtask
+from celery.contrib.testing.mocks import ContextMock
from celery.exceptions import Ignore, ImproperlyConfigured, Retry
from celery.result import AsyncResult, EagerResult
from celery.utils.time import parse_iso8601
diff --git a/t/unit/utils/test_platforms.py b/t/unit/utils/test_platforms.py
--- a/t/unit/utils/test_platforms.py
+++ b/t/unit/utils/test_platforms.py
@@ -7,7 +7,6 @@
from unittest.mock import Mock, call, patch
import pytest
-from case import mock
import t.skip
from celery import _find_option_with_arg, platforms
@@ -22,6 +21,7 @@
set_process_title, setgid, setgroups, setuid,
signals)
from celery.utils.text import WhateverIO
+from t.unit import conftest
try:
import resource
@@ -429,7 +429,7 @@ def test_without_resource(self):
@patch('celery.platforms.signals')
@patch('celery.platforms.maybe_drop_privileges')
@patch('os.geteuid')
- @patch(mock.open_fqdn)
+ @patch('builtins.open')
def test_default(self, open, geteuid, maybe_drop,
signals, pidlock):
geteuid.return_value = 0
@@ -530,7 +530,7 @@ def test_create_pidlock(self, Pidfile):
p = Pidfile.return_value = Mock()
p.is_locked.return_value = True
p.remove_if_stale.return_value = False
- with mock.stdouts() as (_, err):
+ with conftest.stdouts() as (_, err):
with pytest.raises(SystemExit):
create_pidlock('/var/pid')
assert 'already exists' in err.getvalue()
@@ -567,14 +567,14 @@ def test_is_locked(self, exists):
assert not p.is_locked()
def test_read_pid(self):
- with mock.open() as s:
+ with conftest.open() as s:
s.write('1816\n')
s.seek(0)
p = Pidfile('/var/pid')
assert p.read_pid() == 1816
def test_read_pid_partially_written(self):
- with mock.open() as s:
+ with conftest.open() as s:
s.write('1816')
s.seek(0)
p = Pidfile('/var/pid')
@@ -584,20 +584,20 @@ def test_read_pid_partially_written(self):
def test_read_pid_raises_ENOENT(self):
exc = IOError()
exc.errno = errno.ENOENT
- with mock.open(side_effect=exc):
+ with conftest.open(side_effect=exc):
p = Pidfile('/var/pid')
assert p.read_pid() is None
def test_read_pid_raises_IOError(self):
exc = IOError()
exc.errno = errno.EAGAIN
- with mock.open(side_effect=exc):
+ with conftest.open(side_effect=exc):
p = Pidfile('/var/pid')
with pytest.raises(IOError):
p.read_pid()
def test_read_pid_bogus_pidfile(self):
- with mock.open() as s:
+ with conftest.open() as s:
s.write('eighteensixteen\n')
s.seek(0)
p = Pidfile('/var/pid')
@@ -655,7 +655,7 @@ def test_remove_if_stale_process_alive(self, kill):
@patch('os.kill')
def test_remove_if_stale_process_dead(self, kill):
- with mock.stdouts():
+ with conftest.stdouts():
p = Pidfile('/var/pid')
p.read_pid = Mock()
p.read_pid.return_value = 1816
@@ -668,7 +668,7 @@ def test_remove_if_stale_process_dead(self, kill):
p.remove.assert_called_with()
def test_remove_if_stale_broken_pid(self):
- with mock.stdouts():
+ with conftest.stdouts():
p = Pidfile('/var/pid')
p.read_pid = Mock()
p.read_pid.side_effect = ValueError()
@@ -679,7 +679,7 @@ def test_remove_if_stale_broken_pid(self):
@patch('os.kill')
def test_remove_if_stale_unprivileged_user(self, kill):
- with mock.stdouts():
+ with conftest.stdouts():
p = Pidfile('/var/pid')
p.read_pid = Mock()
p.read_pid.return_value = 1817
@@ -704,7 +704,7 @@ def test_remove_if_stale_no_pidfile(self):
@patch('os.getpid')
@patch('os.open')
@patch('os.fdopen')
- @patch(mock.open_fqdn)
+ @patch('builtins.open')
def test_write_pid(self, open_, fdopen, osopen, getpid, fsync):
getpid.return_value = 1816
osopen.return_value = 13
@@ -731,7 +731,7 @@ def test_write_pid(self, open_, fdopen, osopen, getpid, fsync):
@patch('os.getpid')
@patch('os.open')
@patch('os.fdopen')
- @patch(mock.open_fqdn)
+ @patch('builtins.open')
def test_write_reread_fails(self, open_, fdopen,
osopen, getpid, fsync):
getpid.return_value = 1816
diff --git a/t/unit/utils/test_serialization.py b/t/unit/utils/test_serialization.py
--- a/t/unit/utils/test_serialization.py
+++ b/t/unit/utils/test_serialization.py
@@ -6,7 +6,6 @@
import pytest
import pytz
-from case import mock
from kombu import Queue
from celery.utils.serialization import (STRTOBOOL_DEFAULT_TABLE,
@@ -18,14 +17,14 @@
class test_AAPickle:
- def test_no_cpickle(self):
+ @pytest.mark.masked_modules('cPickle')
+ def test_no_cpickle(self, mask_modules):
prev = sys.modules.pop('celery.utils.serialization', None)
try:
- with mock.mask_modules('cPickle'):
- import pickle as orig_pickle
+ import pickle as orig_pickle
- from celery.utils.serialization import pickle
- assert pickle.dumps is orig_pickle.dumps
+ from celery.utils.serialization import pickle
+ assert pickle.dumps is orig_pickle.dumps
finally:
sys.modules['celery.utils.serialization'] = prev
diff --git a/t/unit/utils/test_threads.py b/t/unit/utils/test_threads.py
--- a/t/unit/utils/test_threads.py
+++ b/t/unit/utils/test_threads.py
@@ -1,10 +1,10 @@
from unittest.mock import patch
import pytest
-from case import mock
from celery.utils.threads import (Local, LocalManager, _FastLocalStack,
_LocalStack, bgThread)
+from t.unit import conftest
class test_bgThread:
@@ -17,7 +17,7 @@ def body(self):
raise KeyError()
with patch('os._exit') as _exit:
- with mock.stdouts():
+ with conftest.stdouts():
_exit.side_effect = ValueError()
t = T()
with pytest.raises(ValueError):
diff --git a/t/unit/worker/test_autoscale.py b/t/unit/worker/test_autoscale.py
--- a/t/unit/worker/test_autoscale.py
+++ b/t/unit/worker/test_autoscale.py
@@ -2,7 +2,7 @@
from time import monotonic
from unittest.mock import Mock, patch
-from case import mock
+import pytest
from celery.concurrency.base import BasePool
from celery.utils.objects import Bunch
@@ -100,8 +100,8 @@ def join(self, timeout=None):
x.stop()
assert not x.joined
- @mock.sleepdeprived(module=autoscale)
- def test_body(self):
+ @pytest.mark.sleepdeprived_patched_module(autoscale)
+ def test_body(self, sleepdeprived):
worker = Mock(name='worker')
x = autoscale.Autoscaler(self.pool, 10, 3, worker=worker)
x.body()
@@ -216,8 +216,8 @@ def body(self):
_exit.assert_called_with(1)
stderr.write.assert_called()
- @mock.sleepdeprived(module=autoscale)
- def test_no_negative_scale(self):
+ @pytest.mark.sleepdeprived_patched_module(autoscale)
+ def test_no_negative_scale(self, sleepdeprived):
total_num_processes = []
worker = Mock(name='worker')
x = autoscale.Autoscaler(self.pool, 10, 3, worker=worker)
diff --git a/t/unit/worker/test_consumer.py b/t/unit/worker/test_consumer.py
--- a/t/unit/worker/test_consumer.py
+++ b/t/unit/worker/test_consumer.py
@@ -5,8 +5,8 @@
import pytest
from billiard.exceptions import RestartFreqExceeded
-from case import ContextMock
+from celery.contrib.testing.mocks import ContextMock
from celery.utils.collections import LimitedSet
from celery.worker.consumer.agent import Agent
from celery.worker.consumer.consumer import (CANCEL_TASKS_BY_DEFAULT, CLOSE,
diff --git a/t/unit/worker/test_worker.py b/t/unit/worker/test_worker.py
--- a/t/unit/worker/test_worker.py
+++ b/t/unit/worker/test_worker.py
@@ -11,7 +11,6 @@
import pytest
from amqp import ChannelError
-from case import mock
from kombu import Connection
from kombu.asynchronous import get_event_loop
from kombu.common import QoS, ignore_errors
@@ -804,8 +803,8 @@ def test_with_autoscaler(self):
assert worker.autoscaler
@t.skip.if_win32
- @mock.sleepdeprived(module=autoscale)
- def test_with_autoscaler_file_descriptor_safety(self):
+ @pytest.mark.sleepdeprived_patched_module(autoscale)
+ def test_with_autoscaler_file_descriptor_safety(self, sleepdeprived):
# Given: a test celery worker instance with auto scaling
worker = self.create_worker(
autoscale=[10, 5], use_eventloop=True,
@@ -853,8 +852,8 @@ def test_with_autoscaler_file_descriptor_safety(self):
worker.pool.terminate()
@t.skip.if_win32
- @mock.sleepdeprived(module=autoscale)
- def test_with_file_descriptor_safety(self):
+ @pytest.mark.sleepdeprived_patched_module(autoscale)
+ def test_with_file_descriptor_safety(self, sleepdeprived):
# Given: a test celery worker instance
worker = self.create_worker(
autoscale=[10, 5], use_eventloop=True,
| 5.1.2: test suite uses outdated `case` module
Looks like the pytest-based test suite is using the `case` module, which is not maintained (the last release was in 2017).
```console
+ /usr/bin/pytest -ra --ignore=t/unit/backends/test_s3.py --ignore=t/unit/backends/test_mongodb.py --ignore=t/distro/test_CI_reqs.py
=========================================================================== test session starts ============================================================================
platform linux -- Python 3.8.12, pytest-6.2.5, py-1.10.0, pluggy-0.13.1
benchmark: 3.4.1 (defaults: timer=time.perf_counter disable_gc=False min_rounds=5 min_time=0.000005 max_time=1.0 calibration_precision=10 warmup=False warmup_iterations=100000)
Using --randomly-seed=4261956870
rootdir: /home/tkloczko/rpmbuild/BUILD/celery-5.1.2, configfile: pytest.ini
plugins: forked-1.3.0, shutil-1.7.0, virtualenv-1.7.0, expect-1.1.0, flake8-1.0.7, timeout-1.4.2, betamax-0.8.1, freezegun-0.4.2, aspectlib-1.5.2, toolbox-0.5, rerunfailures-9.1.1, requests-mock-1.9.3, cov-2.12.1, flaky-3.7.0, benchmark-3.4.1, xdist-2.3.0, pylama-7.7.1, datadir-1.3.1, regressions-2.2.0, cases-3.6.3, xprocess-0.18.1, black-0.3.12, asyncio-0.15.1, subtests-0.5.0, isort-2.0.0, hypothesis-6.14.6, mock-3.6.1, profiling-1.7.0, randomly-3.8.0, nose2pytest-1.0.8, pyfakefs-4.5.1, tornado-0.8.1, twisted-1.13.3, aiohttp-0.3.0, localserver-0.5.0, anyio-3.3.1, trio-0.7.0, Faker-8.12.2
collected 0 items / 1 error
================================================================================== ERRORS ==================================================================================
______________________________________________________________________ ERROR collecting test session _______________________________________________________________________
/usr/lib64/python3.8/importlib/__init__.py:127: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
<frozen importlib._bootstrap>:1014: in _gcd_import
???
<frozen importlib._bootstrap>:991: in _find_and_load
???
<frozen importlib._bootstrap>:975: in _find_and_load_unlocked
???
<frozen importlib._bootstrap>:671: in _load_unlocked
???
/usr/lib/python3.8/site-packages/_pytest/assertion/rewrite.py:170: in exec_module
exec(co, module.__dict__)
t/unit/conftest.py:10: in <module>
from case.utils import decorator
E ModuleNotFoundError: No module named 'case'
========================================================================= short test summary info ==========================================================================
ERROR - ModuleNotFoundError: No module named 'case'
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
============================================================================= 1 error in 0.57s =============================================================================
pytest-xprocess reminder::Be sure to terminate the started process by running 'pytest --xkill' if you have not explicitly done so in your fixture with 'xprocess.getinfo(<process_name>).terminate()'.
```
| Hey @kloczek :wave:,
Thank you for opening an issue. We will get back to you as soon as we can.
Also, check out our [Open Collective](https://opencollective.com/celery) and consider backing us - every little helps!
We also offer priority support for our sponsors.
If you require immediate assistance please consider sponsoring us.
| 2021-11-10T21:23:21 |
celery/celery | 7,089 | celery__celery-7089 | [
"7052"
] | fe37cd834109810dc778845378880abdf7d08ff6 | diff --git a/celery/backends/asynchronous.py b/celery/backends/asynchronous.py
--- a/celery/backends/asynchronous.py
+++ b/celery/backends/asynchronous.py
@@ -66,18 +66,30 @@ def wait_for(self, p, wait, timeout=None):
class greenletDrainer(Drainer):
spawn = None
_g = None
+ _drain_complete_event = None # event, sent (and recreated) after every drain_events iteration
+
+ def _create_drain_complete_event(self):
+ """create new self._drain_complete_event object"""
+ pass
+
+ def _send_drain_complete_event(self):
+ """raise self._drain_complete_event for wakeup .wait_for"""
+ pass
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._started = threading.Event()
self._stopped = threading.Event()
self._shutdown = threading.Event()
+ self._create_drain_complete_event()
def run(self):
self._started.set()
while not self._stopped.is_set():
try:
self.result_consumer.drain_events(timeout=1)
+ self._send_drain_complete_event()
+ self._create_drain_complete_event()
except socket.timeout:
pass
self._shutdown.set()
@@ -89,8 +101,14 @@ def start(self):
def stop(self):
self._stopped.set()
+ self._send_drain_complete_event()
self._shutdown.wait(THREAD_TIMEOUT_MAX)
+ def wait_for(self, p, wait, timeout=None):
+ self.start()
+ if not p.ready:
+ self._drain_complete_event.wait(timeout=timeout)
+
@register_drainer('eventlet')
class eventletDrainer(greenletDrainer):
@@ -101,10 +119,12 @@ def spawn(self, func):
sleep(0)
return g
- def wait_for(self, p, wait, timeout=None):
- self.start()
- if not p.ready:
- self._g._exit_event.wait(timeout=timeout)
+ def _create_drain_complete_event(self):
+ from eventlet.event import Event
+ self._drain_complete_event = Event()
+
+ def _send_drain_complete_event(self):
+ self._drain_complete_event.send()
@register_drainer('gevent')
@@ -116,11 +136,13 @@ def spawn(self, func):
gevent.sleep(0)
return g
- def wait_for(self, p, wait, timeout=None):
- import gevent
- self.start()
- if not p.ready:
- gevent.wait([self._g], timeout=timeout)
+ def _create_drain_complete_event(self):
+ from gevent.event import Event
+ self._drain_complete_event = Event()
+
+ def _send_drain_complete_event(self):
+ self._drain_complete_event.set()
+ self._create_drain_complete_event()
class AsyncBackendMixin:
| diff --git a/t/unit/backends/test_asynchronous.py b/t/unit/backends/test_asynchronous.py
--- a/t/unit/backends/test_asynchronous.py
+++ b/t/unit/backends/test_asynchronous.py
@@ -158,7 +158,11 @@ def sleep(self):
def result_consumer_drain_events(self, timeout=None):
import eventlet
- eventlet.sleep(0)
+
+ # `drain_events` of asynchronous backends with pubsub has to sleep
+ # while waiting for events for not more than the `timeout` interval,
+ # but events may arrive sooner
+ eventlet.sleep(timeout/10)
def schedule_thread(self, thread):
import eventlet
@@ -204,7 +208,11 @@ def sleep(self):
def result_consumer_drain_events(self, timeout=None):
import gevent
- gevent.sleep(0)
+
+ # `drain_events` of asynchronous backends with pubsub has to sleep
+ # while waiting for events for not more than the `timeout` interval,
+ # but events may arrive sooner
+ gevent.sleep(timeout/10)
def schedule_thread(self, thread):
import gevent
| AsyncResult.get() has at least 1 second latency while executing under gevent
# Checklist
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Enhancement%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical enhancement to an existing feature.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22Issue+Type%3A+Enhancement%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed enhancements.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the same enhancement was already implemented in the
master branch.
- [x] I have included all related issues and possible duplicate issues in this issue
(If there are none, check this box anyway).
## Related Issues and Possible Duplicates
#### Related Issues
- https://github.com/celery/celery/pull/5974
#### Possible Duplicates
- None
# Brief Summary
When an asynchronous backend such as Redis with pubsub is used, the result of a short task (0.1s long, for example) is available immediately.
Once I add gevent, the result of the same short task becomes available only after 1s, 10 times slower!
To clarify: in both cases I used an identical, separate worker run in docker with the standard prefork pool.
`celery -A tasks worker --prefetch-multiplier=1 --concurrency=10`
This behavior relates to the implementation introduced in GH-5974:
AsyncResult.get() in a gevent context has at least 1 second latency, because
>I've updated the code to have a specific implementation for gevent and eventlet that will cause wait_for to only return every "timeout" # of seconds, rather than returning immediately
I would like the task result to be available as soon as possible after the worker has submitted it. I think that `wait_for` should return immediately after the socket operation in `result_consumer.drain_events` completes, rather than every "timeout" # of seconds.
It won't cause overhead, because the socket operation still does not block the event loop, so other greenlets can keep executing.
# Design
Currently `wait_for` waits (with a 1 second timeout) for `greenletDrainer.run` to finish; of course it never finishes, so `wait_for` only returns after the 1 second timeout.
If `wait_for` instead waited for the end of the socket operation `_pubsub.get_message` rather than for `greenletDrainer.run`, latency would be lower while the event loop would still not be blocked, so other greenlets could keep executing.
I can prepare a PR with changes to `greenletDrainer`, `eventletDrainer` and `geventDrainer` in `backends.asynchronous` so that, every time `result_consumer.drain_events` returns (in the `greenletDrainer.run` loop), a notification is sent to wake up all greenlets waiting for results in `wait_for`.
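A minimal sketch of the idea for the gevent case (condensed from the patch above; the eventlet variant is analogous):
```python
from gevent.event import Event

class geventDrainer(greenletDrainer):  # assumes the existing base class
    def _create_drain_complete_event(self):
        self._drain_complete_event = Event()

    def _send_drain_complete_event(self):
        # wake every greenlet currently blocked in wait_for(), then re-arm
        self._drain_complete_event.set()
        self._create_drain_complete_event()

    # In the base class, run() calls _send_drain_complete_event() after each
    # result_consumer.drain_events(timeout=1) iteration, and wait_for()
    # blocks on self._drain_complete_event.wait(timeout=timeout).
```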
## Architectural Considerations
None
## Proposed Behavior
Asynchronous backends (for example Redis) will be just as efficient under gevent as without it.
## Proposed UI/UX
## Diagrams
N/A
## Alternatives
Another workaround may be to pass the `interval` argument into `BaseResultConsumer._wait_for_pending`, and from there into `drain_events_until` and then into `wait_for`.
In this case it would be possible to define your own timeout for each `wait_for` iteration when executing `AsyncResult.get(interval=0.05)`.
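For illustration, `AsyncResult.get()` already accepts an `interval` keyword (today it is honoured by polling backends); the alternative above would make the greenlet drainers honour it too:
```python
# Hypothetical behaviour once `interval` is threaded through to wait_for;
# currently the greenlet drainers ignore it and wake up once per second.
result = sleep.delay(0.1)
print(result.get(interval=0.05))  # check for the result every 50ms
```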
## How to reproduce
1. run docker with redis and rabbitmq
```bash
docker run -d -p 6379:6379 --name redis redis
docker run -d -p 5672:5672 --name rabbit rabbitmq:3
```
2. create `tasks.py` with your IP address for the broker and backend
```python
from celery import Celery
import time
app = Celery('tasks', broker='pyamqp://[email protected]//', backend='redis://192.168.1.40')
@app.task(bind=True)
def sleep(self, timeout):
print(f'task {self.request.id} sleep({timeout}) started')
time.sleep(timeout)
print(f'task {self.request.id} sleep({timeout}) finished')
return f'task {self.request.id} sleep({timeout}) finished'
```
create Dockerfile
```Dockerfile
FROM python:3.8
RUN pip install celery redis
WORKDIR /root
CMD bash
```
build and run worker
```bash
docker build . -t worker
docker run -it --entrypoint celery -v $(pwd):/root worker -A tasks worker --prefetch-multiplier=1 --concurrency=10 --loglevel DEBUG
```
create gevent_example.py
```python
import gevent
from gevent import monkey; monkey.patch_all()
from datetime import datetime
from tasks import sleep
st = datetime.now()
def test(timeout):
print(f'{datetime.now() - st} sleep.delay({timeout})')
a = sleep.delay(timeout)
gevent.sleep(0)
res = a.get()
print(f'{datetime.now()-st} {res}')
return str(res)
jobs = [gevent.spawn(test, timeout) for timeout in [0.1, 0.1, 0.1, 0.1, 0.1, 0.1]]
gevent.sleep(0)
gevent.joinall(jobs)
print(f'{datetime.now()-st} finish')
```
Now run gevent_example.py:
```
0:00:00.015999 sleep.delay(0.1)
0:00:00.055000 sleep.delay(0.1)
0:00:00.055000 sleep.delay(0.1)
0:00:00.055999 sleep.delay(0.1)
0:00:00.055999 sleep.delay(0.1)
0:00:00.056997 sleep.delay(0.1)
0:00:01.226389 task 3a4074c3-294b-4bfc-9139-8ed8724ef564 sleep(0.1) finished
0:00:01.226389 task e69e3232-f925-4a9e-afbc-13add65e5806 sleep(0.1) finished
0:00:01.226389 task 23c40dd4-6083-4d04-aac9-3c23e8508912 sleep(0.1) finished
0:00:01.241475 task df35ca20-8e7d-47b0-9fe8-13fe6b1a7a65 sleep(0.1) finished
0:00:01.241475 task 76bf5b84-fe4b-4791-9686-2dd3018221b0 sleep(0.1) finished
0:00:01.241475 task 2fc0ae38-2521-4371-8caf-88d36ea53eff sleep(0.1) finished
0:00:01.241475 finish
```
It can be seen that the first task, 0.1s long, completed only after ~1.23s (0:00:01.226389).
But if the monkey-patching line is commented out:
```python
# from gevent import monkey; monkey.patch_all()
```
```
0:00:00.015999 sleep.delay(0.1)
0:00:00.226367 sleep.delay(0.1)
0:00:00.228763 sleep.delay(0.1)
0:00:00.231746 sleep.delay(0.1)
0:00:00.233513 sleep.delay(0.1)
0:00:00.234733 sleep.delay(0.1)
0:00:00.338647 task 38f76488-d956-4671-be14-e43df9a593aa sleep(0.1) finished
0:00:00.349064 task 5222feed-6a49-4a55-8a47-f516c3642c30 sleep(0.1) finished
0:00:00.354673 task 87529c14-50a2-4ab3-8e3f-b1f9d7241c42 sleep(0.1) finished
0:00:00.361298 task af68327d-6c37-44e4-9ffa-388ef7c8a8e7 sleep(0.1) finished
0:00:00.363136 task 390980d6-e99d-40b4-afa5-241080ade7a4 sleep(0.1) finished
0:00:00.364271 task f9115418-3175-4caa-98d2-2b39be4ace41 sleep(0.1) finished
0:00:00.364271 finish
```
It can be seen that the first task, 0.1s long, completed after just ~0.34s (0:00:00.338647).
With the proposed changes in backends.asynchronous it becomes:
```
0:00:00.016001 sleep.delay(0.1)
0:00:00.054004 sleep.delay(0.1)
0:00:00.055003 sleep.delay(0.1)
0:00:00.055003 sleep.delay(0.1)
0:00:00.056002 sleep.delay(0.1)
0:00:00.056002 sleep.delay(0.1)
0:00:00.322748 task 00a71b72-c12d-40fe-b7e1-7526120c25ad sleep(0.1) finished
0:00:00.325041 task fa31d127-35c9-42f8-a5a4-0d1e29172b59 sleep(0.1) finished
0:00:00.346190 task 3ecb8a4e-78e2-471d-b6ac-e84f9b2c44ac sleep(0.1) finished
0:00:00.349314 task eaa27232-37b1-42ed-87f5-e92c19e87aed sleep(0.1) finished
0:00:00.351648 task 48ed06c8-3683-4353-88e1-f35aeb870332 sleep(0.1) finished
0:00:00.353663 task 8e95e4b7-6988-4b00-b93d-d4240eb38645 sleep(0.1) finished
0:00:00.353663 finish
```
It can be seen that the first task, 0.1s long, completed after just ~0.32s (0:00:00.322748).
| Hey @mrmaxi :wave:,
Thank you for opening an issue. We will get back to you as soon as we can.
Also, check out our [Open Collective](https://opencollective.com/celery) and consider backing us - every little helps!
We also offer priority support for our sponsors.
If you require immediate assistance please consider sponsoring us.
| 2021-11-14T13:28:30 |
celery/celery | 7,244 | celery__celery-7244 | [
"7200",
"7200"
] | 0dd1e470ffe05646877ddf076d2700a8f5a824a9 | diff --git a/celery/bin/worker.py b/celery/bin/worker.py
--- a/celery/bin/worker.py
+++ b/celery/bin/worker.py
@@ -327,6 +327,10 @@ def worker(ctx, hostname=None, pool_cls=None, app=None, uid=None, gid=None,
argv.remove('--detach')
if '-D' in argv:
argv.remove('-D')
+ if "--uid" in argv:
+ argv.remove('--uid')
+ if "--gid" in argv:
+ argv.remove('--gid')
return detach(sys.executable,
argv,
| Start celery in the detached mode
Dears,
I have been dealing with an issue starting celery in detached mode. Nothing gets logged.
`root>celery multi start <Node> -A <App> -Q <Queue> -c 1 --without-mingle --without-gossip --uid=<user> --gid=
<user_group> --pidfile=<path to pid> --logfile=<path to log>`
`celery multi start` in verbose mode logs the following command, but it also fails without any logging:
` root>/usr/bin/python3 -m celery -A <App> worker --detach -Q <Queue>-c 1 --uid=<user> --gid=<user_group> --pidfile=<path to pid> --logfile=<path to log> -n <Node> --executable=/usr/bin/python3`
By debugging the detach(...) function in celery.bin.worker I identified that detached() has already changed the process owner to <user>/<user_group>. But the os.execv(path, [path] + argv) call passes the --uid/--gid params again, and the user it switched to does not have permission to change them, so it fails with:
```
return os.initgroups(username, gid)
OSError: [Errno 1] Operation not permitted
```
```
def detach(path, argv, logfile=None, pidfile=None, uid=None,
gid=None, umask=None, workdir=None, fake=False, app=None,
executable=None, hostname=None):
"""Detach program by argv."""
fake = 1 if C_FAKEFORK else fake
# `detached()` will attempt to touch the logfile to confirm that error
# messages won't be lost after detaching stdout/err, but this means we need
# to pre-format it rather than relying on `setup_logging_subsystem()` like
# we can elsewhere.
logfile = node_format(logfile, hostname)
with detached(logfile, pidfile, uid, gid, umask, workdir, fake, after_forkers=False): <<< changes user here
try:
if executable is not None:
path = executable
os.execv(path, [path] + argv) <<< fails here
return EX_OK
except Exception: # pylint: disable=broad-except
if app is None:
from celery import current_app
app = current_app
app.log.setup_logging_subsystem(
'ERROR', logfile, hostname=hostname)
logger.critical("Can't exec %r", ' '.join([path] + argv),
exc_info=True)
return EX_FAILURE
```
The issue reproduces on all 5.x.x versions.
| Hey @ssvasilyev :wave:,
Thank you for opening an issue. We will get back to you as soon as we can.
Also, check out our [Open Collective](https://opencollective.com/celery) and consider backing us - every little helps!
We also offer priority support for our sponsors.
If you require immediate assistance please consider sponsoring us.
This should be easy to fix since we just need to pop them from `argv`.
Care to provide a PR?
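For whoever picks this up, a minimal sketch of the idea (illustrative only, not necessarily the final patch); note it also handles the `--uid=<user>` form used in the report above, which a plain `argv.remove('--uid')` would not catch:
```python
def strip_uid_gid(argv):
    """Drop --uid/--gid (and their values) from argv before re-exec,
    since detached() has already switched to the target user."""
    cleaned, skip_next = [], False
    for arg in argv:
        if skip_next:                      # value of a space-separated flag
            skip_next = False
            continue
        if arg in ('--uid', '--gid'):
            skip_next = True
            continue
        if arg.startswith(('--uid=', '--gid=')):
            continue
        cleaned.append(arg)
    return cleaned
```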
| 2022-01-20T20:59:50 |
|
celery/celery | 7,246 | celery__celery-7246 | [
"7245"
] | 0dd1e470ffe05646877ddf076d2700a8f5a824a9 | diff --git a/celery/bin/base.py b/celery/bin/base.py
--- a/celery/bin/base.py
+++ b/celery/bin/base.py
@@ -179,7 +179,6 @@ def __init__(self, *args, **kwargs):
self.params.append(CeleryOption(('-f', '--logfile'), help_group="Daemonization Options"))
self.params.append(CeleryOption(('--pidfile',), help_group="Daemonization Options"))
self.params.append(CeleryOption(('--uid',), help_group="Daemonization Options"))
- self.params.append(CeleryOption(('--uid',), help_group="Daemonization Options"))
self.params.append(CeleryOption(('--gid',), help_group="Daemonization Options"))
self.params.append(CeleryOption(('--umask',), help_group="Daemonization Options"))
self.params.append(CeleryOption(('--executable',), help_group="Daemonization Options"))
| Duplicate param 'uid' in CeleryDaemonCommand
# Checklist
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?utf8=%E2%9C%93&q=is%3Aissue+label%3A%22Category%3A+Documentation%22+)
for similar or identical bug reports.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the bug was already fixed in the master branch.
- [x] I have included all related issues and possible duplicate issues in this issue
(If there are none, check this box anyway).
## Related Issues and Possible Duplicates
#### Related Issues
- None
#### Possible Duplicates
- None
# Description
CeleryDaemonCommand has a duplicated CeleryOption for --uid, which causes the docs of every command that uses it (beat, multi, worker, etc.) to contain a duplicated line.
# Suggestions
Remove the duplicate in CeleryDaemonCommand in celery/celery/bin/base.py
| Hey @Smixi :wave:,
Thank you for opening an issue. We will get back to you as soon as we can.
Also, check out our [Open Collective](https://opencollective.com/celery) and consider backing us - every little helps!
We also offer priority support for our sponsors.
If you require immediate assistance please consider sponsoring us.
| 2022-01-20T21:29:45 |
|
celery/celery | 7,373 | celery__celery-7373 | [
"7372",
"7372"
] | aedd30b2186718e81fbd935d84f4d145a3fa0bca | diff --git a/celery/__init__.py b/celery/__init__.py
--- a/celery/__init__.py
+++ b/celery/__init__.py
@@ -20,7 +20,7 @@
__version__ = '5.2.3'
__author__ = 'Ask Solem'
__contact__ = '[email protected]'
-__homepage__ = 'http://celeryproject.org'
+__homepage__ = 'https://docs.celeryq.dev/'
__docformat__ = 'restructuredtext'
__keywords__ = 'task job queue distributed messaging actor'
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -175,8 +175,8 @@ def run_tests(self):
]
},
project_urls={
- "Documentation": "https://docs.celeryproject.org/en/latest/index.html",
- "Changelog": "https://docs.celeryproject.org/en/stable/changelog.html",
+ "Documentation": "https://docs.celeryq.dev/en/stable/",
+ "Changelog": "https://docs.celeryq.dev/en/stable/changelog.html",
"Code": "https://github.com/celery/celery",
"Tracker": "https://github.com/celery/celery/issues",
"Funding": "https://opencollective.com/celery"
| Update the links in PyPI to the new site.
The [PyPI page](https://pypi.org/project/celery/) still points to the old URLs, which confuses users because those domains no longer exist and in some cases the SSL certificate is invalid.
| Hey @mvaled :wave:,
Thank you for opening an issue. We will get back to you as soon as we can.
Also, check out our [Open Collective](https://opencollective.com/celery) and consider backing us - every little helps!
We also offer priority support for our sponsors.
If you require immediate assistance please consider sponsoring us.
| 2022-03-23T10:25:14 |
|
celery/celery | 7,470 | celery__celery-7470 | [
"4838"
] | 67c0dd0b8e00779f9c16e533cc2d50932379fc45 | diff --git a/celery/app/amqp.py b/celery/app/amqp.py
--- a/celery/app/amqp.py
+++ b/celery/app/amqp.py
@@ -447,7 +447,7 @@ def _create_task_sender(self):
default_rkey = self.app.conf.task_default_routing_key
default_serializer = self.app.conf.task_serializer
- default_compressor = self.app.conf.result_compression
+ default_compressor = self.app.conf.task_compression
def send_task_message(producer, name, message,
exchange=None, routing_key=None, queue=None,
| diff --git a/t/unit/app/test_amqp.py b/t/unit/app/test_amqp.py
--- a/t/unit/app/test_amqp.py
+++ b/t/unit/app/test_amqp.py
@@ -205,8 +205,7 @@ def test_as_task_message_without_utc(self):
self.app.amqp.as_task_v1(uuid(), 'foo', countdown=30, expires=40)
-class test_AMQP:
-
+class test_AMQP_Base:
def setup(self):
self.simple_message = self.app.amqp.as_task_v2(
uuid(), 'foo', create_sent_event=True,
@@ -215,6 +214,9 @@ def setup(self):
uuid(), 'foo', create_sent_event=False,
)
+
+class test_AMQP(test_AMQP_Base):
+
def test_kwargs_must_be_mapping(self):
with pytest.raises(TypeError):
self.app.amqp.as_task_v2(uuid(), 'foo', kwargs=[1, 2])
@@ -336,7 +338,7 @@ def update_conf_runtime_for_tasks_queues(self):
assert router != router_was
-class test_as_task_v2:
+class test_as_task_v2(test_AMQP_Base):
def test_raises_if_args_is_not_tuple(self):
with pytest.raises(TypeError):
@@ -368,8 +370,27 @@ def test_eta_to_datetime(self):
)
assert m.headers['eta'] == eta.isoformat()
- def test_callbacks_errbacks_chord(self):
+ def test_compression(self):
+ self.app.conf.task_compression = 'gzip'
+
+ prod = Mock(name='producer')
+ self.app.amqp.send_task_message(
+ prod, 'foo', self.simple_message_no_sent_event,
+ compression=None
+ )
+ assert prod.publish.call_args[1]['compression'] == 'gzip'
+
+ def test_compression_override(self):
+ self.app.conf.task_compression = 'gzip'
+
+ prod = Mock(name='producer')
+ self.app.amqp.send_task_message(
+ prod, 'foo', self.simple_message_no_sent_event,
+ compression='bz2'
+ )
+ assert prod.publish.call_args[1]['compression'] == 'bz2'
+ def test_callbacks_errbacks_chord(self):
@self.app.task
def t(i):
pass
| Setting task_compression value doesn't actually do anything
## Checklist
* [ ] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
Celery==4.2.0
* [x] I have verified that the issue exists against the `master` branch of Celery.
## Steps to reproduce
```python3
app = Celery()
app.conf.update(task_compression="gzip")
```
## Expected behavior
I would expect that if I set a global `task_compression` config, my tasks are indeed gzipped. I shouldn't have to manually define compression=gzip on each task I want.
## Actual behavior
I don't think this setting is respected. This could be the wrong place to look, but it seems like `amqp` is using the wrong compression setting when sending a message: https://github.com/celery/celery/blob/7d9300b3b94399eafb5e40a08a0cdc8b05f896aa/celery/app/amqp.py#L548. `default_compressor` is being set here: https://github.com/celery/celery/blob/7d9300b3b94399eafb5e40a08a0cdc8b05f896aa/celery/app/amqp.py#L483 - should this read `default_compressor = self.app.conf.task_compression` ? Or am I missing something?
| Sorry for the late reply. What's the current status?
I can confirm that - working the same in my case.
Exactly as @asgoel points out - it is an obvious bug in the code and setting the `task_compression` does nothing, `result_compression` is used instead.
What is even more interesting, `result_compression` compresses... just the task headers, while results are still not compressed, because the producer call in `AMQPBackend.store_result` looks like:
```
with self.app.amqp.producer_pool.acquire(block=True) as producer:
producer.publish(
payload,
exchange=self.exchange,
routing_key=routing_key,
correlation_id=correlation_id,
serializer=self.serializer,
retry=True, retry_policy=self.retry_policy,
declare=self.on_reply_declare(task_id),
delivery_mode=self.delivery_mode,
)
```
So it is missing the compression kwarg.
It also seems that `result_compression` does not work for Redis or any other backend either.
which version of celery are you using? 4.4.0?
@auvipy 4.4.0 right now. It compresses task messages, but results are not compressed. Checked that directly in redis too.
Same problem with Celery 4.4.7. No observable change in the messages in Redis when my celeryconfig.py has task_compression="gzip".
When I submit the task with apply_async(compression="gzip") it does work.
https://github.com/celery/celery/blob/5b86b35c81ea5a1fbfd439861f4fee6813148d16/celery/app/task.py#L303
I would think the "task_compression" setting would be referenced here in the code, but it is not:
```python
from_config = (
    ('serializer', 'task_serializer'),
    ('rate_limit', 'task_default_rate_limit'),
    ('priority', 'task_default_priority'),
    ('track_started', 'task_track_started'),
    ('acks_late', 'task_acks_late'),
    ('acks_on_failure_or_timeout', 'task_acks_on_failure_or_timeout'),
    ('reject_on_worker_lost', 'task_reject_on_worker_lost'),
    ('ignore_result', 'task_ignore_result'),
    ('store_errors_even_if_ignored', 'task_store_errors_even_if_ignored'),
)
```
I solved this issue with a custom serialization function, like this:
```python
import bz2
import pickle

from kombu import serialization


def pickle_bz2_encoder(data):
    return bz2.compress(pickle.dumps(data))


def pickle_bz2_decoder(data):
    return pickle.loads(bz2.decompress(data))


serialization.register('pickle-bzip2', pickle_bz2_encoder, pickle_bz2_decoder, 'application/x-pickle-bz2', 'binary')
```
and config
```python
result_serializer = 'pickle-bzip2'
accept_content = ['pickle', 'pickle-bzip2']
```
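(Presumably, to compress the task *messages* too with this workaround, and not only the results, you would also set `task_serializer = 'pickle-bzip2'` in the config; I have not tested that.)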
Allowing compression to actually work here would be extremely helpful.
Hello,
is the `result_compression` option actually doing anything for the task results at all?
I have done a global search for it in `celery` (5.2.3) and this is the only place it is used:
```python
def _create_task_sender(self):
default_retry = self.app.conf.task_publish_retry
default_policy = self.app.conf.task_publish_retry_policy
default_delivery_mode = self.app.conf.task_default_delivery_mode
default_queue = self.default_queue
queues = self.queues
send_before_publish = signals.before_task_publish.send
before_receivers = signals.before_task_publish.receivers
send_after_publish = signals.after_task_publish.send
after_receivers = signals.after_task_publish.receivers
send_task_sent = signals.task_sent.send # XXX compat
sent_receivers = signals.task_sent.receivers
default_evd = self._event_dispatcher
default_exchange = self.default_exchange
default_rkey = self.app.conf.task_default_routing_key
default_serializer = self.app.conf.task_serializer
default_compressor = self.app.conf.result_compression
```
which tells me it is used for task compression, not results?
Was this ever working at all?
You're all absolutely right, it should be `self.app.conf.task_compression` in this instance.
Welp. It seems like compressing results isn't supported and only the setting exists. Is anyone up for a quick PR to fix that? | 2022-04-18T14:43:30 |
celery/celery | 7,481 | celery__celery-7481 | [
"7480"
] | 969e36a8d6823dff88fce2669cfcb59de7275a3d | diff --git a/celery/backends/dynamodb.py b/celery/backends/dynamodb.py
--- a/celery/backends/dynamodb.py
+++ b/celery/backends/dynamodb.py
@@ -128,7 +128,7 @@ def __init__(self, url=None, table_name=None, *args, **kwargs):
self.time_to_live_seconds = int(ttl)
except ValueError as e:
logger.error(
- 'TTL must be a number; got "{ttl}"',
+ f'TTL must be a number; got "{ttl}"',
exc_info=e
)
raise e
| Missing `f` prefix on f-strings
Some strings look like they're meant to be f-strings but are missing the `f` prefix, meaning variable interpolation won't happen.
https://github.com/celery/celery/blob/8d35c655d6ac408023da5e30ca81bc834e68bca0/celery/backends/dynamodb.py#L131
I found this issue automatically. I'm a bot. Beep Boop 🦊. See other issues I found in your repo [here](https://codereview.doctor/celery/celery)
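A quick REPL illustration of the difference (with a made-up `ttl` value):
```python
>>> ttl = 'abc'
>>> 'TTL must be a number; got "{ttl}"'
'TTL must be a number; got "{ttl}"'
>>> f'TTL must be a number; got "{ttl}"'
'TTL must be a number; got "abc"'
```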
| Hey @code-review-doctor :wave:,
Thank you for opening an issue. We will get back to you as soon as we can.
Also, check out our [Open Collective](https://opencollective.com/celery) and consider backing us - every little helps!
We also offer priority support for our sponsors.
If you require immediate assistance please consider sponsoring us.
| 2022-04-24T00:16:43 |
|
celery/celery | 7,544 | celery__celery-7544 | [
"7358"
] | b0d6a3bc33c14b82451ffd6ebef2f9b403156ec4 | diff --git a/celery/bin/worker.py b/celery/bin/worker.py
--- a/celery/bin/worker.py
+++ b/celery/bin/worker.py
@@ -351,7 +351,7 @@ def worker(ctx, hostname=None, pool_cls=None, app=None, uid=None, gid=None,
quiet=ctx.obj.quiet,
**kwargs)
worker.start()
- return worker.exitcode
+ ctx.exit(worker.exitcode)
except SecurityError as e:
ctx.obj.error(e.args[0])
ctx.exit(1)
diff --git a/t/unit/bin/proj/app.py b/t/unit/bin/proj/app.py
--- a/t/unit/bin/proj/app.py
+++ b/t/unit/bin/proj/app.py
@@ -1,3 +1,4 @@
from celery import Celery
app = Celery(set_as_current=False)
+app.config_from_object("t.integration.test_worker_config")
| diff --git a/requirements/test.txt b/requirements/test.txt
--- a/requirements/test.txt
+++ b/requirements/test.txt
@@ -2,6 +2,7 @@ pytest~=7.1.1
pytest-celery
pytest-subtests==0.8.0
pytest-timeout~=2.1.0
+pytest-click
boto3>=1.9.178
moto>=2.2.6
# typing extensions
diff --git a/t/integration/test_worker.py b/t/integration/test_worker.py
new file mode 100644
--- /dev/null
+++ b/t/integration/test_worker.py
@@ -0,0 +1,18 @@
+import subprocess
+
+import pytest
+
+
+def test_run_worker():
+ with pytest.raises(subprocess.CalledProcessError) as exc_info:
+ subprocess.check_output(
+ ["celery", "--config", "t.integration.test_worker_config", "worker"],
+ stderr=subprocess.STDOUT)
+
+ called_process_error = exc_info.value
+ assert called_process_error.returncode == 1, called_process_error
+ output = called_process_error.output.decode('utf-8')
+ assert output.find(
+ "Retrying to establish a connection to the message broker after a connection "
+ "loss has been disabled (app.conf.broker_connection_retry_on_startup=False). "
+ "Shutting down...") != -1, output
diff --git a/t/integration/test_worker_config.py b/t/integration/test_worker_config.py
new file mode 100644
--- /dev/null
+++ b/t/integration/test_worker_config.py
@@ -0,0 +1,12 @@
+# Test config for t/integration/test_worker.py
+
+broker_url = 'amqp://guest:guest@foobar:1234//'
+
+# Fail fast for test_run_worker
+broker_connection_retry_on_startup = False
+broker_connection_retry = False
+broker_connection_timeout = 0
+
+worker_log_color = False
+
+worker_redirect_stdouts = False
diff --git a/t/unit/app/test_app.py b/t/unit/app/test_app.py
--- a/t/unit/app/test_app.py
+++ b/t/unit/app/test_app.py
@@ -591,8 +591,8 @@ def test_worker_main(self, mocked_celery):
mocked_celery.main.assert_called_with(
args=['worker', '--help'], standalone_mode=False)
- def test_config_from_envvar(self):
- os.environ['CELERYTEST_CONFIG_OBJECT'] = 't.unit.app.test_app'
+ def test_config_from_envvar(self, monkeypatch):
+ monkeypatch.setenv("CELERYTEST_CONFIG_OBJECT", 't.unit.app.test_app')
self.app.config_from_envvar('CELERYTEST_CONFIG_OBJECT')
assert self.app.conf.THIS_IS_A_KEY == 'this is a value'
diff --git a/t/unit/bin/test_worker.py b/t/unit/bin/test_worker.py
new file mode 100644
--- /dev/null
+++ b/t/unit/bin/test_worker.py
@@ -0,0 +1,20 @@
+import pytest
+from click.testing import CliRunner
+
+from celery.app.log import Logging
+from celery.bin.celery import celery
+
+
[email protected](scope='session')
+def use_celery_app_trap():
+ return False
+
+
+def test_cli(isolated_cli_runner: CliRunner):
+ Logging._setup = True # To avoid hitting the logging sanity checks
+ res = isolated_cli_runner.invoke(
+ celery,
+ ["-A", "t.unit.bin.proj.app", "worker", "--pool", "solo"],
+ catch_exceptions=False
+ )
+ assert res.exit_code == 1, (res, res.stdout)
diff --git a/t/unit/contrib/test_worker.py b/t/unit/contrib/test_worker.py
--- a/t/unit/contrib/test_worker.py
+++ b/t/unit/contrib/test_worker.py
@@ -1,56 +1,47 @@
import pytest
+# this import adds a @shared_task, which uses connect_on_app_finalize
+# to install the celery.ping task that the test lib uses
+import celery.contrib.testing.tasks # noqa: F401
from celery import Celery
from celery.contrib.testing.worker import start_worker
-app = Celery('celerytest',
- backend='cache+memory://',
- broker='memory://',
- )
-
-
[email protected]
-def add(x, y):
- return x + y
-
-
-def test_start_worker():
- app.config_from_object({
- 'worker_hijack_root_logger': False,
- })
- # this import adds a @shared_task, which uses connect_on_app_finalize
- # to install the celery.ping task that the test lib uses
- import celery.contrib.testing.tasks # noqa: F401
-
- # to avoid changing the root logger level to ERROR,
- # we have we have to set both app.log.loglevel start_worker arg to 0
- # (see celery.app.log.setup_logging_subsystem)
- app.log.loglevel = 0
- with start_worker(app=app, loglevel=0):
- result = add.s(1, 2).apply_async()
- val = result.get(timeout=5)
- assert val == 3
-
-
[email protected]
-def error_task():
- raise NotImplementedError()
-
-
-def test_start_worker_with_exception():
- """Make sure that start_worker does not hang on exception"""
- app.config_from_object({
- 'worker_hijack_root_logger': False,
- })
- # this import adds a @shared_task, which uses connect_on_app_finalize
- # to install the celery.ping task that the test lib uses
- import celery.contrib.testing.tasks # noqa: F401
-
- # to avoid changing the root logger level to ERROR,
- # we have we have to set both app.log.loglevel start_worker arg to 0
- # (see celery.app.log.setup_logging_subsystem)
- app.log.loglevel = 0
- with pytest.raises(NotImplementedError):
- with start_worker(app=app, loglevel=0):
- result = error_task.apply_async()
- result.get(timeout=5)
+
+class test_worker:
+ def setup(self):
+ self.app = Celery('celerytest', backend='cache+memory://', broker='memory://',)
+
+ @self.app.task
+ def add(x, y):
+ return x + y
+
+ self.add = add
+
+ @self.app.task
+ def error_task():
+ raise NotImplementedError()
+
+ self.error_task = error_task
+
+ self.app.config_from_object({
+ 'worker_hijack_root_logger': False,
+ })
+
+ # to avoid changing the root logger level to ERROR,
+ # we have we have to set both app.log.loglevel start_worker arg to 0
+ # (see celery.app.log.setup_logging_subsystem)
+ self.app.log.loglevel = 0
+
+ def test_start_worker(self):
+ with start_worker(app=self.app, loglevel=0):
+ result = self.add.s(1, 2).apply_async()
+ val = result.get(timeout=5)
+ assert val == 3
+
+ def test_start_worker_with_exception(self):
+ """Make sure that start_worker does not hang on exception"""
+
+ with pytest.raises(NotImplementedError):
+ with start_worker(app=self.app, loglevel=0):
+ result = self.error_task.apply_async()
+ result.get(timeout=5)
| Celery worker dies, but has an exit code of 0
# Checklist
- [ ] I have verified that the issue exists against the `master` branch of Celery.
- [ ] This has already been asked to the [discussions forum](https://github.com/celery/celery/discussions) first.
- [ ] I have read the relevant section in the
[contribution guide](http://docs.celeryproject.org/en/latest/contributing.html#other-bugs)
on reporting bugs.
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the bug was already fixed in the master branch.
- [x] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
## Mandatory Debugging Information
- [x] I have included the output of ``celery -A proj report`` in the issue.
```
```
- [ ] I have verified that the issue exists against the `master` branch of Celery.
- [ ] I have included the contents of ``pip freeze`` in the issue.
- [ ] I have included all the versions of all the external dependencies required
to reproduce this bug.
## Optional Debugging Information
- [ ] I have tried reproducing the issue on more than one Python version
and/or implementation.
- [ ] I have tried reproducing the issue on more than one message broker and/or
result backend.
- [ ] I have tried reproducing the issue on more than one version of the message
broker and/or result backend.
- [ ] I have tried reproducing the issue on more than one operating system.
- [ ] I have tried reproducing the issue on more than one workers pool.
- [ ] I have tried reproducing the issue with autoscaling, retries,
ETA/Countdown & rate limits disabled.
- [ ] I have tried reproducing the issue after downgrading
and/or upgrading Celery and its dependencies.
## Related Issues and Possible Duplicates
#### Related Issues
- None
#### Possible Duplicates
- None
## Environment & Settings
**Celery version**: 5.2.3 (dawn-chorus)
<details>
<summary><b><code>celery report</code> Output:</b></summary>
<p>
```
software -> celery:5.2.3 (dawn-chorus) kombu:5.2.3 py:3.9.10
billiard:3.6.4.0 py-amqp:5.0.9
platform -> system:Darwin arch:64bit
kernel version:20.6.0 imp:CPython
loader -> celery.loaders.app.AppLoader
settings -> transport:amqp results:django-db
```
</p>
</details>
# Steps to Reproduce
## Required Dependencies
* **Minimal Python Version**: N/A or Unknown
* **Minimal Celery Version**: N/A or Unknown
* **Minimal Kombu Version**: N/A or Unknown
* **Minimal Broker Version**: N/A or Unknown
* **Minimal Result Backend Version**: N/A or Unknown
* **Minimal OS and/or Kernel Version**: N/A or Unknown
* **Minimal Broker Client Version**: N/A or Unknown
* **Minimal Result Backend Client Version**: N/A or Unknown
### Python Packages
<details>
<summary><b><code>pip freeze</code> Output:</b></summary>
<p>
```
```
</p>
</details>
### Other Dependencies
<details>
<p>
N/A
</p>
</details>
## Minimally Reproducible Test Case
<details>
<p>
```python
```
</p>
</details>
# Expected Behavior
Expected worker to exit with a non-zero code
# Actual Behavior
Ran celery with the following settings
```
CELERY_BROKER_CONNECTION_RETRY = False
CELERY_BROKER_CONNECTION_TIMEOUT = 2 # seconds
```
because, for Kubernetes-related reasons, it's better for us if the workers just die immediately when the config is wrong.
The worker does exit fast enough, but the exit code is 0 despite the following backtrace:
```
[2022-03-18 17:33:30,670: CRITICAL/MainProcess] Unrecoverable error: ConnectionRefusedError(61, 'Connection refused')
Traceback (most recent call last):
File "/Users/tomparker-shemilt/.virtualenvs/case-cards/lib/python3.9/site-packages/celery/worker/worker.py", line 203, in start
self.blueprint.start(self)
File "/Users/tomparker-shemilt/.virtualenvs/case-cards/lib/python3.9/site-packages/celery/bootsteps.py", line 116, in start
step.start(parent)
File "/Users/tomparker-shemilt/.virtualenvs/case-cards/lib/python3.9/site-packages/celery/bootsteps.py", line 365, in start
return self.obj.start()
File "/Users/tomparker-shemilt/.virtualenvs/case-cards/lib/python3.9/site-packages/celery/worker/consumer/consumer.py", line 326, in start
blueprint.start(self)
File "/Users/tomparker-shemilt/.virtualenvs/case-cards/lib/python3.9/site-packages/celery/bootsteps.py", line 116, in start
step.start(parent)
File "/Users/tomparker-shemilt/.virtualenvs/case-cards/lib/python3.9/site-packages/celery/worker/consumer/connection.py", line 21, in start
c.connection = c.connect()
File "/Users/tomparker-shemilt/.virtualenvs/case-cards/lib/python3.9/site-packages/celery/worker/consumer/consumer.py", line 422, in connect
conn = self.connection_for_read(heartbeat=self.amqheartbeat)
File "/Users/tomparker-shemilt/.virtualenvs/case-cards/lib/python3.9/site-packages/celery/worker/consumer/consumer.py", line 428, in connection_for_read
return self.ensure_connected(
File "/Users/tomparker-shemilt/.virtualenvs/case-cards/lib/python3.9/site-packages/celery/worker/consumer/consumer.py", line 451, in ensure_connected
conn.connect()
File "/Users/tomparker-shemilt/.virtualenvs/case-cards/lib/python3.9/site-packages/kombu/connection.py", line 275, in connect
return self._ensure_connection(
File "/Users/tomparker-shemilt/.virtualenvs/case-cards/lib/python3.9/site-packages/kombu/connection.py", line 434, in _ensure_connection
return retry_over_time(
File "/Users/tomparker-shemilt/.virtualenvs/case-cards/lib/python3.9/site-packages/kombu/utils/functional.py", line 312, in retry_over_time
return fun(*args, **kwargs)
File "/Users/tomparker-shemilt/.virtualenvs/case-cards/lib/python3.9/site-packages/kombu/connection.py", line 878, in _connection_factory
self._connection = self._establish_connection()
File "/Users/tomparker-shemilt/.virtualenvs/case-cards/lib/python3.9/site-packages/kombu/connection.py", line 813, in _establish_connection
conn = self.transport.establish_connection()
File "/Users/tomparker-shemilt/.virtualenvs/case-cards/lib/python3.9/site-packages/kombu/transport/pyamqp.py", line 201, in establish_connection
conn.connect()
File "/Users/tomparker-shemilt/.virtualenvs/case-cards/lib/python3.9/site-packages/amqp/connection.py", line 323, in connect
self.transport.connect()
File "/Users/tomparker-shemilt/.virtualenvs/case-cards/lib/python3.9/site-packages/amqp/transport.py", line 113, in connect
self._connect(self.host, self.port, self.connect_timeout)
File "/Users/tomparker-shemilt/.virtualenvs/case-cards/lib/python3.9/site-packages/amqp/transport.py", line 197, in _connect
self.sock.connect(sa)
ConnectionRefusedError: [Errno 61] Connection refused
```
I tried messing around with locally editing https://github.com/celery/celery/blob/master/celery/bin/worker.py and noted the
```
worker.start()
return worker.exitcode
except SecurityError as e:
ctx.obj.error(e.args[0])
ctx.exit(1)
```
block at the end. If I replace `return worker.exitcode` with `ctx.exit(worker.exitcode)`, it works as I expected, i.e. an exit code of 1.
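With that local edit in place, the behaviour can be checked from a shell (expected output, assuming the broker is down and retries are disabled):
```console
$ celery -A proj worker
...
ConnectionRefusedError: [Errno 61] Connection refused
$ echo $?
1
```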
| Hey @palfrey :wave:,
Thank you for opening an issue. We will get back to you as soon as we can.
Also, check out our [Open Collective](https://opencollective.com/celery) and consider backing us - every little helps!
We also offer priority support for our sponsors.
If you require immediate assistance please consider sponsoring us.
are you interested to come up with a possible draft fix? | 2022-06-01T10:48:40 |
celery/celery | 7,553 | celery__celery-7553 | [
"3964"
] | 13bd136871d1954a56b5f4300bfdfac396070b1c | diff --git a/celery/utils/functional.py b/celery/utils/functional.py
--- a/celery/utils/functional.py
+++ b/celery/utils/functional.py
@@ -311,7 +311,7 @@ def head_from_fun(fun, bound=False, debug=False):
# with an empty body, meaning it has the same performance as
# as just calling a function.
is_function = inspect.isfunction(fun)
- is_callable = hasattr(fun, '__call__')
+ is_callable = callable(fun)
is_cython = fun.__class__.__name__ == 'cython_function_or_method'
is_method = inspect.ismethod(fun)
| Use callable in utils.functional.head_from_fun
After the discussion in #3952, we should investigate whether an improvement can be applied using the builtin `callable` instead of `hasattr(fun, '__call__')`.
https://docs.python.org/2/library/functions.html#callable
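One corner case where the two spellings actually differ, since Python looks special methods up on the type (which is what `callable` checks):
```python
class Shim:
    pass

s = Shim()
s.__call__ = lambda: None      # instance attribute only, not on the class

print(callable(s))             # False: callable() looks at type(s)
print(hasattr(s, '__call__'))  # True: hasattr() also sees instance attributes
# s() would raise TypeError: 'Shim' object is not callable
```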
| Hi. I'm a new contributor. May I tackle this issue?
yes please, open a PR and ping me for review | 2022-06-07T12:37:03 |
|
celery/celery | 7,555 | celery__celery-7555 | [
"7523"
] | 0a783edd229783d834caa2a9dd8c79647a391cbd | diff --git a/celery/app/task.py b/celery/app/task.py
--- a/celery/app/task.py
+++ b/celery/app/task.py
@@ -96,6 +96,18 @@ class Context:
def __init__(self, *args, **kwargs):
self.update(*args, **kwargs)
+ if self.headers is None:
+ self.headers = self._get_custom_headers(*args, **kwargs)
+
+ def _get_custom_headers(self, *args, **kwargs):
+ headers = {}
+ headers.update(*args, **kwargs)
+ celery_keys = {*Context.__dict__.keys(), 'lang', 'task', 'argsrepr', 'kwargsrepr'}
+ for key in celery_keys:
+ headers.pop(key, None)
+ if not headers:
+ return None
+ return headers
def update(self, *args, **kwargs):
return self.__dict__.update(*args, **kwargs)
diff --git a/t/integration/tasks.py b/t/integration/tasks.py
--- a/t/integration/tasks.py
+++ b/t/integration/tasks.py
@@ -217,6 +217,16 @@ def retry_once_priority(self, *args, expires=60.0, max_retries=1,
max_retries=max_retries)
+@shared_task(bind=True, max_retries=1)
+def retry_once_headers(self, *args, max_retries=1,
+ countdown=0.1):
+ """Task that fails and is retried. Returns headers."""
+ if self.request.retries:
+ return self.request.headers
+ raise self.retry(countdown=countdown,
+ max_retries=max_retries)
+
+
@shared_task
def redis_echo(message, redis_key="redis-echo"):
"""Task that appends the message to a redis list."""
| diff --git a/t/integration/test_tasks.py b/t/integration/test_tasks.py
--- a/t/integration/test_tasks.py
+++ b/t/integration/test_tasks.py
@@ -8,7 +8,8 @@
from .conftest import get_active_redis_channels
from .tasks import (ClassBasedAutoRetryTask, ExpectedException, add, add_ignore_result, add_not_typed, fail,
- print_unicode, retry, retry_once, retry_once_priority, return_properties, sleeping)
+ print_unicode, retry, retry_once, retry_once_headers, retry_once_priority, return_properties,
+ sleeping)
TIMEOUT = 10
@@ -267,6 +268,13 @@ def test_task_retried_priority(self, manager):
res = retry_once_priority.apply_async(priority=7)
assert res.get(timeout=TIMEOUT) == 7 # retried once with priority 7
+ @flaky
+ def test_task_retried_headers(self, manager):
+ res = retry_once_headers.apply_async(headers={'x-test-header': 'test-value'})
+ headers = res.get(timeout=TIMEOUT)
+ assert headers is not None # retried once with headers
+ assert 'x-test-header' in headers # retry keeps custom headers
+
@flaky
def test_unicode_task(self, manager):
manager.join(
diff --git a/t/unit/tasks/test_context.py b/t/unit/tasks/test_context.py
--- a/t/unit/tasks/test_context.py
+++ b/t/unit/tasks/test_context.py
@@ -63,3 +63,24 @@ def test_context_get(self):
ctx_dict = get_context_as_dict(ctx, getter=Context.get)
assert ctx_dict == expected
assert get_context_as_dict(Context()) == default_context
+
+ def test_extract_headers(self):
+ # Should extract custom headers from the request dict
+ request = {
+ 'task': 'test.test_task',
+ 'id': 'e16eeaee-1172-49bb-9098-5437a509ffd9',
+ 'custom-header': 'custom-value',
+ }
+ ctx = Context(request)
+ assert ctx.headers == {'custom-header': 'custom-value'}
+
+ def test_dont_override_headers(self):
+ # Should not override headers if defined in the request
+ request = {
+ 'task': 'test.test_task',
+ 'id': 'e16eeaee-1172-49bb-9098-5437a509ffd9',
+ 'headers': {'custom-header': 'custom-value'},
+ 'custom-header-2': 'custom-value-2',
+ }
+ ctx = Context(request)
+ assert ctx.headers == {'custom-header': 'custom-value'}
| Custom header lost after using autoretry
<!--
Please fill this template entirely and do not erase parts of it.
We reserve the right to close without a response
bug reports which are incomplete.
-->
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [ ] This has already been asked to the [discussions forum](https://github.com/celery/celery/discussions) first.
- [x] I have read the relevant section in the
[contribution guide](https://docs.celeryq.dev/en/master/contributing.html#other-bugs)
on reporting bugs.
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [ ] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the bug was already fixed in the master branch.
- [x] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
## Mandatory Debugging Information
- [x] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [ ] I have included the contents of ``pip freeze`` in the issue.
- [x] I have included all the versions of all the external dependencies required
to reproduce this bug.
## Optional Debugging Information
<!--
Try some of the below if you think they are relevant.
It will help us figure out the scope of the bug and how many users it affects.
-->
- [ ] I have tried reproducing the issue on more than one Python version
and/or implementation.
- [ ] I have tried reproducing the issue on more than one message broker and/or
result backend.
- [ ] I have tried reproducing the issue on more than one version of the message
broker and/or result backend.
- [ ] I have tried reproducing the issue on more than one operating system.
- [ ] I have tried reproducing the issue on more than one workers pool.
- [ ] I have tried reproducing the issue with autoscaling, retries,
ETA/Countdown & rate limits disabled.
- [ ] I have tried reproducing the issue after downgrading
and/or upgrading Celery and its dependencies.
## Related Issues and Possible Duplicates
<!--
Please make sure to search and mention any related issues
or possible duplicates to this issue as requested by the checklist above.
This may or may not include issues in other repositories that the Celery project
maintains or other repositories that are dependencies of Celery.
If you don't know how to mention issues, please refer to Github's documentation
on the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests
-->
#### Related Issues
- None
#### Possible Duplicates
- None
## Environment & Settings
<!-- Include the contents of celery --version below -->
**Celery version**: 5.2.6 (dawn-chorus)
<!-- Include the output of celery -A proj report below -->
<details>
<summary><b><code>celery report
</code> Output:
</b></summary>
<p>
```
software -> celery:5.2.6 (dawn-chorus) kombu:5.2.3 py:3.10.0
billiard:3.6.4.0 py-amqp:5.0.9
platform -> system:Windows arch:64bit, WindowsPE
kernel version:10 imp:CPython
loader -> celery.loaders.app.AppLoader
settings -> transport:amqp results:redis://localhost:6379/0
broker_url: 'amqp://user:********@localhost:5672//'
task_create_missing_queues: True
task_acks_late: True
result_extended: True
result_expires: 3600
worker_send_task_events: True
result_backend: 'redis://localhost:6379/0'
deprecated_settings: None
```
</p>
</details>
# Steps to Reproduce
1. Create the simplest docker compose setup with Redis & RabbitMQ
2. Run the app with `celery --app=app worker --pool=solo --concurrency=1 -E --loglevel=INFO --queues=my_queue`
3. Run `python call.py`
4. Observe that the retried task's headers no longer contain `"custom_header": "yumy yum"`
## Required Dependencies
<!-- Please fill the required dependencies to reproduce this issue -->
* **Minimal Python Version**: 3.10.0
* **Minimal Celery Version**: N/A or Unknown
* **Minimal Kombu Version**: N/A or Unknown
* **Minimal Broker Version**: N/A or Unknown
* **Minimal Result Backend Version**: N/A or Unknown
* **Minimal OS and/or Kernel Version**: N/A or Unknown
* **Minimal Broker Client Version**: N/A or Unknown
* **Minimal Result Backend Client Version**: N/A or Unknown
### Python Packages
<!-- Please fill the contents of pip freeze below -->
<details>
<summary><b><code>pip freeze</code> Output:</b></summary>
<p>
```
```
</p>
</details>
### Other Dependencies
<!--
Please provide system dependencies, configuration files
and other dependency information if applicable
-->
<details>
<p>
N/A
</p>
</details>
## Minimally Reproducible Test Case
<!--
Please provide a reproducible test case.
Refer to the Reporting Bugs section in our contribution guide.
We prefer submitting test cases in the form of a PR to our integration test suite.
If you can provide one, please mention the PR number below.
If not, please attach the most minimal code example required to reproduce the issue below.
If the test case is too large, please include a link to a gist or a repository below.
-->
<details>
<p>
Application app.py:
```python
from functools import partial
from typing import Final

from celery import Task, Celery

BASE_CELERY_BROKER_URL = "amqp://user:pass@localhost:5672"
CELERY_RESULT_URL = "redis://localhost:6379/0"

def make_celery_worker_app_stub() -> partial[Celery]:
    return partial(
        Celery,
        task_create_missing_queues=True,
        task_acks_late=True,
        result_extended=True,
        result_expires=3600,
        worker_send_task_events=True,
    )

app: Final = make_celery_worker_app_stub()(
    main=__name__,
    result_backend=CELERY_RESULT_URL,
    broker="%s" % (BASE_CELERY_BROKER_URL),
    set_as_current=False,
)

@app.task(
    bind=True,
    queue="my_queue",
    name="my_name:my_function",
    acks_late=True,
    task_reject_on_worker_lost=True,
    autoretry_for=(Exception,),
    retry_backoff=True,
    max_retries=3,
    task_time_limit=8,
)
def failing_function(
    self: Task,
    data
) -> None:
    print(data)  # prints hello
    print(self.request.properties["application_headers"])
    assert False == True
```
Caller call.py:
```python
from celery import Celery

BASE_CELERY_BROKER_URL = "amqp://user:pass@localhost:5672"
CELERY_RESULT_URL = "redis://localhost:6379/0"

def celery_app() -> Celery:
    return Celery(
        __name__,
        result_backend=CELERY_RESULT_URL,
        broker="%s" % (BASE_CELERY_BROKER_URL),
        task_create_missing_queues=True,
        celery_queue_ha_policy="all",
        task_acks_late=True,
        set_as_current=False,
        result_extended=True,
    )

client = celery_app()

options = {"headers": {
    "custom_header": "yumy yum"
}}

sign = client.signature(
    "my_name:my_function",
    queue="my_queue",
    args=("hello",),
    options=options
)
sign.apply_async()
```
Log:
```
[2022-05-12 22:37:20,703: INFO/MainProcess] Connected to amqp://user:**@127.0.0.1:5672//
[2022-05-12 22:37:20,736: INFO/MainProcess] mingle: searching for neighbors
[2022-05-12 22:37:21,764: INFO/MainProcess] mingle: all alone
[2022-05-12 22:37:21,799: INFO/MainProcess] celery@DESKTOP-51C03JP ready.
[2022-05-12 22:38:47,431: INFO/MainProcess] Task my_name:my_function[bb7a8629-00d4-434d-b9d2-72162c2edbbf] received
[2022-05-12 22:38:47,432: WARNING/MainProcess] hello
[2022-05-12 22:38:47,432: WARNING/MainProcess] {'lang': 'py', 'task': 'my_name:my_function', 'id': 'bb7a8629-00d4-434d-b9d2-72162c2edbbf', 'shadow': None, 'eta': None, 'expires': None, 'group': None, 'group_index': None, 'retries': 0, 'timelimit': [None, None], 'root_id': 'bb7a8629-00d4-434d-b9d2-72162c2edbbf', 'parent_id': None, 'argsrepr': "('hello',)", 'kwargsrepr': '{}', 'origin': 'gen5572@DESKTOP-51C03JP', 'ignore_result': False, 'custom_header': 'yumy yum'}
[2022-05-12 22:38:47,468: INFO/MainProcess] Task my_name:my_function[bb7a8629-00d4-434d-b9d2-72162c2edbbf] retry: Retry in 1s: AssertionError()
[2022-05-12 22:38:47,469: INFO/MainProcess] Task my_name:my_function[bb7a8629-00d4-434d-b9d2-72162c2edbbf] received
[2022-05-12 22:38:48,446: WARNING/MainProcess] hello
[2022-05-12 22:38:48,447: WARNING/MainProcess] {'lang': 'py', 'task': 'my_name:my_function', 'id': 'bb7a8629-00d4-434d-b9d2-72162c2edbbf', 'shadow': None, 'eta': '2022-05-12T20:38:48.433839+00:00', 'expires': None, 'group': None, 'group_index': None, 'retries': 1, 'timelimit': [None, None], 'root_id': 'bb7a8629-00d4-434d-b9d2-72162c2edbbf', 'parent_id': 'bb7a8629-00d4-434d-b9d2-72162c2edbbf', 'argsrepr': "('hello',)", 'kwargsrepr': '{}', 'origin': 'gen24336@DESKTOP-51C03JP', 'ignore_result': False}
[2022-05-12 22:38:48,470: INFO/MainProcess] Task my_name:my_function[bb7a8629-00d4-434d-b9d2-72162c2edbbf] received
[2022-05-12 22:38:48,473: INFO/MainProcess] Task my_name:my_function[bb7a8629-00d4-434d-b9d2-72162c2edbbf] retry: Retry in 1s: AssertionError()
[2022-05-12 22:38:49,464: WARNING/MainProcess] hello
[2022-05-12 22:38:49,465: WARNING/MainProcess] {'lang': 'py', 'task': 'my_name:my_function', 'id': 'bb7a8629-00d4-434d-b9d2-72162c2edbbf', 'shadow': None, 'eta': '2022-05-12T20:38:49.447913+00:00', 'expires': None, 'group': None, 'group_index': None, 'retries': 2, 'timelimit': [None, None], 'root_id': 'bb7a8629-00d4-434d-b9d2-72162c2edbbf', 'parent_id': 'bb7a8629-00d4-434d-b9d2-72162c2edbbf', 'argsrepr': "('hello',)", 'kwargsrepr': '{}', 'origin': 'gen24336@DESKTOP-51C03JP', 'ignore_result': False}
[2022-05-12 22:38:49,468: INFO/MainProcess] Task my_name:my_function[bb7a8629-00d4-434d-b9d2-72162c2edbbf] received
[2022-05-12 22:38:49,469: WARNING/MainProcess] hello
[2022-05-12 22:38:49,469: INFO/MainProcess] Task my_name:my_function[bb7a8629-00d4-434d-b9d2-72162c2edbbf] retry: Retry in 0s: AssertionError()
[2022-05-12 22:38:49,469: WARNING/MainProcess] {'lang': 'py', 'task': 'my_name:my_function', 'id': 'bb7a8629-00d4-434d-b9d2-72162c2edbbf', 'shadow': None, 'eta': None, 'expires': None, 'group': None, 'group_index': None, 'retries': 3, 'timelimit': [None, None], 'root_id': 'bb7a8629-00d4-434d-b9d2-72162c2edbbf', 'parent_id': 'bb7a8629-00d4-434d-b9d2-72162c2edbbf', 'argsrepr': "('hello',)", 'kwargsrepr': '{}', 'origin': 'gen24336@DESKTOP-51C03JP', 'ignore_result': False}
[2022-05-12 22:38:49,473: ERROR/MainProcess] Task my_name:my_function[bb7a8629-00d4-434d-b9d2-72162c2edbbf] raised unexpected: AssertionError()
Traceback (most recent call last):
File "C:\Users\liman\Anaconda3\envs\dimensions\lib\site-packages\celery\app\trace.py", line 451, in trace_task
R = retval = fun(*args, **kwargs)
File "C:\Users\liman\Anaconda3\envs\dimensions\lib\site-packages\celery\app\trace.py", line 734, in __protected_call__
return self.run(*args, **kwargs)
File "C:\Users\liman\Anaconda3\envs\dimensions\lib\site-packages\celery\app\autoretry.py", line 54, in run
ret = task.retry(exc=exc, **retry_kwargs)
File "C:\Users\liman\Anaconda3\envs\dimensions\lib\site-packages\celery\app\task.py", line 717, in retry
raise_with_context(exc)
File "C:\Users\liman\Anaconda3\envs\dimensions\lib\site-packages\celery\app\autoretry.py", line 34, in run
return task._orig_run(*args, **kwargs)
File "C:\Users\liman\Github\Tests\randoms\api.py", line 48, in failing_function
assert False == True
AssertionError
```
</p>
</details>
# Expected Behavior
Custom headers should be kept when the task is retried.
# Actual Behavior
Custom task headers are not kept.
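A standalone sketch of the idea in the patch above: treat any key of the incoming request dict that is not a Celery-reserved attribute as a custom header (the reserved set here is a small illustrative subset; the patch derives the real one from `Context.__dict__`):
```python
RESERVED = {'lang', 'task', 'id', 'retries', 'argsrepr', 'kwargsrepr'}  # illustrative subset

def extract_custom_headers(request):
    headers = {k: v for k, v in request.items() if k not in RESERVED}
    return headers or None  # None when no custom headers remain

print(extract_custom_headers({'task': 't', 'id': '1', 'custom_header': 'yumy yum'}))
# -> {'custom_header': 'yumy yum'}
```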
| Hey @Leem0sh :wave:,
Thank you for opening an issue. We will get back to you as soon as we can.
Also, check out our [Open Collective](https://opencollective.com/celery) and consider backing us - every little helps!
We also offer priority support for our sponsors.
If you require immediate assistance please consider sponsoring us.
| 2022-06-09T05:33:48 |
celery/celery | 7,608 | celery__celery-7608 | [
"5836"
] | ec3714edf37e773ca5372f71f7f4ee5b1b33dd5d | diff --git a/celery/apps/beat.py b/celery/apps/beat.py
--- a/celery/apps/beat.py
+++ b/celery/apps/beat.py
@@ -44,7 +44,8 @@ def __init__(self, max_interval=None, app=None,
scheduler=None,
scheduler_cls=None, # XXX use scheduler
redirect_stdouts=None,
- redirect_stdouts_level=None, **kwargs):
+ redirect_stdouts_level=None,
+ quiet=False, **kwargs):
self.app = app = app or self.app
either = self.app.either
self.loglevel = loglevel
@@ -56,6 +57,7 @@ def __init__(self, max_interval=None, app=None,
'worker_redirect_stdouts', redirect_stdouts)
self.redirect_stdouts_level = either(
'worker_redirect_stdouts_level', redirect_stdouts_level)
+ self.quiet = quiet
self.max_interval = max_interval
self.socket_timeout = socket_timeout
@@ -70,8 +72,9 @@ def __init__(self, max_interval=None, app=None,
self.loglevel = LOG_LEVELS[self.loglevel.upper()]
def run(self):
- print(str(self.colored.cyan(
- f'celery beat v{VERSION_BANNER} is starting.')))
+ if not self.quiet:
+ print(str(self.colored.cyan(
+ f'celery beat v{VERSION_BANNER} is starting.')))
self.init_loader()
self.set_process_title()
self.start_scheduler()
@@ -93,7 +96,8 @@ def start_scheduler(self):
schedule_filename=self.schedule,
)
- print(self.banner(service))
+ if not self.quiet:
+ print(self.banner(service))
self.setup_logging()
if self.socket_timeout:
diff --git a/celery/bin/beat.py b/celery/bin/beat.py
--- a/celery/bin/beat.py
+++ b/celery/bin/beat.py
@@ -62,7 +62,8 @@ def beat(ctx, detach=False, logfile=None, pidfile=None, uid=None,
maybe_drop_privileges(uid=uid, gid=gid)
beat = partial(app.Beat,
- logfile=logfile, pidfile=pidfile, **kwargs)
+ logfile=logfile, pidfile=pidfile,
+ quiet=ctx.obj.quiet, **kwargs)
if detach:
with detached(logfile, pidfile, uid, gid, umask, workdir):
diff --git a/t/unit/bin/proj/scheduler.py b/t/unit/bin/proj/scheduler.py
new file mode 100644
--- /dev/null
+++ b/t/unit/bin/proj/scheduler.py
@@ -0,0 +1,6 @@
+from celery.beat import Scheduler
+
+
+class mScheduler(Scheduler):
+ def tick(self):
+ raise Exception
| diff --git a/t/unit/bin/test_beat.py b/t/unit/bin/test_beat.py
new file mode 100644
--- /dev/null
+++ b/t/unit/bin/test_beat.py
@@ -0,0 +1,34 @@
+import pytest
+from click.testing import CliRunner
+
+from celery.app.log import Logging
+from celery.bin.celery import celery
+
+
[email protected](scope='session')
+def use_celery_app_trap():
+ return False
+
+
+def test_cli(isolated_cli_runner: CliRunner):
+ Logging._setup = True # To avoid hitting the logging sanity checks
+ res = isolated_cli_runner.invoke(
+ celery,
+ ["-A", "t.unit.bin.proj.app", "beat", "-S", "t.unit.bin.proj.scheduler.mScheduler"],
+ catch_exceptions=True
+ )
+ assert res.exit_code == 1, (res, res.stdout)
+ assert res.stdout.startswith("celery beat")
+ assert "Configuration ->" in res.stdout
+
+
+def test_cli_quiet(isolated_cli_runner: CliRunner):
+ Logging._setup = True # To avoid hitting the logging sanity checks
+ res = isolated_cli_runner.invoke(
+ celery,
+ ["-A", "t.unit.bin.proj.app", "--quiet", "beat", "-S", "t.unit.bin.proj.scheduler.mScheduler"],
+ catch_exceptions=True
+ )
+ assert res.exit_code == 1, (res, res.stdout)
+ assert not res.stdout.startswith("celery beat")
+ assert "Configuration -> " not in res.stdout
| celery worker has --quiet to suppress banner output but celery beat does not
Sorry for leaving out the issue template, but I believe this is fairly trivial and straightforward.
In master, there is code to suppress printing the banner when running `celery worker` with `--quiet`:
https://github.com/celery/celery/blob/9773eba837982c84380c93bd3788470273e7674d/celery/apps/worker.py#L138-L139
This conditional is not present in the code that runs `celery beat`:
https://github.com/celery/celery/blob/9773eba837982c84380c93bd3788470273e7674d/celery/apps/beat.py#L77-L78
https://github.com/celery/celery/blob/9773eba837982c84380c93bd3788470273e7674d/celery/apps/beat.py#L100
This causes a few issues for us because we expect all our services to only emit JSON.
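For reference, my reading of the diff above is that the fix reuses the existing top-level `--quiet` flag (the integration test invokes `celery -A proj --quiet beat`) and threads it through to `Beat` as a keyword; a sketch of programmatic use, with app name and broker as placeholders:
```python
from celery import Celery

app = Celery('proj', broker='pyamqp://guest@localhost//')  # placeholder app

beat = app.Beat(quiet=True)  # per the patch, quiet=True skips the version
beat.run()                   # line and the startup banner
```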
| do you have the time to send improvements?
I do, with a little guidance. Do I just need to add the `quiet` kwarg to `on_before_init` in `beat.py`? Or is there something more I need to do to make sure that I can figure out whether `--quiet` was passed or not?
ping :)
> I do, with a little guidance. Do I just need to add the `quiet` kwarg to `on_before_init` in `beat.py`? Or is there something more I need to do to make sure that I can figure out whether `--quiet` was passed or not?
you really need to dig into the code to figure that out | 2022-07-05T02:45:37 |
celery/celery | 7,609 | celery__celery-7609 | [
"3576"
] | ec3714edf37e773ca5372f71f7f4ee5b1b33dd5d | diff --git a/celery/worker/state.py b/celery/worker/state.py
--- a/celery/worker/state.py
+++ b/celery/worker/state.py
@@ -32,18 +32,18 @@
}
#: maximum number of revokes to keep in memory.
-REVOKES_MAX = 50000
+REVOKES_MAX = int(os.environ.get('CELERY_WORKER_REVOKES_MAX', 50000))
#: maximum number of successful tasks to keep in memory.
-SUCCESSFUL_MAX = 1000
+SUCCESSFUL_MAX = int(os.environ.get('CELERY_WORKER_SUCCESSFUL_MAX', 1000))
#: how many seconds a revoke will be active before
#: being expired when the max limit has been exceeded.
-REVOKE_EXPIRES = 10800
+REVOKE_EXPIRES = float(os.environ.get('CELERY_WORKER_REVOKE_EXPIRES', 10800))
#: how many seconds a successful task will be cached in memory
#: before being expired when the max limit has been exceeded.
-SUCCESSFUL_EXPIRES = 10800
+SUCCESSFUL_EXPIRES = float(os.environ.get('CELERY_WORKER_SUCCESSFUL_EXPIRES', 10800))
#: Mapping of reserved task_id->Request.
requests = {}
| diff --git a/t/unit/worker/test_state.py b/t/unit/worker/test_state.py
--- a/t/unit/worker/test_state.py
+++ b/t/unit/worker/test_state.py
@@ -1,4 +1,7 @@
+import os
import pickle
+import sys
+from importlib import import_module
from time import time
from unittest.mock import Mock, patch
@@ -187,3 +190,32 @@ def test_ready(self, requests=[SimpleReq('foo'),
for request in requests:
state.task_ready(request)
assert len(state.active_requests) == 0
+
+
+class test_state_configuration():
+
+ @staticmethod
+ def import_state():
+ with patch.dict(sys.modules):
+ del sys.modules['celery.worker.state']
+ return import_module('celery.worker.state')
+
+ @patch.dict(os.environ, {
+ 'CELERY_WORKER_REVOKES_MAX': '50001',
+ 'CELERY_WORKER_SUCCESSFUL_MAX': '1001',
+ 'CELERY_WORKER_REVOKE_EXPIRES': '10801',
+ 'CELERY_WORKER_SUCCESSFUL_EXPIRES': '10801',
+ })
+ def test_custom_configuration(self):
+ state = self.import_state()
+ assert state.REVOKES_MAX == 50001
+ assert state.SUCCESSFUL_MAX == 1001
+ assert state.REVOKE_EXPIRES == 10801
+ assert state.SUCCESSFUL_EXPIRES == 10801
+
+ def test_default_configuration(self):
+ state = self.import_state()
+ assert state.REVOKES_MAX == 50000
+ assert state.SUCCESSFUL_MAX == 1000
+ assert state.REVOKE_EXPIRES == 10800
+ assert state.SUCCESSFUL_EXPIRES == 10800
| make REVOKES_MAX and REVOKE_EXPIRES configurable
Values of REVOKE_EXPIRES and REVOKES_MAX in worker/state.py are hardcoded.
These values should be configurable; some of us really need to change them.
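For reference, with the patch in this record the limits become overridable through environment variables, read once at import time; a sketch (values arbitrary), noting the variables must be set before `celery.worker.state` is first imported:
```python
import os

# must happen before celery.worker.state is imported anywhere
os.environ['CELERY_WORKER_REVOKES_MAX'] = '100000'
os.environ['CELERY_WORKER_REVOKE_EXPIRES'] = '3600'

from celery.worker import state

print(state.REVOKES_MAX)     # 100000
print(state.REVOKE_EXPIRES)  # 3600.0 (parsed as float per the patch)
```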
| any suggested changes in mind? feel free to send a PR
will try, thanks for pushing celery forward :)
I think this can be programmatically supported via adding `control` commands, similar to the following:
```python
from celery.worker.control import control_command
from celery.worker import state as worker_state

@control_command(
    args=[('n', int)],
    signature='[N={}]'.format(worker_state.REVOKES_MAX),  # <- used for help on the command-line.
)
def revokes_max(state, n=worker_state.REVOKES_MAX):
    if n != worker_state.revoked.maxlen:
        prev_max_len = worker_state.revoked.maxlen
        worker_state.revoked.maxlen = n
        if n < prev_max_len:
            worker_state.revoked.purge()
    return {'ok': 'updated revoked task max length.'}

@control_command(
    args=[('n', int)],
    signature='[N={}]'.format(worker_state.REVOKE_EXPIRES),  # <- used for help on the command-line.
)
def revokes_expires(state, n=worker_state.REVOKE_EXPIRES):
    if n != worker_state.revoked.expires:
        prev_expires = worker_state.revoked.expires
        worker_state.revoked.expires = n
        if n < prev_expires:
            worker_state.revoked.purge()
    return {'ok': 'updated revoked task expiration.'}
```
Of course, _not_ having to call control commands right after a deploy of workers, and instead making this configurable like many other parameters in the conf, would be ideal. But this is a stop-gap (and it also allows the values to be changed on the fly, which is at least minorly useful). | 2022-07-05T02:46:40 |
celery/celery | 7,652 | celery__celery-7652 | [
"3589"
] | b96ab282a8a2ea3d97d034f862e9fd6aceb0a0b5 | diff --git a/celery/app/base.py b/celery/app/base.py
--- a/celery/app/base.py
+++ b/celery/app/base.py
@@ -253,7 +253,7 @@ def __init__(self, main=None, loader=None, backend=None,
self._pending_periodic_tasks = deque()
self.finalized = False
- self._finalize_mutex = threading.Lock()
+ self._finalize_mutex = threading.RLock()
self._pending = deque()
self._tasks = tasks
if not isinstance(self._tasks, TaskRegistry):
| add_periodic_task function does not trigger the task
## Checklist
* [x] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
* [x] I have verified that the issue exists against the `master` branch of Celery.
## Steps to reproduce
**tasks.py**
```
from celery import Celery

celery = Celery('tasks', broker='pyamqp://guest@localhost//')

@celery.task
def add(x, y):
    return x + y

@celery.on_after_configure.connect
def add_periodic():
    celery.add_periodic_task(10.0, add.s(2,3), name='add every 10')

if __name__ == '__main__':
    add_periodic()
```
step 1: rabbitmq is up
rabbitmq 1186 1 0 Nov12 ? 00:00:00 /bin/sh /usr/sbin/rabbitmq-server
step 2: execute tasks.py
`python tasks.py`
step 3: start beat worker
`celery -A tasks -l info beat`
celery beat v4.0.0 (latentcall) is starting.
__ - ... __ - _
LocalTime -> 2016-11-12 17:37:58
Configuration ->
. broker -> amqp://guest:**@localhost:5672//
. loader -> celery.loaders.app.AppLoader
. scheduler -> celery.beat.PersistentScheduler
. db -> celerybeat-schedule
. logfile -> [stderr]@%INFO
. maxinterval -> 5.00 minutes (300s)
[2016-11-12 17:37:58,912: INFO/MainProcess] beat: Starting...
## Expected behavior
I expect the scheduler to trigger the add() function every ten seconds.
## Actual behavior
The add() function doesn't get triggered.
I don't see any exception in the terminal. Am I missing anything?
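One note on the patch at the top of this record, which swaps the app's `_finalize_mutex` from `threading.Lock` to `threading.RLock`: my assumption (not spelled out in this thread) is that a signal handler calling `add_periodic_task` can re-enter finalization on the thread that already holds the mutex, and a plain `Lock` deadlocks there. A minimal sketch of the difference:
```python
import threading

lock = threading.RLock()  # with threading.Lock() the nested acquire blocks forever
with lock:
    with lock:            # same-thread re-entry succeeds with RLock
        print("re-entered safely")
```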
| I had the same problem :(
Your example works well for me.
NOTE: Your signal handler needs to accept **kwargs; failing to do so will be an error in the future.
Using your example
```py
# file: tasks.py
from celery import Celery
celery = Celery('tasks', broker='pyamqp://guest@localhost//')
@celery.task
def add(x, y):
return x + y
@celery.on_after_configure.connect
def add_periodic(**kwargs):
celery.add_periodic_task(10.0, add.s(2,3), name='add every 10')
```
I start the beat service as follows:
```
$ celery -A tasks beat -l debug
celery beat v4.0.0 (latentcall) is starting.
__ - ... __ - _
LocalTime -> 2016-12-01 11:54:56
Configuration ->
. broker -> amqp://guest:**@localhost:5672//
. loader -> celery.loaders.app.AppLoader
. scheduler -> celery.beat.PersistentScheduler
. db -> celerybeat-schedule
. logfile -> [stderr]@%DEBUG
. maxinterval -> 5.00 minutes (300s)
[2016-12-01 11:54:56,511: DEBUG/MainProcess] Setting default socket timeout to 30
[2016-12-01 11:54:56,511: INFO/MainProcess] beat: Starting...
[2016-12-01 11:54:56,517: DEBUG/MainProcess] Current schedule:
<ScheduleEntry: celery.backend_cleanup celery.backend_cleanup() <crontab: 0 4 * * * (m/h/d/dM/MY)>
<ScheduleEntry: add every 10 tasks.add(2, 3) <freq: 10.00 seconds>
[2016-12-01 11:54:56,517: DEBUG/MainProcess] beat: Ticking with max interval->5.00 minutes
[2016-12-01 11:54:56,528: DEBUG/MainProcess] Start from server, version: 0.9, properties: {'information': 'Licensed under the MPL. See http://www.rabbitmq.com/', 'product': 'RabbitMQ', 'copyright': 'Copyright (C) 2007-2016 Pivotal Software, Inc.', 'capabilities': {'exchange_exchange_bindings': True, 'connection.blocked': True, 'authentication_failure_close': True, 'direct_reply_to': True, 'basic.nack': True, 'per_consumer_qos': True, 'consumer_priorities': True, 'consumer_cancel_notify': True, 'publisher_confirms': True}, 'cluster_name': 'rabbit@grain', 'platform': 'Erlang/OTP', 'version': '3.6.4'}, mechanisms: [u'AMQPLAIN', u'PLAIN'], locales: [u'en_US']
[2016-12-01 11:54:56,531: INFO/MainProcess] Scheduler: Sending due task add every 10 (tasks.add)
[2016-12-01 11:54:56,534: DEBUG/MainProcess] using channel_id: 1
[2016-12-01 11:54:56,535: DEBUG/MainProcess] Channel open
[2016-12-01 11:54:56,537: DEBUG/MainProcess] beat: Synchronizing schedule...
[2016-12-01 11:54:56,537: DEBUG/MainProcess] tasks.add sent. id->af224838-cf72-4d0d-9076-1c39cdbeffb8
[2016-12-01 11:54:56,537: DEBUG/MainProcess] beat: Waking up in 9.97 seconds.
[2016-12-01 11:55:06,519: INFO/MainProcess] Scheduler: Sending due task add every 10 (tasks.add)
[2016-12-01 11:55:06,520: DEBUG/MainProcess] tasks.add sent. id->907cf307-e36f-455a-97a8-441c79b8ab92
```
Hi, I have the same issue, but I'm trying to start celery programmatically in a thread; maybe that is the cause.
This is my thread:
```python
from __future__ import absolute_import, unicode_literals
import threading

from celery import current_app
from celery.bin import worker

app = current_app._get_current_object()

class CeleryThread(threading.Thread):
    def __init__(self):
        super(CeleryThread, self).__init__()
        self.app = app
        self.worker = worker.worker(app=self.app)
        self.options = {
            'broker': 'amqp://guest:guest@localhost:5672//',
            'loglevel': 'INFO',
            'traceback': True,
        }
        app.add_periodic_task(5.0, test.s('hello'), name='add every 10')

    def run(self):
        self.worker.run(**self.options)

@app.task
def test(args1):
    print args1
```
And the main.py that launches it:
```python
celery_thread = CeleryThread()
# used to kill the thread when the main program stops
# celery_thread.daemon = True
celery_thread.start()
```
My console output is
```
-------------- celery@ubuntu v4.0.0 (latentcall)
---- **** -----
--- * *** * -- Linux-4.4.0-51-generic-x86_64-with-Ubuntu-16.04-xenial 2016-12-03 14:33:10
-- * - **** ---
- ** ---------- [config]
- ** ---------- .> app: default:0x7f75775bfc50 (.default.Loader)
- ** ---------- .> transport: amqp://guest:**@localhost:5672//
- ** ---------- .> results: disabled://
- *** --- * --- .> concurrency: 4 (prefork)
-- ******* ---- .> task events: OFF (enable -E to monitor tasks in this worker)
--- ***** -----
-------------- [queues]
.> celery exchange=celery(direct) key=celery
[tasks]
. kalliope.core.CrontabManager2.CeleryThread.test
[2016-12-03 14:33:10,458: INFO/MainProcess] Connected to amqp://guest:**@127.0.0.1:5672//
[2016-12-03 14:33:10,466: INFO/MainProcess] mingle: searching for neighbors
[2016-12-03 14:33:11,486: INFO/MainProcess] mingle: all alone
[2016-12-03 14:33:11,515: INFO/MainProcess] celery@ubuntu ready.
```
Am I forgetting an option? I can see you have a "scheduler" set in your output @ask
Thanks in advance for any help.
The same config as @liuallen1981 and the same issue. Has anyone figured out what's happening? For now I have to use
```
celery.conf.beat_schedule = {
    'do-something-periodically': {
        'task': 'tasks.my_task',
        'schedule': 3.0,
    },
}
```
instead of using a `setup_periodic_tasks` function with the `on_after_configure.connect` decorator.
+1 Also having this issue.
+1 Also having this issue.
Celery version 4.0.2 (latentcall)
+1 Also having this issue.
+1 Also having this issue. Went on and tested with @liuallen1981's code and get the same result as with my own code.
Celery: 4.0.2
To run periodic tasks, you have to invoke also scheduler when starting a worker using `-B` option:
`celery -A proj worker -B`
When using celery in django applications, where tasks are autodiscovered from apps, you need to use `on_after_finalize` signal instead of `on_after_configure`.
References:
http://stackoverflow.com/questions/40712678/setup-periodic-task/40727526
http://stackoverflow.com/questions/41119053/connect-new-celery-periodic-task-in-django
-B is not for production and simply starts the Beat scheduler, which at least in my case is already running.
+1 having the same issue with Celery(4.0.2)
Same issue here....
You just started a beat service; you should also start a worker to execute the tasks.
+1
same issue here
Same issue here.
I tried printing something inside the callback, but it seems the callback is never called, even though RabbitMQ is working (it works fine when I trigger a task in code):
```py
@celery.on_after_configure.connect
def setup_periodic_tasks(**kwargs):
    print('after connect')
```
```
(py35) ➜ celery -A celery.beat beat
celery beat v4.0.2 (latentcall) is starting.
__ - ... __ - _
LocalTime -> 2017-08-08 02:42:18
Configuration ->
. broker -> amqp://**:**@**:5672//
. loader -> celery.loaders.app.AppLoader
. scheduler -> celery.beat.PersistentScheduler
. db -> celerybeat-schedule
. logfile -> [stderr]@%WARNING
. maxinterval -> 5.00 minutes (300s)
```
I used the Celery config `celery.conf.beat_schedule` instead of the dynamic `add_periodic_task` to solve this, since I don't need to set the schedule dynamically, but I still don't know why this issue is happening.
I stepped through the library and found that my signal listener was being created/attached after the `on_after_configure` signal was fired. (I was placing my signal listener in `app/tasks.py` and it was not working.)
I reasoned that Django's app ready signal would probably execute after Celery configuration and it is working well for me so far.
NOTE: I am not sure what celery configuration actually entails and whether it is possible that app.ready could fire before Celery is configured... however, I expect it would at least throw a runtime error.
Sample code from my `app/apps.py`:
```
from django.apps import AppConfig
import django.db.utils

from celery_app import app as celery_app
from celery.schedules import crontab
import utils.cron

class MyAppConfig(AppConfig):
    name = 'app'
    verbose_name = "MyApp"

    def ready(self):
        print("MyAppConfig.ready invoked.")
        import app.signals

        print("* * * Setting up periodic tasks!")
        import app.models
        import app.tasks
        for cron in app.models.CronTask.objects.all():
            celery_app.add_periodic_task(
                crontab(**utils.cron.parse_crontab_expression_to_celery(cron.frequency)),
                app.tasks.do_cron.s(cron.id),
                name='do cron'
            )
```
Note you also need to point `INSTALLED_APPS` at your new `AppConfig` in `settings.py`:
```
INSTALLED_APPS = [
    # ...
    'app.apps.MyAppConfig',
]
```
---
A good approach or *fix* would probably be to write a new decorator that 1) checks if Celery is already configured and, if so, executes immediately, and 2) if Celery is not configured, adds the listener using `@celery.on_after_configure.connect` (a rough sketch follows below).
As it stands, the docs are problematic since so many of us ran into this issue.
CCing @rustanacexd @viennadd just so you can try this fix if you still need to dynamically schedule tasks?
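A rough sketch of that decorator idea; the handler and task names are hypothetical, and I'm assuming `celery_app.configured` (imported as in the snippet above) reflects whether configuration has already happened:
```python
def on_celery_configured(handler):
    if celery_app.configured:
        handler(sender=celery_app)  # configuration already done: run now
    else:
        # not configured yet: defer until the signal fires; weak=False keeps
        # the handler alive even if it was defined in a local scope
        celery_app.on_after_configure.connect(handler, weak=False)
    return handler

@on_celery_configured
def setup_periodic_tasks(sender=None, **kwargs):
    sender.add_periodic_task(10.0, some_task.s(), name='every 10s')  # some_task is hypothetical
```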
Putting my two cents out there, I got bit by this and ended up having to reorder some of my tasks. We have about 8 scheduled tasks that are supposed to fire, however, I noticed that the following would happen:
Example:
```
@celery.on_after_configure.connect
def setup_periodic_tasks(**kwargs):
    celery.add_periodic_task(5.0, do_thing_b.s(), name='Test B')
    celery.add_periodic_task(4.9, do_thing_a.s(), name='Test A')
```
Ordering them like this means that `do_thing_a` would never fire, as it would be overwritten by `do_thing_b`. Originally they were both set to `5`, although only one would fire (I believe in this case it would have been B, as it was added first). Next I changed the intervals to decimals and offset them by `.1` to see if that would fix it. No dice. Then I ordered them so the lower one fires first and the higher one second, and that ended up fixing it, i.e.:
```
@celery.on_after_configure.connect
def setup_periodic_tasks(**kwargs):
    celery.add_periodic_task(4.9, do_thing_b.s(), name='Test B')
    celery.add_periodic_task(5.0, do_thing_a.s(), name='Test A')
```
We're also using some `crontab()`s, though those are sort of a mystery to get running as some work and some don't, I suspect it is the same issue as above. I haven't completely played around with it, as those intervals are generally set to occur every X hours/days, so I usually forget they exist.
Maybe this kind of behavior is mentioned in the documentation, or I'm going down a different rabbit hole, though this behavior doesn't make much sense. For reference, we're using Redis instead of RMQ and celery 4.1.0.
I was able to make this work. Check my answer here:
https://stackoverflow.com/a/46965132/560945
@prasanna-balaraman That does seem to work, thank you for the suggestion!
Same issue for me; I will test the other solution: https://stackoverflow.com/a/41119054/6149867
Closing. If it still appears and anyone has code or docs suggestions, please feel free to send a PR referencing this issue.
It took me a while to realize that if there is any exception raised in setup_periodic_tasks, it will get silently suppressed.
The function is called here: https://github.com/celery/celery/blob/master/celery/app/base.py#L950
If anything goes wrong, the exception is only saved in responses, no re-raise or log:
https://github.com/celery/celery/blob/master/celery/utils/dispatch/signal.py#L276
So my suggestion is to keep setup_periodic_tasks as simple as possible.
Hope this helps!
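Building on that: when debugging, the responses that `send()` collects can be inspected directly. A hedged sketch, assuming the Django-style return value of a list of `(receiver, response)` pairs and that a swallowed exception comes back as the response (`app` is your Celery app):
```python
responses = app.on_after_configure.send(sender=app)
for receiver, response in responses:
    if isinstance(response, Exception):
        print(f'{receiver!r} raised {response!r}')  # surface the suppressed error
```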
@chiang831 do you have any suggestions to improve it? If so, please send a PR or open a discussion on the celery-users mailing list.
Defining them in `on_after_finalize` is what worked for me (non-django celery app).
```python
@app.on_after_finalize.connect
def app_ready(**kwargs):
    """
    Called once after app has been finalized.
    """
    sender = kwargs.get('sender')

    # periodic tasks
    speed = 15
    sender.add_periodic_task(speed, update_leases.s(),
                             name='update leases every {} seconds'.format(speed))
```
Just ran into this and none of the previous solutions worked for me. The exact scenarios that cause this are confusing and rely on the behavior of ref-counting/gc and the exact lifetimes of your decorated functions.
Signal.connect by default only holds a weak reference to the signal handler. This makes sense for other use cases of the Signal object (a short lived object that wires signals shouldn't be held alive by its signal handlers), but is very surprising in this case.
My specific use case was a decorator to make it easy to add new periodic tasks:
```python
def call_every_5_min(task):
    @app.on_after_configure.connect
    def register_task(sender, **_):
        sender.add_periodic_task(collect_every_m*60, task.signature())

@call_every_5_min
@task
def my_celery_task(_):
    pass
```
The fix is to explicitly ask for a strong reference:
```python
def call_every_5_min(task):
    def register_task(sender, **_):
        sender.add_periodic_task(collect_every_m*60, task.signature())
    app.on_after_configure.connect(register_task, weak=False)
```
The example in the docs **only** works if your decorated function is at module or class scope, in which case the module or class continues to hold a strong reference to the function. Otherwise the only strong reference will die at the end of the scope it's defined in.
I recommend changing the docs to pass `weak=False`, which should work in the cases listed above. I have not explicitly tested this in a Django context though.
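A minimal repro of the pitfall, using what I believe is the class in play here (treat the details as an assumption):
```python
from celery.utils.dispatch import Signal

demo = Signal(name='demo')

def attach():
    def handler(sender, **kwargs):
        print('handled')
    demo.connect(handler)  # weak=True by default: only a weakref is kept
    # handler falls out of scope here, so the weakref dies with it

attach()
demo.send(sender=None)     # prints nothing - the receiver was garbage collected
```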
> To run periodic tasks, you have to invoke also scheduler when starting a worker using `-B` option:
>
> `celery -A proj worker -B`
>
> When using celery in django applications, where tasks are autodiscovered from apps, you need to use `on_after_finalize` signal instead of `on_after_configure`.
>
> References:
> http://stackoverflow.com/questions/40712678/setup-periodic-task/40727526
> http://stackoverflow.com/questions/41119053/connect-new-celery-periodic-task-in-django
My process of `python -m celery -A app_name worker -l info --autoscale=20,5 -BE` blocked at the end of `app_name.celery.py` when using `on_after_finalize`.
> The same config as @liuallen1981 and the same issue. Has anyone figured out what's happening? For now I have to use
>
> ```
> celery.conf.beat_schedule = {
>     'do-something-periodically': {
>         'task': 'tasks.my_task',
>         'schedule': 3.0,
>     },
> }
> ```
>
> instead of using a `setup_periodic_tasks` function with the `on_after_configure.connect` decorator.
This works for me instead.
If you're trying to solve this issue, reboot your docker engine first; it may be a signals system bug.
should we close this issue as not a bug?
@auvipy not sure. Looks like it's a celery bug
It is a bug we must fix.
+1 Tried all above solutions for celery beat v5.1.0, nothing worked.
Came across this when I was debugging an issue with one of my apps. I believe my problem was caused by the use of the `include=` flag to `celery.Celery` which causes my task modules to be loaded after the `on_after_configure` signal has been sent. Output with a few debug lines:
```
celery -A chopstix.orchestrate.app beat -l DEBUG INSERT .venv 2.31 13:28:19
Making on_configure signal
Signal app.on_configure.send()
self.receivers=[]
Signal app.on_after_configure.send()
self.receivers=[]
celery beat v5.1.1 (sun-harmonics) is starting.
Signal import_modules.send()
self.receivers=[]
IMPORTING beat task module
Signal app.on_after_finalize.send()
self.receivers=[]
```
The line with `IMPORTING` is where my task modules are imported, and where I attempted to use `app.on_after_configure.connect` as a decorator. Obviously my handler was not called and my periodic task was not configured.
---
From a brief read of the comments here and over at #5045, it seems like the issues are a mix of the following:
* bad function signature for the signal callback (ie. does not accept sender **and** kwargs as pointed out by @ask early on)
* the examples have the right args so I figure there's nothing to do there
* missing a signal
* can happen for many reasons, it bit me today due to late task module loading via `Celery(include=...)`
* ~~best we can do is probably a note in the documentation?~~ this was done in #6031 - could maybe be styled as an rst warning though?
* I don't think rewriting signals to trigger send logic when a connect happens if a send has already happened is appropriate. It's a repeatable signal class, right?
* but maybe the premise of using signals for things which are more like `threading.Event`s is something we should change?
* use of the example code in a nested context resulting in weakrefs dying
* like in https://github.com/celery/celery/issues/3589#issuecomment-431961606
* we could add a caveat to the example about this?
* silent suppression of exceptions in signal receivers
* https://github.com/celery/celery/issues/3589#issuecomment-353130300
* I'm surprised we don't log something when this happens?
* some old, broken misbehaviour?
Is there anything I'm missing? It seems like the work to do is either a few minor changes to doco, or a more significant change to the signals to change one-time signals into something more like event flags.
I gave up on trying to make `add_periodic_task` work with `on_after_configure` or `on_after_finalize`.
Eventually I tried `app.conf.beat_schedule` and it worked on the first try and kept working after every restart.
---
One weird thing to note though, perhaps it could help someone understand what's going on:
I had been using `on_after_configure` from the beginning and couldn't get it working. At some point I tried `on_after_finalize` instead and it worked; I could see the periodic tasks running from the logs. Then I restarted the service and it never worked again.
---
**My original non-working setup, just showing the stuff related to celery:**
Structure
```
project/
    __init__.py
    celery/
        __init__.py
        app.py
        scheduler.py
        tasks/
            __init__.py
            some_tasks.py
            some_other_tasks.py
            more_tasks.py
requirements.txt
Dockerfile
docker-compose.yml
```
requirements.txt
```requirements.txt
celery[redis]==5.1.1
```
Dockerfile
```dockerfile
FROM python:3.9.5-slim-buster
ENV PYTHONDONTWRITEBYTECODE=1 PYTHONUNBUFFERED=1
WORKDIR /app
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
COPY . .
```
docker-compose.yml
```yml
version: "3.9"
services:
redis:
image: redis:6.2.4-alpine
volumes:
- /tmp/redis:/data
celery:
build: .
command: celery -A project.celery.app worker -l info --uid=nobody --gid=nogroup
depends_on:
- redis
celery-beat:
build: .
command: celery -A project.celery.app beat -l info --uid=nobody --gid=nogroup -s /tmp/celerybeat-schedule
volumes:
- /tmp:/tmp
depends_on:
- celery
```
project/celery/app.py
```python
from celery import Celery
app = Celery(
    "tasks",
    broker=REDIS_CONN_STRING,
    backend=REDIS_CONN_STRING,
    include=[
        "project.celery.tasks.some_tasks",
        "project.celery.tasks.some_other_tasks",
        "project.celery.tasks.more_tasks",
    ],
)
```
project/celery/scheduler.py
```python
from .app import app
from .tasks.some_tasks import a_task

@app.on_after_finalize.connect
def setup_periodic_tasks(sender, **kwargs):
    sender.add_periodic_task(5.0, a_task.s(), name="a task")
```
project/celery/tasks/some_tasks.py
```python
import logging

from ..app import app

@app.task
def a_task():
    logging.info("hello")
```
**My current working setup, just showing the changes:**
project/celery/app.py
```python
from celery import Celery
app = Celery(
    "tasks",
    broker=REDIS_CONN_STRING,
    backend=REDIS_CONN_STRING,
    include=[
        "project.celery.tasks.some_tasks",
        "project.celery.tasks.some_other_tasks",
        "project.celery.tasks.more_tasks",
    ],
)

app.conf.beat_schedule = {
    "a task": {
        "task": "project.celery.tasks.some_tasks.a_task",
        "schedule": 5.0,
    },
}
```
project/celery/scheduler.py deleted
Thank you @ggregoire. I had an issue where the `on_after_configure` would schedule tasks which were defined in the app's `tasks.py` correctly on every restart but the `on_after_finalize` would not schedule tasks defined in other files.
Using `app.conf.beat_schedule` solved this.
I had the problem with `on_after_finalize` as well. The signal wasn't triggered at all.
I defined `setup_periodic_tasks` in a separate module and imported the celery app, so my code looked like this:
```
from .celery import app

@app.on_after_finalize.connect
def setup_periodic_tasks(sender, **kwargs):
    print("Setup tasks schedule.")
```
It turned out that `setup_periodic_tasks` wasn't triggered because of the relative import. When I used an absolute import and changed the `from .celery import app` line to `from full.project.path.celery import app`, everything started working correctly.
Looks like with the relative import the celery app was instantiated twice, and the `setup_periodic_tasks` handler was connected to the second app instance, which wasn't used by the celery worker.
Hope this helps somebody.
I had:
```
app = Celery()
app.config_from_object('django.conf:settings', namespace='CELERY')
app.autodiscover_tasks()

@app.on_after_finalize.connect
def setup_periodic_tasks(sender, **kwargs):
    ...
```
For me personally, the fix was to move `setup_periodic_tasks()` before the `config_from_object()`:
```
app = Celery()

@app.on_after_finalize.connect
def setup_periodic_tasks(sender, **kwargs):
    ...

app.config_from_object('django.conf:settings', namespace='CELERY')
app.autodiscover_tasks()
```
In case it helps someone. | 2022-07-28T05:04:43 |
|
celery/celery | 7,680 | celery__celery-7680 | [
"7645"
] | bdbf6d6ae1aca9addd81800b5dd2e8c3477afb18 | diff --git a/celery/security/certificate.py b/celery/security/certificate.py
--- a/celery/security/certificate.py
+++ b/celery/security/certificate.py
@@ -4,7 +4,7 @@
import os
from cryptography.hazmat.backends import default_backend
-from cryptography.hazmat.primitives.asymmetric import padding
+from cryptography.hazmat.primitives.asymmetric import padding, rsa
from cryptography.x509 import load_pem_x509_certificate
from kombu.utils.encoding import bytes_to_str, ensure_bytes
@@ -25,12 +25,15 @@ def __init__(self, cert):
self._cert = load_pem_x509_certificate(
ensure_bytes(cert), backend=default_backend())
+ if not isinstance(self._cert.public_key(), rsa.RSAPublicKey):
+ raise ValueError("Non-RSA certificates are not supported.")
+
def has_expired(self):
"""Check if the certificate has expired."""
return datetime.datetime.utcnow() >= self._cert.not_valid_after
- def get_pubkey(self):
- """Get public key from certificate."""
+ def get_pubkey(self) -> rsa.RSAPublicKey:
+ """Get public key from certificate. Public key type is checked in __init__."""
return self._cert.public_key()
def get_serial_number(self):
diff --git a/celery/security/key.py b/celery/security/key.py
--- a/celery/security/key.py
+++ b/celery/security/key.py
@@ -1,7 +1,7 @@
"""Private keys for the security serializer."""
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
-from cryptography.hazmat.primitives.asymmetric import padding
+from cryptography.hazmat.primitives.asymmetric import padding, rsa
from kombu.utils.encoding import ensure_bytes
from .utils import reraise_errors
@@ -21,6 +21,9 @@ def __init__(self, key, password=None):
password=ensure_bytes(password),
backend=default_backend())
+ if not isinstance(self._key, rsa.RSAPrivateKey):
+ raise ValueError("Non-RSA keys are not supported.")
+
def sign(self, data, digest):
"""Sign string containing data."""
with reraise_errors('Unable to sign data: {0!r}'):
diff --git a/t/unit/security/__init__.py b/t/unit/security/__init__.py
--- a/t/unit/security/__init__.py
+++ b/t/unit/security/__init__.py
@@ -105,3 +105,33 @@
e+zYdEdkFCd8rp568Eiwkq/553uy4rlE927/AEqs/+KGYmAtibk/9vmi+/+iZXyS
WWZybzzDZFncq1/N1C3Y/hrCBNDFO4TsnTLAhWtZ4c0vDAiacw==
-----END CERTIFICATE-----"""
+
+CERT_ECDSA = """-----BEGIN CERTIFICATE-----
+MIIDTTCCATWgAwIBAgIBCTANBgkqhkiG9w0BAQsFADANMQswCQYDVQQGEwJGSTAe
+Fw0yMjA4MDQwOTA5MDlaFw0yNTA0MzAwOTA5MDlaMCMxCzAJBgNVBAYTAkZJMRQw
+EgYDVQQDDAtUZXN0IFNlcnZlcjBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABIZV
+GFM0uPbXehT55s2yq3Zd7tCvN6GMGpE2+KSZqTtDP5c7x23QvBYF6q/T8MLNWCSB
+TxaERpvt8XL+ksOZ8vSjbTBrMB0GA1UdDgQWBBRiY7qDBo7KAYJIn3qTMGAkPimO
+6TAyBgNVHSMEKzApoRGkDzANMQswCQYDVQQGEwJGSYIUN/TljutVzZQ8GAMSX8yl
+Fy9dO/8wCQYDVR0TBAIwADALBgNVHQ8EBAMCBaAwDQYJKoZIhvcNAQELBQADggIB
+AKADv8zZvq8TWtvEZSmf476u+sdxs1hROqqSSJ0M3ePJq2lJ+MGI60eeU/0AyDRt
+Q5XAjr2g9wGY3sbA9uYmsIc2kaF+urrUbeoGB1JstALoxviGuM0EzEf+wK5/EbyA
+DDMg9j7b51CBMb3FjkiUQgOjM/u5neYpFxF0awXm4khThdOKTFd0FLVX+mcaKPZ4
+dkLcM/0NL25896DBPN982ObHOVqQjtY3sunXVuyeky8rhKmDvpasYu9xRkzSJBp7
+sCPnY6nsCexVICbuI+Q9oNT98YjHipDHQU0U/k/MvK7K/UCY2esKAnxzcOqoMQhi
+UjsKddXQ29GUEA9Btn9QB1sp39cR75S8/mFN2f2k/LhNm8j6QeHB4MhZ5L2H68f3
+K2wjzQHMZUrKXf3UM00VbT8E9j0FQ7qjYa7ZnQScvhTqsak2e0um8tqcPyk4WD6l
+/gRrLpk8l4x/Qg6F16hdj1p5xOsCUcVDkhIdKf8q3ZXjU2OECYPCFVOwiDQ2ngTf
+Se/bcjxgYXBQ99rkEf0vxk47KqC2ZBJy5enUxqUeVbbqho46vJagMzJoAmzp7yFP
+c1g8aazOWLD2kUxcqkUn8nv2HqApfycddz2O7OJ5Hl8e4vf+nVliuauGzImo0fiK
+VOL9+/r5Kek0fATRWdL4xtbB7zlk+EuoP9T5ZoTYlf14
+-----END CERTIFICATE-----"""
+
+KEY_ECDSA = """-----BEGIN EC PARAMETERS-----
+BggqhkjOPQMBBw==
+-----END EC PARAMETERS-----
+-----BEGIN EC PRIVATE KEY-----
+MHcCAQEEIOj98rAhc4ToQkHby+Iegvhm3UBx+3TwpfNza+2Vn8d7oAoGCCqGSM49
+AwEHoUQDQgAEhlUYUzS49td6FPnmzbKrdl3u0K83oYwakTb4pJmpO0M/lzvHbdC8
+FgXqr9Pwws1YJIFPFoRGm+3xcv6Sw5ny9A==
+-----END EC PRIVATE KEY-----"""
| diff --git a/t/unit/security/test_certificate.py b/t/unit/security/test_certificate.py
--- a/t/unit/security/test_certificate.py
+++ b/t/unit/security/test_certificate.py
@@ -8,7 +8,7 @@
from celery.security.certificate import Certificate, CertStore, FSCertStore
from t.unit import conftest
-from . import CERT1, CERT2, KEY1
+from . import CERT1, CERT2, CERT_ECDSA, KEY1
from .case import SecurityCase
@@ -29,6 +29,8 @@ def test_invalid_certificate(self):
Certificate(CERT1[:20] + CERT1[21:])
with pytest.raises(SecurityError):
Certificate(KEY1)
+ with pytest.raises(SecurityError):
+ Certificate(CERT_ECDSA)
@pytest.mark.skip('TODO: cert expired')
def test_has_expired(self):
diff --git a/t/unit/security/test_key.py b/t/unit/security/test_key.py
--- a/t/unit/security/test_key.py
+++ b/t/unit/security/test_key.py
@@ -5,7 +5,7 @@
from celery.security.key import PrivateKey
from celery.security.utils import get_digest_algorithm
-from . import CERT1, ENCKEY1, ENCKEY2, KEY1, KEY2, KEYPASSWORD
+from . import CERT1, ENCKEY1, ENCKEY2, KEY1, KEY2, KEY_ECDSA, KEYPASSWORD
from .case import SecurityCase
@@ -32,9 +32,14 @@ def test_invalid_private_key(self):
PrivateKey(ENCKEY2, KEYPASSWORD+b"wrong")
with pytest.raises(SecurityError):
PrivateKey(CERT1)
+ with pytest.raises(SecurityError):
+ PrivateKey(KEY_ECDSA)
def test_sign(self):
pkey = PrivateKey(KEY1)
pkey.sign(ensure_bytes('test'), get_digest_algorithm())
with pytest.raises(AttributeError):
pkey.sign(ensure_bytes('test'), get_digest_algorithm('unknown'))
+
+ # pkey = PrivateKey(KEY_ECDSA)
+ # pkey.sign(ensure_bytes('test'), get_digest_algorithm())
| celery.security.key PrivateKey does not check returned key type
If for some reason the private key used as `broker_use_ssl.keyfile` is not an RSA private key, Celery will crash without telling the root cause, leading to difficult debugging. This affects the current celery master branch.
This is due to the celery.security.key.PrivateKey `__init__` method not checking the type of the key returned by `load_pem_private_key` (from `cryptography.hazmat`). If the returned key is, for example, an [EllipticCurvePrivateKey](https://cryptography.io/en/latest/hazmat/primitives/asymmetric/ec/#cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePrivateKey), the `sign` method of PrivateKey will later crash with `TypeError: sign() takes 3 positional arguments but 4 were given`, since it expects `self._key` to be an instance of `RSAPrivateKey`. Furthermore, this (opaque) exception is followed by many more exceptions of type "TypeError: catching classes that do not inherit from BaseException is not allowed". See the reproduction below for a complete crash log.
`cryptography` library load_pem_private_key [documentation](https://cryptography.io/en/latest/hazmat/primitives/asymmetric/serialization/) explicitly states that "Many serialization formats support multiple different types of asymmetric keys and will return an instance of the appropriate type. You should check that the returned key matches the type your application expects when using these methods."
I propose that the type of the returned key should be checked and appropriate errors raised if the key type is not supported. This would inform the user that the problem they are facing is not a bug in celery or their implementation, but rather that their certificates are not supported by celery.
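A sketch of the proposed check, which mirrors what the patch above ends up doing (`pem_bytes` is a placeholder for your key material):
```python
from cryptography.hazmat.primitives.serialization import load_pem_private_key
from cryptography.hazmat.primitives.asymmetric import rsa

key = load_pem_private_key(pem_bytes, password=None)  # pem_bytes: PEM-encoded key
if not isinstance(key, rsa.RSAPrivateKey):
    raise ValueError('Non-RSA keys are not supported.')
```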
<!--
Please fill this template entirely and do not erase parts of it.
We reserve the right to close without a response
bug reports which are incomplete.
-->
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
- [X] I have verified that the issue exists against the `master` branch of Celery.
- [X] This has already been asked to the [discussions forum](https://github.com/celery/celery/discussions) first.
- [X] I have read the relevant section in the
[contribution guide](https://docs.celeryq.dev/en/master/contributing.html#other-bugs)
on reporting bugs.
- [X] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [X] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [X] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the bug was already fixed in the master branch.
- [X] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
## Mandatory Debugging Information
- [ ] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
- [X] I have verified that the issue exists against the `master` branch of Celery.
- [ ] I have included the contents of ``pip freeze`` in the issue.
- [X] I have included all the versions of all the external dependencies required
to reproduce this bug.
#### Related Issues
- None that I could find
#### Possible Duplicates
- None that I could find
## Environment & Settings
```
$ celery --version
5.0.5 (singularity)
```
`cryptography==37.0.4`
# Steps to Reproduce
```
app = Celery('test', include=[])

broker_use_ssl = {
    'keyfile': 'broker_client.key',  # private key is ECDSA signed
    'certfile': 'broker_client.crt',
    'ca_certs': 'root.pem',
    'cert_reqs': ssl.CERT_REQUIRED
}

app.conf.update(
    broker_use_ssl=broker_use_ssl,
    security_key='private.key',
    security_certificate='public.crt',
    security_cert_store='security_store/*.pem',
)
app.setup_security()
app.start()
```
This should result in the following traceback:
```
[2022-07-22 14:16:20,309: CRITICAL/MainProcess] Unrecoverable error: EncodeError(SecurityError("Unable to serialize: TypeError('catching classes that do not inherit from BaseException is not allowed')"))
Traceback (most recent call last):
File "/venv/lib/python3.8/site-packages/celery/security/utils.py", line 24, in reraise_errors
yield
File "/venv/lib/python3.8/site-packages/celery/security/key.py", line 32, in sign
return self._key.sign(ensure_bytes(data), padd, digest)
TypeError: sign() takes 3 positional arguments but 4 were given
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/venv/lib/python3.8/site-packages/celery/security/utils.py", line 24, in reraise_errors
yield
File "/venv/lib/python3.8/site-packages/celery/security/serialization.py", line 39, in serialize
signature=self._key.sign(body, self._digest),
File "/venv/lib/python3.8/site-packages/celery/security/key.py", line 32, in sign
return self._key.sign(ensure_bytes(data), padd, digest)
File "/usr/lib/python3.8/contextlib.py", line 131, in __exit__
self.gen.throw(type, value, traceback)
File "/venv/lib/python3.8/site-packages/celery/security/utils.py", line 25, in reraise_errors
except errors as exc:
TypeError: catching classes that do not inherit from BaseException is not allowed
... Above type error repeated many times ...
```
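For context on the repeated final message: it is Python's complaint when an `except` clause is handed something that is not an exception class. A toy reproduction of just that rule (I have not verified what celery's `errors` tuple actually contains in this path):
```python
not_an_exception = "oops"
try:
    try:
        raise ValueError("original error")
    except (not_an_exception, ValueError):  # invalid entry in the tuple
        pass
except TypeError as exc:
    print(exc)  # catching classes that do not inherit from BaseException is not allowed
```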
| Hey @qrmt :wave:,
Thank you for opening an issue. We will get back to you as soon as we can.
Also, check out our [Open Collective](https://opencollective.com/celery) and consider backing us - every little helps!
We also offer priority support for our sponsors.
If you require immediate assistance please consider sponsoring us.
"TypeError: catching classes that do not inherit from BaseException is not allowed" -- based on this, can you work on the intended fix? | 2022-08-04T10:57:50 |
celery/celery | 7,734 | celery__celery-7734 | [
"6622"
] | fbae71ca2bc2eb68988131f5719a1dc5807d58fd | diff --git a/celery/backends/dynamodb.py b/celery/backends/dynamodb.py
--- a/celery/backends/dynamodb.py
+++ b/celery/backends/dynamodb.py
@@ -201,28 +201,25 @@ def _get_or_create_table(self):
"""Create table if not exists, otherwise return the description."""
table_schema = self._get_table_schema()
try:
- table_description = self._client.create_table(**table_schema)
- logger.info(
- 'DynamoDB Table {} did not exist, creating.'.format(
- self.table_name
- )
- )
- # In case we created the table, wait until it becomes available.
- self._wait_for_table_status('ACTIVE')
- logger.info(
- 'DynamoDB Table {} is now available.'.format(
- self.table_name
- )
- )
- return table_description
+ return self._client.describe_table(TableName=self.table_name)
except ClientError as e:
error_code = e.response['Error'].get('Code', 'Unknown')
- # If table exists, do not fail, just return the description.
- if error_code == 'ResourceInUseException':
- return self._client.describe_table(
- TableName=self.table_name
+ if error_code == 'ResourceNotFoundException':
+ table_description = self._client.create_table(**table_schema)
+ logger.info(
+ 'DynamoDB Table {} did not exist, creating.'.format(
+ self.table_name
+ )
+ )
+ # In case we created the table, wait until it becomes available.
+ self._wait_for_table_status('ACTIVE')
+ logger.info(
+ 'DynamoDB Table {} is now available.'.format(
+ self.table_name
+ )
)
+ return table_description
else:
raise e
| diff --git a/t/unit/backends/test_dynamodb.py b/t/unit/backends/test_dynamodb.py
--- a/t/unit/backends/test_dynamodb.py
+++ b/t/unit/backends/test_dynamodb.py
@@ -121,39 +121,34 @@ def test_get_client_time_to_live_called(
mock_set_table_ttl.assert_called_once()
def test_get_or_create_table_not_exists(self):
+ from botocore.exceptions import ClientError
+
self.backend._client = MagicMock()
mock_create_table = self.backend._client.create_table = MagicMock()
+ client_error = ClientError(
+ {
+ 'Error': {
+ 'Code': 'ResourceNotFoundException'
+ }
+ },
+ 'DescribeTable'
+ )
mock_describe_table = self.backend._client.describe_table = \
MagicMock()
-
- mock_describe_table.return_value = {
- 'Table': {
- 'TableStatus': 'ACTIVE'
- }
- }
+ mock_describe_table.side_effect = client_error
+ self.backend._wait_for_table_status = MagicMock()
self.backend._get_or_create_table()
+ mock_describe_table.assert_called_once_with(
+ TableName=self.backend.table_name
+ )
mock_create_table.assert_called_once_with(
**self.backend._get_table_schema()
)
def test_get_or_create_table_already_exists(self):
- from botocore.exceptions import ClientError
-
self.backend._client = MagicMock()
mock_create_table = self.backend._client.create_table = MagicMock()
- client_error = ClientError(
- {
- 'Error': {
- 'Code': 'ResourceInUseException',
- 'Message': 'Table already exists: {}'.format(
- self.backend.table_name
- )
- }
- },
- 'CreateTable'
- )
- mock_create_table.side_effect = client_error
mock_describe_table = self.backend._client.describe_table = \
MagicMock()
@@ -167,6 +162,7 @@ def test_get_or_create_table_already_exists(self):
mock_describe_table.assert_called_once_with(
TableName=self.backend.table_name
)
+ mock_create_table.assert_not_called()
def test_wait_for_table_status(self):
self.backend._client = MagicMock()
| DynamoDB result backend: incorrect exception handling when table exists
The following code
https://github.com/celery/celery/blob/d0f5300691ca594f2311daf542aa63367622c027/celery/backends/dynamodb.py#L192
tries to create or get the table. If the table exists but the role executing that code on AWS does not have the `CreateTable` permission, the raised exception is not the one that particular line expects: the table exists, _and_ an exception is still raised, because the code tries to create it while lacking the permission.
To reproduce:
* celery 5.0.5
* create the result backend table and give the following permissions to the role executing celery on that table:
```
"dynamodb:DescribeTable",
"dynamodb:PutItem",
"dynamodb:UpdateItem",
"dynamodb:DeleteItem",
"dynamodb:BatchWriteItem",
"dynamodb:GetItem",
"dynamodb:BatchGetItem",
"dynamodb:Scan",
"dynamodb:Query",
"dynamodb:ConditionCheckItem"
```
* remove the permission CreateTable from the role executing the code
Possible solution:
* handle the check of the table's existence with another boto3 call such as [`describe_table`](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/dynamodb.html#DynamoDB.Client.describe_table) and then fall back to table creation (see the sketch after this list)
* provide a configuration that, when indicated, assumes the existence of the table
* handle that exception differently
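A minimal sketch of the first option (which is also what the patch above ends up doing), assuming a plain boto3 DynamoDB client; the function and variable names here are illustrative:
```python
from botocore.exceptions import ClientError


def get_or_create_table(client, table_name, table_schema):
    # Describing first only needs dynamodb:DescribeTable when the table
    # already exists; CreateTable permission is needed only on first run.
    try:
        return client.describe_table(TableName=table_name)
    except ClientError as e:
        if e.response['Error'].get('Code') != 'ResourceNotFoundException':
            raise
    # Table is genuinely missing: fall back to creating it.
    description = client.create_table(**table_schema)
    client.get_waiter('table_exists').wait(TableName=table_name)
    return description
```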
| > Possible solution:
>
> * handle the check of the table's existence with another boto3 call such as [`describe_table`](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/dynamodb.html#DynamoDB.Client.describe_table) and then fall back to table creation
>
> * provide a configuration that, when indicated, assumes the existence of the table
>
> * handle that exception differently
I am no expert on DynamoDB, but if you can come up with a change request implementing your proposed solutions, we could verify it and help improve it.
@galCohen88 can you try to fix this?
@lyajedi let's continue here | 2022-08-30T15:23:38 |
celery/celery | 7,785 | celery__celery-7785 | [
"7783"
] | 88a031634b03210bffa417b41ec0bd8cf0876ba0 | diff --git a/celery/bin/celery.py b/celery/bin/celery.py
--- a/celery/bin/celery.py
+++ b/celery/bin/celery.py
@@ -1,6 +1,7 @@
"""Celery Command Line Interface."""
import os
import pathlib
+import sys
import traceback
try:
@@ -75,7 +76,16 @@ def convert(self, value, param, ctx):
APP = App()
-@with_plugins(entry_points().get('celery.commands', []))
+if sys.version_info >= (3, 10):
+ _PLUGINS = entry_points(group='celery.commands')
+else:
+ try:
+ _PLUGINS = entry_points().get('celery.commands', [])
+ except AttributeError:
+ _PLUGINS = entry_points().select(group='celery.commands')
+
+
+@with_plugins(_PLUGINS)
@click.group(cls=DYMGroup, invoke_without_command=True)
@click.option('-A',
'--app',
diff --git a/celery/utils/imports.py b/celery/utils/imports.py
--- a/celery/utils/imports.py
+++ b/celery/utils/imports.py
@@ -141,7 +141,14 @@ def gen_task_name(app, name, module_name):
def load_extension_class_names(namespace):
- for ep in entry_points().get(namespace, []):
+ if sys.version_info >= (3, 10):
+ _entry_points = entry_points(group=namespace)
+ else:
+ try:
+ _entry_points = entry_points().get(namespace, [])
+ except AttributeError:
+ _entry_points = entry_points().select(group=namespace)
+ for ep in _entry_points:
yield ep.name, ep.value
| Celery Import Error
Unable to import the Celery module when creating a simple app:
from celery import Celery
ImportError: cannot import name 'Celery' from 'celery'
Additional info:
This issue doesn't occur when we downgrade `importlib-metadata` to 4.12.0.
Env details

| Hey @marimuthuei :wave:,
Thank you for opening an issue. We will get back to you as soon as we can.
Also, check out our [Open Collective](https://opencollective.com/celery) and consider backing us - every little helps!
We also offer priority support for our sponsors.
If you require immediate assistance please consider sponsoring us.
What Python version? It seems to occur only with Python 3.7 (#7784).
OK, I guess that's because `importlib-metadata` is not installed when you install celery in a Python 3.8 environment.
Similar imports are failing (e.g. `current_app`) with the same behavior (works when downgrading `importlib-metadata`). It seems to only happen on Python 3.7.
Tests with py3.8 **and** importlib-metadata work for me.
It's a Python 3.7 env.
This seems to be the cause: https://github.com/python/importlib_metadata/pull/405
```python
Successfully installed importlib_metadata-4.13.0
$ python
Python 3.7.9 (default, Jul 5 2021, 22:17:33)
[GCC 9.3.0] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> from importlib_metadata import entry_points
>>> entry_points().get("celery.result_backends", list())
__main__:1: DeprecationWarning: SelectableGroups dict interface is deprecated. Use select.
[]
```
So I guess we should fix
https://github.com/celery/celery/blob/59263b0409e3f02dc16ca8a3bd1e42b5a3eba36d/celery/utils/imports.py#L142-L145
The kombu package probably has the same problem
https://github.com/celery/kombu/blob/97e887605b21a110e0ec304913930a8115987c6a/kombu/utils/compat.py#L93 | 2022-10-03T09:44:32 |
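A hedged sketch of the version-dispatch pattern the patch above implements: the stdlib `importlib.metadata` on Python 3.10+, the pre-4.13 dict interface of the backport, and the newer `.select()` API. Exactly where `entry_points` is imported from may differ in celery itself:
```python
import sys


def iter_entry_points(group):
    if sys.version_info >= (3, 10):
        from importlib.metadata import entry_points
        return entry_points(group=group)  # selectable stdlib API
    from importlib_metadata import entry_points  # backport on 3.7-3.9
    eps = entry_points()
    try:
        return eps.get(group, [])  # dict interface (backport < 4.13)
    except AttributeError:
        return eps.select(group=group)  # newer backport releases
```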
|
celery/celery | 7,873 | celery__celery-7873 | [
"7713"
] | be1d3c086d5059f9ac261744909b8c624a9b0983 | diff --git a/celery/result.py b/celery/result.py
--- a/celery/result.py
+++ b/celery/result.py
@@ -651,8 +651,11 @@ def ready(self):
def completed_count(self):
"""Task completion count.
+ Note that `complete` means `successful` in this context. In other words, the
+ return value of this method is the number of ``successful`` tasks.
+
Returns:
- int: the number of tasks completed.
+ int: the number of complete (i.e. successful) tasks.
"""
return sum(int(result.successful()) for result in self.results)
| completed_count reports only successfully completed tasks
Not sure if that's on purpose but it looks like `completed_count` only takes into account the tasks that have completed successfully:
https://github.com/celery/celery/blob/3db7c9dde9a4d5aa9c0eda8c43a219de1baa9f02/celery/result.py#L633
Would it make more sense to report:
```
return sum(int(result.ready()) for result in self.results)
```
the task count in the "ready" state?
Happy to open a PR if that's an acceptable change.
| Hey @emilroz :wave:,
Thank you for opening an issue. We will get back to you as soon as we can.
Also, check out our [Open Collective](https://opencollective.com/celery) and consider backing us - every little helps!
We also offer priority support for our sponsors.
If you require immediate assistance please consider sponsoring us.
One might say that a task has been completed only if it hasn't failed.
Otherwise, the execution of the task is halted and it is not completed.
I don't think we should change the public API between minor versions but I'm open to amending the documentation.
Sounds good to me. I'm more than happy to contribute the changes. | 2022-11-02T12:47:30 |
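For illustration, a small sketch of the distinction being discussed; it assumes an already-applied `GroupResult`, and the helper name is made up:
```python
def progress(group_result):
    # completed_count() counts only SUCCESS results, while ready() is
    # true for any terminal state (SUCCESS, FAILURE, REVOKED), which is
    # usually what a progress indicator actually wants.
    return {
        'successful': group_result.completed_count(),
        'finished': sum(int(r.ready()) for r in group_result.results),
        'total': len(group_result.results),
    }
```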
|
celery/celery | 7,945 | celery__celery-7945 | [
"4806"
] | dd811b37717635b5f7151a7adf9f5bf12e1bc0c6 | diff --git a/celery/app/defaults.py b/celery/app/defaults.py
--- a/celery/app/defaults.py
+++ b/celery/app/defaults.py
@@ -78,6 +78,7 @@ def __repr__(self):
scheduler=Option('celery.beat:PersistentScheduler'),
schedule_filename=Option('celerybeat-schedule'),
sync_every=Option(0, type='int'),
+ cron_starting_deadline=Option(None, type=int)
),
broker=Namespace(
url=Option(None, type='string'),
diff --git a/celery/schedules.py b/celery/schedules.py
--- a/celery/schedules.py
+++ b/celery/schedules.py
@@ -36,7 +36,6 @@
{0._orig_day_of_week} (m/h/dM/MY/d)>\
"""
-
SOLAR_INVALID_LATITUDE = """\
Argument latitude {lat} is invalid, must be between -90 and 90.\
"""
@@ -608,16 +607,48 @@ def remaining_estimate(self, last_run_at, ffwd=ffwd):
def is_due(self, last_run_at):
"""Return tuple of ``(is_due, next_time_to_run)``.
+ If :setting:`beat_cron_starting_deadline` has been specified, the
+ scheduler will make sure that the `last_run_at` time is within the
+ deadline. This prevents tasks that could have been run according to
+ the crontab, but didn't, from running again unexpectedly.
+
Note:
Next time to run is in seconds.
SeeAlso:
:meth:`celery.schedules.schedule.is_due` for more information.
"""
+
rem_delta = self.remaining_estimate(last_run_at)
- rem = max(rem_delta.total_seconds(), 0)
+ rem_secs = rem_delta.total_seconds()
+ rem = max(rem_secs, 0)
due = rem == 0
- if due:
+
+ deadline_secs = self.app.conf.beat_cron_starting_deadline
+ has_passed_deadline = False
+ if deadline_secs is not None:
+ # Make sure we're looking at the latest possible feasible run
+ # date when checking the deadline.
+ last_date_checked = last_run_at
+ last_feasible_rem_secs = rem_secs
+ while rem_secs < 0:
+ last_date_checked = last_date_checked + abs(rem_delta)
+ rem_delta = self.remaining_estimate(last_date_checked)
+ rem_secs = rem_delta.total_seconds()
+ if rem_secs < 0:
+ last_feasible_rem_secs = rem_secs
+
+ # if rem_secs becomes 0 or positive, second-to-last
+ # last_date_checked must be the last feasible run date.
+ # Check if the last feasible date is within the deadline
+ # for running
+ has_passed_deadline = -last_feasible_rem_secs > deadline_secs
+ if has_passed_deadline:
+ # Should not be due if we've passed the deadline for looking
+ # at past runs
+ due = False
+
+ if due or has_passed_deadline:
rem_delta = self.remaining_estimate(self.now())
rem = max(rem_delta.total_seconds(), 0)
return schedstate(due, rem)
| diff --git a/t/unit/app/test_beat.py b/t/unit/app/test_beat.py
--- a/t/unit/app/test_beat.py
+++ b/t/unit/app/test_beat.py
@@ -696,16 +696,19 @@ def now_func():
'first_missed', 'first_missed',
last_run_at=now_func() - timedelta(minutes=2),
total_run_count=10,
+ app=self.app,
schedule=app_schedule['first_missed']['schedule']),
'second_missed': beat.ScheduleEntry(
'second_missed', 'second_missed',
last_run_at=now_func() - timedelta(minutes=2),
total_run_count=10,
+ app=self.app,
schedule=app_schedule['second_missed']['schedule']),
'non_missed': beat.ScheduleEntry(
'non_missed', 'non_missed',
last_run_at=now_func() - timedelta(minutes=2),
total_run_count=10,
+ app=self.app,
schedule=app_schedule['non_missed']['schedule']),
}
diff --git a/t/unit/app/test_schedules.py b/t/unit/app/test_schedules.py
--- a/t/unit/app/test_schedules.py
+++ b/t/unit/app/test_schedules.py
@@ -800,3 +800,136 @@ def test_yearly_execution_is_not_due(self):
due, remaining = self.yearly.is_due(datetime(2009, 3, 12, 7, 30))
assert not due
assert remaining == 4 * 24 * 60 * 60 - 3 * 60 * 60
+
+ def test_execution_not_due_if_task_not_run_at_last_feasible_time_outside_deadline(
+ self):
+ """If the crontab schedule was added after the task was due, don't
+ immediately fire the task again"""
+ # could have feasibly been run on 12/5 at 7:30, but wasn't.
+ self.app.conf.beat_cron_starting_deadline = 3600
+ last_run = datetime(2022, 12, 4, 10, 30)
+ now = datetime(2022, 12, 5, 10, 30)
+ expected_next_execution_time = datetime(2022, 12, 6, 7, 30)
+ expected_remaining = (
+ expected_next_execution_time - now).total_seconds()
+
+ # Run the daily (7:30) crontab with the current date
+ with patch_crontab_nowfun(self.daily, now):
+ due, remaining = self.daily.is_due(last_run)
+ assert remaining == expected_remaining
+ assert not due
+
+ def test_execution_not_due_if_task_not_run_at_last_feasible_time_no_deadline_set(
+ self):
+ """Same as above test except there's no deadline set, so it should be
+ due"""
+ last_run = datetime(2022, 12, 4, 10, 30)
+ now = datetime(2022, 12, 5, 10, 30)
+ expected_next_execution_time = datetime(2022, 12, 6, 7, 30)
+ expected_remaining = (
+ expected_next_execution_time - now).total_seconds()
+
+ # Run the daily (7:30) crontab with the current date
+ with patch_crontab_nowfun(self.daily, now):
+ due, remaining = self.daily.is_due(last_run)
+ assert remaining == expected_remaining
+ assert due
+
+ def test_execution_due_if_task_not_run_at_last_feasible_time_within_deadline(
+ self):
+ # Could have feasibly been run on 12/5 at 7:30, but wasn't. We are
+ # still within a 1 hour deadline from the
+ # last feasible run, so the task should still be due.
+ self.app.conf.beat_cron_starting_deadline = 3600
+ last_run = datetime(2022, 12, 4, 10, 30)
+ now = datetime(2022, 12, 5, 8, 0)
+ expected_next_execution_time = datetime(2022, 12, 6, 7, 30)
+ expected_remaining = (
+ expected_next_execution_time - now).total_seconds()
+
+ # run the daily (7:30) crontab with the current date
+ with patch_crontab_nowfun(self.daily, now):
+ due, remaining = self.daily.is_due(last_run)
+ assert remaining == expected_remaining
+ assert due
+
+ def test_execution_due_if_task_not_run_at_any_feasible_time_within_deadline(
+ self):
+ # Could have feasibly been run on 12/4 at 7:30, or 12/5 at 7:30,
+ # but wasn't. We are still within a 1 hour
+ # deadline from the last feasible run (12/5), so the task should
+ # still be due.
+ self.app.conf.beat_cron_starting_deadline = 3600
+ last_run = datetime(2022, 12, 3, 10, 30)
+ now = datetime(2022, 12, 5, 8, 0)
+ expected_next_execution_time = datetime(2022, 12, 6, 7, 30)
+ expected_remaining = (
+ expected_next_execution_time - now).total_seconds()
+
+ # Run the daily (7:30) crontab with the current date
+ with patch_crontab_nowfun(self.daily, now):
+ due, remaining = self.daily.is_due(last_run)
+ assert remaining == expected_remaining
+ assert due
+
+ def test_execution_not_due_if_task_not_run_at_any_feasible_time_outside_deadline(
+ self):
+ """Verifies that remaining is still the time to the next
+ feasible run date even though the original feasible date
+ was passed over in favor of a newer one."""
+ # Could have feasibly been run on 12/4 or 12/5 at 7:30,
+ # but wasn't.
+ self.app.conf.beat_cron_starting_deadline = 3600
+ last_run = datetime(2022, 12, 3, 10, 30)
+ now = datetime(2022, 12, 5, 11, 0)
+ expected_next_execution_time = datetime(2022, 12, 6, 7, 30)
+ expected_remaining = (
+ expected_next_execution_time - now).total_seconds()
+
+ # run the daily (7:30) crontab with the current date
+ with patch_crontab_nowfun(self.daily, now):
+ due, remaining = self.daily.is_due(last_run)
+ assert remaining == expected_remaining
+ assert not due
+
+ def test_execution_not_due_if_last_run_in_future(self):
+ # Should not run if the last_run hasn't happened yet.
+ last_run = datetime(2022, 12, 6, 7, 30)
+ now = datetime(2022, 12, 5, 10, 30)
+ expected_next_execution_time = datetime(2022, 12, 7, 7, 30)
+ expected_remaining = (
+ expected_next_execution_time - now).total_seconds()
+
+ # Run the daily (7:30) crontab with the current date
+ with patch_crontab_nowfun(self.daily, now):
+ due, remaining = self.daily.is_due(last_run)
+ assert not due
+ assert remaining == expected_remaining
+
+ def test_execution_not_due_if_last_run_at_last_feasible_time(self):
+ # Last feasible time is 12/5 at 7:30
+ last_run = datetime(2022, 12, 5, 7, 30)
+ now = datetime(2022, 12, 5, 10, 30)
+ expected_next_execution_time = datetime(2022, 12, 6, 7, 30)
+ expected_remaining = (
+ expected_next_execution_time - now).total_seconds()
+
+ # Run the daily (7:30) crontab with the current date
+ with patch_crontab_nowfun(self.daily, now):
+ due, remaining = self.daily.is_due(last_run)
+ assert remaining == expected_remaining
+ assert not due
+
+ def test_execution_not_due_if_last_run_past_last_feasible_time(self):
+ # Last feasible time is 12/5 at 7:30
+ last_run = datetime(2022, 12, 5, 8, 30)
+ now = datetime(2022, 12, 5, 10, 30)
+ expected_next_execution_time = datetime(2022, 12, 6, 7, 30)
+ expected_remaining = (
+ expected_next_execution_time - now).total_seconds()
+
+ # Run the daily (7:30) crontab with the current date
+ with patch_crontab_nowfun(self.daily, now):
+ due, remaining = self.daily.is_due(last_run)
+ assert remaining == expected_remaining
+ assert not due
| celery.schedules.crontab is_due logic can trigger periodic celery beat tasks at arbitrary times unrelated to the crontab parameters when last_run_at value is sufficiently old
There's an issue in the implementation of `celery.schedules.crontab` method that can cause `crontab.is_due` to claim a schedule is due at a time that is completely unrelated to the given crontab parameters. This appears to happen in cases where the `last_run_at` value is older than the most recent feasible time the schedule could have run. It looks like that this issue was introduced as part of complex logic added nearly 8 years ago to improve the accuracy of time remaining estimates for `crontab` periodic tasks: 4ed89ec49582b540149cf06047f091ebd20fb300
## Checklist
Issue first observed in a celery deployment running celery v3.1.19
From inspecting the `celery.schedules.crontab` code it appears likely that this issue is present in all celery versions as far back as v2.1.0 through to v4.1.1 .
Unit tests (please see below) confirm issue is still present in master (b599b96960be9dd42b3dee82a58bd1d711df0317 at time of writing).
## Steps to reproduce
Please apply this patch to celery master branch, remove `@skip.todo` from the first new test, run unit tests, observe the first of these added unit tests fails:
```
diff --git a/t/unit/app/test_schedules.py b/t/unit/app/test_schedules.py
index a7b3025..0340461 100644
--- a/t/unit/app/test_schedules.py
+++ b/t/unit/app/test_schedules.py
@@ -26,6 +26,18 @@ def patch_crontab_nowfun(cls, retval):
cls.nowfun = prev_nowfun
+def is_time_feasible_wrt_crontab_schedule(t, z):
+ # z : celery.schedules.crontab instance
+ t = z.maybe_make_aware(t)
+ return (
+ t.month in z.month_of_year and
+ (t.isoweekday() % 7) in z.day_of_week and
+ t.day in z.day_of_month and
+ t.hour in z.hour and
+ t.minute in z.minute
+ )
+
+
@skip.unless_module('ephem')
class test_solar:
@@ -803,3 +815,59 @@ class test_crontab_is_due:
due, remaining = self.yearly.is_due(datetime(2009, 3, 12, 7, 30))
assert not due
assert remaining == 4 * 24 * 60 * 60 - 3 * 60 * 60
+
+ @skip.todo('FIXME crontab logic is defective when last_run_at is older than the most recent feasible time wrt schedule')
+ def test_daily_execution_if_last_run_at_was_days_ago_and_current_time_does_not_match_crontab_schedule_then_execution_is_not_due(self):
+ z = self.crontab(hour=7, minute=30)
+ last_run_at = datetime(2018, 6, 1, 7, 30)
+ now = datetime(2018, 6, 9, 23, 48)
+ expected_next_execution_time = datetime(2018, 6, 10, 7, 30)
+ expected_remaining = (expected_next_execution_time - now).total_seconds()
+ # check our assumptions
+ assert is_time_feasible_wrt_crontab_schedule(last_run_at, z)
+ assert not is_time_feasible_wrt_crontab_schedule(now, z)
+ assert is_time_feasible_wrt_crontab_schedule(expected_next_execution_time, z)
+ assert now < expected_next_execution_time
+ assert expected_remaining == (7 * 60 + 30 + 12) * 60
+ # test is_due
+ with patch_crontab_nowfun(z, now):
+ due, remaining = z.is_due(last_run_at=last_run_at)
+ assert remaining == expected_remaining
+ assert not due
+
+ def test_daily_execution_if_last_run_at_was_the_most_recent_feasible_time_wrt_schedule_in_past_and_current_time_does_not_match_crontab_schedule_then_execution_is_not_due(self):
+ z = self.crontab(hour=7, minute=30)
+ last_run_at = datetime(2018, 6, 9, 7, 30)
+ now = datetime(2018, 6, 9, 23, 48)
+ expected_next_execution_time = datetime(2018, 6, 10, 7, 30)
+ expected_remaining = (expected_next_execution_time - now).total_seconds()
+ # check our assumptions
+ assert is_time_feasible_wrt_crontab_schedule(last_run_at, z)
+ assert not is_time_feasible_wrt_crontab_schedule(now, z)
+ assert is_time_feasible_wrt_crontab_schedule(expected_next_execution_time, z)
+ assert now < expected_next_execution_time
+ assert expected_remaining == (7 * 60 + 30 + 12) * 60
+ # test is_due
+ with patch_crontab_nowfun(z, now):
+ due, remaining = z.is_due(last_run_at=last_run_at)
+ assert remaining == expected_remaining
+ assert not due
+
+ def test_daily_execution_if_last_run_at_was_more_recent_than_the_most_recent_feasible_time_wrt_schedule_in_past_and_current_time_does_not_match_crontab_schedule_then_execution_is_not_due(self):
+ z = self.crontab(hour=7, minute=30)
+ last_run_at = datetime(2018, 6, 9, 10, 30) # not feasible wrt to current schedule. case can happen if schedule is modified after a run
+ now = datetime(2018, 6, 9, 23, 48)
+ expected_next_execution_time = datetime(2018, 6, 10, 7, 30)
+ expected_remaining = (expected_next_execution_time - now).total_seconds()
+ # check our assumptions
+ assert not is_time_feasible_wrt_crontab_schedule(last_run_at, z)
+ assert not is_time_feasible_wrt_crontab_schedule(now, z)
+ assert is_time_feasible_wrt_crontab_schedule(expected_next_execution_time, z)
+ assert now < expected_next_execution_time
+ assert expected_remaining == (7 * 60 + 30 + 12) * 60
+ # test is_due
+ with patch_crontab_nowfun(z, now):
+ due, remaining = z.is_due(last_run_at=last_run_at)
+ assert remaining == expected_remaining
+ assert not due
+
```
## Expected behavior
No matter what the value of `last_run_at` is, `crontab.is_due(last_run_at)` should never return `schedstate(True, rem)` when the current time `now` is not feasible with respect to the given crontab parameters.
## Actual behavior
If the `last_run_at` value is older than the most recent time that is feasible with respect to the given crontab parameters, then `crontab.is_due(last_run_at)` returns `schedstate(True, rem)` even when the current time is not feasible with respect to the crontab parameters.
## Comments
This behaviour is surprising, as it is an undocumented departure from cron-like behaviour; it acts like an undocumented, uncontrollable variant of `anacron`. "Uncontrollable" in the sense that, unlike with `anacrontab`'s `START_HOURS_RANGE` parameter, there is no control at all over when tasks will be run when a scheduled execution is missed.
We experience this issue in production using the venerable Celery version 3.1.19: we have a celery beat process that is backed by a database, using custom scheduler code derived from the django-celery-beat scheduler. Suppose we stop this celery beat process for some reason, such as scheduled maintenance or a deployment that needs to redeploy code to our celery cluster. When we later restart celery beat, some or all `celery.schedules.crontab` scheduled tasks may immediately execute if there was a time during the downtime period that matches the crontab parameters.
One hack that can mitigate this behaviour is updating the "last_run_at" value for all celery crontab scheduled periodic tasks to the current time when celery beat starts, before celery beat makes any calls to the `celery.schedules.crontab.is_due` method.
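A sketch of that hack using the `beat_init` signal; it assumes the default `PersistentScheduler`-style scheduler, whose `schedule` is a dict of entries:
```python
from celery.signals import beat_init


@beat_init.connect
def reset_last_run(sender=None, **kwargs):
    # `sender` is the beat Service; touch every entry before the first
    # is_due() call so nothing fires just because beat was down.
    scheduler = sender.scheduler
    now = scheduler.app.now()
    for entry in scheduler.schedule.values():
        entry.last_run_at = now
    scheduler.sync()
```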
| I am in the process of rewriting the `celery.schedules.crontab` logic to fix this cleanly. If I can get the green light from my client to release a fix upstream, I will submit a pull request with a proposed patch within the next week or two.
@fcostin did you ever complete your `crontab` rewrite and/or (not) get permission to release?
Can you send a PR with proposed fix?
@fdemmer -
One crude way to fix this is to simply revert the old change that introduced this issue: https://github.com/celery/celery/commit/4ed89ec49582b540149cf06047f091ebd20fb300 , in particular the parts of the patch to `celery/schedules.py` that introduce the much more complicated logic that estimates the time remaining until the next job.
This would have the downside of breaking the logic that estimates when the next scheduled task is due, causing the scheduler to poll every second to re-check whether a task is due yet. But it would fix this issue.
I wrote a better patch for this last year (one that didn't rely on polling every second and could estimate the remaining time until the next scheduled task), which resolved the issue internally, but I finished up my contract shortly after and ran out of time to lobby for permission to release the fix upstream. I'll reach out and see if anyone there is willing to work with me to get the patch released (chances are low, but worth a try...).
thanks for the response @fcostin :)
My requirement is adding tz support. I thought about rewriting/fixing/extending celery's crontab, but decided it would be better not to hack around in that code or pile on top of it, so I wrote my own using [tzcron](https://github.com/bloomberg/tzcron) to parse the cron expression and calculate the next event.
- it calculates the timestamp for the next trigger based only on "now" (ignoring `last_run_at`), so the bug from this issue _should_ be fixed.
- the cron parameters can be passed as string or separate args, in the order requested in #4570
https://gist.github.com/fdemmer/7551bff2bab80b56aac5018060aded55
It hasn't been used a lot and has no tests, but it _seems_ to work and is licensed under MIT like all my gists.
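To make the idea concrete, here is a sketch of computing the next fire time purely from "now" (using `croniter` as an illustrative stand-in, since the gist itself builds on `tzcron`):
```python
from datetime import datetime, timezone

from croniter import croniter  # stand-in for tzcron in this sketch


def seconds_until_next_run(expression: str, now: datetime) -> float:
    # last_run_at never enters the calculation, so a stale value can
    # never make a task "due" at an infeasible time.
    next_run = croniter(expression, now).get_next(datetime)
    return (next_run - now).total_seconds()


now = datetime(2018, 6, 9, 23, 48, tzinfo=timezone.utc)
print(seconds_until_next_run('30 7 * * *', now))  # 27720.0, i.e. 7h42m
```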
Hi @fdemmer -- adding timezone support sounds like a great idea, as does your implementation that avoids the existing celery crontab scheduler logic completely.
Your `pytzcrontab` implementation looks pretty good. If I had been aware of this last year, it would likely have been a cleaner fix than my patch. Thanks for sharing!
> it hasn't been used a lot and has no tests, but seems to work and is licensed under MIT like all my gists.
Sounds promising!
I have subjected your `pytzcrontab` class to the existing unit tests for the crontab scheduler -- with some patches to those unit tests to fix up timezones as necessary -- including adding three new tests from my patch in this issue.
here are my experimental patches, including adding `celery-tzcron.py` from your gist as `celery/scheduler_tzcron.py`:
https://github.com/fcostin/celery/commits/scheduler_tzcron_experiments
the results are fairly good:
1. `pytzcrontab` appears to fix this issue, as you predict. This makes sense.
2. With a small compatibility patch for the existing `crontab` scheduler to define the `hour` ... `day_of_year` properties, `pytzcrontab` passes most of the old unit tests.
3. With some more patches to your `is_due` and `remaining_estimate` logic, the `pytzcrontab` implementation behaves more closely to how the existing `crontab` class decides when things are due. This change appears to be necessary so that `pytzcrontab` does not say the schedule is due when the schedule last ran a very short time ago (e.g. 0 seconds ago!). I am not completely happy with my proposed change here; it doesn't seem completely clean, so maybe you can think of a nicer way to do it.
4. `pytzcrontab` introduces one regression that the existing celery unit tests can detect -- it goes into an infinite loop if we try to schedule something on an impossible day (31st of April). I looked for an obvious way to avoid this but didn't find a good one, so I've marked the test as skipped; this would be good to fix. I guess it could be fixed by additional up-front validation logic, but there is still some risk that a gap between validation and the behaviour of the `tzcron` library might lead to an infinite loop.
```
@pytest.mark.skip("broken - pytzcrontab goes into an infinite loop here")
def test_invalid_specification(self):
# *** WARNING ***
# This test triggers an infinite loop in case of a regression
with pytest.raises(RuntimeError):
self.next_ocurrance(
self.pytzcrontab(day_of_month=31, month_of_year=4),
datetime_utc(2010, 1, 28, 14, 30, 15),
)
```
What do you think?
Would it make sense to contribute something like this as a pull request into celery itself?
This issue is fairly old; is there a fix or a workaround for it? I see the gist above, but I'd rather not have to maintain some other version of celery.
Right now I'm facing an issue with duplicate ETL imports because of this. I run all crontab schedules, but if I make an on-the-fly change, either adding a new periodic task or changing the schedule of one, it triggers ALL of my tasks to run.
That's very unfortunate. We're sorry that this is happening.
I'm unaware of a workaround for this issue.
Unfortunately, we never received a PR which fixes this issue.
If you'd like to help, please take the patch above and do the work necessary to get it merged.
In all honesty, this seems like it's been an issue for a very long time, and I'm frankly surprised it's not a much higher-priority bug to fix. For a scheduling app to incorrectly schedule a task seems like a pretty big issue.
Anyways, I appreciate you taking the time to respond. I can try to look at the code, but in all honesty I'm not very good at following large projects like Celery. If I get some time over the Christmas holiday I will have a go at trying to see where it can be fixed.
Stay safe and have a wonderful holiday!
> In all honesty, this seems like it's been an issue for a very long time, and I'm frankly surprised it's not a much higher-priority bug to fix.
>
> Anyways, I appreciate you taking the time to respond. I can try to look at the code, but in all honesty I'm not very good at following large projects like Celery. If I get some time over the Christmas holiday I will have a go at trying to see where it can be fixed.
>
> Stay safe and have a wonderful holiday!
If you can take the lead, we could help guide you through all the way.
So now I'm not sure if my issue is related to this. I was trying to reproduce this with my app and wasn't able to reproduce it until today.
The way it triggered wasn't by adding/removing/updating a periodic task, but by adding a new schedule and then assigning it to a newly created periodic task. I don't know if this is due to the library I'm using to store it in a database or related to this issue. I'll have to dig a bit deeper when I can.
We ran into this issue on our production setup and charged our subscription customers ahead of schedule when we restarted the services with a slight change in the scheduled time. This should be a higher priority bug.
I will try and post the steps to reproduce this reliably.
As a workaround: since I'm using an external scheduler library to manage the beat schedules, I simply have an event listener on beat startup null out the `last_run_at` field for each schedule in the database. This prevents beat from running any of the schedules prematurely.
>
>
> In all honesty, this seems like it's been an issue for a very long time, and I'm frankly surprised it's not a much higher-priority bug to fix.
This is not a priority as none of us needs to fix this and no one is paying us to do so.
I am not sure if we are experiencing this same issue, here is what the celerybeat-schedule looks like:
```
>>> schedule = shelve.open("celerybeat-schedule", writeback=True)
>>> task = schedule["entries"]["My task"]
>>> task.last_run_at
datetime.datetime(2021, 5, 18, 8, 36, 0, 99, tzinfo=<UTC>)
>>> task.is_due()
schedstate(is_due=True, next=48.84625)
>>> task.schedule
<crontab: * 8 * * * (m/h/d/dM/MY)>
>>> datetime.now()
datetime.datetime(2021, 5, 18, 8, 39, 36, 601785)
```
The above task runs continuously between 8 am and 9 am __sometimes__, but not always... Using `celery==5.0.5`.
@latusaki Do you feel like investigating further?
Our capacity is already maxed out with the 5.1 release.
@binu-mobiux Do you have a way to reproduce this issue?
@thedrow I can't commit as I don't have enough time. I'm not expecting a fix; I was just unsure if this bug is the same one described above. I have now changed my crontab from 8:00 to 7:59 and am waiting to see if the problem resurfaces.
**Update**
With time set to 7:59 the task only runs once at the specified time, haven't observed the issue over the last two weeks.
I have the same issue. I use `crontab(hour='*/3')` and once the top of the hour hits, it runs in an infinite loop.
@dangell7 Do you have time to investigate or contribute a PR? | 2022-12-06T15:35:26 |
celery/celery | 7,951 | celery__celery-7951 | [
"7946"
] | dd811b37717635b5f7151a7adf9f5bf12e1bc0c6 | diff --git a/celery/app/defaults.py b/celery/app/defaults.py
--- a/celery/app/defaults.py
+++ b/celery/app/defaults.py
@@ -89,6 +89,7 @@ def __repr__(self):
connection_retry=Option(True, type='bool'),
connection_retry_on_startup=Option(None, type='bool'),
connection_max_retries=Option(100, type='int'),
+ channel_error_retry=Option(False, type='bool'),
failover_strategy=Option(None, type='string'),
heartbeat=Option(120, type='int'),
heartbeat_checkrate=Option(3.0, type='int'),
diff --git a/celery/worker/consumer/consumer.py b/celery/worker/consumer/consumer.py
--- a/celery/worker/consumer/consumer.py
+++ b/celery/worker/consumer/consumer.py
@@ -328,9 +328,13 @@ def start(self):
crit('Frequent restarts detected: %r', exc, exc_info=1)
sleep(1)
self.restart_count += 1
+ if self.app.conf.broker_channel_error_retry:
+ recoverable_errors = (self.connection_errors + self.channel_errors)
+ else:
+ recoverable_errors = self.connection_errors
try:
blueprint.start(self)
- except self.connection_errors as exc:
+ except recoverable_errors as exc:
# If we're not retrying connections, we need to properly shutdown or terminate
# the Celery main process instead of abruptly aborting the process without any cleanup.
is_connection_loss_on_startup = self.restart_count == 0
| diff --git a/t/unit/worker/test_consumer.py b/t/unit/worker/test_consumer.py
--- a/t/unit/worker/test_consumer.py
+++ b/t/unit/worker/test_consumer.py
@@ -4,6 +4,7 @@
from unittest.mock import MagicMock, Mock, call, patch
import pytest
+from amqp import ChannelError
from billiard.exceptions import RestartFreqExceeded
from celery import bootsteps
@@ -310,6 +311,31 @@ def test_blueprint_restart_when_state_not_in_stop_conditions(self, broker_connec
c.start()
c.blueprint.restart.assert_called_once()
+ @pytest.mark.parametrize("broker_channel_error_retry", [True, False])
+ def test_blueprint_restart_for_channel_errors(self, broker_channel_error_retry):
+ c = self.get_consumer()
+
+ # ensure that WorkerShutdown is not raised
+ c.app.conf['broker_connection_retry'] = True
+ c.app.conf['broker_connection_retry_on_startup'] = True
+ c.app.conf['broker_channel_error_retry'] = broker_channel_error_retry
+ c.restart_count = -1
+
+ # ensure that blueprint state is not in stop conditions
+ c.blueprint.state = bootsteps.RUN
+ c.blueprint.start.side_effect = ChannelError()
+
+ # stops test from running indefinitely in the while loop
+ c.blueprint.restart.side_effect = self._closer(c)
+
+ # restarted only when broker_channel_error_retry is True
+ if broker_channel_error_retry:
+ c.start()
+ c.blueprint.restart.assert_called_once()
+ else:
+ with pytest.raises(ChannelError):
+ c.start()
+
def test_collects_at_restart(self):
c = self.get_consumer()
c.connection.collect.side_effect = MemoryError()
| Regard backend redis's switching to replica as recoverable error
# Checklist
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Enhancement%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical enhancement to an existing feature.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22Issue+Type%3A+Enhancement%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed enhancements.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the same enhancement was already implemented in the master branch.
- [x] I have included all related issues and possible duplicate issues in this issue
(If there are none, check this box anyway).
## Related Issues and Possible Duplicates
#### Related Issues
- https://github.com/celery/celery/issues/6348
- The situation is similar, but that issue is based on Redis Sentinel while we use AWS ElastiCache. The error message is also a bit different.
#### Possible Duplicates
- None
# Brief Summary
When AWS ElastiCache for Redis has a replication structure and a failover occurs, Celery doesn't fail over and the main process attempts a graceful shutdown. That means:
- the main process exits with an unrecoverable error when there are no remaining tasks
- the main process hangs until the remaining tasks finish
The second one is critical for us because we have many long-running tasks. The first one is less severe because we automatically reboot the process, but we'd like to avoid it too, since a Redis failover is expected behavior.
If some change is acceptable, we will try to create a pull request by ourselves.
# Detail
We got the following error when an ElastiCache failover happened.
```
[CRITICAL - 2022-11-04 10:45:27 - 21 worker:207] Unrecoverable error: ReadOnlyError("You can't write against a read only replica.")
Traceback (most recent call last):
File "/var/www/venv/lib/python3.7/site-packages/celery/worker/worker.py", line 203, in start
self.blueprint.start(self)
File "/var/www/venv/lib/python3.7/site-packages/celery/bootsteps.py", line 116, in start
step.start(parent)
File "/var/www/venv/lib/python3.7/site-packages/celery/bootsteps.py", line 365, in start
return self.obj.start()
File "/var/www/venv/lib/python3.7/site-packages/celery/worker/consumer/consumer.py", line 326, in start
blueprint.start(self)
File "/var/www/venv/lib/python3.7/site-packages/celery/bootsteps.py", line 116, in start
step.start(parent)
File "/var/www/venv/lib/python3.7/site-packages/celery/worker/consumer/consumer.py", line 618, in start
c.loop(*c.loop_args())
File "/var/www/venv/lib/python3.7/site-packages/celery/worker/loops.py", line 97, in asynloop
next(loop)
File "/var/www/venv/lib/python3.7/site-packages/kombu/asynchronous/hub.py", line 362, in create_loop
cb(*cbargs)
File "/var/www/venv/lib/python3.7/site-packages/kombu/transport/redis.py", line 1266, in on_readable
self.cycle.on_readable(fileno)
File "/var/www/venv/lib/python3.7/site-packages/kombu/transport/redis.py", line 504, in on_readable
chan.handlers[type]()
File "/var/www/venv/lib/python3.7/site-packages/kombu/transport/redis.py", line 898, in _brpop_read
**options)
File "/var/www/venv/lib/python3.7/site-packages/redis/client.py", line 1189, in parse_response
response = connection.read_response()
File "/var/www/venv/lib/python3.7/site-packages/redis/connection.py", line 817, in read_response
raise response
redis.exceptions.ReadOnlyError: You can't write against a read only replica.
```
As far as we checked, an error during connection becomes a `ConnectionError` and is regarded as "Recoverable", but `ReadOnlyError` (and its parent `ResponseError`) is regarded as "Unrecoverable".
# Design
## Architectural Considerations
Maybe this part of kombu is related:
https://github.com/celery/kombu/blob/v5.2.3/kombu/transport/redis.py#L128-L141
## Proposed Behavior
When an error happens due to the master switching to a replica, retry the connection and connect to the new master.
## Proposed UI/UX
- Add an option to regard `ResponseError` as "Recoverable", with a configuration key like `redis_retry_on_response_error` or `redis_retry_on_failover`.
But we're not sure if regarding ResponseError as "Recoverable" has a critical impact on existing behavior. We'd really appreciate advice from the community.
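The patch above settles on a transport-agnostic setting name instead; enabling it would look like this (the broker URL is a placeholder):
```python
app.conf.update(
    broker_url='redis://my-elasticache-endpoint:6379/0',  # placeholder
    # Treat channel errors (such as ReadOnlyError after a failover) as
    # recoverable, so the consumer restarts instead of shutting down.
    broker_channel_error_retry=True,  # defaults to False
)
```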
## Diagrams
N/A
## Alternatives
None
| Hey @nkns165 :wave:,
Thank you for opening an issue. We will get back to you as soon as we can.
Also, check out our [Open Collective](https://opencollective.com/celery) and consider backing us - every little helps!
We also offer priority support for our sponsors.
If you require immediate assistance please consider sponsoring us.
| 2022-12-09T16:00:52 |
celery/celery | 8,098 | celery__celery-8098 | [
"8080"
] | 3bff3f06740a0d509f807e14702f7144b043ae54 | diff --git a/celery/result.py b/celery/result.py
--- a/celery/result.py
+++ b/celery/result.py
@@ -14,7 +14,6 @@
from .app import app_or_default
from .exceptions import ImproperlyConfigured, IncompleteStream, TimeoutError
from .utils.graph import DependencyGraph, GraphFormatter
-from .utils.iso8601 import parse_iso8601
try:
import tblib
@@ -530,7 +529,7 @@ def date_done(self):
"""UTC date and time."""
date_done = self._get_task_meta().get('date_done')
if date_done and not isinstance(date_done, datetime.datetime):
- return parse_iso8601(date_done)
+ return datetime.datetime.fromisoformat(date_done)
return date_done
@property
diff --git a/celery/utils/iso8601.py b/celery/utils/iso8601.py
--- a/celery/utils/iso8601.py
+++ b/celery/utils/iso8601.py
@@ -37,6 +37,8 @@
from pytz import FixedOffset
+from celery.utils.deprecated import warn
+
__all__ = ('parse_iso8601',)
# Adapted from http://delete.me.uk/2005/03/iso8601.html
@@ -53,6 +55,7 @@
def parse_iso8601(datestring):
"""Parse and convert ISO-8601 string to datetime."""
+ warn("parse_iso8601", "v5.3", "v6", "datetime.datetime.fromisoformat")
m = ISO8601_REGEX.match(datestring)
if not m:
raise ValueError('unable to parse date string %r' % datestring)
diff --git a/celery/utils/time.py b/celery/utils/time.py
--- a/celery/utils/time.py
+++ b/celery/utils/time.py
@@ -13,7 +13,6 @@
from pytz import utc
from .functional import dictfilter
-from .iso8601 import parse_iso8601
from .text import pluralize
__all__ = (
@@ -257,7 +256,7 @@ def maybe_iso8601(dt):
return
if isinstance(dt, datetime):
return dt
- return parse_iso8601(dt)
+ return datetime.fromisoformat(dt)
def is_naive(dt):
| diff --git a/t/unit/tasks/test_tasks.py b/t/unit/tasks/test_tasks.py
--- a/t/unit/tasks/test_tasks.py
+++ b/t/unit/tasks/test_tasks.py
@@ -13,7 +13,6 @@
from celery.contrib.testing.mocks import ContextMock
from celery.exceptions import Ignore, ImproperlyConfigured, Retry
from celery.result import AsyncResult, EagerResult
-from celery.utils.time import parse_iso8601
try:
from urllib.error import HTTPError
@@ -889,11 +888,11 @@ def assert_next_task_data_equal(self, consumer, presult, task_name,
assert task_headers['task'] == task_name
if test_eta:
assert isinstance(task_headers.get('eta'), str)
- to_datetime = parse_iso8601(task_headers.get('eta'))
+ to_datetime = datetime.fromisoformat(task_headers.get('eta'))
assert isinstance(to_datetime, datetime)
if test_expires:
assert isinstance(task_headers.get('expires'), str)
- to_datetime = parse_iso8601(task_headers.get('expires'))
+ to_datetime = datetime.fromisoformat(task_headers.get('expires'))
assert isinstance(to_datetime, datetime)
properties = properties or {}
for arg_name, arg_value in properties.items():
| Deprecate iso8601 module
# Checklist
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Enhancement%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical enhancement to an existing feature.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22Issue+Type%3A+Enhancement%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed enhancements.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/main)
to find out if the same enhancement was already implemented in the main branch.
- [x] I have included all related issues and possible duplicate issues in this issue
(If there are none, check this box anyway).
## Related Issues and Possible Duplicates
#### Related Issues
- None
#### Possible Duplicates
- None
# Brief Summary
`celery.utils.iso8601` is a historic module whose `parse_iso8601()` can be replaced by `datetime.datetime.fromisoformat()` now that only Python 3.7 and above are supported.
I suggest deprecating this module and using `datetime.datetime.fromisoformat()` internally.
# Design
## Architectural Considerations
None
## Proposed Behavior
- the `iso8601` module stays usable before v6.0.0, but with a deprecation warning added
- remove the `iso8601` module in v6.0.0
- replace all usage of `parse_iso8601()` in the library with `datetime.datetime.fromisoformat()`, keeping compatibility with Python 3.7 through 3.11 (see the sketch below)
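One wrinkle with that last point: before Python 3.11, `datetime.fromisoformat()` rejects the common trailing `Z` UTC designator, so a small normalization shim may be needed; a hedged sketch:
```python
from datetime import datetime


def parse_iso8601_compat(s: str) -> datetime:
    # fromisoformat() only accepts a trailing 'Z' from Python 3.11 on,
    # so rewrite the UTC suffix into an explicit offset for 3.7-3.10.
    if s.endswith('Z'):
        s = s[:-1] + '+00:00'
    return datetime.fromisoformat(s)
```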
## Proposed UI/UX
N/A
## Diagrams
N/A
## Alternatives
None
| Hey @wongcht :wave:,
Thank you for opening an issue. We will get back to you as soon as we can.
Also, check out our [Open Collective](https://opencollective.com/celery) and consider backing us - every little helps!
We also offer priority support for our sponsors.
If you require immediate assistance please consider sponsoring us.
If that is the case, let's do it. | 2023-03-02T17:16:26 |
celery/celery | 8,143 | celery__celery-8143 | [
"8142"
] | a80da3965fefcf9c7638c0a264314cd194a71d1f | diff --git a/celery/app/base.py b/celery/app/base.py
--- a/celery/app/base.py
+++ b/celery/app/base.py
@@ -1003,7 +1003,8 @@ def _load_config(self):
# load lazy periodic tasks
pending_beat = self._pending_periodic_tasks
while pending_beat:
- self._add_periodic_task(*pending_beat.popleft())
+ periodic_task_args, periodic_task_kwargs = pending_beat.popleft()
+ self._add_periodic_task(*periodic_task_args, **periodic_task_kwargs)
self.on_after_configure.send(sender=self, source=self._conf)
return self._conf
@@ -1023,12 +1024,19 @@ def signature(self, *args, **kwargs):
def add_periodic_task(self, schedule, sig,
args=(), kwargs=(), name=None, **opts):
+ """
+ Add a periodic task to beat schedule.
+
+ Celery beat store tasks based on `sig` or `name` if provided. Adding the
+ same signature twice make the second task override the first one. To
+ avoid the override, use distinct `name` for them.
+ """
key, entry = self._sig_to_periodic_task_entry(
schedule, sig, args, kwargs, name, **opts)
if self.configured:
- self._add_periodic_task(key, entry)
+ self._add_periodic_task(key, entry, name=name)
else:
- self._pending_periodic_tasks.append((key, entry))
+ self._pending_periodic_tasks.append([(key, entry), {"name": name}])
return key
def _sig_to_periodic_task_entry(self, schedule, sig,
@@ -1045,7 +1053,13 @@ def _sig_to_periodic_task_entry(self, schedule, sig,
'options': dict(sig.options, **opts),
}
- def _add_periodic_task(self, key, entry):
+ def _add_periodic_task(self, key, entry, name=None):
+ if name is None and key in self._conf.beat_schedule:
+ logger.warning(
+ f"Periodic task key='{key}' shadowed a previous unnamed periodic task."
+ " Pass a name kwarg to add_periodic_task to silence this warning."
+ )
+
self._conf.beat_schedule[key] = entry
def create_task_cls(self):
| diff --git a/t/unit/app/test_app.py b/t/unit/app/test_app.py
--- a/t/unit/app/test_app.py
+++ b/t/unit/app/test_app.py
@@ -916,6 +916,33 @@ def add(x, y):
assert 'add1' in self.app.conf.beat_schedule
assert 'add2' in self.app.conf.beat_schedule
+ def test_add_periodic_task_expected_override(self):
+
+ @self.app.task
+ def add(x, y):
+ pass
+ sig = add.s(2, 2)
+ self.app.add_periodic_task(10, sig, name='add1', expires=3)
+ self.app.add_periodic_task(20, sig, name='add1', expires=3)
+ assert 'add1' in self.app.conf.beat_schedule
+ assert len(self.app.conf.beat_schedule) == 1
+
+ def test_add_periodic_task_unexpected_override(self, caplog):
+
+ @self.app.task
+ def add(x, y):
+ pass
+ sig = add.s(2, 2)
+ self.app.add_periodic_task(10, sig, expires=3)
+ self.app.add_periodic_task(20, sig, expires=3)
+
+ assert len(self.app.conf.beat_schedule) == 1
+ assert caplog.records[0].message == (
+ "Periodic task key='t.unit.app.test_app.add(2, 2)' shadowed a"
+ " previous unnamed periodic task. Pass a name kwarg to"
+ " add_periodic_task to silence this warning."
+ )
+
@pytest.mark.masked_modules('multiprocessing.util')
def test_pool_no_multiprocessing(self, mask_modules):
pool = self.app.pool
| Unnamed add_periodic_task unexpectedly overrides another periodic task
# Checklist
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Enhancement%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical enhancement to an existing feature.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22Issue+Type%3A+Enhancement%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed enhancements.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/main)
to find out if the same enhancement was already implemented in the main branch.
- [x] I have included all related issues and possible duplicate issues in this issue
(If there are none, check this box anyway).
## Related Issues and Possible Duplicates
#### Related Issues
- Somewhat related #2834
#### Possible Duplicates
- None
# Brief Summary
Adding the same periodic task twice with different schedules unexpectedly overrides the first one.
```python
sig = add.s(2, 2)
app.add_periodic_task(10, sig)
app.add_periodic_task(20, sig)
assert len(app.conf.beat_schedule) == 1
```
# Design
## Architectural Considerations
None
## Proposed Behavior
Now I understand that I can just set a name explicitly to avoid this behavior, but that is not clear in the docs, and it looks like a runtime warning or failure would improve the developer experience. I tried that in #8143; see the sketch below.
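For reference, a short sketch of the naming workaround (and what the docstring added in the patch above now spells out); the task and the names are illustrative:
```python
sig = add.s(2, 2)
# Distinct names give distinct beat_schedule keys; without them the
# second call silently replaces the first entry.
app.add_periodic_task(10, sig, name='add-every-10s')
app.add_periodic_task(20, sig, name='add-every-20s')
assert len(app.conf.beat_schedule) == 2
```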
## Proposed UI/UX
## Diagrams
N/A
## Alternatives
None
| 2023-03-21T14:37:59 |
|
celery/celery | 8,152 | celery__celery-8152 | [
"8151"
] | ab34d34fecf0becc8f2b578fe769eefb74110ace | diff --git a/celery/bin/worker.py b/celery/bin/worker.py
--- a/celery/bin/worker.py
+++ b/celery/bin/worker.py
@@ -300,8 +300,11 @@ def worker(ctx, hostname=None, pool_cls=None, app=None, uid=None, gid=None,
**kwargs):
"""Start worker instance.
+ \b
Examples
--------
+
+ \b
$ celery --app=proj worker -l INFO
$ celery -A proj worker -l INFO -Q hipri,lopri
$ celery -A proj worker --concurrency=4
| CLI help output: avoid click text rewrapping
# Checklist
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Enhancement%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical enhancement to an existing feature.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22Issue+Type%3A+Enhancement%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed enhancements.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/main)
to find out if the same enhancement was already implemented in the
main branch.
- [x] I have included all related issues and possible duplicate issues in this issue
(If there are none, check this box anyway).
# Brief Summary
The command `celery worker --help` gives
```bash
Usage: celery worker [OPTIONS]
Start worker instance.
Examples
--------
$ celery --app=proj worker -l INFO $ celery -A proj worker -l INFO -Q
hipri,lopri $ celery -A proj worker --concurrency=4 $ celery -A proj worker
--concurrency=1000 -P eventlet $ celery worker --autoscale=10,0
```
This is caused by the [text rewrapping of `click`](https://click.palletsprojects.com/en/8.1.x/documentation/#preventing-rewrapping). The solution is to add `\b` before every paragraph which results in
```bash
Usage: celery worker [OPTIONS]
Start worker instance.
Examples
--------
$ celery --app=proj worker -l INFO
$ celery -A proj worker -l INFO -Q hipri,lopri
$ celery -A proj worker --concurrency=4
$ celery -A proj worker --concurrency=1000 -P eventlet
$ celery worker --autoscale=10,0
```
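For context, a minimal sketch of how click's `\b` escape works in any command docstring (the command here is hypothetical, not Celery's code):
```python
import click

@click.command()
def mycmd():
    """Do something.

    \b
    Examples
    --------

    \b
    $ mycmd --help
    $ mycmd --verbose
    """

if __name__ == '__main__':
    mycmd()
```
Each paragraph preceded by a line containing only `\b` is printed verbatim instead of being rewrapped.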
| Hey @woutdenolf :wave:,
Thank you for opening an issue. We will get back to you as soon as we can.
Also, check out our [Open Collective](https://opencollective.com/celery) and consider backing us - every little helps!
We also offer priority support for our sponsors.
If you require immediate assistance please consider sponsoring us.
| 2023-03-23T13:13:10 |
|
celery/celery | 8,301 | celery__celery-8301 | [
"8288"
] | 6d7352eb1d0baa78252d96779b05c904d0b8a2e9 | diff --git a/celery/app/trace.py b/celery/app/trace.py
--- a/celery/app/trace.py
+++ b/celery/app/trace.py
@@ -250,8 +250,8 @@ def _log_error(self, task, req, einfo):
safe_repr(eobj),
safe_str(einfo.traceback),
einfo.exc_info,
- safe_repr(req.args),
- safe_repr(req.kwargs),
+ req.get('argsrepr') or safe_repr(req.args),
+ req.get('kwargsrepr') or safe_repr(req.kwargs),
)
policy = get_log_policy(task, einfo, eobj)
@@ -559,8 +559,8 @@ def trace_task(
'name': get_task_name(task_request, name),
'return_value': Rstr,
'runtime': T,
- 'args': safe_repr(args),
- 'kwargs': safe_repr(kwargs),
+ 'args': task_request.get('argsrepr') or safe_repr(args),
+ 'kwargs': task_request.get('kwargsrepr') or safe_repr(kwargs),
})
# -* POST *-
diff --git a/celery/worker/strategy.py b/celery/worker/strategy.py
--- a/celery/worker/strategy.py
+++ b/celery/worker/strategy.py
@@ -2,7 +2,6 @@
import logging
from kombu.asynchronous.timer import to_timestamp
-from kombu.utils.encoding import safe_repr
from celery import signals
from celery.app import trace as _app_trace
@@ -155,8 +154,9 @@ def task_message_handler(message, body, ack, reject, callbacks,
context = {
'id': req.id,
'name': req.name,
- 'args': safe_repr(req.args),
- 'kwargs': safe_repr(req.kwargs),
+ 'args': req.argsrepr,
+ 'kwargs': req.kwargsrepr,
+ 'eta': req.eta,
}
info(_app_trace.LOG_RECEIVED, context, extra={'data': context})
if (req.expires or req.id in revoked_tasks) and req.revoked():
| diff --git a/celery/contrib/testing/worker.py b/celery/contrib/testing/worker.py
--- a/celery/contrib/testing/worker.py
+++ b/celery/contrib/testing/worker.py
@@ -1,4 +1,5 @@
"""Embedded workers for integration tests."""
+import logging
import os
import threading
from contextlib import contextmanager
@@ -29,11 +30,48 @@
class TestWorkController(worker.WorkController):
"""Worker that can synchronize on being fully started."""
+ logger_queue = None
+
def __init__(self, *args, **kwargs):
# type: (*Any, **Any) -> None
self._on_started = threading.Event()
+
super().__init__(*args, **kwargs)
+ if self.pool_cls.__module__.split('.')[-1] == 'prefork':
+ from billiard import Queue
+ self.logger_queue = Queue()
+ self.pid = os.getpid()
+
+ try:
+ from tblib import pickling_support
+ pickling_support.install()
+ except ImportError:
+ pass
+
+ # collect logs from forked process.
+ # XXX: those logs will appear twice in the live log
+ self.queue_listener = logging.handlers.QueueListener(self.logger_queue, logging.getLogger())
+ self.queue_listener.start()
+
+ class QueueHandler(logging.handlers.QueueHandler):
+ def prepare(self, record):
+ record.from_queue = True
+ # Keep origin record.
+ return record
+
+ def handleError(self, record):
+ if logging.raiseExceptions:
+ raise
+
+ def start(self):
+ if self.logger_queue:
+ handler = self.QueueHandler(self.logger_queue)
+ handler.addFilter(lambda r: r.process != self.pid and not getattr(r, 'from_queue', False))
+ logger = logging.getLogger()
+ logger.addHandler(handler)
+ return super().start()
+
def on_consumer_ready(self, consumer):
# type: (celery.worker.consumer.Consumer) -> None
"""Callback called when the Consumer blueprint is fully started."""
diff --git a/t/integration/test_tasks.py b/t/integration/test_tasks.py
--- a/t/integration/test_tasks.py
+++ b/t/integration/test_tasks.py
@@ -1,3 +1,4 @@
+import logging
import time
from datetime import datetime, timedelta
from time import perf_counter, sleep
@@ -465,6 +466,57 @@ def test_properties(self, celery_session_worker):
assert res.get(timeout=TIMEOUT)["app_id"] == "1234"
+class test_trace_log_arguments:
+ args = "CUSTOM ARGS"
+ kwargs = "CUSTOM KWARGS"
+
+ def assert_trace_log(self, caplog, result, expected):
+ # wait for logs from worker
+ sleep(.01)
+
+ records = [(r.name, r.levelno, r.msg, r.data["args"], r.data["kwargs"])
+ for r in caplog.records
+ if r.name in {'celery.worker.strategy', 'celery.app.trace'}
+ if r.data["id"] == result.task_id
+ ]
+ assert records == [(*e, self.args, self.kwargs) for e in expected]
+
+ def call_task_with_reprs(self, task):
+ return task.set(argsrepr=self.args, kwargsrepr=self.kwargs).delay()
+
+ @flaky
+ def test_task_success(self, caplog):
+ result = self.call_task_with_reprs(add.s(2, 2))
+ value = result.get()
+ assert value == 4
+ assert result.successful() is True
+
+ self.assert_trace_log(caplog, result, [
+ ('celery.worker.strategy', logging.INFO,
+ celery.app.trace.LOG_RECEIVED,
+ ),
+ ('celery.app.trace', logging.INFO,
+ celery.app.trace.LOG_SUCCESS,
+ ),
+ ])
+
+ @flaky
+ def test_task_failed(self, caplog):
+ result = self.call_task_with_reprs(fail.s(2, 2))
+ with pytest.raises(ExpectedException):
+ result.get(timeout=5)
+ assert result.failed() is True
+
+ self.assert_trace_log(caplog, result, [
+ ('celery.worker.strategy', logging.INFO,
+ celery.app.trace.LOG_RECEIVED,
+ ),
+ ('celery.app.trace', logging.ERROR,
+ celery.app.trace.LOG_FAILURE,
+ ),
+ ])
+
+
class test_task_redis_result_backend:
@pytest.fixture()
def manager(self, manager):
diff --git a/t/unit/worker/test_strategy.py b/t/unit/worker/test_strategy.py
--- a/t/unit/worker/test_strategy.py
+++ b/t/unit/worker/test_strategy.py
@@ -117,7 +117,7 @@ def get_request(self):
if self.was_rate_limited():
return self.consumer._limit_task.call_args[0][0]
if self.was_scheduled():
- return self.consumer.timer.call_at.call_args[0][0]
+ return self.consumer.timer.call_at.call_args[0][2][0]
raise ValueError('request not handled')
@contextmanager
@@ -176,10 +176,23 @@ def test_log_task_received(self, caplog):
for record in caplog.records:
if record.msg == LOG_RECEIVED:
assert record.levelno == logging.INFO
+ assert record.args['eta'] is None
break
else:
raise ValueError("Expected message not in captured log records")
+ def test_log_eta_task_received(self, caplog):
+ caplog.set_level(logging.INFO, logger="celery.worker.strategy")
+ with self._context(self.add.s(2, 2).set(countdown=10)) as C:
+ C()
+ req = C.get_request()
+ for record in caplog.records:
+ if record.msg == LOG_RECEIVED:
+ assert record.args['eta'] == req.eta
+ break
+ else:
+ raise ValueError("Expected message not in captured log records")
+
def test_log_task_received_custom(self, caplog):
caplog.set_level(logging.INFO, logger="celery.worker.strategy")
custom_fmt = "CUSTOM MESSAGE"
@@ -191,7 +204,23 @@ def test_log_task_received_custom(self, caplog):
C()
for record in caplog.records:
if record.msg == custom_fmt:
- assert set(record.args) == {"id", "name", "kwargs", "args"}
+ assert set(record.args) == {"id", "name", "kwargs", "args", "eta"}
+ break
+ else:
+ raise ValueError("Expected message not in captured log records")
+
+ def test_log_task_arguments(self, caplog):
+ caplog.set_level(logging.INFO, logger="celery.worker.strategy")
+ args = "CUSTOM ARGS"
+ kwargs = "CUSTOM KWARGS"
+ with self._context(
+ self.add.s(2, 2).set(argsrepr=args, kwargsrepr=kwargs)
+ ) as C:
+ C()
+ for record in caplog.records:
+ if record.msg == LOG_RECEIVED:
+ assert record.args["args"] == args
+ assert record.args["kwargs"] == kwargs
break
else:
raise ValueError("Expected message not in captured log records")
| task args and kwargs in LOG_RECEIVED and LOG_SUCCESS should use argsrepr and kwargsrepr
<!--
Please fill this template entirely and do not erase parts of it.
We reserve the right to close without a response
bug reports which are incomplete.
-->
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
- [x] I have verified that the issue exists against the `main` branch of Celery.
- [ ] This has already been asked to the [discussions forum](https://github.com/celery/celery/discussions) first.
- [ ] I have read the relevant section in the
[contribution guide](https://docs.celeryq.dev/en/main/contributing.html#other-bugs)
on reporting bugs.
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/main)
to find out if the bug was already fixed in the main branch.
- [x] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
## Mandatory Debugging Information
- [ ] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
- [ ] I have verified that the issue exists against the `main` branch of Celery.
- [ ] I have included the contents of ``pip freeze`` in the issue.
- [ ] I have included all the versions of all the external dependencies required
to reproduce this bug.
## Optional Debugging Information
<!--
Try some of the below if you think they are relevant.
It will help us figure out the scope of the bug and how many users it affects.
-->
- [ ] I have tried reproducing the issue on more than one Python version
and/or implementation.
- [ ] I have tried reproducing the issue on more than one message broker and/or
result backend.
- [ ] I have tried reproducing the issue on more than one version of the message
broker and/or result backend.
- [ ] I have tried reproducing the issue on more than one operating system.
- [ ] I have tried reproducing the issue on more than one workers pool.
- [ ] I have tried reproducing the issue with autoscaling, retries,
ETA/Countdown & rate limits disabled.
- [ ] I have tried reproducing the issue after downgrading
and/or upgrading Celery and its dependencies.
## Related Issues and Possible Duplicates
<!--
Please make sure to search and mention any related issues
or possible duplicates to this issue as requested by the checklist above.
This may or may not include issues in other repositories that the Celery project
maintains or other repositories that are dependencies of Celery.
If you don't know how to mention issues, please refer to Github's documentation
on the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests
-->
#### Related Issues
- #6898
- #6885
#### Possible Duplicates
- None
## Environment & Settings
<!-- Include the contents of celery --version below -->
**Celery version**:
<!-- Include the output of celery -A proj report below -->
<details>
<summary><b><code>celery report</code> Output:</b></summary>
<p>
```
```
</p>
</details>
# Steps to Reproduce
## Required Dependencies
<!-- Please fill the required dependencies to reproduce this issue -->
- **Minimal Python Version**: N/A or Unknown
- **Minimal Celery Version**: N/A or Unknown
- **Minimal Kombu Version**: N/A or Unknown
- **Minimal Broker Version**: N/A or Unknown
- **Minimal Result Backend Version**: N/A or Unknown
- **Minimal OS and/or Kernel Version**: N/A or Unknown
- **Minimal Broker Client Version**: N/A or Unknown
- **Minimal Result Backend Client Version**: N/A or Unknown
### Python Packages
<!-- Please fill the contents of pip freeze below -->
<details>
<summary><b><code>pip freeze</code> Output:</b></summary>
<p>
```
```
</p>
</details>
### Other Dependencies
<!--
Please provide system dependencies, configuration files
and other dependency information if applicable
-->
<details>
<p>
N/A
</p>
</details>
## Minimally Reproducible Test Case
<!--
Please provide a reproducible test case.
Refer to the Reporting Bugs section in our contribution guide.
We prefer submitting test cases in the form of a PR to our integration test suite.
If you can provide one, please mention the PR number below.
If not, please attach the most minimal code example required to reproduce the issue below.
If the test case is too large, please include a link to a gist or a repository below.
-->
<details>
<p>
```python
```
</p>
</details>
# Expected Behavior
<!-- Describe in detail what you expect to happen -->
```python
'args': req.argsrepr,
'kwargs': req.kwargsrepr,
```
# Actual Behavior
<!--
Describe in detail what actually happened.
Please include a backtrace and surround it with triple backticks (```).
In addition, include the Celery daemon logs, the broker logs,
the result backend logs and system logs below if they will help us debug
the issue.
-->
```python
'args': safe_repr(args),
'kwargs': safe_repr(kwargs),
```
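For context, `argsrepr`/`kwargsrepr` are caller-supplied representations, typically used to keep sensitive arguments out of logs. A minimal sketch (the task and broker URL are assumptions):
```python
from celery import Celery

app = Celery('proj', broker='redis://localhost')

@app.task
def add(x, y):
    return x + y

# The worker's "received"/"succeeded"/"failed" log lines should show these
# caller-supplied representations, not repr() of the raw arguments.
add.apply_async((2, 2), argsrepr='SANITIZED ARGS', kwargsrepr='SANITIZED KWARGS')
```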
| Hey @zhu :wave:,
Thank you for opening an issue. We will get back to you as soon as we can.
Also, check out our [Open Collective](https://opencollective.com/celery) and consider backing us - every little helps!
We also offer priority support for our sponsors.
If you require immediate assistance please consider sponsoring us.
Can you contribute the fix with an appropriate test? | 2023-06-07T02:07:41 |
celery/celery | 8,312 | celery__celery-8312 | [
"8224",
"8224"
] | 25d6b50a84229598a2ecc3f865b9bbdabc8346b9 | diff --git a/celery/bin/worker.py b/celery/bin/worker.py
--- a/celery/bin/worker.py
+++ b/celery/bin/worker.py
@@ -166,8 +166,8 @@ def detach(path, argv, logfile=None, pidfile=None, uid=None,
type=LOG_LEVEL,
help_group="Worker Options",
help="Logging level.")
[email protected]('optimization',
- '-O',
[email protected]('-O',
+ '--optimization',
default='default',
cls=CeleryOption,
type=click.Choice(('default', 'fair')),
| Why has the CLI worker command flag `optimizations` changed from `--optimizations` in 5.*?
This question is referencing this code:
https://github.com/celery/celery/blob/3b1c768c39f641a70d3086bd5a1079a421f94344/celery/bin/worker.py#L169
```
@click.option('optimization',
'-O',
default='default',
cls=CeleryOption,
type=click.Choice(('default', 'fair')),
help_group="Worker Options",
help="Apply optimization profile.")
```
Why has the flag removed the double hyphen? It seems like a bug, but I assume there was a reason for it? The reason I'm asking is that this change breaks a bunch of code we have, so I'd like to understand the change.
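For context, in click a declaration string without dashes only names the parameter, so `'optimization', '-O'` exposes just `-O`, while `'-O', '--optimization'` exposes both spellings. A minimal sketch (hypothetical command, not Celery's code):
```python
import click

@click.command()
@click.option('optimization', '-O')  # parameter named 'optimization'; only -O is accepted
# With @click.option('-O', '--optimization'), both -O and --optimization work.
def worker(optimization):
    click.echo(optimization)

if __name__ == '__main__':
    worker()
```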
| Hey @stuart-bradley :wave:,
Thank you for opening an issue. We will get back to you as soon as we can.
Also, check out our [Open Collective](https://opencollective.com/celery) and consider backing us - every little helps!
We also offer priority support for our sponsors.
If you require immediate assistance please consider sponsoring us.
@thedrow was this an intended change? @stuart-bradley we actually moved the CLI from argparse to click from version 5 onwards, so that might have caused it, or it could be a typo
Hey team, has there been any movement on this?
We switched from argparse to the click library; maybe that could be the reason. | 2023-06-13T07:53:20 |
|
celery/celery | 8,338 | celery__celery-8338 | [
"8336"
] | 447caaebdb44542e5b78a1cc55f9a319006143a5 | diff --git a/celery/backends/database/models.py b/celery/backends/database/models.py
--- a/celery/backends/database/models.py
+++ b/celery/backends/database/models.py
@@ -25,6 +25,7 @@ class Task(ResultModelBase):
date_done = sa.Column(sa.DateTime, default=datetime.utcnow,
onupdate=datetime.utcnow, nullable=True)
traceback = sa.Column(sa.Text, nullable=True)
+ children = sa.Column(PickleType, nullable=True)
def __init__(self, task_id):
self.task_id = task_id
@@ -36,6 +37,7 @@ def to_dict(self):
'result': self.result,
'traceback': self.traceback,
'date_done': self.date_done,
+ 'children': self.children,
}
def __repr__(self):
| diff --git a/t/unit/backends/test_database.py b/t/unit/backends/test_database.py
--- a/t/unit/backends/test_database.py
+++ b/t/unit/backends/test_database.py
@@ -99,6 +99,7 @@ def test_missing_task_meta_is_dict_with_pending(self):
assert meta['task_id'] == 'xxx-does-not-exist-at-all'
assert meta['result'] is None
assert meta['traceback'] is None
+ assert meta['children'] is None
def test_mark_as_done(self):
tb = DatabaseBackend(self.uri, app=self.app)
| database backend does not store children
The [`Task`](https://github.com/celery/celery/blob/main/celery/backends/database/models.py#L20-L27) and [`TaskExtended`](https://github.com/celery/celery/blob/main/celery/backends/database/models.py#L57-L62) models for the `database` backend do not include `children`. This means that when using any `database` backend, [`AsyncResult.children`](https://github.com/celery/celery/blob/main/celery/result.py#L424) is always empty, even if a task does have children.
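A minimal sketch of the expectation (broker/backend URLs and task bodies are assumptions):
```python
from celery import Celery, group

app = Celery('proj', broker='redis://localhost',
             backend='db+sqlite:///results.sqlite')

@app.task
def child(i):
    return i

@app.task
def parent():
    # Subtasks launched from inside a task are attached as children of
    # this task's result in backends that store the 'children' field.
    group(child.s(i) for i in range(3)).delay()

# res = parent.delay(); res.get()
# res.children  -> expected a list of results, but always empty here
```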
| Hey @aaronst :wave:,
Thank you for opening an issue. We will get back to you as soon as we can.
Also, check out our [Open Collective](https://opencollective.com/celery) and consider backing us - every little helps!
We also offer priority support for our sponsors.
If you require immediate assistance please consider sponsoring us.
I can't recall any reason for not supporting it in the database backend. Would you mind doing some experimentation to add it?
It looks like [`sqlalchemy.types.ARRAY`](https://docs.sqlalchemy.org/en/20/core/type_basics.html#sqlalchemy.types.ARRAY) is only supported with PostgreSQL, so it would have to be `PickleType` to be fully compatible. | 2023-06-23T22:13:09 |
celery/celery | 8,374 | celery__celery-8374 | [
"8259"
] | 811ed96edbf7d7ae0681ae67ced63e6994a6e63a | diff --git a/celery/bin/purge.py b/celery/bin/purge.py
--- a/celery/bin/purge.py
+++ b/celery/bin/purge.py
@@ -5,7 +5,9 @@
from celery.utils import text
[email protected](cls=CeleryCommand)
[email protected](cls=CeleryCommand, context_settings={
+ 'allow_extra_args': True
+})
@click.option('-f',
'--force',
cls=CeleryOption,
@@ -26,7 +28,7 @@
help="Comma separated list of queues names not to purge.")
@click.pass_context
@handle_preload_options
-def purge(ctx, force, queues, exclude_queues):
+def purge(ctx, force, queues, exclude_queues, **kwargs):
"""Erase all messages from all known task queues.
Warning:
diff --git a/celery/bin/shell.py b/celery/bin/shell.py
--- a/celery/bin/shell.py
+++ b/celery/bin/shell.py
@@ -79,7 +79,9 @@ def _invoke_default_shell(locals):
_invoke_ipython_shell(locals)
[email protected](cls=CeleryCommand)
[email protected](cls=CeleryCommand, context_settings={
+ 'allow_extra_args': True
+})
@click.option('-I',
'--ipython',
is_flag=True,
@@ -117,7 +119,7 @@ def _invoke_default_shell(locals):
@handle_preload_options
def shell(ctx, ipython=False, bpython=False,
python=False, without_tasks=False, eventlet=False,
- gevent=False):
+ gevent=False, **kwargs):
"""Start shell session with convenient access to celery symbols.
The following symbols will be added to the main globals:
diff --git a/t/unit/bin/proj/pyramid_celery_app.py b/t/unit/bin/proj/pyramid_celery_app.py
new file mode 100644
--- /dev/null
+++ b/t/unit/bin/proj/pyramid_celery_app.py
@@ -0,0 +1,53 @@
+from unittest.mock import MagicMock, Mock
+
+from click import Option
+
+from celery import Celery
+
+# This module defines a mocked Celery application to replicate
+# the behavior of Pyramid-Celery's configuration by preload options.
+# Preload options should propagate to commands like shell and purge etc.
+#
+# The Pyramid-Celery project https://github.com/sontek/pyramid_celery
+# assumes that you want to configure Celery via an ini settings file.
+# The .ini files are the standard configuration file for Pyramid
+# applications.
+# See https://docs.pylonsproject.org/projects/pyramid/en/latest/quick_tutorial/ini.html
+#
+
+app = Celery(set_as_current=False)
+app.config_from_object("t.integration.test_worker_config")
+
+
+class PurgeMock:
+ def queue_purge(self, queue):
+ return 0
+
+
+class ConnMock:
+ default_channel = PurgeMock()
+ channel_errors = KeyError
+
+
+mock = Mock()
+mock.__enter__ = Mock(return_value=ConnMock())
+mock.__exit__ = Mock(return_value=False)
+
+app.connection_for_write = MagicMock(return_value=mock)
+
+# Below are taken from pyramid-celery's __init__.py
+# Ref: https://github.com/sontek/pyramid_celery/blob/cf8aa80980e42f7235ad361874d3c35e19963b60/pyramid_celery/__init__.py#L25-L36 # noqa: E501
+ini_option = Option(
+ (
+ "--ini",
+ "-i",
+ ),
+ help="Paste ini configuration file.",
+)
+
+ini_var_option = Option(
+ ("--ini-var",), help="Comma separated list of key=value to pass to ini."
+)
+
+app.user_options["preload"].add(ini_option)
+app.user_options["preload"].add(ini_var_option)
| diff --git a/t/unit/app/test_preload_cli.py b/t/unit/app/test_preload_cli.py
new file mode 100644
--- /dev/null
+++ b/t/unit/app/test_preload_cli.py
@@ -0,0 +1,63 @@
+from click.testing import CliRunner
+
+from celery.bin.celery import celery
+
+
+def test_preload_options(isolated_cli_runner: CliRunner):
+ # Verify commands like shell and purge can accept preload options.
+ # Projects like Pyramid-Celery's ini option should be valid preload
+ # options.
+
+ # TODO: Find a way to run these separate invoke and assertions
+ # such that order does not matter. Currently, running
+ # the "t.unit.bin.proj.pyramid_celery_app" first seems
+ # to result in cache or memoization of the option.
+ # As a result, the expected exception is not raised when
+ # the invoke on "t.unit.bin.proj.app" is run as a second
+ # call.
+
+ res_without_preload = isolated_cli_runner.invoke(
+ celery,
+ ["-A", "t.unit.bin.proj.app", "purge", "-f", "--ini", "some_ini.ini"],
+ catch_exceptions=True,
+ )
+
+ assert "No such option: --ini" in res_without_preload.stdout
+ assert res_without_preload.exit_code == 2
+
+ res_without_preload = isolated_cli_runner.invoke(
+ celery,
+ ["-A", "t.unit.bin.proj.app", "shell", "--ini", "some_ini.ini"],
+ catch_exceptions=True,
+ )
+
+ assert "No such option: --ini" in res_without_preload.stdout
+ assert res_without_preload.exit_code == 2
+
+ res_with_preload = isolated_cli_runner.invoke(
+ celery,
+ [
+ "-A",
+ "t.unit.bin.proj.pyramid_celery_app",
+ "purge",
+ "-f",
+ "--ini",
+ "some_ini.ini",
+ ],
+ catch_exceptions=True,
+ )
+
+ assert res_with_preload.exit_code == 0
+
+ res_with_preload = isolated_cli_runner.invoke(
+ celery,
+ [
+ "-A",
+ "t.unit.bin.proj.pyramid_celery_app",
+ "shell",
+ "--ini",
+ "some_ini.ini",
+ ],
+ catch_exceptions=True,
+ )
+ assert res_with_preload.exit_code == 0
| Set allow_extra_args to True in context_settings of shell and purge commands
<!--
Please fill this template entirely and do not erase parts of it.
We reserve the right to close without a response
enhancement requests which are incomplete.
-->
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Enhancement%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical enhancement to an existing feature.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22Issue+Type%3A+Enhancement%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed enhancements.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/main)
to find out if the same enhancement was already implemented in the
main branch.
- [x] I have included all related issues and possible duplicate issues in this issue
(If there are none, check this box anyway).
## Related Issues and Possible Duplicates
<!--
Please make sure to search and mention any related issues
or possible duplicates to this issue as requested by the checklist above.
This may or may not include issues in other repositories that the Celery project
maintains or other repositories that are dependencies of Celery.
If you don't know how to mention issues, please refer to Github's documentation
on the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests
-->
#### Related Issues
https://github.com/sontek/pyramid_celery/issues/101
#### Possible Duplicates
- None
# Brief Summary
<!--
Please include a brief summary of what the enhancement is
and why it is needed.
-->
The [pyramid-celery](https://github.com/sontek/pyramid_celery) package adds `ini` and `ini-var` options to the standard commands. These allow the standard Pyramid `ini` configuration files to configure Celery. With current Celery, however, only `worker`, `beat`, and `events` can be hooked into pyramid-celery's approach, because those commands are set with:
```
context_settings={
'allow_extra_args': True
}
```
See `beat` for example: https://github.com/celery/celery/blob/f3a2cf45a69b443cac6c79a5c85583c8bd91b0a3/celery/bin/beat.py#L11
The `shell` and `purge` commands for some reason do not have context_settings and so do not set `allow_extra_args`. See for example in `shell`, https://github.com/celery/celery/blob/f3a2cf45a69b443cac6c79a5c85583c8bd91b0a3/celery/bin/shell.py#LL82C1-L117C24
The consequence is that the `ini` command options cause errors, since these are extra and unknown args to these commands. Excluding the `ini` config causes `shell`, when it comes up, to not get the task registration configs, and `purge` isn't able to see the queues.
# Design
## Architectural Considerations
<!--
If more components other than Celery are involved,
describe them here and the effect it would have on Celery.
-->
[pyramid-celery](https://github.com/sontek/pyramid_celery) is on [pypi](https://pypi.org/project/pyramid-celery). It allows you to use Pyramid .ini files to configure Celery and have your Pyramid configuration available inside Celery tasks.
## Proposed Behavior
<!--
Please describe in detail how this enhancement is going to change the behavior
of an existing feature.
Describe what happens in case of failures as well if applicable.
-->
This should not affect Celery functionality, but will allow pyramid-celery to function as expected. See related issue tracked in pyramid-celery here https://github.com/sontek/pyramid_celery/issues/101
## Proposed UI/UX
<!--
Please provide your ideas for the API, CLI options,
configuration key names etc. that will be adjusted for this enhancement.
-->
Celery should add `context_settings` with `allow_extra_args`,
```
context_settings={
'allow_extra_args': True
}
```
to both the `shell` and `purge` commands. This will allow projects like pyramid-celery to hook in their own custom configuration options using the recommended (by Celery) `celery_app.user_options['preload'].add()` and/or `celery_app.user_options[option].add()` approaches. See [here](https://github.com/celery/celery/blob/e7b47a62d789557cf18ed0e56e2dfb99a51a62f7/docs/userguide/extending.rst#adding-new-command-line-options). This will also make `shell` and `purge` commands consistent with other commands like `worker`, `beat`, and `events` that already have these settings.
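For reference, a minimal sketch of registering a preload option the way pyramid-celery does (the option names follow its README; the rest is an assumption):
```python
from click import Option

from celery import Celery

app = Celery('proj')

# Preload options are parsed before any sub-command runs, so they are only
# usable if commands such as `shell` and `purge` tolerate the extra args.
app.user_options['preload'].add(Option(
    ('--ini', '-i'),
    help='Paste ini configuration file.',
))
```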
## Diagrams
<!--
Please include any diagrams that might be relevant
to the implementation of this enhancement such as:
* Class Diagrams
* Sequence Diagrams
* Activity Diagrams
You can drag and drop images into the text box to attach them to this issue.
-->
N/A
## Alternatives
<!--
If you have considered any alternative implementations
describe them in detail below.
-->
None
| Hey @dpdoughe :wave:,
Thank you for opening an issue. We will get back to you as soon as we can.
Also, check out our [Open Collective](https://opencollective.com/celery) and consider backing us - every little helps!
We also offer priority support for our sponsors.
If you require immediate assistance please consider sponsoring us.
The inability to configure and run `purge` or `shell` commands with pyramid_celery persists with Celery v5.3.1
I am open to contributions in this regard. My expectation is adding missing tests for the newly proposed/missing features. | 2023-07-14T06:23:15 |
celery/celery | 8,383 | celery__celery-8383 | [
"7715"
] | 78ab64eb70277f1cea9cc78bbfba087e577c7b7b | diff --git a/celery/app/task.py b/celery/app/task.py
--- a/celery/app/task.py
+++ b/celery/app/task.py
@@ -788,6 +788,7 @@ def apply(self, args=None, kwargs=None,
request = {
'id': task_id,
+ 'task': self.name,
'retries': retries,
'is_eager': True,
'logfile': logfile,
@@ -824,7 +825,7 @@ def apply(self, args=None, kwargs=None,
if isinstance(retval, Retry) and retval.sig is not None:
return retval.sig.apply(retries=retries + 1)
state = states.SUCCESS if ret.info is None else ret.info.state
- return EagerResult(task_id, retval, state, traceback=tb)
+ return EagerResult(task_id, self.name, retval, state, traceback=tb)
def AsyncResult(self, task_id, **kwargs):
"""Get AsyncResult instance for the specified task.
diff --git a/celery/result.py b/celery/result.py
--- a/celery/result.py
+++ b/celery/result.py
@@ -983,10 +983,11 @@ def restore(cls, id, backend=None, app=None):
class EagerResult(AsyncResult):
"""Result that we know has already been executed."""
- def __init__(self, id, ret_value, state, traceback=None):
+ def __init__(self, id, name, ret_value, state, traceback=None):
# pylint: disable=super-init-not-called
# XXX should really not be inheriting from AsyncResult
self.id = id
+ self._name = name
self._result = ret_value
self._state = state
self._traceback = traceback
@@ -1038,6 +1039,7 @@ def __repr__(self):
@property
def _cache(self):
return {
+ 'name': self._name,
'task_id': self.id,
'result': self._result,
'status': self._state,
| diff --git a/t/unit/tasks/test_chord.py b/t/unit/tasks/test_chord.py
--- a/t/unit/tasks/test_chord.py
+++ b/t/unit/tasks/test_chord.py
@@ -46,7 +46,7 @@ def join(self, propagate=True, **kwargs):
def _failed_join_report(self):
for value in self.value:
if isinstance(value, Exception):
- yield EagerResult('some_id', value, 'FAILURE')
+ yield EagerResult('some_id', 'test-task', value, 'FAILURE')
class TSRNoReport(TSR):
diff --git a/t/unit/tasks/test_result.py b/t/unit/tasks/test_result.py
--- a/t/unit/tasks/test_result.py
+++ b/t/unit/tasks/test_result.py
@@ -136,7 +136,7 @@ def test_reduce_direct(self):
def test_children(self):
x = self.app.AsyncResult('1')
- children = [EagerResult(str(i), i, states.SUCCESS) for i in range(3)]
+ children = [EagerResult(str(i), 'test-task', i, states.SUCCESS) for i in range(3)]
x._cache = {'children': children, 'status': states.SUCCESS}
x.backend = Mock()
assert x.children
@@ -147,12 +147,12 @@ def test_propagates_for_parent(self):
x.backend = Mock(name='backend')
x.backend.get_task_meta.return_value = {}
x.backend.wait_for_pending.return_value = 84
- x.parent = EagerResult(uuid(), KeyError('foo'), states.FAILURE)
+ x.parent = EagerResult(uuid(), 'test-task', KeyError('foo'), states.FAILURE)
with pytest.raises(KeyError):
x.get(propagate=True)
x.backend.wait_for_pending.assert_not_called()
- x.parent = EagerResult(uuid(), 42, states.SUCCESS)
+ x.parent = EagerResult(uuid(), 'test-task', 42, states.SUCCESS)
assert x.get(propagate=True) == 84
x.backend.wait_for_pending.assert_called()
@@ -172,7 +172,7 @@ def test_get_children(self):
def test_build_graph_get_leaf_collect(self):
x = self.app.AsyncResult('1')
x.backend._cache['1'] = {'status': states.SUCCESS, 'result': None}
- c = [EagerResult(str(i), i, states.SUCCESS) for i in range(3)]
+ c = [EagerResult(str(i), 'test-task', i, states.SUCCESS) for i in range(3)]
x.iterdeps = Mock()
x.iterdeps.return_value = (
(None, x),
@@ -194,7 +194,7 @@ def test_build_graph_get_leaf_collect(self):
def test_iterdeps(self):
x = self.app.AsyncResult('1')
- c = [EagerResult(str(i), i, states.SUCCESS) for i in range(3)]
+ c = [EagerResult(str(i), 'test-task', i, states.SUCCESS) for i in range(3)]
x._cache = {'status': states.SUCCESS, 'result': None, 'children': c}
for child in c:
child.backend = Mock()
@@ -945,13 +945,13 @@ def test_wait_raises(self):
assert res.wait(propagate=False)
def test_wait(self):
- res = EagerResult('x', 'x', states.RETRY)
+ res = EagerResult('x', 'test-task', 'x', states.RETRY)
res.wait()
assert res.state == states.RETRY
assert res.status == states.RETRY
def test_forget(self):
- res = EagerResult('x', 'x', states.RETRY)
+ res = EagerResult('x', 'test-task', 'x', states.RETRY)
res.forget()
def test_revoke(self):
@@ -962,7 +962,7 @@ def test_revoke(self):
def test_get_sync_subtask_option(self, task_join_will_block):
task_join_will_block.return_value = True
tid = uuid()
- res_subtask_async = EagerResult(tid, 'x', 'x', states.SUCCESS)
+ res_subtask_async = EagerResult(tid, 'test-task', 'x', 'x', states.SUCCESS)
with pytest.raises(RuntimeError):
res_subtask_async.get()
res_subtask_async.get(disable_sync_subtasks=False)
| EagerResult doesn't seem to populate name
Sometimes I run my tasks with `CELERY_TASK_ALWAYS_EAGER` to aid debugging; it seems like the `name` property of `EagerResult` isn't populated, which makes this sort of investigation trickier.
I have some code I use to list my tasks in a Django app, and part of this is to grab the task name.
When running eagerly, the EagerResult task status is SUCCESS, so I would have expected `name` to be available at this point.
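A minimal sketch of what that looks like (the task itself is hypothetical):
```python
from celery import Celery

app = Celery('proj')
app.conf.task_always_eager = True  # same effect as CELERY_TASK_ALWAYS_EAGER

@app.task
def ping():
    return 'pong'

res = ping.delay()              # returns an EagerResult
assert res.status == 'SUCCESS'
print(res.name)                 # expected the task name, but it is None
```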
| Hey @stuaxo :wave:,
Thank you for opening an issue. We will get back to you as soon as we can.
Also, check out our [Open Collective](https://opencollective.com/celery) and consider backing us - every little helps!
We also offer priority support for our sponsors.
If you require immediate assistance please consider sponsoring us.
do you have any failing test/implementation detail in mind to share?
I'll see what I can find; I'm no longer working at the organisation* I did that project for, but it is open source so I still have that work and can share it :)
*I'm a contractor and move from place to place.
BTW, I am a contractor too
I really wish that every contract had a budget to put towards open source projects they use - I did raise this at the last place, but probably not in the right places.
The furthest I've got is submitting patches to projects used. | 2023-07-20T23:55:58 |
celery/celery | 8,427 | celery__celery-8427 | [
"8421"
] | 2cde29d9fb6a8f8f805bec5d97b36bc930bcb52f | diff --git a/celery/canvas.py b/celery/canvas.py
--- a/celery/canvas.py
+++ b/celery/canvas.py
@@ -1704,7 +1704,7 @@ def _prepared(self, tasks, partial_args, group_id, root_id, app,
generator: A generator for the unrolled group tasks.
The generator yields tuples of the form ``(task, AsyncResult, group_id)``.
"""
- for task in tasks:
+ for index, task in enumerate(tasks):
if isinstance(task, CallableSignature):
# local sigs are always of type Signature, and we
# clone them to make sure we don't modify the originals.
@@ -1721,7 +1721,7 @@ def _prepared(self, tasks, partial_args, group_id, root_id, app,
else:
if partial_args and not task.immutable:
task.args = tuple(partial_args) + tuple(task.args)
- yield task, task.freeze(group_id=group_id, root_id=root_id), group_id
+ yield task, task.freeze(group_id=group_id, root_id=root_id, group_index=index), group_id
def _apply_tasks(self, tasks, producer=None, app=None, p=None,
add_to_parent=None, chord=None,
| diff --git a/t/integration/test_canvas.py b/t/integration/test_canvas.py
--- a/t/integration/test_canvas.py
+++ b/t/integration/test_canvas.py
@@ -1519,6 +1519,18 @@ def apply_chord_incr_with_sleep(self, *args, **kwargs):
result = c()
assert result.get(timeout=TIMEOUT) == 4
+ def test_chord_order(self, manager):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ inputs = [i for i in range(10)]
+
+ c = chord((identity.si(i) for i in inputs), identity.s())
+ result = c()
+ assert result.get() == inputs
+
@pytest.mark.xfail(reason="async_results aren't performed in async way")
def test_redis_subscribed_channels_leak(self, manager):
if not manager.app.conf.result_backend.startswith('redis'):
| [5.3.0] Task results order is not preserved anymore
# Checklist
- [x] I have verified that the issue exists against the `main` branch of Celery.
- [x] This has already been asked to the [discussions forum](https://github.com/celery/celery/discussions) first.
- [x] I have read the relevant section in the
[contribution guide](https://docs.celeryq.dev/en/main/contributing.html#other-bugs)
on reporting bugs.
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/main)
to find out if the bug was already fixed in the main branch.
- [x] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
## Mandatory Debugging Information
- [x] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
- [x] I have verified that the issue exists against the `main` branch of Celery.
- [x] I have included the contents of ``pip freeze`` in the issue.
- [x] I have included all the versions of all the external dependencies required
to reproduce this bug.
## Optional Debugging Information
- [x] I have tried reproducing the issue on more than one Python version
and/or implementation.
- [ ] I have tried reproducing the issue on more than one message broker and/or
result backend.
- [x] I have tried reproducing the issue on more than one version of the message
broker and/or result backend.
- [ ] I have tried reproducing the issue on more than one operating system.
- [ ] I have tried reproducing the issue on more than one workers pool.
- [ ] I have tried reproducing the issue with autoscaling, retries,
ETA/Countdown & rate limits disabled.
- [x] I have tried reproducing the issue after downgrading
and/or upgrading Celery and its dependencies.
## Related Issues and Possible Duplicates
#### Related Issues
- https://github.com/celery/celery/pull/6218 (original implementation of order preservation in Redis)
- https://github.com/celery/celery/pull/7460 (introduction of the regression, in https://github.com/celery/celery/pull/7460/commits/563269a790e3b6273aa17cb7d72296b70f772109#diff-3a80ff45da16a11b96e26a63973d7d490187a68ddc1949e2dfd7fd090b208841R1254)
#### Possible Duplicates
- None
## Environment & Settings
<!-- Include the contents of celery --version below -->
**Celery version**: 5.3.0 (also reproducing on master, commit [2cde29d9fb6a8f8f805bec5d97b36bc930bcb52f](https://github.com/celery/celery/commit/2cde29d9fb6a8f8f805bec5d97b36bc930bcb52f))
<!-- Include the output of celery -A proj report below -->
<details>
<summary><b><code>celery report</code> Output:</b></summary>
<p>
```
software -> celery:5.3.1 (emerald-rush) kombu:5.3.1 py:3.10.12
billiard:4.1.0 py-amqp:5.1.1
platform -> system:Linux arch:64bit, ELF
kernel version:6.1.30-0-virt imp:CPython
loader -> celery.loaders.app.AppLoader
settings -> transport:amqp results:redis://redis:6379/
```
</p>
</details>
# Steps to Reproduce
## Required Dependencies
<!-- Please fill the required dependencies to reproduce this issue -->
- **Minimal Python Version**: 3.7.0
- **Minimal Celery Version**: 5.3.0 (or commit [9e324caaa6b175d8e51d3582378b78757e66a12d](https://github.com/celery/celery/commit/9e324caaa6b175d8e51d3582378b78757e66a12d) in the `master` branch)
- **Minimal Kombu Version**: tested with kombu 5.3.0
- **Minimal Broker Version**: tested with RabbitMQ 3.9.25
- **Minimal Result Backend Version**: tested with Redis 5.0.14
- **Minimal OS and/or Kernel Version**: tested on Docker aarch64
- **Minimal Broker Client Version**: tested with `amqp==5.1.1`
- **Minimal Result Backend Client Version**: tested with `redis-cli==4.6.0`
### Python Packages
<!-- Please fill the contents of pip freeze below -->
(ran `pip freeze` in the integration test Celery container)
<details>
<summary><b><code>pip freeze</code> Output:</b></summary>
<p>
```
alabaster==0.7.13
amqp==5.1.1
async-timeout==4.0.2
attrs==23.1.0
azure-core==1.28.0
azure-storage-blob==12.17.0
Babel==2.12.1
backports.zoneinfo==0.2.1
billiard==4.1.0
boto3==1.28.18
botocore==1.31.18
bump2version==1.0.1
bumpversion==0.6.0
cachetools==5.3.1
cassandra-driver==3.28.0
celery==5.3.1
certifi==2023.7.22
cffi==1.15.1
cfgv==3.3.1
chardet==5.2.0
charset-normalizer==3.2.0
click==8.1.6
click-didyoumean==0.3.0
click-plugins==1.1.1
click-repl==0.3.0
colorama==0.4.6
couchbase==4.1.6
coverage==7.2.7
cryptography==41.0.2
DateTime==5.2
distlib==0.3.7
dnspython==2.4.1
docutils==0.19
elasticsearch==7.17.9
ephem==4.1.4
eventlet==0.33.3
exceptiongroup==1.1.2
filelock==3.12.2
flake8==6.1.0
flake8-docstrings==1.7.0
flakeplus==1.1.0
future==0.18.3
geomet==0.2.1.post1
gevent==23.7.0
greenlet==2.0.2
identify==2.5.26
idna==3.4
imagesize==1.4.1
importlib-metadata==6.8.0
iniconfig==2.0.0
isodate==0.6.1
isort==5.12.0
Jinja2==3.1.2
jmespath==1.0.1
kombu==5.3.1
livereload==2.6.3
MarkupSafe==2.1.3
mccabe==0.7.0
mock==5.1.0
moto==4.1.14
msgpack==1.0.5
mypy==1.4.1
mypy-extensions==1.0.0
nodeenv==1.8.0
packaging==23.1
platformdirs==3.10.0
pluggy==1.2.0
pre-commit==3.3.3
prompt-toolkit==3.0.39
pyArango==2.0.2
pycodestyle==2.11.0
pycouchdb==1.14.2
pycparser==2.21
pycurl==7.45.2
pydocstyle==6.3.0
pydocumentdb==2.3.5
pyflakes==3.1.0
Pygments==2.15.1
pylibmc==1.6.3
pymongo==4.4.1
pyproject-api==1.5.3
pytest==7.4.0
pytest-celery==0.0.0
pytest-click==1.1.0
pytest-cov==4.1.0
pytest-github-actions-annotate-failures==0.2.0
pytest-order==1.1.0
pytest-rerunfailures==12.0
pytest-subtests==0.11.0
pytest-timeout==2.1.0
python-consul2==0.1.5
python-dateutil==2.8.2
python-memcached==1.59
pytz==2023.3
PyYAML==6.0.1
redis==4.6.0
requests==2.31.0
responses==0.23.3
s3transfer==0.6.1
six==1.16.0
snowballstemmer==2.2.0
softlayer-messaging==1.0.3
Sphinx==5.3.0
sphinx-autobuild==2021.3.14
sphinx-celery==2.0.0
sphinx-click==4.4.0
sphinx-testing==1.0.1
sphinx2rst==1.1.0
sphinxcontrib-applehelp==1.0.4
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
SQLAlchemy==2.0.19
tblib==2.0.0
tomli==2.0.1
tornado==6.3.2
tox==4.6.4
types-PyYAML==6.0.12.11
typing_extensions==4.7.1
tzdata==2023.3
Unipath==1.1
urllib3==1.26.16
vine==5.0.0
virtualenv==20.24.2
wcwidth==0.2.6
Werkzeug==2.3.6
xmltodict==0.13.0
zipp==3.16.2
zope.event==5.0
zope.interface==6.0
```
</p>
</details>
### Other Dependencies
<!--
Please provide system dependencies, configuration files
and other dependency information if applicable
-->
<details>
<p>
N/A
</p>
</details>
## Minimally Reproducible Test Case
- https://github.com/backmarket-oss/celery/pull/1 adds an integration test that validates that the results are in the same order as the tasks when using `chord`, and it ends up failing (based on the `master` branch)
- https://github.com/backmarket-oss/celery/pull/2 includes the same test, but forces the consumption of the `results` generator, effectively setting `group_index` and incidentally making the integration test pass (although it breaks another test)
# Expected Behavior
Results are received in the callback in the same order as the created tasks when using `chord`.
So if we have tasks `[1, 2, 3]` finishing in order `[3, 1, 2]`, we receive `[1, 2, 3]` in the callback.
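A minimal sketch of that expectation, mirroring the integration test added above (the broker/backend URLs are assumptions; a Redis result backend is assumed):
```python
from celery import Celery, chord

app = Celery('proj', broker='redis://localhost', backend='redis://localhost')

@app.task
def identity(x):
    return x

@app.task
def collect(results):
    return results

inputs = list(range(10))
result = chord((identity.si(i) for i in inputs), collect.s()).delay()
assert result.get() == inputs  # creation order, not completion order
```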
# Actual Behavior
Results are received in the callback in the order the tasks finish.
So if we have tasks `[1, 2, 3]` finishing in order `[3, 1, 2]`, we receive `[3, 1, 2]` in the callback.
# Investigation
As shown in https://github.com/backmarket-oss/celery/pull/2, it is possible for the callback to receive tasks in the correct order by forcing the `results` generator to be executed.
While this code's purpose is only to demonstrate the issue, digging a bit more shows that the generator is not consumed when it should be, and so `group_index` is never set; `group_index` is what the backend uses to order the results by the order in which the tasks were created.
Checking the history, it looks like the regression was introduced in https://github.com/celery/celery/pull/7460, and more exactly [here](https://github.com/celery/celery/pull/7460/commits/563269a790e3b6273aa17cb7d72296b70f772109#diff-3a80ff45da16a11b96e26a63973d7d490187a68ddc1949e2dfd7fd090b208841R1254).
While the `zip` itself was unnecessary, it actually forced [the generator](https://github.com/celery/celery/blob/2cde29d9fb6a8f8f805bec5d97b36bc930bcb52f/celery/canvas.py#L1862-L1869) to be executed, so even though removing this code looked innocent, it means that `group_index` is no longer set by executing the generator.
Note that I would be happy to provide a pull request to properly fix the regression, but I'm not sure about the best way to fix the issue.
I believe that we do not want to force the execution of the generator in this part of the code (which could explain why the commit breaks another test), but if you think this is the right approach, or know where the best place to fix this would be, I'd be happy to provide a pull request.
| 2023-08-07T15:51:01 |
|
celery/celery | 8,432 | celery__celery-8432 | [
"8431",
"8431"
] | 7b4c4c3938385a994c346f6fa80ce87f4efc0001 | diff --git a/celery/backends/mongodb.py b/celery/backends/mongodb.py
--- a/celery/backends/mongodb.py
+++ b/celery/backends/mongodb.py
@@ -182,7 +182,8 @@ def _store_result(self, task_id, result, state,
traceback=None, request=None, **kwargs):
"""Store return value and state of an executed task."""
meta = self._get_result_meta(result=self.encode(result), state=state,
- traceback=traceback, request=request)
+ traceback=traceback, request=request,
+ format_date=False)
# Add the _id for mongodb
meta['_id'] = task_id
| diff --git a/t/unit/backends/test_base.py b/t/unit/backends/test_base.py
--- a/t/unit/backends/test_base.py
+++ b/t/unit/backends/test_base.py
@@ -176,6 +176,30 @@ def test_get_result_meta_with_none(self):
assert meta['kwargs'] == kwargs
assert meta['queue'] == 'celery'
+ def test_get_result_meta_format_date(self):
+ import datetime
+ self.app.conf.result_extended = True
+ b1 = BaseBackend(self.app)
+ args = ['a', 'b']
+ kwargs = {'foo': 'bar'}
+
+ request = Context(args=args, kwargs=kwargs)
+ meta = b1._get_result_meta(result={'fizz': 'buzz'},
+ state=states.SUCCESS, traceback=None,
+ request=request, format_date=True)
+ assert isinstance(meta['date_done'], str)
+
+ self.app.conf.result_extended = True
+ b2 = BaseBackend(self.app)
+ args = ['a', 'b']
+ kwargs = {'foo': 'bar'}
+
+ request = Context(args=args, kwargs=kwargs)
+ meta = b2._get_result_meta(result={'fizz': 'buzz'},
+ state=states.SUCCESS, traceback=None,
+ request=request, format_date=False)
+ assert isinstance(meta['date_done'], datetime.datetime)
+
class test_BaseBackend_interface:
| Invalid format of 'date_done' field in celery.task_results with backend mongodb
<!--
Please fill this template entirely and do not erase parts of it.
We reserve the right to close without a response
bug reports which are incomplete.
-->
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
- [x] I have verified that the issue exists against the `main` branch of Celery.
- [ ] This has already been asked to the [discussions forum](https://github.com/celery/celery/discussions) first.
- [x] I have read the relevant section in the
[contribution guide](https://docs.celeryq.dev/en/main/contributing.html#other-bugs)
on reporting bugs.
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/main)
to find out if the bug was already fixed in the main branch.
- [x] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
## Mandatory Debugging Information
- [x] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
- [x] I have verified that the issue exists against the `main` branch of Celery.
- [x] I have included the contents of ``pip freeze`` in the issue.
- [x] I have included all the versions of all the external dependencies required
to reproduce this bug.
## Environment & Settings
<!-- Include the contents of celery --version below -->
**Celery version**: 5.3.1
<!-- Include the output of celery -A proj report below -->
<details>
<summary><b><code>celery -A test_celery_result report</code> Output:</b></summary>
<p>
```
software -> celery:5.3.1 (emerald-rush) kombu:5.3.1 py:3.8.16
billiard:4.1.0 redis:4.6.0
platform -> system:Linux arch:64bit, ELF
kernel version:5.19.0-46-generic imp:CPython
loader -> celery.loaders.app.AppLoader
settings -> transport:redis results:mongodb
[...]
CELERY_BROKER_TRANSPORT_OPTIONS: {
    'socket_keepalive': True, 'socket_keepalive_options': {4: 600, 5: 60, 6: 5}}
CELERY_BROKER_URL: 'redis://redis.local:7000/0'
CELERY_INCLUDE: ['test_celery_result.tasks']
CELERY_QUEUE_NAME: 'test_celery_result'
CELERY_REDIS: {
    'host': 'redis.local', 'port': 7000}
[...]
is_overridden: <bound method Settings.is_overridden of <Settings "test_celery_result.settings">>
deprecated_settings: None
task_default_queue: 'test_celery_result'
enable_utc: False
result_backend: 'mongodb'
result_expires: datetime.timedelta(seconds=15)
mongodb_backend_settings: {
    'database': '********',
    'host': ['mongo-replica'],
    'port': 27017,
    'taskmeta_collection': 'celery_task_result'}
beat_schedule: {
    'celery.backend_cleanup': { 'schedule': 60,
                                'task': 'celery.backend_cleanup'},
    'dummy_task': {'schedule': 15, 'task': 'dummy_task'}}
```
</p>
</details>
# Steps to Reproduce
## Required Dependencies
<!-- Please fill the required dependencies to reproduce this issue -->
- **Minimal Python Version**: 3.6 or higher
- **Minimal Celery Version**: 4.3.0 or higher
- **Minimal Kombu Version**: Unknown
- **Minimal Broker Version**: Unknown
- **Minimal Result Backend Version**: Mongo 4.4 or higher
- **Minimal OS and/or Kernel Version**: Unknown
- **Minimal Broker Client Version**: Unknown
- **Minimal Result Backend Client Version**: pymongo 3.14 or higher
### Python Packages
<!-- Please fill the contents of pip freeze below -->
<details>
<summary><b><code>pip freeze</code> Output:</b></summary>
<p>
```
amqp==5.1.1
asgiref==3.7.2
async-timeout==4.0.2
backports.zoneinfo==0.2.1
billiard==4.1.0
bleach==6.0.0
celery==5.3.1
certifi==2023.7.22
cffi==1.15.1
charset-normalizer==3.2.0
click==8.1.6
click-didyoumean==0.3.0
click-plugins==1.1.1
click-repl==0.3.0
cryptography==41.0.3
Django==3.2.20
django-cors-headers==4.2.0
django-environ==0.10.0
django-formset-js==0.5.0
django-jquery-js==3.1.1
django-redis-sessions==0.6.2
django-test-addons-adv==1.1.1
dnspython==2.4.1
idna==3.4
Jinja2==3.1.2
kombu==5.3.1
packaging==23.1
prompt-toolkit==3.0.39
pyasn1==0.5.0
pycparser==2.21
pyhcl==0.4.4
pymongo==4.4.1
python-dateutil==2.8.2
pytz==2022.1
PyYAML==6.0.1
redis==4.6.0
requests==2.31.0
sentinels==1.0.0
single-beat==0.6.3
six==1.16.0
sqlparse==0.4.4
types-PyYAML==6.0.12.11
typing_extensions==4.7.1
tzdata==2023.3
urllib3==1.26.16
vine==5.0.0
wcwidth==0.2.6
webencodings==0.5.1
```
</p>
</details>
## Minimally Reproducible Test Case
<!--
Please provide a reproducible test case.
Refer to the Reporting Bugs section in our contribution guide.
We prefer submitting test cases in the form of a PR to our integration test suite.
If you can provide one, please mention the PR number below.
If not, please attach the most minimal code example required to reproduce the issue below.
If the test case is too large, please include a link to a gist or a repository below.
-->
<details>
<p>
<ul>
<li>1. Set up a celery project with mongodb as backend</li>
<li>2. Set <pre>app.conf.result_expires = timedelta(seconds=60)</pre></li>
<li>2. Set up a scheduled task <pre>app.conf.beat_schedule = {
"dummy_task": {
"task": "dummy_task",
"schedule": 15
},
}</pre></li>
<li>3. Start celery worker and celery beat</li>
<li>4. Open shell on mongodb backend and see that db.task_result.count() never resets to 0</li>
</ul>
</p>
</details>
# Expected Behavior
`task_result` collection on mongo database shoud be cleaned every 60s according to `result_expires` configuration
# Actual Behavior
<!--
Describe in detail what actually happened.
Please include a backtrace and surround it with triple backticks (```).
In addition, include the Celery daemon logs, the broker logs,
the result backend logs and system logs below if they will help us debug
the issue.
-->
There is an issue with the format of the `date_done` field in the task_result collection. Task result meta is retrieved with the `_get_result_meta` method from `base.py`, whose `format_date` argument is set to `True` by default. The `date_done` field is therefore converted from a `datetime` object to a `str` and inserted as a string in the mongodb database.
So when the `cleanup()` method is called on `MongoBackend`, it compares the `date_done` field with a datetime object from `self.app.now()`, which will never match.
```python
self.collection.delete_many(
{'date_done': {'$lt': self.app.now() - self.expires_delta}},
)
# self.app.now() return datetime object while date_done is stored as string
```
```
> db.task_result.findOne()
{
"_id" : "f16bd459-b858-4ae8-afb5-1ceab0e50326",
"status" : "SUCCESS",
"result" : "\"SUCCESS\"",
"traceback" : null,
"children" : [ ],
"date_done" : "2023-08-08T09:03:34.974924" // should be ISODate("2023-08-08T09:03:34.9749Z")
}
```
A simple fix would be to set `format_date` to `False` when calling `self._get_result_meta` in `MongoBackend._store_result`.
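A quick pymongo sketch to observe the mismatch (the database/collection names follow the settings above and are otherwise assumptions):
```python
import datetime

from pymongo import MongoClient

client = MongoClient('mongo-replica', 27017)
coll = client['celery']['celery_task_result']

cutoff = datetime.datetime.utcnow() - datetime.timedelta(seconds=60)
# With date_done stored as a string this matches nothing;
# once it is stored as a real Date, expired results are selected.
print(coll.count_documents({'date_done': {'$lt': cutoff}}))
```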
| 2023-08-10T14:27:37 |
|
celery/celery | 8,446 | celery__celery-8446 | [
"8433"
] | 372a7a38c1dcf5f893e78ef034b864099fed35bb | diff --git a/celery/worker/consumer/consumer.py b/celery/worker/consumer/consumer.py
--- a/celery/worker/consumer/consumer.py
+++ b/celery/worker/consumer/consumer.py
@@ -153,6 +153,10 @@ class Consumer:
restart_count = -1 # first start is the same as a restart
+ #: This flag will be turned off after the first failed
+ #: connection attempt.
+ first_connection_attempt = True
+
class Blueprint(bootsteps.Blueprint):
"""Consumer blueprint."""
@@ -337,7 +341,8 @@ def start(self):
except recoverable_errors as exc:
# If we're not retrying connections, we need to properly shutdown or terminate
# the Celery main process instead of abruptly aborting the process without any cleanup.
- is_connection_loss_on_startup = self.restart_count == 0
+ is_connection_loss_on_startup = self.first_connection_attempt
+ self.first_connection_attempt = False
connection_retry_type = self._get_connection_retry_type(is_connection_loss_on_startup)
connection_retry = self.app.conf[connection_retry_type]
if not connection_retry:
@@ -488,13 +493,17 @@ def _error_handler(exc, interval, next_step=CONNECTION_RETRY_STEP):
# Remember that the connection is lazy, it won't establish
# until needed.
- # If broker_connection_retry_on_startup is not set, revert to broker_connection_retry
- # to determine whether connection retries are disabled.
# TODO: Rely only on broker_connection_retry_on_startup to determine whether connection retries are disabled.
# We will make the switch in Celery 6.0.
+ retry_disabled = False
+
if self.app.conf.broker_connection_retry_on_startup is None:
+ # If broker_connection_retry_on_startup is not set, revert to broker_connection_retry
+ # to determine whether connection retries are disabled.
+ retry_disabled = not self.app.conf.broker_connection_retry
+
warnings.warn(
CPendingDeprecationWarning(
f"The broker_connection_retry configuration setting will no longer determine\n"
@@ -502,16 +511,23 @@ def _error_handler(exc, interval, next_step=CONNECTION_RETRY_STEP):
f"If you wish to retain the existing behavior for retrying connections on startup,\n"
f"you should set broker_connection_retry_on_startup to {self.app.conf.broker_connection_retry}.")
)
+ else:
+ if self.first_connection_attempt:
+ retry_disabled = not self.app.conf.broker_connection_retry_on_startup
+ else:
+ retry_disabled = not self.app.conf.broker_connection_retry
- if not self.app.conf.broker_connection_retry and not self.app.conf.broker_connection_retry_on_startup:
+ if retry_disabled:
# Retry disabled, just call connect directly.
conn.connect()
+ self.first_connection_attempt = False
return conn
conn = conn.ensure_connection(
_error_handler, self.app.conf.broker_connection_max_retries,
callback=maybe_shutdown,
)
+ self.first_connection_attempt = False
return conn
def _flush_events(self):
| diff --git a/t/unit/worker/test_consumer.py b/t/unit/worker/test_consumer.py
--- a/t/unit/worker/test_consumer.py
+++ b/t/unit/worker/test_consumer.py
@@ -422,8 +422,11 @@ def test_cancel_long_running_tasks_on_connection_loss__warning(self):
@pytest.mark.parametrize("broker_connection_retry", [True, False])
@pytest.mark.parametrize("broker_connection_retry_on_startup", [None, False])
- def test_ensure_connected(self, subtests, broker_connection_retry, broker_connection_retry_on_startup):
+ @pytest.mark.parametrize("first_connection_attempt", [True, False])
+ def test_ensure_connected(self, subtests, broker_connection_retry, broker_connection_retry_on_startup,
+ first_connection_attempt):
c = self.get_consumer()
+ c.first_connection_attempt = first_connection_attempt
c.app.conf.broker_connection_retry_on_startup = broker_connection_retry_on_startup
c.app.conf.broker_connection_retry = broker_connection_retry
@@ -457,9 +460,7 @@ def test_start_raises_connection_error(self,
is_connection_loss_on_startup,
caplog, subtests):
c = self.get_consumer()
- # in order to reproduce the actual behavior: if this is the startup, then restart count has not been
- # incremented yet, and is therefore -1.
- c.restart_count = -1 if is_connection_loss_on_startup else 1
+ c.first_connection_attempt = True if is_connection_loss_on_startup else False
c.app.conf['broker_connection_retry'] = False
c.app.conf['broker_connection_retry_on_startup'] = broker_connection_retry_on_startup
c.blueprint.start.side_effect = ConnectionError()
| Flags broker_connection_retry_on_startup & broker_connection_retry aren’t reliable
<!--
Please fill this template entirely and do not erase parts of it.
We reserve the right to close without a response
bug reports which are incomplete.
-->
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
- [x] I have verified that the issue exists against the `main` branch of Celery.
- [ ] This has already been asked to the [discussions forum](https://github.com/celery/celery/discussions) first.
- [x] I have read the relevant section in the
[contribution guide](https://docs.celeryq.dev/en/main/contributing.html#other-bugs)
on reporting bugs.
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/main)
to find out if the bug was already fixed in the main branch.
- [x] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
## Mandatory Debugging Information
- [ ] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
- [x] I have verified that the issue exists against the `main` branch of Celery.
- [ ] I have included the contents of ``pip freeze`` in the issue.
- [x] I have included all the versions of all the external dependencies required
to reproduce this bug.
## Optional Debugging Information
<!--
Try some of the below if you think they are relevant.
It will help us figure out the scope of the bug and how many users it affects.
-->
- [x] I have tried reproducing the issue on more than one Python version
and/or implementation.
- [x] I have tried reproducing the issue on more than one message broker and/or
result backend.
- [ ] I have tried reproducing the issue on more than one version of the message
broker and/or result backend.
- [ ] I have tried reproducing the issue on more than one operating system.
- [ ] I have tried reproducing the issue on more than one workers pool.
- [ ] I have tried reproducing the issue with autoscaling, retries,
ETA/Countdown & rate limits disabled.
- [ ] I have tried reproducing the issue after downgrading
and/or upgrading Celery and its dependencies.
## Related Issues and Possible Duplicates
<!--
Please make sure to search and mention any related issues
or possible duplicates to this issue as requested by the checklist above.
This may or may not include issues in other repositories that the Celery project
maintains or other repositories that are dependencies of Celery.
If you don't know how to mention issues, please refer to Github's documentation
on the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests
-->
#### Related Issues
- None
#### Possible Duplicates
- None
## Environment & Settings
<!-- Include the contents of celery --version below -->
**Celery version**: 5.3.x
# Steps to Reproduce
This issue covers multiple cases that originate in a central bug.
The flag [broker_connection_retry_on_startup](https://docs.celeryq.dev/en/stable/userguide/configuration.html#broker-connection-retry-on-startup) uses https://github.com/celery/celery/blob/2cde29d9fb6a8f8f805bec5d97b36bc930bcb52f/celery/worker/consumer/consumer.py#L340 to determine whether the connection is happening at startup. This is unreliable (`restart_count` is incremented on every consumer restart, including reconnection retries, so it cannot distinguish the initial startup from later reconnects), which causes multiple use-cases to fail.
Since the implementation of this flag is intertwined with [broker_connection_retry](https://docs.celeryq.dev/en/stable/userguide/configuration.html#broker-connection-retry), that flag suffers from the same bugs.
## Required Dependencies
<!-- Please fill the required dependencies to reproduce this issue -->
- **Minimal Python Version**: 3.8
- **Minimal Celery Version**: 5.3
- **Minimal Kombu Version**: 5.3
- **Minimal Broker Version**: `latest` rabbitmq or `latest` redis
- **Minimal Result Backend Version**: N/A or Unknown
- **Minimal OS and/or Kernel Version**: N/A or Unknown
- **Minimal Broker Client Version**: N/A or Unknown
- **Minimal Result Backend Client Version**: N/A or Unknown
## Minimally Reproducible Test Case 1
<details>
<p>
1. Turn off the broker container.
2. set `broker_connection_retry_on_startup` True.
3. set `broker_connection_retry` False.
4. Run celery worker.
4.1 Wait for the connection retry 1/100…
5. Turn on the broker container.
5.1. Wait for connection to broker.
6. Turn off the broker container.
</p>
</details>
# Expected Behavior
Worker should shut down.
# Actual Behavior
Worker retries to connect.
## Minimally Reproducible Test Case 2
<details>
<p>
1. Turn off the broker container.
2. set `broker_connection_retry_on_startup` False.
3. set `broker_connection_retry` True.
4. Run celery worker.
</p>
</details>
# Expected Behavior
Worker should shut down.
# Actual Behavior
Worker retries to connect.
## Minimally Reproducible Test Case 3
<details>
<p>
1. Turn on the broker container.
2. set `broker_connection_retry_on_startup` True.
3. set `broker_connection_retry` False.
4. Run celery worker.
5. Turn off the broker container.
</p>
</details>
# Expected Behavior
Worker should shut down.
# Actual Behavior
Worker retries to connect.
## Minimally Reproducible Test Case 4
<details>
<p>
1. Turn on the broker container.
2. set `broker_connection_retry_on_startup` False.
3. set `broker_connection_retry` True.
4. Run celery worker.
5. Turn off the broker container.
</p>
</details>
# Expected Behavior
Worker retries to connect.
# Actual Behavior
Worker shuts down
# Potential Fix
To fix all of these cases, a potential fix is to add a flag to the consumer that determines this exact _startup_ condition, and to use it wherever `broker_connection_retry_on_startup` is consulted. That way, on startup the worker respects `broker_connection_retry_on_startup` and afterward `broker_connection_retry`, unless `broker_connection_retry_on_startup` is `None`, in which case `broker_connection_retry` determines the startup behavior as well.
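The patch above implements exactly this with a `first_connection_attempt` flag on the consumer. Distilled into a sketch (the `_retry_disabled` helper is hypothetical; the patch inlines this logic in `ensure_connected`):
```python
class Consumer:
    #: Turned off after the first connection attempt, successful or not.
    first_connection_attempt = True

    def _retry_disabled(self):  # hypothetical helper, for illustration only
        conf = self.app.conf
        if conf.broker_connection_retry_on_startup is None:
            # Setting unset: fall back to the legacy broker_connection_retry.
            return not conf.broker_connection_retry
        if self.first_connection_attempt:
            return not conf.broker_connection_retry_on_startup
        return not conf.broker_connection_retry
```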
| 2023-08-19T15:08:00 |
|
celery/celery | 8,462 | celery__celery-8462 | [
"8461"
] | 8ae0b229596cc8aeea4fb71020d9358a59338e08 | diff --git a/celery/bin/celery.py b/celery/bin/celery.py
--- a/celery/bin/celery.py
+++ b/celery/bin/celery.py
@@ -136,7 +136,8 @@ def convert(self, value, param, ctx):
cls=CeleryOption,
is_flag=True,
help_group="Global Options",
- help="Skip Django core checks on startup.")
+ help="Skip Django core checks on startup. Setting the SKIP_CHECKS environment "
+ "variable to any non-empty string will have the same effect.")
@click.pass_context
def celery(ctx, app, broker, result_backend, loader, config, workdir,
no_color, quiet, version, skip_checks):
@@ -158,7 +159,7 @@ def celery(ctx, app, broker, result_backend, loader, config, workdir,
if config:
os.environ['CELERY_CONFIG_MODULE'] = config
if skip_checks:
- os.environ['CELERY_SKIP_CHECKS'] = skip_checks
+ os.environ['CELERY_SKIP_CHECKS'] = 'true'
ctx.obj = CLIContext(app=app, no_color=no_color, workdir=workdir,
quiet=quiet)
| diff --git a/t/unit/bin/test_worker.py b/t/unit/bin/test_worker.py
--- a/t/unit/bin/test_worker.py
+++ b/t/unit/bin/test_worker.py
@@ -1,3 +1,6 @@
+import os
+from unittest.mock import patch
+
import pytest
from click.testing import CliRunner
@@ -18,3 +21,15 @@ def test_cli(isolated_cli_runner: CliRunner):
catch_exceptions=False
)
assert res.exit_code == 1, (res, res.stdout)
+
+
+def test_cli_skip_checks(isolated_cli_runner: CliRunner):
+ Logging._setup = True # To avoid hitting the logging sanity checks
+ with patch.dict(os.environ, clear=True):
+ res = isolated_cli_runner.invoke(
+ celery,
+ ["-A", "t.unit.bin.proj.app", "--skip-checks", "worker", "--pool", "solo"],
+ catch_exceptions=False,
+ )
+ assert res.exit_code == 1, (res, res.stdout)
+ assert os.environ["CELERY_SKIP_CHECKS"] == "true", "should set CELERY_SKIP_CHECKS"
diff --git a/t/unit/fixups/test_django.py b/t/unit/fixups/test_django.py
--- a/t/unit/fixups/test_django.py
+++ b/t/unit/fixups/test_django.py
@@ -272,7 +272,7 @@ def test_validate_models(self, patching, module):
f.django_setup.reset_mock()
run_checks.reset_mock()
- patching.setenv('CELERY_SKIP_CHECKS', True)
+ patching.setenv('CELERY_SKIP_CHECKS', 'true')
f.validate_models()
f.django_setup.assert_called_with()
run_checks.assert_not_called()
| Running worker with --skip-checks option fails with a TypeError
<!--
Please fill this template entirely and do not erase parts of it.
We reserve the right to close without a response
bug reports which are incomplete.
-->
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
- [x] I have verified that the issue exists against the `main` branch of Celery.
- [ ] This has already been asked to the [discussions forum](https://github.com/celery/celery/discussions) first.
- [x] I have read the relevant section in the
[contribution guide](https://docs.celeryq.dev/en/main/contributing.html#other-bugs)
on reporting bugs.
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/main)
to find out if the bug was already fixed in the main branch.
- [x] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
## Mandatory Debugging Information
- [x] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
- [x] I have verified that the issue exists against the `main` branch of Celery.
- [x] I have included the contents of ``pip freeze`` in the issue.
- [x] I have included all the versions of all the external dependencies required
to reproduce this bug.
## Optional Debugging Information
<!--
Try some of the below if you think they are relevant.
It will help us figure out the scope of the bug and how many users it affects.
-->
- [x] I have tried reproducing the issue on more than one Python version
and/or implementation.
- [ ] I have tried reproducing the issue on more than one message broker and/or
result backend.
- [ ] I have tried reproducing the issue on more than one version of the message
broker and/or result backend.
- [ ] I have tried reproducing the issue on more than one operating system.
- [ ] I have tried reproducing the issue on more than one workers pool.
- [ ] I have tried reproducing the issue with autoscaling, retries,
ETA/Countdown & rate limits disabled.
- [ ] I have tried reproducing the issue after downgrading
and/or upgrading Celery and its dependencies.
## Related Issues and Possible Duplicates
<!--
Please make sure to search and mention any related issues
or possible duplicates to this issue as requested by the checklist above.
This may or may not include issues in other repositories that the Celery project
maintains or other repositories that are dependencies of Celery.
If you don't know how to mention issues, please refer to Github's documentation
on the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests
-->
#### Related Issues
- #7581 (feature request that led to the `--skip-checks` option being added)
- #7859 (PR that added the option)
#### Possible Duplicates
- None
## Environment & Settings
<!-- Include the contents of celery --version below -->
**Celery version**: 5.3.1 (emerald-rush)
<!-- Include the output of celery -A proj report below -->
<details>
<summary><b><code>celery report</code> Output:</b></summary>
<p>
This is on the Docker dev container. The results are not substantially different from the system on which I encountered the failure, aside from a massive and mostly irrelevant Django settings dump.
```
developer@dc2346ef89d6:~/celery$ celery --config t.integration.test_worker_config report
software -> celery:5.3.1 (emerald-rush) kombu:5.3.1 py:3.8.18
billiard:4.1.0 py-amqp:5.1.1
platform -> system:Linux arch:64bit
kernel version:5.15.49-linuxkit-pr imp:CPython
loader -> celery.loaders.default.Loader
settings -> transport:amqp results:disabled
broker_connection_retry: False
broker_connection_retry_on_startup: False
broker_connection_timeout: 0
broker_url: 'amqp://guest:********@foobar:1234//'
worker_log_color: False
worker_redirect_stdouts: False
deprecated_settings: None
```
</p>
</details>
# Steps to Reproduce
Run a worker with the `--skip-checks` option or with `CELERY_SKIP_CHECKS=1` (or any non-empty value) in the environment.
## Required Dependencies
<!-- Please fill the required dependencies to reproduce this issue -->
- **Minimal Python Version**: N/A or Unknown
- **Minimal Celery Version**: 5.3.1 (emerald-rush)
- **Minimal Kombu Version**: N/A or Unknown
- **Minimal Broker Version**: N/A or Unknown
- **Minimal Result Backend Version**: N/A or Unknown
- **Minimal OS and/or Kernel Version**: N/A or Unknown
- **Minimal Broker Client Version**: N/A or Unknown
- **Minimal Result Backend Client Version**: N/A or Unknown
### Python Packages
<!-- Please fill the contents of pip freeze below -->
<details>
<summary><b><code>pip freeze</code> Output:</b></summary>
<p>
```
alabaster==0.7.13
amqp==5.1.1
async-timeout==4.0.3
attrs==23.1.0
azure-core==1.29.3
azure-storage-blob==12.17.0
Babel==2.12.1
backports.zoneinfo==0.2.1
billiard==4.1.0
boto3==1.28.36
botocore==1.31.36
bump2version==1.0.1
bumpversion==0.6.0
cachetools==5.3.1
cassandra-driver==3.28.0
celery==5.3.1
certifi==2023.7.22
cffi==1.15.1
cfgv==3.4.0
chardet==5.2.0
charset-normalizer==3.2.0
click==8.1.7
click-didyoumean==0.3.0
click-plugins==1.1.1
click-repl==0.3.0
colorama==0.4.6
couchbase==4.1.8
coverage==7.3.0
cryptography==41.0.3
DateTime==5.2
distlib==0.3.7
dnspython==2.4.2
docutils==0.19
elasticsearch==7.17.9
ephem==4.1.4
eventlet==0.33.3
exceptiongroup==1.1.3
filelock==3.12.3
flake8==6.1.0
flake8-docstrings==1.7.0
flakeplus==1.1.0
future==0.18.3
geomet==0.2.1.post1
gevent==23.7.0
greenlet==2.0.2
identify==2.5.27
idna==3.4
imagesize==1.4.1
importlib-metadata==6.8.0
iniconfig==2.0.0
isodate==0.6.1
isort==5.12.0
Jinja2==3.1.2
jmespath==1.0.1
kombu==5.3.1
livereload==2.6.3
MarkupSafe==2.1.3
mccabe==0.7.0
mock==5.1.0
moto==4.2.0
msgpack==1.0.5
mypy==1.5.0
mypy-extensions==1.0.0
nodeenv==1.8.0
packaging==23.1
platformdirs==3.10.0
pluggy==1.3.0
pre-commit==3.3.3
prompt-toolkit==3.0.39
pyArango==2.0.2
pycodestyle==2.11.0
pycouchdb==1.14.2
pycparser==2.21
pycurl==7.45.2
pydocstyle==6.3.0
pydocumentdb==2.3.5
pyflakes==3.1.0
Pygments==2.16.1
pylibmc==1.6.3
pymongo==4.5.0
pyproject-api==1.5.4
pytest==7.4.0
pytest-celery==0.0.0
pytest-click==1.1.0
pytest-cov==4.1.0
pytest-github-actions-annotate-failures==0.2.0
pytest-order==1.1.0
pytest-rerunfailures==12.0
pytest-subtests==0.11.0
pytest-timeout==2.1.0
python-consul2==0.1.5
python-dateutil==2.8.2
python-memcached==1.59
pytz==2023.3
PyYAML==6.0.1
redis==4.6.0
requests==2.31.0
responses==0.23.3
s3transfer==0.6.2
six==1.16.0
snowballstemmer==2.2.0
softlayer-messaging==1.0.3
Sphinx==5.3.0
sphinx-autobuild==2021.3.14
sphinx-celery==2.0.0
sphinx-click==4.4.0
sphinx-testing==1.0.1
sphinx2rst==1.1.0
sphinxcontrib-applehelp==1.0.4
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
SQLAlchemy==2.0.20
tblib==2.0.0
tomli==2.0.1
tornado==6.3.3
tox==4.10.0
types-PyYAML==6.0.12.11
typing_extensions==4.7.1
tzdata==2023.3
Unipath==1.1
urllib3==1.26.16
vine==5.0.0
virtualenv==20.24.3
wcwidth==0.2.6
Werkzeug==2.3.7
xmltodict==0.13.0
zipp==3.16.2
zope.event==5.0
zope.interface==6.0
```
</p>
</details>
### Other Dependencies
<!--
Please provide system dependencies, configuration files
and other dependency information if applicable
-->
<details>
<p>
N/A
</p>
</details>
## Minimally Reproducible Test Case
<!--
Please provide a reproducible test case.
Refer to the Reporting Bugs section in our contribution guide.
We prefer submitting test cases in the form of a PR to our integration test suite.
If you can provide one, please mention the PR number below.
If not, please attach the most minimal code example required to reproduce the issue below.
If the test case is too large, please include a link to a gist or a repository below.
-->
<details>
<p>
In a shell opened with `docker compose run --rm celery bash`, the following illustrates the failure.
```
developer@dc2346ef89d6:~/celery$ celery --skip-checks --config t.integration.test_worker_config report
Traceback (most recent call last):
File "/home/developer/.pyenv/versions/3.8.18/bin/celery", line 8, in <module>
sys.exit(main())
File "/home/developer/celery/celery/__main__.py", line 15, in main
sys.exit(_main())
File "/home/developer/celery/celery/bin/celery.py", line 235, in main
return celery(auto_envvar_prefix="CELERY")
File "/home/developer/.pyenv/versions/3.8.18/lib/python3.8/site-packages/click/core.py", line 1157, in __call__
return self.main(*args, **kwargs)
File "/home/developer/.pyenv/versions/3.8.18/lib/python3.8/site-packages/click/core.py", line 1078, in main
rv = self.invoke(ctx)
File "/home/developer/.pyenv/versions/3.8.18/lib/python3.8/site-packages/click/core.py", line 1685, in invoke
super().invoke(ctx)
File "/home/developer/.pyenv/versions/3.8.18/lib/python3.8/site-packages/click/core.py", line 1434, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/developer/.pyenv/versions/3.8.18/lib/python3.8/site-packages/click/core.py", line 783, in invoke
return __callback(*args, **kwargs)
File "/home/developer/.pyenv/versions/3.8.18/lib/python3.8/site-packages/click/decorators.py", line 33, in new_func
return f(get_current_context(), *args, **kwargs)
File "/home/developer/celery/celery/bin/celery.py", line 161, in celery
os.environ['CELERY_SKIP_CHECKS'] = skip_checks
File "/home/developer/.pyenv/versions/3.8.18/lib/python3.8/os.py", line 680, in __setitem__
value = self.encodevalue(value)
File "/home/developer/.pyenv/versions/3.8.18/lib/python3.8/os.py", line 750, in encode
raise TypeError("str expected, not %s" % type(value).__name__)
TypeError: str expected, not bool
```
Alternatively, for any non-empty value of `CELERY_SKIP_CHECKS`, same result:
```
developer@dc2346ef89d6:~/celery$ CELERY_SKIP_CHECKS=1 celery --config t.integration.test_worker_config worker
Traceback (most recent call last):
File "/home/developer/.pyenv/versions/3.8.18/bin/celery", line 8, in <module>
sys.exit(main())
File "/home/developer/celery/celery/__main__.py", line 15, in main
sys.exit(_main())
File "/home/developer/celery/celery/bin/celery.py", line 235, in main
return celery(auto_envvar_prefix="CELERY")
File "/home/developer/.pyenv/versions/3.8.18/lib/python3.8/site-packages/click/core.py", line 1157, in __call__
return self.main(*args, **kwargs)
File "/home/developer/.pyenv/versions/3.8.18/lib/python3.8/site-packages/click/core.py", line 1078, in main
rv = self.invoke(ctx)
File "/home/developer/.pyenv/versions/3.8.18/lib/python3.8/site-packages/click/core.py", line 1685, in invoke
super().invoke(ctx)
File "/home/developer/.pyenv/versions/3.8.18/lib/python3.8/site-packages/click/core.py", line 1434, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/developer/.pyenv/versions/3.8.18/lib/python3.8/site-packages/click/core.py", line 783, in invoke
return __callback(*args, **kwargs)
File "/home/developer/.pyenv/versions/3.8.18/lib/python3.8/site-packages/click/decorators.py", line 33, in new_func
return f(get_current_context(), *args, **kwargs)
File "/home/developer/celery/celery/bin/celery.py", line 161, in celery
os.environ['CELERY_SKIP_CHECKS'] = skip_checks
File "/home/developer/.pyenv/versions/3.8.18/lib/python3.8/os.py", line 680, in __setitem__
value = self.encodevalue(value)
File "/home/developer/.pyenv/versions/3.8.18/lib/python3.8/os.py", line 750, in encode
raise TypeError("str expected, not %s" % type(value).__name__)
TypeError: str expected, not bool
```
</p>
</details>
# Expected Behavior
<!-- Describe in detail what you expect to happen -->
Expected either setting `CELERY_SKIP_CHECKS=1` or passing the `--skip-checks` option to start the Celery worker with Django's initial checks disabled. In the context of the minimal test case, Celery should start up, show its welcome message, then exit with an error due to inability to contact the message broker.
# Actual Behavior
<!--
Describe in detail what actually happened.
Please include a backtrace and surround it with triple backticks (```).
In addition, include the Celery daemon logs, the broker logs,
the result backend logs and system logs below if they will help us debug
the issue.
-->
Celery immediately exits with a `TypeError`. See the minimally reproducible test case section for backtraces.
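The underlying failure is independent of Celery: `os.environ` only accepts string values, so assigning the boolean flag directly raises. A minimal demonstration:
```python
import os

try:
    os.environ['CELERY_SKIP_CHECKS'] = True   # what the CLI effectively did
except TypeError as exc:
    print(exc)                                # "str expected, not bool"

os.environ['CELERY_SKIP_CHECKS'] = 'true'     # the fix: always store a string
```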
| 2023-08-29T14:07:40 |
|
celery/celery | 8,463 | celery__celery-8463 | [
"8456"
] | af1d7a18ec98b32e70cc17e3e17ee82d17efbd14 | diff --git a/celery/canvas.py b/celery/canvas.py
--- a/celery/canvas.py
+++ b/celery/canvas.py
@@ -653,7 +653,7 @@ def stamp_links(self, visitor, append_stamps=False, **headers):
# Stamp all of the callbacks of this signature
headers = deepcopy(non_visitor_headers)
- for link in self.options.get('link', []) or []:
+ for link in maybe_list(self.options.get('link')) or []:
link = maybe_signature(link, app=self.app)
visitor_headers = None
if visitor is not None:
@@ -668,7 +668,7 @@ def stamp_links(self, visitor, append_stamps=False, **headers):
# Stamp all of the errbacks of this signature
headers = deepcopy(non_visitor_headers)
- for link in self.options.get('link_error', []) or []:
+ for link in maybe_list(self.options.get('link_error')) or []:
link = maybe_signature(link, app=self.app)
visitor_headers = None
if visitor is not None:
@@ -1016,9 +1016,9 @@ def unchain_tasks(self):
# Clone chain's tasks assigning signatures from link_error
# to each task and adding the chain's links to the last task.
tasks = [t.clone() for t in self.tasks]
- for sig in self.options.get('link', []):
+ for sig in maybe_list(self.options.get('link')) or []:
tasks[-1].link(sig)
- for sig in self.options.get('link_error', []):
+ for sig in maybe_list(self.options.get('link_error')) or []:
for task in tasks:
task.link_error(sig)
return tasks
@@ -2272,7 +2272,7 @@ def link_error(self, errback):
applied to the body.
"""
if self.app.conf.task_allow_error_cb_on_chord_header:
- for task in self.tasks:
+ for task in maybe_list(self.tasks) or []:
task.link_error(errback.clone(immutable=True))
else:
# Once this warning is removed, the whole method needs to be refactored to:
| diff --git a/t/integration/test_canvas.py b/t/integration/test_canvas.py
--- a/t/integration/test_canvas.py
+++ b/t/integration/test_canvas.py
@@ -2962,6 +2962,43 @@ def test_flag_allow_error_cb_on_chord_header_on_upgraded_chord(self, manager, su
# Cleanup
redis_connection.delete(errback_key)
+ def test_upgraded_chord_link_error_with_header_errback_enabled(self, manager, subtests):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ if not manager.app.conf.result_backend.startswith('redis'):
+ raise pytest.skip('Requires redis result backend.')
+ redis_connection = get_redis_connection()
+
+ manager.app.conf.task_allow_error_cb_on_chord_header = True
+
+ body_msg = 'chord body called'
+ body_key = 'echo_body'
+ body_sig = redis_echo.si(body_msg, redis_key=body_key)
+
+ errback_msg = 'errback called'
+ errback_key = 'echo_errback'
+ errback_sig = redis_echo.si(errback_msg, redis_key=errback_key)
+
+ redis_connection.delete(errback_key, body_key)
+
+ sig = chain(
+ identity.si(42),
+ group(
+ fail.si(),
+ fail.si(),
+ ),
+ body_sig,
+ ).on_error(errback_sig)
+
+ with subtests.test(msg='Error propagates from failure in header'):
+ with pytest.raises(ExpectedException):
+ sig.apply_async().get(timeout=TIMEOUT)
+
+ redis_connection.delete(errback_key, body_key)
+
class test_signature_serialization:
"""
@@ -3441,3 +3478,27 @@ def on_signature(self, sig, **headers) -> dict:
res = stamped_task.delay()
res.get(timeout=TIMEOUT)
assert assertion_result
+
+ def test_stamp_canvas_with_dictionary_link(self, manager, subtests):
+ class CustomStampingVisitor(StampingVisitor):
+ def on_signature(self, sig, **headers) -> dict:
+ return {"on_signature": 42}
+
+ with subtests.test("Stamp canvas with dictionary link"):
+ canvas = identity.si(42)
+ canvas.options["link"] = dict(identity.si(42))
+ canvas.stamp(visitor=CustomStampingVisitor())
+
+ def test_stamp_canvas_with_dictionary_link_error(self, manager, subtests):
+ class CustomStampingVisitor(StampingVisitor):
+ def on_signature(self, sig, **headers) -> dict:
+ return {"on_signature": 42}
+
+ with subtests.test("Stamp canvas with dictionary link error"):
+ canvas = fail.si()
+ canvas.options["link_error"] = dict(fail.si())
+ canvas.stamp(visitor=CustomStampingVisitor())
+
+ with subtests.test(msg='Expect canvas to fail'):
+ with pytest.raises(ExpectedException):
+ canvas.apply_async().get(timeout=TIMEOUT)
| `AttributeError` thrown when using `link_error`/`on_error` on a rewritten chain when `task_allow_error_cb_on_chord_header` is enabled
<!--
Please fill this template entirely and do not erase parts of it.
We reserve the right to close without a response
bug reports which are incomplete.
-->
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
- [x] I have verified that the issue exists against the `main` branch of Celery.
- [x] This has already been asked to the [discussions forum](https://github.com/celery/celery/discussions) first.
- [x] I have read the relevant section in the
[contribution guide](https://docs.celeryq.dev/en/main/contributing.html#other-bugs)
on reporting bugs.
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/main)
to find out if the bug was already fixed in the main branch.
- [x] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
## Mandatory Debugging Information
- [x] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
- [x] I have verified that the issue exists against the `main` branch of Celery.
- [x] I have included the contents of ``pip freeze`` in the issue.
- [x] I have included all the versions of all the external dependencies required
to reproduce this bug.
## Optional Debugging Information
<!--
Try some of the below if you think they are relevant.
It will help us figure out the scope of the bug and how many users it affects.
-->
- [ ] I have tried reproducing the issue on more than one Python version
and/or implementation.
- [x] I have tried reproducing the issue on more than one message broker and/or
result backend.
- [ ] I have tried reproducing the issue on more than one version of the message
broker and/or result backend.
- [x] I have tried reproducing the issue on more than one operating system.
- [ ] I have tried reproducing the issue on more than one workers pool.
- [ ] I have tried reproducing the issue with autoscaling, retries,
ETA/Countdown & rate limits disabled.
- [ ] I have tried reproducing the issue after downgrading
and/or upgrading Celery and its dependencies.
## Related Issues and Possible Duplicates
<!--
Please make sure to search and mention any related issues
or possible duplicates to this issue as requested by the checklist above.
This may or may not include issues in other repositories that the Celery project
maintains or other repositories that are dependencies of Celery.
If you don't know how to mention issues, please refer to Github's documentation
on the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests
-->
#### Related Issues
- This appears to be a similar class of issue as #4848 and #5265, since it covers some of the same ground
#### Possible Duplicates
- None
## Environment & Settings
<!-- Include the contents of celery --version below -->
**Celery version**: 5.3.1 (emerald-rush)
<!-- Include the output of celery -A proj report below -->
<details>
<summary><b><code>celery report</code> Output:</b></summary>
<p>
```
software -> celery:5.3.1 (emerald-rush) kombu:5.3.1 py:3.10.7
billiard:4.1.0 redis:5.0.0
platform -> system:Darwin arch:64bit
kernel version:22.6.0 imp:CPython
loader -> celery.loaders.app.AppLoader
settings -> transport:redis results:redis://localhost/
broker_url: 'redis://localhost:6379//'
result_backend: 'redis://localhost/'
deprecated_settings: None
task_allow_error_cb_on_chord_header: True
```
</p>
</details>
# Steps to Reproduce
## Required Dependencies
<!-- Please fill the required dependencies to reproduce this issue -->
- **Minimal Python Version**: 3.10.x
- **Minimal Celery Version**: 5.3.1
- **Minimal Kombu Version**: N/A
- **Minimal Broker Version**: N/A
- **Minimal Result Backend Version**: N/A
- **Minimal OS and/or Kernel Version**: N/A (we've reproduced with our own deployment on Linux, and locally on macOS)
- **Minimal Broker Client Version**: N/A
- **Minimal Result Backend Client Version**: N/A
### Python Packages
<!-- Please fill the contents of pip freeze below -->
<details>
<summary><b><code>pip freeze</code> Output:</b></summary>
<p>
```
amqp==5.1.1
async-timeout==4.0.3
billiard==4.1.0
celery @ git+https://github.com/celery/celery.git@8ae0b229596cc8aeea4fb71020d9358a59338e08
click==8.1.7
click-didyoumean==0.3.0
click-plugins==1.1.1
click-repl==0.3.0
kombu==5.3.1
prompt-toolkit==3.0.39
python-dateutil==2.8.2
redis==5.0.0
six==1.16.0
tzdata==2023.3
vine==5.0.0
wcwidth==0.2.6
```
</p>
</details>
### Other Dependencies
<!--
Please provide system dependencies, configuration files
and other dependency information if applicable
-->
<details>
<p>
N/A
</p>
</details>
## Minimally Reproducible Test Case
<!--
Please provide a reproducible test case.
Refer to the Reporting Bugs section in our contribution guide.
We prefer submitting test cases in the form of a PR to our integration test suite.
If you can provide one, please mention the PR number below.
If not, please attach the most minimal code example required to reproduce the issue below.
If the test case is too large, please include a link to a gist or a repository below.
-->
<details>
<p>
```python
from celery import Celery, chain, group, chord
app = Celery('tasks', broker='redis://localhost/', backend='redis://localhost/')
app.conf.task_allow_error_cb_on_chord_header = True
@app.task
def fake_work_unit(x):
raise Exception
@app.task
def nothing_work_unit():
return "hi"
@app.task
def fake_error_handler(*args):
print("oops")
def test():
f = app.signature(
"test_celery.nothing_work_unit",
immutable=True,
)
f2 = app.signature(
"test_celery.fake_error_handler",
)
# This formulation works correctly...
# chain(
# f.on_error(f2),
# group(
# app.signature(
# "test_celery.fake_work_unit",
# args=(1,),
# immutable=True,
# ),
# app.signature(
# "test_celery.fake_work_unit",
# args=(2,),
# immutable=True,
# ),
# ).on_error(f2),
# f.on_error(f2),
# ).apply_async()
# ...but not this one.
chain(
f,
group(
app.signature(
"test_celery.fake_work_unit",
args=(1,),
immutable=True,
),
app.signature(
"test_celery.fake_work_unit",
args=(2,),
immutable=True,
),
),
f,
).on_error(f2).apply_async()
```
Run `celery -A test_celery worker` and then in a different shell:
```
from test_celery import test
test()
```
Then comment out the second formulation and uncomment the first. Notice that it works as intended.
This test case also works if you disable `task_allow_error_cb_on_chord_header`, or remove the `on_error`/`link_error` call, but either will result in the error handler being invoked.
</p>
</details>
# Expected Behavior
<!-- Describe in detail what you expect to happen -->
Either formulation described should work. The second formulation should not be throwing `AttributeError`.
The intended result is:
* `test_celery.nothing_work_unit` succeeds.
* One of the `test_celery.fake_work_unit` tasks runs and raises an exception, causing `fake_error_handler` to be called.
# Actual Behavior
When trying to run the second formulation, I get an `AttributeError` and no task is ever run:
```
>>> from test_celery import test
>>> test()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/asteinborn/Ramp/misc/celery-test/test_celery.py", line 68, in test
).on_error(f2).apply_async()
File "/Users/asteinborn/Ramp/misc/celery-test/venv/lib/python3.10/site-packages/celery/canvas.py", line 1035, in apply_async
return self.run(args, kwargs, app=app, **(
File "/Users/asteinborn/Ramp/misc/celery-test/venv/lib/python3.10/site-packages/celery/canvas.py", line 1060, in run
tasks, results_from_prepare = self.prepare_steps(
File "/Users/asteinborn/Ramp/misc/celery-test/venv/lib/python3.10/site-packages/celery/canvas.py", line 1246, in prepare_steps
task.link_error(errback)
File "/Users/asteinborn/Ramp/misc/celery-test/venv/lib/python3.10/site-packages/celery/canvas.py", line 2276, in link_error
task.link_error(errback.clone(immutable=True))
AttributeError: 'str' object has no attribute 'link_error'
```
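The patch above guards these iteration points with `maybe_list` from `celery.utils.functional`. The crux is that a bare `Signature` is a `dict` subclass, so iterating it directly yields its string keys, which is exactly where `'str' object has no attribute 'link_error'` comes from. A small demonstration:
```python
from celery import Celery
from celery.utils.functional import maybe_list

app = Celery('tasks')

@app.task
def noop():
    pass

sig = noop.si()          # Signature subclasses dict
print(list(sig))         # ['task', 'args', 'kwargs', ...] -- string keys
print(maybe_list(sig))   # [sig] -- mappings count as scalars and get wrapped
```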
| Thank you for the informed report!
I'll check it out in the next few days. | 2023-08-30T12:51:32 |
celery/celery | 8,486 | celery__celery-8486 | [
"7715",
"8472"
] | 14892abbb8cf80d7abcf41f4a48c049d84f69f74 | diff --git a/celery/app/task.py b/celery/app/task.py
--- a/celery/app/task.py
+++ b/celery/app/task.py
@@ -788,6 +788,7 @@ def apply(self, args=None, kwargs=None,
request = {
'id': task_id,
+ 'task': self.name,
'retries': retries,
'is_eager': True,
'logfile': logfile,
@@ -824,7 +825,7 @@ def apply(self, args=None, kwargs=None,
if isinstance(retval, Retry) and retval.sig is not None:
return retval.sig.apply(retries=retries + 1)
state = states.SUCCESS if ret.info is None else ret.info.state
- return EagerResult(task_id, retval, state, traceback=tb)
+ return EagerResult(task_id, retval, state, traceback=tb, name=self.name)
def AsyncResult(self, task_id, **kwargs):
"""Get AsyncResult instance for the specified task.
diff --git a/celery/result.py b/celery/result.py
--- a/celery/result.py
+++ b/celery/result.py
@@ -983,13 +983,14 @@ def restore(cls, id, backend=None, app=None):
class EagerResult(AsyncResult):
"""Result that we know has already been executed."""
- def __init__(self, id, ret_value, state, traceback=None):
+ def __init__(self, id, ret_value, state, traceback=None, name=None):
# pylint: disable=super-init-not-called
# XXX should really not be inheriting from AsyncResult
self.id = id
self._result = ret_value
self._state = state
self._traceback = traceback
+ self._name = name
self.on_ready = promise()
self.on_ready(self)
@@ -1042,6 +1043,7 @@ def _cache(self):
'result': self._result,
'status': self._state,
'traceback': self._traceback,
+ 'name': self._name,
}
@property
| diff --git a/t/unit/tasks/test_result.py b/t/unit/tasks/test_result.py
--- a/t/unit/tasks/test_result.py
+++ b/t/unit/tasks/test_result.py
@@ -967,6 +967,13 @@ def test_get_sync_subtask_option(self, task_join_will_block):
res_subtask_async.get()
res_subtask_async.get(disable_sync_subtasks=False)
+ def test_populate_name(self):
+ res = EagerResult('x', 'x', states.SUCCESS, None, 'test_task')
+ assert res.name == 'test_task'
+
+ res = EagerResult('x', 'x', states.SUCCESS, name='test_task_named_argument')
+ assert res.name == 'test_task_named_argument'
+
class test_tuples:
diff --git a/t/unit/tasks/test_tasks.py b/t/unit/tasks/test_tasks.py
--- a/t/unit/tasks/test_tasks.py
+++ b/t/unit/tasks/test_tasks.py
@@ -1432,6 +1432,7 @@ def test_apply(self):
assert e.successful()
assert e.ready()
+ assert e.name == 't.unit.tasks.test_tasks.increment_counter'
assert repr(e).startswith('<EagerResult:')
f = self.raising.apply()
@@ -1441,6 +1442,21 @@ def test_apply(self):
with pytest.raises(KeyError):
f.get()
+ def test_apply_eager_populates_request_task(self):
+ task_to_apply = self.task_check_request_context
+ with patch.object(
+ task_to_apply.request_stack, "push",
+ wraps=task_to_apply.request_stack.push,
+ ) as mock_push:
+ task_to_apply.apply()
+
+ mock_push.assert_called_once()
+
+ request = mock_push.call_args[0][0]
+
+ assert request.is_eager is True
+ assert request.task == 't.unit.tasks.test_tasks.task_check_request_context'
+
def test_apply_simulates_delivery_info(self):
task_to_apply = self.task_check_request_context
with patch.object(
EagerResult doesn't seem to populate name
Sometimes I run my tasks with `CELERY_TASK_ALWAYS_EAGER` to aid debugging; it seems the `name` property of `EagerResult` isn't populated, which makes this sort of investigation trickier.
I have some code I use to list my tasks in a django app, and part of this is to grab the task name.
When running eagerly, the EagerResult task status is SUCCESS, so I would have expected `name` to be available at this point.
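For reference, a minimal eager setup that exhibits this (a sketch; the app and module names are placeholders):
```python
from celery import Celery

app = Celery('tasks')
app.conf.task_always_eager = True

@app.task
def add(x, y):
    return x + y

res = add.delay(2, 2)   # returns an EagerResult in eager mode
print(res.state)        # 'SUCCESS'
print(res.name)         # None before the fix; 'tasks.add' once apply() forwards it
```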
5.3.2/3 has BREAKING change on EagerResult
<!--
Please fill this template entirely and do not erase parts of it.
We reserve the right to close without a response
bug reports which are incomplete.
-->
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
- [x] I have verified that the issue exists against the `main` branch of Celery.
- [ ] This has already been asked to the [discussions forum](https://github.com/celery/celery/discussions) first.
- [ ] I have read the relevant section in the
[contribution guide](https://docs.celeryq.dev/en/main/contributing.html#other-bugs)
on reporting bugs.
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/main)
to find out if the bug was already fixed in the main branch.
- [ ] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
## Mandatory Debugging Information
- [ ] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
- [x] I have verified that the issue exists against the `main` branch of Celery.
- [ ] I have included the contents of ``pip freeze`` in the issue.
- [ ] I have included all the versions of all the external dependencies required
to reproduce this bug.
## Optional Debugging Information
<!--
Try some of the below if you think they are relevant.
It will help us figure out the scope of the bug and how many users it affects.
-->
- [ ] I have tried reproducing the issue on more than one Python version
and/or implementation.
- [ ] I have tried reproducing the issue on more than one message broker and/or
result backend.
- [ ] I have tried reproducing the issue on more than one version of the message
broker and/or result backend.
- [ ] I have tried reproducing the issue on more than one operating system.
- [ ] I have tried reproducing the issue on more than one workers pool.
- [ ] I have tried reproducing the issue with autoscaling, retries,
ETA/Countdown & rate limits disabled.
- [x] I have tried reproducing the issue after downgrading
and/or upgrading Celery and its dependencies.
## Related Issues and Possible Duplicates
<!--
Please make sure to search and mention any related issues
or possible duplicates to this issue as requested by the checklist above.
This may or may not include issues in other repositories that the Celery project
maintains or other repositories that are dependencies of Celery.
If you don't know how to mention issues, please refer to Github's documentation
on the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests
-->
#### Related Issues
- None
#### Possible Duplicates
- None
## Environment & Settings
<!-- Include the contents of celery --version below -->
**Celery version**: 5.3.3
<!-- Include the output of celery -A proj report below -->
<details>
<summary><b><code>celery report</code> Output:</b></summary>
<p>
```
```
</p>
</details>
# Steps to Reproduce
## Required Dependencies
<!-- Please fill the required dependencies to reproduce this issue -->
- **Minimal Python Version**: N/A or Unknown
- **Minimal Celery Version**: N/A or Unknown
- **Minimal Kombu Version**: N/A or Unknown
- **Minimal Broker Version**: N/A or Unknown
- **Minimal Result Backend Version**: N/A or Unknown
- **Minimal OS and/or Kernel Version**: N/A or Unknown
- **Minimal Broker Client Version**: N/A or Unknown
- **Minimal Result Backend Client Version**: N/A or Unknown
### Python Packages
<!-- Please fill the contents of pip freeze below -->
<details>
<summary><b><code>pip freeze</code> Output:</b></summary>
<p>
```
```
</p>
</details>
### Other Dependencies
<!--
Please provide system dependencies, configuration files
and other dependency information if applicable
-->
<details>
<p>
N/A
</p>
</details>
## Minimally Reproducible Test Case
<!--
Please provide a reproducible test case.
Refer to the Reporting Bugs section in our contribution guide.
We prefer submitting test cases in the form of a PR to our integration test suite.
If you can provide one, please mention the PR number below.
If not, please attach the most minimal code example required to reproduce the issue below.
If the test case is too large, please include a link to a gist or a repository below.
-->
<details>
<p>
```python
```
</p>
</details>
# Expected Behavior
<!-- Describe in detail what you expect to happen -->
A patch release should not introduce a breaking change.
# Actual Behavior
<!--
Describe in detail what actually happened.
Please include a backtrace and surround it with triple backticks (```).
In addition, include the Celery daemon logs, the broker logs,
the result backend logs and system logs below if they will help us debug
the issue.
-->
https://github.com/celery/celery/commit/1c363876147325a196c474e757e355c451a0cdff#diff-05689f277021b9af9d8314849c9d938db0f5a42e932169a116463ef91ae9af78R986
```
TypeError
EagerResult.__init__() missing 1 required positional argument: 'state'
```
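Under the signature restored by the patch above, old positional call sites keep working and the task name is an optional keyword. A sketch of direct construction, which is what third-party code such as test helpers tends to do:
```python
from celery.result import EagerResult

res = EagerResult('task-id', 42, 'SUCCESS')   # pre-5.3.2 call shape keeps working
named = EagerResult('task-id', 42, 'SUCCESS', name='tasks.add')
print(named.name)   # 'tasks.add'
```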
| Hey @stuaxo :wave:,
Thank you for opening an issue. We will get back to you as soon as we can.
Also, check out our [Open Collective](https://opencollective.com/celery) and consider backing us - every little helps!
We also offer priority support for our sponsors.
If you require immediate assistance please consider sponsoring us.
do you have any failing test/implementation detail in mind to share?
I'll see what I can find, I'm no longer working at the organisation* I did that project for, but it is open source so I still have that work and can share it :)
*I'm a contractor and move from place to place.
BTW, I am a contractor too
I really wish that every contract had a budget to put towards open source projects they use - I did raise this @ the last place, but probably not in the right places.
The furthest I've got is submitting patches to projects used.
| 2023-09-04T22:38:21 |
celery/celery | 8,489 | celery__celery-8489 | [
"6608"
] | b6a5bdb8b698dbe2a0848e34f76133f2950c5a82 | diff --git a/celery/bin/control.py b/celery/bin/control.py
--- a/celery/bin/control.py
+++ b/celery/bin/control.py
@@ -1,5 +1,6 @@
"""The ``celery control``, ``. inspect`` and ``. status`` programs."""
from functools import partial
+from typing import Literal
import click
from kombu.utils.json import dumps
@@ -39,18 +40,69 @@ def _consume_arguments(meta, method, args):
args[:] = args[i:]
-def _compile_arguments(action, args):
- meta = Panel.meta[action]
+def _compile_arguments(command, args):
+ meta = Panel.meta[command]
arguments = {}
if meta.args:
arguments.update({
- k: v for k, v in _consume_arguments(meta, action, args)
+ k: v for k, v in _consume_arguments(meta, command, args)
})
if meta.variadic:
arguments.update({meta.variadic: args})
return arguments
+_RemoteControlType = Literal['inspect', 'control']
+
+
+def _verify_command_name(type_: _RemoteControlType, command: str) -> None:
+ choices = _get_commands_of_type(type_)
+
+ if command not in choices:
+ command_listing = ", ".join(choices)
+ raise click.UsageError(
+ message=f'Command {command} not recognized. Available {type_} commands: {command_listing}',
+ )
+
+
+def _list_option(type_: _RemoteControlType):
+ def callback(ctx: click.Context, param, value) -> None:
+ if not value:
+ return
+ choices = _get_commands_of_type(type_)
+
+ formatter = click.HelpFormatter()
+
+ with formatter.section(f'{type_.capitalize()} Commands'):
+ command_list = []
+ for command_name, info in choices.items():
+ if info.signature:
+ command_preview = f'{command_name} {info.signature}'
+ else:
+ command_preview = command_name
+ command_list.append((command_preview, info.help))
+ formatter.write_dl(command_list)
+ ctx.obj.echo(formatter.getvalue(), nl=False)
+ ctx.exit()
+
+ return click.option(
+ '--list',
+ is_flag=True,
+ help=f'List available {type_} commands and exit.',
+ expose_value=False,
+ is_eager=True,
+ callback=callback,
+ )
+
+
+def _get_commands_of_type(type_: _RemoteControlType) -> dict:
+ command_name_info_pairs = [
+ (name, info) for name, info in Panel.meta.items()
+ if info.type == type_ and info.visible
+ ]
+ return dict(sorted(command_name_info_pairs))
+
+
@click.command(cls=CeleryCommand)
@click.option('-t',
'--timeout',
@@ -96,10 +148,8 @@ def status(ctx, timeout, destination, json, **kwargs):
@click.command(cls=CeleryCommand,
context_settings={'allow_extra_args': True})
[email protected]("action", type=click.Choice([
- name for name, info in Panel.meta.items()
- if info.type == 'inspect' and info.visible
-]))
[email protected]('command')
+@_list_option('inspect')
@click.option('-t',
'--timeout',
cls=CeleryOption,
@@ -121,19 +171,19 @@ def status(ctx, timeout, destination, json, **kwargs):
help='Use json as output format.')
@click.pass_context
@handle_preload_options
-def inspect(ctx, action, timeout, destination, json, **kwargs):
- """Inspect the worker at runtime.
+def inspect(ctx, command, timeout, destination, json, **kwargs):
+ """Inspect the workers by sending them the COMMAND inspect command.
Availability: RabbitMQ (AMQP) and Redis transports.
"""
+ _verify_command_name('inspect', command)
callback = None if json else partial(_say_remote_command_reply, ctx,
show_reply=True)
- arguments = _compile_arguments(action, ctx.args)
+ arguments = _compile_arguments(command, ctx.args)
inspect = ctx.obj.app.control.inspect(timeout=timeout,
destination=destination,
callback=callback)
- replies = inspect._request(action,
- **arguments)
+ replies = inspect._request(command, **arguments)
if not replies:
raise CeleryCommandException(
@@ -153,10 +203,8 @@ def inspect(ctx, action, timeout, destination, json, **kwargs):
@click.command(cls=CeleryCommand,
context_settings={'allow_extra_args': True})
[email protected]("action", type=click.Choice([
- name for name, info in Panel.meta.items()
- if info.type == 'control' and info.visible
-]))
[email protected]('command')
+@_list_option('control')
@click.option('-t',
'--timeout',
cls=CeleryOption,
@@ -178,16 +226,17 @@ def inspect(ctx, action, timeout, destination, json, **kwargs):
help='Use json as output format.')
@click.pass_context
@handle_preload_options
-def control(ctx, action, timeout, destination, json):
- """Workers remote control.
+def control(ctx, command, timeout, destination, json):
+ """Send the COMMAND control command to the workers.
Availability: RabbitMQ (AMQP), Redis, and MongoDB transports.
"""
+ _verify_command_name('control', command)
callback = None if json else partial(_say_remote_command_reply, ctx,
show_reply=True)
args = ctx.args
- arguments = _compile_arguments(action, args)
- replies = ctx.obj.app.control.broadcast(action, timeout=timeout,
+ arguments = _compile_arguments(command, args)
+ replies = ctx.obj.app.control.broadcast(command, timeout=timeout,
destination=destination,
callback=callback,
reply=True,
diff --git a/t/unit/bin/proj/app_with_custom_cmds.py b/t/unit/bin/proj/app_with_custom_cmds.py
new file mode 100644
--- /dev/null
+++ b/t/unit/bin/proj/app_with_custom_cmds.py
@@ -0,0 +1,24 @@
+from celery import Celery
+from celery.worker.control import control_command, inspect_command
+
+
+@control_command(
+ args=[('a', int), ('b', int)],
+ signature='a b',
+)
+def custom_control_cmd(state, a, b):
+ """Ask the workers to reply with a and b."""
+ return {'ok': f'Received {a} and {b}'}
+
+
+@inspect_command(
+ args=[('x', int)],
+ signature='x',
+)
+def custom_inspect_cmd(state, x):
+ """Ask the workers to reply with x."""
+ return {'ok': f'Received {x}'}
+
+
+app = Celery(set_as_current=False)
+app.config_from_object('t.integration.test_worker_config')
| diff --git a/t/unit/app/test_preload_cli.py b/t/unit/app/test_preload_cli.py
--- a/t/unit/app/test_preload_cli.py
+++ b/t/unit/app/test_preload_cli.py
@@ -1,34 +1,41 @@
+import contextlib
+from typing import Tuple
+from unittest.mock import patch
+
+import pytest
from click.testing import CliRunner
from celery.bin.celery import celery
-def test_preload_options(isolated_cli_runner: CliRunner):
- # Verify commands like shell and purge can accept preload options.
- # Projects like Pyramid-Celery's ini option should be valid preload
- # options.
-
- # TODO: Find a way to run these separate invoke and assertions
- # such that order does not matter. Currently, running
- # the "t.unit.bin.proj.pyramid_celery_app" first seems
- # to result in cache or memoization of the option.
- # As a result, the expected exception is not raised when
- # the invoke on "t.unit.bin.proj.app" is run as a second
- # call.
[email protected](autouse=True)
+def reset_command_params_between_each_test():
+ with contextlib.ExitStack() as stack:
+ for command in celery.commands.values():
+ # We only need shallow copy -- preload options are appended to the list,
+ # existing options are kept as-is
+ params_copy = command.params[:]
+ patch_instance = patch.object(command, "params", params_copy)
+ stack.enter_context(patch_instance)
- res_without_preload = isolated_cli_runner.invoke(
- celery,
- ["-A", "t.unit.bin.proj.app", "purge", "-f", "--ini", "some_ini.ini"],
- catch_exceptions=True,
- )
+ yield
- assert "No such option: --ini" in res_without_preload.stdout
- assert res_without_preload.exit_code == 2
[email protected](
+ "subcommand_with_params",
+ [
+ ("purge", "-f"),
+ ("shell",),
+ ]
+)
+def test_preload_options(subcommand_with_params: Tuple[str, ...], isolated_cli_runner: CliRunner):
+ # Verify commands like shell and purge can accept preload options.
+ # Projects like Pyramid-Celery's ini option should be valid preload
+ # options.
res_without_preload = isolated_cli_runner.invoke(
celery,
- ["-A", "t.unit.bin.proj.app", "shell", "--ini", "some_ini.ini"],
- catch_exceptions=True,
+ ["-A", "t.unit.bin.proj.app", *subcommand_with_params, "--ini", "some_ini.ini"],
+ catch_exceptions=False,
)
assert "No such option: --ini" in res_without_preload.stdout
@@ -39,25 +46,11 @@ def test_preload_options(isolated_cli_runner: CliRunner):
[
"-A",
"t.unit.bin.proj.pyramid_celery_app",
- "purge",
- "-f",
+ *subcommand_with_params,
"--ini",
"some_ini.ini",
],
- catch_exceptions=True,
+ catch_exceptions=False,
)
- assert res_with_preload.exit_code == 0
-
- res_with_preload = isolated_cli_runner.invoke(
- celery,
- [
- "-A",
- "t.unit.bin.proj.pyramid_celery_app",
- "shell",
- "--ini",
- "some_ini.ini",
- ],
- catch_exceptions=True,
- )
- assert res_with_preload.exit_code == 0
+ assert res_with_preload.exit_code == 0, res_with_preload.stdout
diff --git a/t/unit/bin/test_control.py b/t/unit/bin/test_control.py
new file mode 100644
--- /dev/null
+++ b/t/unit/bin/test_control.py
@@ -0,0 +1,82 @@
+import os
+import re
+from unittest.mock import patch
+
+import pytest
+from click.testing import CliRunner
+
+from celery.bin.celery import celery
+from celery.platforms import EX_UNAVAILABLE
+
+_GLOBAL_OPTIONS = ['-A', 't.unit.bin.proj.app_with_custom_cmds', '--broker', 'memory://']
+_INSPECT_OPTIONS = ['--timeout', '0'] # Avoid waiting for the zero workers to reply
+
+
[email protected](autouse=True)
+def clean_os_environ():
+ # Celery modifies os.environ when given the CLI option --broker memory://
+ # This interferes with other tests, so we need to reset os.environ
+ with patch.dict(os.environ, clear=True):
+ yield
+
+
[email protected](
+ ('celery_cmd', 'custom_cmd'),
+ [
+ ('inspect', ('custom_inspect_cmd', '123')),
+ ('control', ('custom_control_cmd', '123', '456')),
+ ],
+)
+def test_custom_remote_command(celery_cmd, custom_cmd, isolated_cli_runner: CliRunner):
+ res = isolated_cli_runner.invoke(
+ celery,
+ [*_GLOBAL_OPTIONS, celery_cmd, *_INSPECT_OPTIONS, *custom_cmd],
+ catch_exceptions=False,
+ )
+ assert res.exit_code == EX_UNAVAILABLE, (res, res.stdout)
+ assert res.stdout.strip() == 'Error: No nodes replied within time constraint'
+
+
[email protected](
+ ('celery_cmd', 'remote_cmd'),
+ [
+ # Test nonexistent commands
+ ('inspect', 'this_command_does_not_exist'),
+ ('control', 'this_command_does_not_exist'),
+ # Test commands that exist, but are of the wrong type
+ ('inspect', 'custom_control_cmd'),
+ ('control', 'custom_inspect_cmd'),
+ ],
+)
+def test_unrecognized_remote_command(celery_cmd, remote_cmd, isolated_cli_runner: CliRunner):
+ res = isolated_cli_runner.invoke(
+ celery,
+ [*_GLOBAL_OPTIONS, celery_cmd, *_INSPECT_OPTIONS, remote_cmd],
+ catch_exceptions=False,
+ )
+ assert res.exit_code == 2, (res, res.stdout)
+ assert f'Error: Command {remote_cmd} not recognized. Available {celery_cmd} commands: ' in res.stdout
+
+
+_expected_inspect_regex = (
+ '\n custom_inspect_cmd x\\s+Ask the workers to reply with x\\.\n'
+)
+_expected_control_regex = (
+ '\n custom_control_cmd a b\\s+Ask the workers to reply with a and b\\.\n'
+)
+
+
[email protected](
+ ('celery_cmd', 'expected_regex'),
+ [
+ ('inspect', re.compile(_expected_inspect_regex, re.MULTILINE)),
+ ('control', re.compile(_expected_control_regex, re.MULTILINE)),
+ ],
+)
+def test_listing_remote_commands(celery_cmd, expected_regex, isolated_cli_runner: CliRunner):
+ res = isolated_cli_runner.invoke(
+ celery,
+ [*_GLOBAL_OPTIONS, celery_cmd, '--list'],
+ )
+ assert res.exit_code == 0, (res, res.stdout)
+ assert expected_regex.search(res.stdout)
| Celery 5 custom inspect commands don't work in the CLI
<!--
Please fill this template entirely and do not erase parts of it.
We reserve the right to close without a response
bug reports which are incomplete.
-->
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
* [x] I have verified that the issue exists against the `master` branch of Celery.
* [x] This has already been asked to the [discussion group](https://groups.google.com/forum/#!forum/celery-users) first.
* [x] I have read the relevant section in the
[contribution guide](http://docs.celeryproject.org/en/latest/contributing.html#other-bugs)
on reporting bugs.
* [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
* [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
* [x] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the bug was already fixed in the master branch.
* [x] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
## Mandatory Debugging Information
* [ ] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
* [ ] I have verified that the issue exists against the `master` branch of Celery.
* [ ] I have included the contents of ``pip freeze`` in the issue.
* [ ] I have included all the versions of all the external dependencies required
to reproduce this bug.
## Optional Debugging Information
<!--
Try some of the below if you think they are relevant.
It will help us figure out the scope of the bug and how many users it affects.
-->
* [ ] I have tried reproducing the issue on more than one Python version
and/or implementation.
* [ ] I have tried reproducing the issue on more than one message broker and/or
result backend.
* [ ] I have tried reproducing the issue on more than one version of the message
broker and/or result backend.
* [ ] I have tried reproducing the issue on more than one operating system.
* [ ] I have tried reproducing the issue on more than one workers pool.
* [ ] I have tried reproducing the issue with autoscaling, retries,
ETA/Countdown & rate limits disabled.
* [ ] I have tried reproducing the issue after downgrading
and/or upgrading Celery and its dependencies.
## Related Issues and Possible Duplicates
<!--
Please make sure to search and mention any related issues
or possible duplicates to this issue as requested by the checklist above.
This may or may not include issues in other repositories that the Celery project
maintains or other repositories that are dependencies of Celery.
If you don't know how to mention issues, please refer to Github's documentation
on the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests
-->
#### Related Issues
* None
#### Possible Duplicates
* None
## Environment & Settings
<!-- Include the contents of celery --version below -->
**Celery version**:
<!-- Include the output of celery -A proj report below -->
<details>
<summary><b><code>celery report</code> Output:</b></summary>
<p>
```
software -> celery:5.0.5 (singularity) kombu:5.0.2 py:3.7.5
            billiard:3.6.3.0 py-amqp:5.0.3
platform -> system:Linux arch:64bit, ELF
            kernel version:4.4.0-19041-Microsoft imp:CPython
loader   -> celery.loaders.default.Loader
settings -> transport:amqp results:disabled
deprecated_settings: None
```
</p>
</details>
# Steps to Reproduce
## Required Dependencies
<!-- Please fill the required dependencies to reproduce this issue -->
* **Minimal Python Version**: N/A or Unknown
* **Minimal Celery Version**: 5
* **Minimal Kombu Version**: N/A or Unknown
* **Minimal Broker Version**: N/A or Unknown
* **Minimal Result Backend Version**: N/A or Unknown
* **Minimal OS and/or Kernel Version**: N/A or Unknown
* **Minimal Broker Client Version**: N/A or Unknown
* **Minimal Result Backend Client Version**: N/A or Unknown
### Python Packages
<!-- Please fill the contents of pip freeze below -->
<details>
<summary><b><code>pip freeze</code> Output:</b></summary>
<p>
```
absl-py==0.9.0
aiohttp==3.6.2
aiomisc==11.0.0
amqp==5.0.3
appdirs==1.4.4
asgiref==3.2.10
asn1crypto==0.24.0
astor==0.8.1
async-timeout==3.0.1
atomicwrites==1.3.0
attrs==19.3.0
auth0-python==3.9.1
aws2-wrap==1.1.4
awscli==1.14.44
Babel==2.8.0
backcall==0.1.0
billiard==3.6.3.0
boto3==1.14.19
boto3-stubs==1.14.19.0
botocore==1.17.63
Brotli==1.0.9
cachetools==4.0.0
celery==5.0.5
certifi==2019.11.28
cffi==1.14.0
chardet==3.0.4
click==7.1.2
click-didyoumean==0.0.3
click-plugins==1.1.1
click-repl==0.1.6
colorama==0.3.7
colorlog==4.4.0
convertdate==2.2.2
cryptography==2.7
ddtrace==0.44.0
decorator==4.4.2
defusedxml==0.6.0
Django==3.1.2
django-celery-results==1.0.4
django-cors-headers==3.1.0
django-debug-toolbar==3.1.1
django-filter==2.1.0
django-prometheus==1.0.15
django-rest-auth==0.9.5
django-rest-framework-condition==0.1.1
djangorestframework==3.12.1
djangorestframework-jwt==1.11.0
docutils==0.15.2
ecdsa==0.15
ffmpeg-python==0.2.0
fitparse==1.1.0
flower==0.9.3
fusepy==3.0.1
future==0.18.2
gast==0.2.2
gitdb==4.0.2
gitdb2==3.0.0
GitPython==3.1.0
glfw==1.8.2
google-auth==1.11.3
google-auth-oauthlib==0.4.1
google-pasta==0.2.0
gprof2dot==2019.11.30
graphqlclient==0.2.4
grpcio==1.27.2
h5py==2.10.0
holidays==0.10.1
idna==2.6
imageio-ffmpeg==0.3.0
importlib-metadata==1.5.0
imutils==0.5.2
intervaltree==3.1.0
ipython==7.6.1
ipython-genutils==0.2.0
jedi==0.16.0
jmespath==0.9.5
joblib==0.14.1
Keras-Applications==1.0.8
Keras-Preprocessing==1.1.0
keyring==10.6.0
keyrings.alt==3.0
kombu==5.0.2
kubernetes==12.0.0
logzio-python-handler==2.0.13
lxml==4.4.2
Markdown==3.2.1
memory-profiler==0.57.0
more-itertools==8.2.0
multidict==4.7.5
munkres==1.1.2
mypy-boto3==1.14.19.0
mypy-boto3-cloudformation==1.14.19.0
mypy-boto3-dynamodb==1.14.19.0
mypy-boto3-ec2==1.14.19.0
mypy-boto3-lambda==1.14.19.0
mypy-boto3-rds==1.14.19.0
mypy-boto3-s3==1.14.19.0
mypy-boto3-sqs==1.14.19.0
numpy==1.17.0
oauthlib==3.1.0
olefile==0.45.1
opencv-python==3.4.5.20
opt-einsum==3.2.0
packaging==20.3
pandas==1.1.4
parso==0.6.2
pdfkit==0.6.1
pexpect==4.8.0
pickleshare==0.7.5
Pillow==7.1.2
pipdeptree==1.0.0
pluggy==0.13.1
pprofile==2.0.2
prometheus-client==0.7.1
prompt-toolkit==2.0.10
protobuf==3.11.3
psutil==5.7.0
psycopg2-binary==2.8.3
ptyprocess==0.6.0
py==1.8.1
pyasn1==0.4.8
pyasn1-modules==0.2.8
pycparser==2.20
pycrypto==2.6.1
pyee==7.0.4
Pygments==2.6.1
pygobject==3.26.1
PyJWT==1.7.1
PyMeeus==0.3.7
PyOpenGL==3.1.0
pyparsing==2.4.6
pyperclip==1.7.0
pyppeteer==0.2.2
pyquaternion==0.9.5
PySocks==1.7.1
pytest==4.6.5
pytest-django==3.10.0
pytest-django-ordering==1.2.0
pytest-profiling==1.7.0
python-apt==1.6.5+ubuntu0.2
python-dateutil==2.8.0
python-dotenv==0.10.3
python-http-client==3.2.6
python-jose==3.0.1
python-json-logger==0.1.11
python-memcached==1.59
python3-openid==3.1.0
pytz==2019.3
pyxdg==0.25
PyYAML==5.3.1
redis==3.3.11
requests==2.22.0
requests-oauthlib==1.3.0
retry==0.9.2
roman==2.0.0
rsa==4.0
s3transfer==0.3.3
scikit-fmm==2019.1.30
scikit-learn==0.23.1
scipy==1.3.1
SecretStorage==2.3.1
sendgrid==6.4.1
Shapely==1.6.4.post2
six==1.12.0
smmap==3.0.1
smmap2==3.0.1
social-auth-app-django==3.1.0
social-auth-core==3.2.0
sortedcontainers==2.3.0
sqlparse==0.3.1
starkbank-ecdsa==1.1.0
tenacity==6.3.1
tensorboard==2.0.2
tensorflow==2.0.0
tensorflow-estimator==2.0.1
termcolor==1.1.0
threadpoolctl==2.1.0
tornado==5.1.1
tqdm==4.56.0
traitlets==4.3.3
tripy==1.0.0
twilio==6.29.3
typing-extensions==3.7.4.3
unattended-upgrades==0.1
urllib3==1.25.8
uWSGI==2.0.18
vine==5.0.0
wcwidth==0.1.8
websocket-client==0.57.0
websockets==8.1
Werkzeug==1.0.0
wrapt==1.12.1
xgboost==0.90
xlrd==1.2.0
XlsxWriter==1.2.8
xxhash==1.3.0
yappi==1.0
yarl==1.4.2
zipp==3.1.0
```
</p>
</details>
### Other Dependencies
<!--
Please provide system dependencies, configuration files
and other dependency information if applicable
-->
<details>
<p>
N/A
</p>
</details>
## Minimally Reproducible Test Case
<!--
Please provide a reproducible test case.
Refer to the Reporting Bugs section in our contribution guide.
We prefer submitting test cases in the form of a PR to our integration test suite.
If you can provide one, please mention the PR number below.
If not, please attach the most minimal code example required to reproduce the issue below.
If the test case is too large, please include a link to a gist or a repository below.
-->
<details>
<p>
```python
```
</p>
</details>
# Expected Behavior
<!-- Describe in detail what you expect to happen -->
# Actual Behavior
<!--
Describe in detail what actually happened.
Please include a backtrace and surround it with triple backticks (```).
In addition, include the Celery daemon logs, the broker logs,
the result backend logs and system logs below if they will help us debug
the issue.
-->
Hi, I think there is a bug with version 5+.
When using the guide in the docs for writing custom control commands and trying to run the command from the CLI, it fails. When running it from a Python script (with broadcast), it works. With celery 4.4.3 on the same code, it also works.
The guide: https://docs.celeryproject.org/en/stable/userguide/workers.html#writing-your-own-remote-control-commands
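For reference, the programmatic path that works looks roughly like this (a minimal sketch; `current_prefetch_count` is the custom inspect command from this report):

```python
from celery import Celery

app = Celery("route")
# Sending the custom command via broadcast bypasses the CLI's
# click.Choice validation and reaches the worker fine.
replies = app.control.broadcast("current_prefetch_count", reply=True, timeout=1)
print(replies)
```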
the error:
```
$ celery -A route inspect current_prefetch_count
Usage: celery inspect [OPTIONS] [report|conf|query_task|clock|ping|stats|scheduled|reserved|active|revoked|registered|objgraph|memsample|memdump|active_queues]
Try 'celery inspect --help' for help.

Error: Invalid value for '[report|conf|query_task|clock|ping|stats|scheduled|reserved|active|revoked|registered|objgraph|memsample|memdump|active_queues]': invalid choice: current_prefetch_count. (choose from report, conf, query_task, clock, ping, stats, scheduled, reserved, active, revoked, registered, objgraph, memsample, memdump, active_queues)
```
| This is what you get when you run `celery inspect --help`:
```
Usage: celery inspect [OPTIONS] [report|conf|query_task|clock|ping|stats|scheduled|reserved|active|revoked|registered|objgraph|memsample|memdump|active_queues]

  Inspect the worker at runtime.

  Availability: RabbitMQ (AMQP) and Redis transports.

Remote Control Options:
  -t, --timeout FLOAT             Timeout in seconds waiting for reply.
  -d, --destination COMMA SEPARATED LIST
                                  Comma separated list of destination node names.
  -j, --json                      Use json as output format.

Options:
  --help  Show this message and exit.
```
Maybe @thedrow can share some insight.
I have a monitoring script that (among many things) uses the inspection API and calls active_queues(). With Celery 5.1.2 the script fails with a KeyError, as active_queues() returns nothing... 4.4.x works as expected...
The code I used to test is here: https://gitlab.com/dejan/ceex (here I keep all my CElery EXperiments). If you run `celery -A ceex.inspect_node_stats worker -l debug` and try to `celery -A ceex.inspect_node_stats inspect node_stats` with 5.1.2 you will see it does not work. Again, try with 4.4.7 - it does work.
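For context, the monitoring script's failing call is just the standard inspect API, along the lines of (sketch):

```python
# sketch of the kind of call the monitoring script makes; under 5.1.2
# this reportedly returns nothing, so downstream dict lookups raise KeyError
insp = app.control.inspect(timeout=5)
queues = insp.active_queues()
```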
@dejlek The page is not available
Oops, thanks - I did not know it was created as a private repo... It is open to the public now.
Any update on this?
Looking at the code in control.py:
```
@click.argument("action", type=click.Choice([
name for name, info in Panel.meta.items()
if info.type == 'control' and info.visible
]))
```
The decorator above doesn't read the global `Panel.meta` properly. I'm still trying to figure out why.
I encountered this bug today. This bug is caused by the fact that `Panel.meta.items()` is evaluated when the `bin/control.py` file is imported. This happens _after_ the command definitions in `worker/control.py`, but _before_ any of my custom commands are defined. All the modules in `bin/` are imported when the `celery` command is run, so I don't see any quickfix or workaround.
I guess you could move away from `click.Choice` and accept any string as the command name instead. We would then need to validate the given string manually – the user's Celery app will be set up by the time we reach the function body, so their custom commands will be registered by then. Maybe a new flag, e.g. `--list`, could print the available commands and exit.
Would this be an acceptable solution? I may try to implement it if desired.
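A minimal sketch of that idea (names are illustrative, not a final API):

```python
import click
from celery.worker.control import Panel

@click.command()
@click.argument("command")  # accept any string instead of click.Choice(...)
def inspect(command):
    # By the time the command body runs, the user's app (and any custom
    # @inspect_command definitions) has been imported, so Panel.meta is complete.
    choices = sorted(
        name for name, info in Panel.meta.items()
        if info.type == "inspect" and info.visible
    )
    if command not in choices:
        raise click.UsageError(
            f"Command {command} not recognized. "
            f"Available inspect commands: {', '.join(choices)}"
        )
    ...
```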
This bug is basically preventing us from moving to new(er) Celery, as we have a few critical inspect and control commands that we use all the time.
> I encountered this bug today. This bug is caused by the fact that `Panel.meta.items()` is evaluated when the `bin/control.py` file is imported. This happens _after_ the command definitions in `worker/control.py`, but _before_ any of my custom commands are defined. All the modules in `bin/` are imported when the `celery` command is run, so I don't see any quickfix or workaround.
>
> I guess you could move away from `click.Choice` and accept any string as the command name instead. We would then need to validate the given string manually – the user's Celery app will be set up by the time we reach the function body, so their custom commands will be registered by then. Maybe a new flag, e.g. `--list`, could print the available commands and exit.
>
> Would this be an acceptable solution? I may try to implement it if desired.
You can come up with a draft proof-of-concept PR with relevant tests for review | 2023-09-07T13:46:57 |
celery/celery | 8,650 | celery__celery-8650 | [
"8540"
] | bad275039fac8bdf66e8d03928028227aef0f782 | diff --git a/celery/worker/control.py b/celery/worker/control.py
--- a/celery/worker/control.py
+++ b/celery/worker/control.py
@@ -580,7 +580,7 @@ def autoscale(state, max=None, min=None):
def shutdown(state, msg='Got shutdown from remote', **kwargs):
"""Shutdown worker(s)."""
logger.warning(msg)
- raise WorkerShutdown(msg)
+ raise WorkerShutdown(0)
# -- Queues
| Celery exits with a non-zero code after Warm Shutdown in Celery 5.3.x
### Discussed in https://github.com/celery/celery/discussions/8539
Originally posted by **cinesia**, September 27, 2023:
We recently upgraded **celery** from **5.2.7** to **5.3.4**, and something changed in the default behaviour of a celery worker when it receives a warm shutdown.
Before the upgrade, the worker exited with code zero; now the worker exits with a non-zero code (1).
The code is the same; nothing changed except the package upgrade.
I succeeded in reproducing the error in a clean environment where only celery is installed.
To reproduce the behaviour:
- Create a simple Celery worker
tasks.py
```python
from celery import Celery
app = Celery('tasks')
@app.task
def add(x, y):
return x + y
```
Dockerfile
```Dockerfile
# Use an official Python runtime as the base image
FROM python:3.9-slim
# Set the working directory in the container
WORKDIR /app
# Copy the dependencies file to the working directory (it has just a line with celery==...)
COPY requirements.txt .
# Install the dependencies
RUN pip install --no-cache-dir -r requirements.txt
# Copy the rest of the application's code to the working directory
COPY . .
# Define the command to run your Celery worker
CMD ["celery", "--app=tasks", "worker", "--loglevel=info"]
```
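For completeness, `requirements.txt` here is just the pinned celery version under test, e.g.:

```
celery==5.3.4
```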
docker-compose
```yaml
version: '3.7'
services:
# Deploy the broker.
rabbitmq_server:
image: rabbitmq:3-management
ports:
# Expose the port for the worker to add/get tasks
- 5672:5672
# OPTIONAL: Expose the GUI port
- 15672:15672
# Deploy the worker
worker:
# Build using the worker Dockerfile
build:
context: .
dockerfile: Dockerfile
# Need to access the database
# OPTIONAL: If your worker needs to access your db that is deployed locally, then make the network mode as host.
network_mode: host
# Pass the rabbitmq_uri as an environment variable in order to connect to our service
environment:
# NOTE: Below we are using 127.0.0.1 because this container will run on the host network, thus it will have access to the host network.
- CELERY_BROKER_URL=amqp://[email protected]:5672//
```
- Open a python console inside the celery container and send a shutdown
```python
import celery
app = celery.Celery("tasks")
app.control.shutdown()
```
With celery==5.2.7 the container exits with code 0; with celery==5.3.4 the container exits with code 1.
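A quick way to see why the exit code flips — `WorkerShutdown` subclasses `SystemExit`, whose argument becomes the process exit status (illustration only):

```python
import subprocess
import sys

# A non-int SystemExit argument (like the old "Got shutdown from remote"
# message) makes the interpreter exit with status 1; an int is used as-is.
for arg in ('"Got shutdown from remote"', "0"):
    code = subprocess.call([sys.executable, "-c", f"raise SystemExit({arg})"])
    print(arg, "->", code)  # string -> 1, 0 -> 0
```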
| If you can propose a fix, that would be great. We can discuss more on the PR.
what is the content of requirements.txt? @cinesia
> what is the content of requirements.txt? @cinesia
Just the version of celery I want to install. | 2023-11-19T00:15:07 |
|
celery/celery | 8,663 | celery__celery-8663 | [
"8662"
] | ac16f239985cf9248155b95788c4b6227f7f1b94 | diff --git a/celery/canvas.py b/celery/canvas.py
--- a/celery/canvas.py
+++ b/celery/canvas.py
@@ -958,6 +958,8 @@ def __or__(self, other):
if isinstance(other, group):
# unroll group with one member
other = maybe_unroll_group(other)
+ if not isinstance(other, group):
+ return self.__or__(other)
# chain | group() -> chain
tasks = self.unchain_tasks()
if not tasks:
@@ -981,6 +983,13 @@ def __or__(self, other):
sig = self.clone()
sig.tasks[-1] = chord(
sig.tasks[-1], other, app=self._app)
+ # In the scenario where the second-to-last item in a chain is a chord,
+ # it leads to a situation where two consecutive chords are formed.
+ # In such cases, a further upgrade can be considered.
+ # This would involve chaining the body of the second-to-last chord with the last chord."
+ if len(sig.tasks) > 1 and isinstance(sig.tasks[-2], chord):
+ sig.tasks[-2].body = sig.tasks[-2].body | sig.tasks[-1]
+ sig.tasks = sig.tasks[:-1]
return sig
elif self.tasks and isinstance(self.tasks[-1], chord):
# CHAIN [last item is chord] -> chain with chord body.
| diff --git a/t/integration/test_canvas.py b/t/integration/test_canvas.py
--- a/t/integration/test_canvas.py
+++ b/t/integration/test_canvas.py
@@ -1037,6 +1037,65 @@ def test_freezing_chain_sets_id_of_last_task(self, manager):
c.freeze(last_task.id)
assert c.id == last_task.id
+ @pytest.mark.parametrize(
+ "group_last_task",
+ [False, True],
+ )
+ def test_chaining_upgraded_chords_mixed_canvas_protocol_2(
+ self, manager, subtests, group_last_task):
+ """ This test is built to reproduce the github issue https://github.com/celery/celery/issues/8662
+
+ The issue describes a canvas where a chain of groups are executed multiple times instead of once.
+ This test is built to reproduce the issue and to verify that the issue is fixed.
+ """
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ if not manager.app.conf.result_backend.startswith('redis'):
+ raise pytest.skip('Requires redis result backend.')
+
+ redis_connection = get_redis_connection()
+ redis_key = 'echo_chamber'
+
+ c = chain(
+ group([
+ redis_echo.si('1', redis_key=redis_key),
+ redis_echo.si('2', redis_key=redis_key)
+ ]),
+ group([
+ redis_echo.si('3', redis_key=redis_key),
+ redis_echo.si('4', redis_key=redis_key),
+ redis_echo.si('5', redis_key=redis_key)
+ ]),
+ group([
+ redis_echo.si('6', redis_key=redis_key),
+ redis_echo.si('7', redis_key=redis_key),
+ redis_echo.si('8', redis_key=redis_key),
+ redis_echo.si('9', redis_key=redis_key)
+ ]),
+ redis_echo.si('Done', redis_key='Done') if not group_last_task else
+ group(redis_echo.si('Done', redis_key='Done')),
+ )
+
+ with subtests.test(msg='Run the chain and wait for completion'):
+ redis_connection.delete(redis_key, 'Done')
+ c.delay().get(timeout=TIMEOUT)
+ await_redis_list_message_length(1, redis_key='Done', timeout=10)
+
+ with subtests.test(msg='All tasks are executed once'):
+ actual = [
+ sig.decode('utf-8')
+ for sig in redis_connection.lrange(redis_key, 0, -1)
+ ]
+ expected = [str(i) for i in range(1, 10)]
+ with subtests.test(msg='All tasks are executed once'):
+ assert sorted(actual) == sorted(expected)
+
+ # Cleanup
+ redis_connection.delete(redis_key, 'Done')
+
class test_result_set:
diff --git a/t/unit/tasks/test_canvas.py b/t/unit/tasks/test_canvas.py
--- a/t/unit/tasks/test_canvas.py
+++ b/t/unit/tasks/test_canvas.py
@@ -571,6 +571,36 @@ def test_chain_of_chord_upgrade_on_chaining(self):
assert isinstance(new_chain, _chain)
assert isinstance(new_chain.tasks[0].body, chord)
+ @pytest.mark.parametrize(
+ "group_last_task",
+ [False, True],
+ )
+ def test_chain_of_chord_upgrade_on_chaining__protocol_2(
+ self, group_last_task):
+ c = chain(
+ group([self.add.s(i, i) for i in range(5)], app=self.app),
+ group([self.add.s(i, i) for i in range(10, 15)], app=self.app),
+ group([self.add.s(i, i) for i in range(20, 25)], app=self.app),
+ self.add.s(30) if not group_last_task else group(self.add.s(30),
+ app=self.app))
+ assert isinstance(c, _chain)
+ assert len(
+ c.tasks
+ ) == 1, "Consecutive chords should be further upgraded to a single chord."
+ assert isinstance(c.tasks[0], chord)
+
+ def test_chain_of_chord_upgrade_on_chaining__protocol_3(self):
+ c = chain(
+ chain([self.add.s(i, i) for i in range(5)]),
+ group([self.add.s(i, i) for i in range(10, 15)], app=self.app),
+ chord([signature('header')], signature('body'), app=self.app),
+ group([self.add.s(i, i) for i in range(20, 25)], app=self.app))
+ assert isinstance(c, _chain)
+ assert isinstance(
+ c.tasks[-1], chord
+ ), "Chord followed by a group should be upgraded to a single chord with chained body."
+ assert len(c.tasks) == 6
+
def test_apply_options(self):
class static(Signature):
| Duplicate executions and django-celery-results backend exceptions when chaining four groups
<!--
Please fill this template entirely and do not erase parts of it.
We reserve the right to close without a response
bug reports which are incomplete.
-->
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
- [x] I have verified that the issue exists against the `main` branch of Celery.
- [ ] This has already been asked to the [discussions forum](https://github.com/celery/celery/discussions) first.
- [x] I have read the relevant section in the
[contribution guide](https://docs.celeryq.dev/en/main/contributing.html#other-bugs)
on reporting bugs.
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/main)
to find out if the bug was already fixed in the main branch.
- [x] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
## Mandatory Debugging Information
- [ ] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
- [x] I have verified that the issue exists against the `main` branch of Celery.
- [x] I have included the contents of ``pip freeze`` in the issue.
- [x] I have included all the versions of all the external dependencies required
to reproduce this bug.
## Optional Debugging Information
<!--
Try some of the below if you think they are relevant.
It will help us figure out the scope of the bug and how many users it affects.
-->
- [ ] I have tried reproducing the issue on more than one Python version
and/or implementation.
- [ ] I have tried reproducing the issue on more than one message broker and/or
result backend.
- [ ] I have tried reproducing the issue on more than one version of the message
broker and/or result backend.
- [ ] I have tried reproducing the issue on more than one operating system.
- [x] I have tried reproducing the issue on more than one workers pool.
- [ ] I have tried reproducing the issue with autoscaling, retries,
ETA/Countdown & rate limits disabled.
- [x] I have tried reproducing the issue after downgrading
and/or upgrading Celery and its dependencies.
## Related Issues and Possible Duplicates
<!--
Please make sure to search and mention any related issues
or possible duplicates to this issue as requested by the checklist above.
This may or may not include issues in other repositories that the Celery project
maintains or other repositories that are dependencies of Celery.
If you don't know how to mention issues, please refer to Github's documentation
on the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests
-->
#### Related Issues
- #5958
#### Possible Duplicates
- None
## Environment & Settings
<!-- Include the contents of celery --version below -->
**Celery version**: 5.3.0 - 5.3.5
# Steps to Reproduce
## Required Dependencies
<!-- Please fill the required dependencies to reproduce this issue -->
- **Minimal Python Version**: 3.11.4
- **Minimal Celery Version**: 5.3.5
- **Minimal Kombu Version**: 5.3.4
- **Minimal Broker Version**: N/A or Unknown
- **Minimal Result Backend Version**: django-celery-results 2.5.1
- **Minimal OS and/or Kernel Version**: Debian 11
- **Minimal Broker Client Version**: N/A or Unknown
- **Minimal Result Backend Client Version**: N/A or Unknown
### Python Packages
<!-- Please fill the contents of pip freeze below -->
Django==4.2.7
### Other Dependencies
<!--
Please provide system dependencies, configuration files
and other dependency information if applicable
-->
<details>
<p>
</p>
</details>
## Minimally Reproducible Test Case
While working with a chain of groups in Celery with django-celery-results as the result backend, I encountered unexpected behavior leading to exceptions in the django-celery-results backend. The structure of my chain is as follows:
```
group1(task1, task2, task3, task4, task5, task6) |
group2(task7, task8, task9, task10, task11, task12, task13) |
group3(task14, task15, task16, task17) |
group4(task18)
```
# Expected Behavior
Chain executed without exception.
# Actual Behavior
During execution, `apply_chord` in the django-celery-results database backend is called multiple times with the same group_id, triggering exceptions.
# Attempts to Resolve
I tried modifying django-celery-results (https://github.com/celery/django-celery-results/pull/413) to prevent updating the chord counter when the group_id is the same. This change, however, resulted in task18 not being triggered.
Further modifications were made to accumulate the count of the chord counter when the group_id is the same. This led to tasks in group3 being executed 7 times (equal to the number of tasks in group2).
This issue seems to bear resemblance to https://github.com/celery/celery/issues/5958.
# Suggested Solution
Under the current behavior, these four groups are upgraded into two chords. A potential solution could be to connect the second chord to the end of the first chord's body in `prepare_steps`. This approach aligns with the following existing behaviors:
- PR #7919 connects a group following a chord to the end of the chord body.
- The `or` operator in class `_chord` (https://github.com/celery/celery/blob/main/celery/canvas.py#L2045) connects a task or chord following a chord to the end of the chord body.
Looking forward to guidance or suggestions for resolving this issue.
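For concreteness, a minimal sketch of the suggested merge (this mirrors what the patch at the top of this record ends up doing inside `_chain.__or__`):

```python
# When chaining leaves two consecutive chords in sig.tasks, fold the
# trailing chord into the body of the one before it, so the header
# groups are only executed once.
if len(sig.tasks) > 1 and isinstance(sig.tasks[-2], chord):
    sig.tasks[-2].body = sig.tasks[-2].body | sig.tasks[-1]
    sig.tasks = sig.tasks[:-1]
```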
| 2023-11-23T04:45:42 |
|
celery/celery | 8,702 | celery__celery-8702 | [
"8678"
] | 17631f7eda712b688294ecb8fa53e4769fe2b1f9 | diff --git a/celery/canvas.py b/celery/canvas.py
--- a/celery/canvas.py
+++ b/celery/canvas.py
@@ -2271,6 +2271,8 @@ def link_error(self, errback):
``False`` (the current default), then the error callback will only be
applied to the body.
"""
+ errback = maybe_signature(errback)
+
if self.app.conf.task_allow_error_cb_on_chord_header:
for task in maybe_list(self.tasks) or []:
task.link_error(errback.clone(immutable=True))
| diff --git a/t/unit/tasks/test_canvas.py b/t/unit/tasks/test_canvas.py
--- a/t/unit/tasks/test_canvas.py
+++ b/t/unit/tasks/test_canvas.py
@@ -1688,6 +1688,14 @@ def test_flag_allow_error_cb_on_chord_header_various_header_types(self):
errback = c.link_error(sig)
assert errback == sig
+ @pytest.mark.usefixtures('depends_on_current_app')
+ def test_flag_allow_error_cb_on_chord_header_with_dict_callback(self):
+ self.app.conf.task_allow_error_cb_on_chord_header = True
+ c = chord(group(signature('th1'), signature('th2')), signature('tbody'))
+ errback_dict = dict(signature('tcb'))
+ errback = c.link_error(errback_dict)
+ assert errback == errback_dict
+
def test_chord__or__group_of_single_task(self):
""" Test chaining a chord to a group of a single task. """
c = chord([signature('header')], signature('body'))
| task with an errback throws `AttributeError` when replaced with a chord and `task_allow_error_cb_on_chord_header` is set
<!--
Please fill this template entirely and do not erase parts of it.
We reserve the right to close without a response
bug reports which are incomplete.
-->
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
- [x] I have verified that the issue exists against the `main` branch of Celery.
- [ ] This has already been asked to the [discussions forum](https://github.com/celery/celery/discussions) first.
- [x] I have read the relevant section in the
[contribution guide](https://docs.celeryq.dev/en/main/contributing.html#other-bugs)
on reporting bugs.
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/main)
to find out if the bug was already fixed in the main branch.
- [x] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
## Mandatory Debugging Information
- [x] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
- [x] I have verified that the issue exists against the `main` branch of Celery.
- [x] I have included the contents of ``pip freeze`` in the issue.
- [x] I have included all the versions of all the external dependencies required
to reproduce this bug.
## Optional Debugging Information
<!--
Try some of the below if you think they are relevant.
It will help us figure out the scope of the bug and how many users it affects.
-->
- [ ] I have tried reproducing the issue on more than one Python version
and/or implementation.
- [ ] I have tried reproducing the issue on more than one message broker and/or
result backend.
- [ ] I have tried reproducing the issue on more than one version of the message
broker and/or result backend.
- [ ] I have tried reproducing the issue on more than one operating system.
- [x] I have tried reproducing the issue on more than one workers pool.
- [ ] I have tried reproducing the issue with autoscaling, retries,
ETA/Countdown & rate limits disabled.
- [x] I have tried reproducing the issue after downgrading
and/or upgrading Celery and its dependencies.
## Related Issues and Possible Duplicates
<!--
Please make sure to search and mention any related issues
or possible duplicates to this issue as requested by the checklist above.
This may or may not include issues in other repositories that the Celery project
maintains or other repositories that are dependencies of Celery.
If you don't know how to mention issues, please refer to Github's documentation
on the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests
-->
#### Related Issues
https://github.com/celery/celery/issues/8456
#### Possible Duplicates
- None
## Environment & Settings
<!-- Include the contents of celery --version below -->
**Celery version**: 5.3.6 (emerald-rush)
<!-- Include the output of celery -A proj report below -->
<details>
<summary><b><code>celery report</code> Output:</b></summary>
<p>
```
software -> celery:5.3.6 (emerald-rush) kombu:5.3.4 py:3.8.10
billiard:4.2.0 redis:5.0.1
platform -> system:Darwin arch:64bit
kernel version:23.1.0 imp:CPython
loader -> celery.loaders.app.AppLoader
settings -> transport:redis results:redis://0.0.0.0:6479/
broker_url: 'redis://0.0.0.0:6479//'
result_backend: 'redis://0.0.0.0:6479/'
deprecated_settings: None
task_allow_error_cb_on_chord_header: True
```
</p>
</details>
# Steps to Reproduce
## Required Dependencies
<!-- Please fill the required dependencies to reproduce this issue -->
- **Minimal Python Version**: N/A or Unknown
- **Minimal Celery Version**: 5.3.1
- **Minimal Kombu Version**: N/A or Unknown
- **Minimal Broker Version**: N/A or Unknown
- **Minimal Result Backend Version**: N/A or Unknown
- **Minimal OS and/or Kernel Version**: N/A or Unknown
- **Minimal Broker Client Version**: N/A or Unknown
- **Minimal Result Backend Client Version**: N/A or Unknown
### Python Packages
<!-- Please fill the contents of pip freeze below -->
<details>
<summary><b><code>pip freeze</code> Output:</b></summary>
<p>
```
aiohttp==3.8.5
aiosignal==1.3.1
amqp==5.2.0
asgiref==3.6.0
astroid==2.15.2
async-timeout==4.0.3
attrs==22.2.0
autopep8==1.5.5
backports.zoneinfo==0.2.1
bandit==1.7.5
billiard==4.2.0
black==23.7.0
boto3-stubs==1.19.12.post1
botocore-stubs==1.29.107
celery==5.3.6
celery-stubs==0.1.3
certifi==2022.12.7
cfgv==3.3.1
charset-normalizer==3.1.0
click==8.1.7
click-didyoumean==0.3.0
click-plugins==1.1.1
click-repl==0.3.0
coreapi==2.3.3
coreschema==0.0.4
data-science-types==0.2.23
dill==0.3.6
distlib==0.3.6
Django==4.2
django-filter-stubs==0.1.3
django-stubs==1.15.0
django-stubs-ext==0.8.0
djangorestframework==3.14.0
djangorestframework-stubs==1.9.1
drf-yasg==1.20.3
factory-boy==3.2.1
Faker==18.3.4
filelock==3.10.7
flake8==3.8.4
frozenlist==1.4.0
fuzzywuzzy-stubs==0.0.1
gitdb==4.0.10
GitPython==3.1.31
graphene-stubs==0.15
identify==2.5.22
idna==3.4
inflection==0.5.1
isort==5.12.0
itypes==1.2.0
jedi==0.17.2
Jinja2==3.1.2
kombu==5.3.4
lazy-object-proxy==1.9.0
markdown-it-py==2.2.0
MarkupSafe==2.1.2
mccabe==0.6.1
mdurl==0.1.2
multidict==6.0.4
mypy==1.1.1
mypy-extensions==1.0.0
nodeenv==1.7.0
packaging==23.0
parso==0.7.1
pathspec==0.11.1
pbr==5.11.1
pip-licenses==3.5.5
platformdirs==3.2.0
pluggy==1.0.0
pre-commit==2.7.1
prompt-toolkit==3.0.41
PTable==0.9.2
pycodestyle==2.6.0
pydocstyle==6.3.0
pyflakes==2.2.0
Pygments==2.14.0
pylint==2.17.2
python-dateutil==2.8.2
python-jsonrpc-server==0.4.0
python-language-server==0.36.2
pytoolconfig==1.2.5
pytz==2023.3
PyYAML==6.0
ratelimit-stubs==2.2.1
redis==5.0.1
regex==2019.11.1
requests==2.28.2
rich==13.3.3
rope==1.7.0
ruamel.yaml==0.17.21
ruamel.yaml.clib==0.2.7
six==1.16.0
smmap==5.0.0
snowballstemmer==2.2.0
sqlparse==0.4.3
stevedore==5.0.0
toml==0.10.2
tomli==2.0.1
tomlkit==0.11.7
types-awscrt==0.16.13.post1
types-beautifulsoup4==4.10.20
types-docutils==0.19.1.7
types-pytz==2023.3.0.0
types-PyYAML==5.4.12
types-requests==2.25.12
types-setuptools==67.3.0.2
types-six==0.1.9
typing_extensions==4.5.0
tzdata==2023.3
ujson==5.7.0
uritemplate==4.1.1
urllib3==1.26.15
vine==5.1.0
virtualenv==20.21.0
wcwidth==0.2.12
wrapt==1.15.0
yapf==0.32.0
yarl==1.8.2
```
</p>
</details>
### Other Dependencies
<!--
Please provide system dependencies, configuration files
and other dependency information if applicable
-->
<details>
<p>
N/A
</p>
</details>
## Minimally Reproducible Test Case
<!--
Please provide a reproducible test case.
Refer to the Reporting Bugs section in our contribution guide.
We prefer submitting test cases in the form of a PR to our integration test suite.
If you can provide one, please mention the PR number below.
If not, please attach the most minimal code example required to reproduce the issue below.
If the test case is too large, please include a link to a gist or a repository below.
-->
<details>
<p>
```python
from celery import Celery, chain, chord, signature, group
app = Celery(
"celery_bug", backend="redis://0.0.0.0:6479/", broker="redis://0.0.0.0:6479/"
)
app.conf.task_allow_error_cb_on_chord_header = True
@app.task(bind=True)
def orig_task(self, arg):
chord_headers = group([chord_header.s(arg=f"arg{i}") for i in range(5)])
replacement_chord = chord(chord_headers, chord_body.s())
return self.replace(replacement_chord)
@app.task
def chord_header(arg):
return f"header: {arg}"
@app.task
def chord_body(arg):
return f"body: {arg}]"
@app.task
def handle_error(*args, **kwargs):
print(f"handle error called with args {args} kwargs {kwargs}")
def main():
print(f"hello world")
res = orig_task.apply_async(args=["spam"], link_error=handle_error.s())
print(f"RESULT: {res.get()}")
if __name__ == "__main__":
main()
```
</p>
</details>
# Expected Behavior
<!-- Describe in detail what you expect to happen -->
I would expect `orig_task` to be replaced with `replacement_chord` and to produce the expected output.
This is the expected output that I do see if `task_allow_error_cb_on_chord_header` is `False`, or if the `orig_task` is called without the `link_error=` callback:
```
$ poetry run python celery_bug.py
hello world
RESULT: body: ['header: arg0', 'header: arg1', 'header: arg2', 'header: arg3', 'header: arg4']]
```
# Actual Behavior
<!--
Describe in detail what actually happened.
Please include a backtrace and surround it with triple backticks (```).
In addition, include the Celery daemon logs, the broker logs,
the result backend logs and system logs below if they will help us debug
the issue.
-->
Instead, I get this `AttributeError`:
```
$ poetry run python celery_bug.py
hello world
Traceback (most recent call last):
File "celery_bug.py", line 39, in <module>
main()
File "celery_bug.py", line 35, in main
print(f"RESULT: {res.get()}")
File "/Users/robertgalloway/Library/Caches/pypoetry/virtualenvs/rpg-play-aJQQ1jqR-py3.8/lib/python3.8/site-packages/celery/result.py", line 251, in get
return self.backend.wait_for_pending(
File "/Users/robertgalloway/Library/Caches/pypoetry/virtualenvs/rpg-play-aJQQ1jqR-py3.8/lib/python3.8/site-packages/celery/backends/asynchronous.py", line 223, in wait_for_pending
return result.maybe_throw(callback=callback, propagate=propagate)
File "/Users/robertgalloway/Library/Caches/pypoetry/virtualenvs/rpg-play-aJQQ1jqR-py3.8/lib/python3.8/site-packages/celery/result.py", line 365, in maybe_throw
self.throw(value, self._to_remote_traceback(tb))
File "/Users/robertgalloway/Library/Caches/pypoetry/virtualenvs/rpg-play-aJQQ1jqR-py3.8/lib/python3.8/site-packages/celery/result.py", line 358, in throw
self.on_ready.throw(*args, **kwargs)
File "/Users/robertgalloway/Library/Caches/pypoetry/virtualenvs/rpg-play-aJQQ1jqR-py3.8/lib/python3.8/site-packages/vine/promises.py", line 235, in throw
reraise(type(exc), exc, tb)
File "/Users/robertgalloway/Library/Caches/pypoetry/virtualenvs/rpg-play-aJQQ1jqR-py3.8/lib/python3.8/site-packages/vine/utils.py", line 27, in reraise
raise value
AttributeError: 'dict' object has no attribute 'clone'
```
I suspect the error is related to this code in the `_chord.link_error()` method:
```python
if self.app.conf.task_allow_error_cb_on_chord_header:
for task in maybe_list(self.tasks) or []:
task.link_error(errback.clone(immutable=True))
```
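A plausible fix — and what the patch at the top of this record does — is to coerce the errback into a `Signature` before cloning; sketch:

```python
from celery.canvas import maybe_signature

def link_error(self, errback):
    # A dict here is a serialized signature (as produced when the errback
    # travels with a .replace()-ed task); re-wrap it so .clone() exists.
    errback = maybe_signature(errback)
    if self.app.conf.task_allow_error_cb_on_chord_header:
        for task in maybe_list(self.tasks) or []:
            task.link_error(errback.clone(immutable=True))
    ...
```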
Previous related fix: https://github.com/celery/celery/pull/8463 | 2023-12-07T12:10:06 |
celery/celery | 8,806 | celery__celery-8806 | [
"2907"
] | 8f389997887232500d4aa1a2b0ae0c7320c4c84a | diff --git a/celery/beat.py b/celery/beat.py
--- a/celery/beat.py
+++ b/celery/beat.py
@@ -568,11 +568,11 @@ def _create_schedule(self):
for _ in (1, 2):
try:
self._store['entries']
- except KeyError:
+ except (KeyError, UnicodeDecodeError, TypeError):
# new schedule db
try:
self._store['entries'] = {}
- except KeyError as exc:
+ except (KeyError, UnicodeDecodeError, TypeError) as exc:
self._store = self._destroy_open_corrupted_schedule(exc)
continue
else:
| diff --git a/t/unit/app/test_beat.py b/t/unit/app/test_beat.py
--- a/t/unit/app/test_beat.py
+++ b/t/unit/app/test_beat.py
@@ -2,7 +2,7 @@
import sys
from datetime import datetime, timedelta, timezone
from pickle import dumps, loads
-from unittest.mock import Mock, call, patch
+from unittest.mock import MagicMock, Mock, call, patch
import pytest
@@ -669,6 +669,38 @@ def test_remove_db(self, remove):
with pytest.raises(OSError):
s._remove_db()
+ def test_create_schedule_corrupted(self):
+ """
+ Test that any decoding errors that might happen when opening beat-schedule.db are caught
+ """
+ s = create_persistent_scheduler()[0](app=self.app,
+ schedule_filename='schedule')
+ s._store = MagicMock()
+ s._destroy_open_corrupted_schedule = Mock()
+ s._destroy_open_corrupted_schedule.return_value = MagicMock()
+
+ # self._store['entries'] will throw a KeyError
+ s._store.__getitem__.side_effect = KeyError()
+ # then, when _create_schedule tries to reset _store['entries'], throw another error
+ expected_error = UnicodeDecodeError("ascii", b"ordinal not in range(128)", 0, 0, "")
+ s._store.__setitem__.side_effect = expected_error
+
+ s._create_schedule()
+ s._destroy_open_corrupted_schedule.assert_called_with(expected_error)
+
+ def test_create_schedule_missing_entries(self):
+ """
+ Test that if _create_schedule can't find the key "entries" in _store it will recreate it
+ """
+ s = create_persistent_scheduler()[0](app=self.app, schedule_filename="schedule")
+ s._store = MagicMock()
+
+ # self._store['entries'] will throw a KeyError
+ s._store.__getitem__.side_effect = TypeError()
+
+ s._create_schedule()
+ s._store.__setitem__.assert_called_with("entries", {})
+
def test_setup_schedule(self):
s = create_persistent_scheduler()[0](app=self.app,
schedule_filename='schedule')
| Celery beat UnicodeDecodeError (Python 3.4) issue
I am using Python 3.4 with Celery 3.1.19
Running without beat it works properly:
```
celery worker --app worker --config=celeryconfig --loglevel=info
```
But with celery beat:
```
celery worker --app worker -B --config=celeryconfig --loglevel=info
```
I got this exception:
```
Traceback (most recent call last):
File "/env/lib/python3.4/site-packages/kombu/utils/__init__.py", line 320, in __get__
return obj.__dict__[self.__name__]
KeyError: 'scheduler'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/shelve.py", line 111, in __getitem__
value = self.cache[key]
KeyError: 'entries'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/env/lib/python3.4/site-packages/billiard/process.py", line 292, in _bootstrap
self.run()
File "/env/lib/python3.4/site-packages/celery/beat.py", line 530, in run
self.service.start(embedded_process=True)
File "/env/lib/python3.4/site-packages/celery/beat.py", line 454, in start
humanize_seconds(self.scheduler.max_interval))
File "/env/lib/python3.4/site-packages/kombu/utils/__init__.py", line 322, in __get__
value = obj.__dict__[self.__name__] = self.__get(obj)
File "/env/lib/python3.4/site-packages/celery/beat.py", line 494, in scheduler
return self.get_scheduler()
File "/env/lib/python3.4/site-packages/celery/beat.py", line 489, in get_scheduler
lazy=lazy)
File "/env/lib/python3.4/site-packages/celery/utils/imports.py", line 53, in instantiate
return symbol_by_name(name)(*args, **kwargs)
File "/env/lib/python3.4/site-packages/celery/beat.py", line 358, in __init__
Scheduler.__init__(self, *args, **kwargs)
File "/env/lib/python3.4/site-packages/celery/beat.py", line 185, in __init__
self.setup_schedule()
File "/env/lib/python3.4/site-packages/celery/beat.py", line 377, in setup_schedule
self._store['entries']
File "/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/shelve.py", line 114, in __getitem__
value = Unpickler(f).load()
UnicodeDecodeError: 'ascii' codec can't decode byte 0xdf in position 1: ordinal not in range(128)
```
Any ideas? Thanks in advance
It works for me here; maybe you could try removing the celerybeat-schedule file?
It worked, thanks! I removed the `celerybeat-schedule.db` file in the current directory.
Me too, thanks @ask! Just so you know, I've migrated from `Python 2.7.12` to `Python 3.5.3` and it started happening.
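That migration would explain it: a schedule file written under Python 2 holds pickled bytes that Python 3's unpickler can't decode with its default text handling, which matches the `shelve` → `Unpickler` → `UnicodeDecodeError` chain in the traceback. Roughly (sketch):

```python
import shelve

# Opening a celerybeat-schedule file produced under Python 2:
store = shelve.open("celerybeat-schedule")
store["entries"]  # -> Unpickler(f).load() -> UnicodeDecodeError
```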
Worked for me also; what I did was rename the file to "celerybeat-schedule.db.backup".
In the project directory, delete (rm) celerybeat-schedule.db and celerybeat-schedule like this:
```
cd project-directory
rm celerybeat-schedule.db celerybeat-schedule
``` | 2024-01-19T00:56:19 |
celery/celery | 8,982 | celery__celery-8982 | [
"8981"
] | e9ebd657b0327dde2170706d8d6b81f01e7bdad0 | diff --git a/celery/security/serialization.py b/celery/security/serialization.py
--- a/celery/security/serialization.py
+++ b/celery/security/serialization.py
@@ -29,7 +29,8 @@ def serialize(self, data):
assert self._cert is not None
with reraise_errors('Unable to serialize: {0!r}', (Exception,)):
content_type, content_encoding, body = dumps(
- bytes_to_str(data), serializer=self._serializer)
+ data, serializer=self._serializer)
+
# What we sign is the serialized body, not the body itself.
# this way the receiver doesn't have to decode the contents
# to verify the signature (and thus avoiding potential flaws
@@ -48,7 +49,7 @@ def deserialize(self, data):
payload['signer'],
payload['body'])
self._cert_store[signer].verify(body, signature, self._digest)
- return loads(bytes_to_str(body), payload['content_type'],
+ return loads(body, payload['content_type'],
payload['content_encoding'], force=True)
def _pack(self, body, content_type, content_encoding, signer, signature,
@@ -84,7 +85,7 @@ def _unpack(self, payload, sep=str_to_bytes('\x00\x01')):
'signature': signature,
'content_type': bytes_to_str(v[0]),
'content_encoding': bytes_to_str(v[1]),
- 'body': bytes_to_str(v[2]),
+ 'body': v[2],
}
| diff --git a/t/unit/security/test_serialization.py b/t/unit/security/test_serialization.py
--- a/t/unit/security/test_serialization.py
+++ b/t/unit/security/test_serialization.py
@@ -16,15 +16,19 @@
class test_secureserializer(SecurityCase):
- def _get_s(self, key, cert, certs):
+ def _get_s(self, key, cert, certs, serializer="json"):
store = CertStore()
for c in certs:
store.add_cert(Certificate(c))
- return SecureSerializer(PrivateKey(key), Certificate(cert), store)
+ return SecureSerializer(
+ PrivateKey(key), Certificate(cert), store, serializer=serializer
+ )
- def test_serialize(self):
- s = self._get_s(KEY1, CERT1, [CERT1])
- assert s.deserialize(s.serialize('foo')) == 'foo'
+ @pytest.mark.parametrize("data", [1, "foo", b"foo", {"foo": 1}])
+ @pytest.mark.parametrize("serializer", ["json", "pickle"])
+ def test_serialize(self, data, serializer):
+ s = self._get_s(KEY1, CERT1, [CERT1], serializer=serializer)
+ assert s.deserialize(s.serialize(data)) == data
def test_deserialize(self):
s = self._get_s(KEY1, CERT1, [CERT1])
| SecureSerializer fails on certain types and binary serializers
<!--
Please fill this template entirely and do not erase parts of it.
We reserve the right to close without a response
bug reports which are incomplete.
-->
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
- [X] I have verified that the issue exists against the `main` branch of Celery.
- [ ] This has already been asked to the [discussions forum](https://github.com/celery/celery/discussions) first.
- [X] I have read the relevant section in the
[contribution guide](https://docs.celeryq.dev/en/main/contributing.html#other-bugs)
on reporting bugs.
- [X] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [X] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [X] I have checked the [commit log](https://github.com/celery/celery/commits/main)
to find out if the bug was already fixed in the main branch.
- [X] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
- [X] I have tried to reproduce the issue with [pytest-celery](https://docs.celeryq.dev/projects/pytest-celery/en/latest/userguide/celery-bug-report.html) and added the reproduction script below.
## Mandatory Debugging Information
- [ ] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
- [X] I have verified that the issue exists against the `main` branch of Celery.
- [ ] I have included the contents of ``pip freeze`` in the issue.
- [ ] I have included all the versions of all the external dependencies required
to reproduce this bug.
## Optional Debugging Information
<!--
Try some of the below if you think they are relevant.
It will help us figure out the scope of the bug and how many users it affects.
-->
- [ ] I have tried reproducing the issue on more than one Python version
and/or implementation.
- [ ] I have tried reproducing the issue on more than one message broker and/or
result backend.
- [ ] I have tried reproducing the issue on more than one version of the message
broker and/or result backend.
- [ ] I have tried reproducing the issue on more than one operating system.
- [ ] I have tried reproducing the issue on more than one workers pool.
- [ ] I have tried reproducing the issue with autoscaling, retries,
ETA/Countdown & rate limits disabled.
- [ ] I have tried reproducing the issue after downgrading
and/or upgrading Celery and its dependencies.
## Related Issues and Possible Duplicates
<!--
Please make sure to search and mention any related issues
or possible duplicates to this issue as requested by the checklist above.
This may or may not include issues in other repositories that the Celery project
maintains or other repositories that are dependencies of Celery.
If you don't know how to mention issues, please refer to Github's documentation
on the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests
-->
#### Related Issues
- None
#### Possible Duplicates
- None
## Environment & Settings
<!-- Include the contents of celery --version below -->
**Celery version**:
<!-- Include the output of celery -A proj report below -->
<details>
<summary><b><code>celery report</code> Output:</b></summary>
<p>
```
```
</p>
</details>
# Steps to Reproduce
## Required Dependencies
<!-- Please fill the required dependencies to reproduce this issue -->
- **Minimal Python Version**: N/A or Unknown
- **Minimal Celery Version**: N/A or Unknown
- **Minimal Kombu Version**: N/A or Unknown
- **Minimal Broker Version**: N/A or Unknown
- **Minimal Result Backend Version**: N/A or Unknown
- **Minimal OS and/or Kernel Version**: N/A or Unknown
- **Minimal Broker Client Version**: N/A or Unknown
- **Minimal Result Backend Client Version**: N/A or Unknown
### Python Packages
<!-- Please fill the contents of pip freeze below -->
<details>
<summary><b><code>pip freeze</code> Output:</b></summary>
<p>
```
```
</p>
</details>
### Other Dependencies
<!--
Please provide system dependencies, configuration files
and other dependency information if applicable
-->
<details>
<p>
N/A
</p>
</details>
## Minimally Reproducible Test Case
<!--
Please provide a reproducible test case.
Refer to the Reporting Bugs section in our contribution guide.
We prefer submitting test cases in the form of a PR to our integration test suite.
If you can provide one, please mention the PR number below.
If not, please attach the most minimal code example required to reproduce the issue below.
If the test case is too large, please include a link to a gist or a repository below.
Alternatively, the pytest-celery plugin can be used to create standalone reproduction scripts
that can be added to this report. See the pytest-celery documentation for more information at
pytest-celery.readthedocs.io
-->
<details>
<p>
```python
from typing import Any

from celery import Celery

app = Celery(__name__)  # assumed: the report omits the app/broker setup

app.conf.update(
    security_key='/private/keys/celery/private.key',
    security_certificate='/private/keys/celery/public.pem',
    security_cert_store='/private/keys/celery/*.pem')
app.setup_security()

@app.task
def serializer_test_task(arg: Any) -> Any:
    return arg

def test_serialize(data):
    res = serializer_test_task.delay(data)
    deserialized_value = res.get()
    assert deserialized_value == data

test_serialize(data=b"foo")  # fails to validate signature

#############

app.setup_security(serializer="pickle")
test_serialize(data="foo")  # fails to serialize any value using the pickle serializer
```
</p>
</details>
# Expected Behavior
# Actual Behavior
There are several bugs in the SecureSerializer feature:
- When using the 'json' serializer (the default), signature validation always fails if the passed value is of type 'bytes'.
- When using a binary serializer (rather than a string-based serializer like 'json'), for example 'pickle', the serializer is completely broken.
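For illustration, a minimal sketch (plain Python of my own, not Celery internals) of why coercing the signed body through a UTF-8 `str` round-trip cannot work for binary payloads:

```python
import pickle

# pickle output is arbitrary binary data, generally not valid UTF-8
body = pickle.dumps({"foo": 1})
try:
    body.decode("utf-8")  # roughly what the bytes_to_str(...) coercion does
except UnicodeDecodeError as exc:
    print("binary payload cannot survive a str round-trip:", exc)
```

The patch above addresses this by keeping the body as raw bytes end-to-end instead of coercing it with `bytes_to_str`.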
| 2024-04-25T21:29:35 |
|
celery/celery | 8,984 | celery__celery-8984 | [
"8976"
] | 04af085f6d21d85cecaeafc406f8c08cb12502e7 | diff --git a/celery/contrib/django/task.py b/celery/contrib/django/task.py
--- a/celery/contrib/django/task.py
+++ b/celery/contrib/django/task.py
@@ -12,10 +12,10 @@ class DjangoTask(Task):
Provide a nicer API to trigger tasks at the end of the DB transaction.
"""
- def delay_on_commit(self, *args, **kwargs):
+ def delay_on_commit(self, *args, **kwargs) -> None:
"""Call :meth:`~celery.app.task.Task.delay` with Django's ``on_commit()``."""
- return transaction.on_commit(functools.partial(self.delay, *args, **kwargs))
+ transaction.on_commit(functools.partial(self.delay, *args, **kwargs))
- def apply_async_on_commit(self, *args, **kwargs):
+ def apply_async_on_commit(self, *args, **kwargs) -> None:
"""Call :meth:`~celery.app.task.Task.apply_async` with Django's ``on_commit()``."""
- return transaction.on_commit(functools.partial(self.apply_async, *args, **kwargs))
+ transaction.on_commit(functools.partial(self.apply_async, *args, **kwargs))
| diff --git a/t/unit/contrib/django/test_task.py b/t/unit/contrib/django/test_task.py
--- a/t/unit/contrib/django/test_task.py
+++ b/t/unit/contrib/django/test_task.py
@@ -25,8 +25,8 @@ def on_commit(self):
def test_delay_on_commit(self, task_instance, on_commit):
result = task_instance.delay_on_commit()
- assert result is not None
+ assert result is None
def test_apply_async_on_commit(self, task_instance, on_commit):
result = task_instance.apply_async_on_commit()
- assert result is not None
+ assert result is None
| New Django task with transaction atomic returns `None` instead of the task UUID
<!--
Please fill this template entirely and do not erase parts of it.
We reserve the right to close without a response
bug reports which are incomplete.
-->
# Checklist
<!--
To check an item on the list replace [ ] with [x].
-->
- [x] I have verified that the issue exists against the `main` branch of Celery.
- [ ] This has already been asked to the [discussions forum](https://github.com/celery/celery/discussions) first.
- [x] I have read the relevant section in the
[contribution guide](https://docs.celeryq.dev/en/main/contributing.html#other-bugs)
on reporting bugs.
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/main)
to find out if the bug was already fixed in the main branch.
- [ ] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
- [ ] I have tried to reproduce the issue with [pytest-celery](https://docs.celeryq.dev/projects/pytest-celery/en/latest/userguide/celery-bug-report.html) and added the reproduction script below.
## Mandatory Debugging Information
- [ ] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
- [x] I have verified that the issue exists against the `main` branch of Celery.
- [ ] I have included the contents of ``pip freeze`` in the issue.
- [ ] I have included all the versions of all the external dependencies required
to reproduce this bug.
## Optional Debugging Information
<!--
Try some of the below if you think they are relevant.
It will help us figure out the scope of the bug and how many users it affects.
-->
- [ ] I have tried reproducing the issue on more than one Python version
and/or implementation.
- [ ] I have tried reproducing the issue on more than one message broker and/or
result backend.
- [ ] I have tried reproducing the issue on more than one version of the message
broker and/or result backend.
- [ ] I have tried reproducing the issue on more than one operating system.
- [ ] I have tried reproducing the issue on more than one workers pool.
- [ ] I have tried reproducing the issue with autoscaling, retries,
ETA/Countdown & rate limits disabled.
- [ ] I have tried reproducing the issue after downgrading
and/or upgrading Celery and its dependencies.
## Related Issues and Possible Duplicates
<!--
Please make sure to search and mention any related issues
or possible duplicates to this issue as requested by the checklist above.
This may or may not include issues in other repositories that the Celery project
maintains or other repositories that are dependencies of Celery.
If you don't know how to mention issues, please refer to Github's documentation
on the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests
-->
#### Related Issues
- None
#### Possible Duplicates
- None
## Environment & Settings
<!-- Include the contents of celery --version below -->
**Celery version**:
5.4.0
<!-- Include the output of celery -A proj report below -->
<details>
<summary><b><code>celery report</code> Output:</b></summary>
<p>
```
```
</p>
</details>
# Steps to Reproduce
## Required Dependencies
<!-- Please fill the required dependencies to reproduce this issue -->
- **Minimal Python Version**: N/A or Unknown
- **Minimal Celery Version**:5.4.0
- **Minimal Kombu Version**: N/A or Unknown
- **Minimal Broker Version**: N/A or Unknown
- **Minimal Result Backend Version**: N/A or Unknown
- **Minimal OS and/or Kernel Version**: N/A or Unknown
- **Minimal Broker Client Version**: N/A or Unknown
- **Minimal Result Backend Client Version**: N/A or Unknown
### Python Packages
<!-- Please fill the contents of pip freeze below -->
<details>
<summary><b><code>pip freeze</code> Output:</b></summary>
<p>
```
```
</p>
</details>
### Other Dependencies
<!--
Please provide system dependencies, configuration files
and other dependency information if applicable
-->
<details>
<p>
N/A
</p>
</details>
## Minimally Reproducible Test Case
<!--
Please provide a reproducible test case.
Refer to the Reporting Bugs section in our contribution guide.
We prefer submitting test cases in the form of a PR to our integration test suite.
If you can provide one, please mention the PR number below.
If not, please attach the most minimal code example required to reproduce the issue below.
If the test case is too large, please include a link to a gist or a repository below.
Alternatively, the pytest-celery plugin can be used to create standalone reproduction scripts
that can be added to this report. See the pytest-celery documentation for more information at
pytest-celery.readthedocs.io
-->
<details>
<p>
```python
```
</p>
</details>
# Expected Behavior
<!-- Describe in detail what you expect to happen -->
Return the correct task uuid like before the release 5.4.0
# Actual Behavior
<!--
Describe in detail what actually happened.
Please include a backtrace and surround it with triple backticks (```).
In addition, include the Celery daemon logs, the broker logs,
the result backend logs and system logs below if they will help us debug
the issue.
-->
## Issue ⚠️
`AttributeError: 'NoneType' object has no attribute 'id'
```python
@app.task
def example_task(id):
    try:
        obj = Model.objects.get(id=id)
        obj.number += 1
        obj.save(update_fields=["number"])
    except ObjectDoesNotExist as e:
        return str(e)

task = example_task.delay_on_commit(example_model_instance.id)
example_model_instance.task_uuid = task.id  # AttributeError: task is None
```
Literally a basic minimal task 😄, but `delay_on_commit` does not return the task UUID as an `AsyncResult`, and in my case Django (rightly) raises an `AttributeError` -> maybe a task called with `delay_on_commit` returns `None`?
docs:
```python
class DjangoTask(Task):
"""
Extend the base :class:`~celery.app.task.Task` for Django.
Provide a nicer API to trigger tasks at the end of the DB transaction.
"""
def delay_on_commit(self, *args, **kwargs):
"""Call :meth:`~celery.app.task.Task.delay` with Django's ``on_commit()``."""
return transaction.on_commit(functools.partial(self.delay, *args, **kwargs))
def apply_async_on_commit(self, *args, **kwargs):
"""Call :meth:`~celery.app.task.Task.apply_async` with Django's ``on_commit()``."""
return transaction.on_commit(functools.partial(self.apply_async, *args, **kwargs))
```
| @browniebroke
> [x] I have tried to reproduce the issue with pytest-celery and added the reproduction script below.
Where is it?
> > [x] I have tried to reproduce the issue with pytest-celery and added the reproduction script below.
>
> Where is it?
oh no sorry... my fault
I think that Django's `transaction.on_commit` is expected to return `None`; this is the source of Django's `transaction.py`:
[transaction.py](https://github.com/django/django/blob/main/django/db/transaction.py)
What if we used the decorator instead of the `on_commit` function?
`delay` and `apply_async` like before, but:
```python
@transaction.atomic
def delay(.....)
@transaction.atomic
def apply_async(.....)
```
Yes, that's expected behaviour: `transaction.on_commit(...)` delays the execution of the wrapped function to a later stage, when the Django DB transaction finishes, so you won't get a task UUID until then. If you need access to the task UUID, you should use `delay` or `apply_async`, NOT the `..._on_commit` variants. While I didn't foresee this issue, I'm glad we picked entirely new method names, which means there is an escape hatch for you. We should document this caveat better, though.
Your solution of using the `@transaction.atomic` decorator does not do the same thing. It will open either a new transaction, or [a savepoint (if you're already in a transaction)](https://docs.djangoproject.com/en/5.0/topics/db/transactions/#savepoints), which may or may not have the same effect. Consider this case, for example:
```python
def create_user(request):
with transaction.atomic():
user = User.objects.create(first_name="bob", last_name="doe")
send_welcome_email.delay_on_commit(user.pk)
        # later in the transaction, do something that causes the transaction to be rolled back
# e.g. set the email to a value that already exists, raising IntegrityError
user.email = "[email protected]"
user.save() # <- raises IntegrityError, and rollback the whole transaction
```
When the whole transaction is rolled back, the whole operation has failed and the user is NOT created in the DB, so we shouldn't send the welcome email. Since the transaction wasn't committed, the task never triggered (and no UUID was generated).
Now if we replace `delay_on_commit` with a call to `delay` under the `@transaction.atomic` decorator, the task will be queued in a nested transaction, and we will try to send the welcome email (although that will probably crash, since the user wasn't actually persisted in the DB).
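If you need an id for bookkeeping while still deferring publication to the commit, one option is to generate the id up front and pass it along. This is only a sketch reusing the names from the example above; it assumes the standard `task_id` parameter of `apply_async`, and `welcome_task_uuid` is a hypothetical field:

```python
import uuid

def create_user(request):
    with transaction.atomic():
        user = User.objects.create(first_name="bob", last_name="doe")
        # Pre-generate the task id so it can be stored immediately; the task
        # itself is still only published if/when the transaction commits.
        task_id = str(uuid.uuid4())
        send_welcome_email.apply_async_on_commit(args=(user.pk,), task_id=task_id)
        user.welcome_task_uuid = task_id  # hypothetical field, for illustration
        user.save(update_fields=["welcome_task_uuid"])
        # Caveat: on rollback no task is ever published, so the stored id
        # will never correspond to an executed task.
```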
I confirm that `delay_on_commit` returns None:
```python
In [9]: @app.task
...: def sum_number(n1, n2):
...: print(n1+n2)
...:
In [10]: task = sum_number.delay(1,2)
In [11]: task
Out[11]: <AsyncResult: 3a4138ec-35d7-4d46-808f-3f73a1dfee53>
In [12]: task = sum_number.delay_on_commit(1,2)
In [13]: task
In [14]: assert task is None
In [15]: task is None
Out[15]: True
```
> Yes, that's expected behaviour: `transaction.on_commit(...)` delays the execution of the wrapped function to a later stage [...] If you need access to the task UUID, you should use `delay` or `apply_async`, NOT the `..._on_commit` variants. [...]
Ohh right... so if I need the task UUID I must use the `delay` or `apply_async` task methods, correct?
> so if I need the task UUID I must use the `delay` or `apply_async` task methods, correct?
Correct, yes
Could we develop a way to wait for the end of the transaction and return the task UUID?
Or is that a bad idea?
That kind of defeats the whole purpose of the `on_commit` utility, which is a mechanism to delay execution and move on without waiting. Here is the source:
https://github.com/django/django/blob/ec8552417df51df8482df61b8ad78a7002634011/django/db/transaction.py#L129-L134
Which essentially calls this:
https://github.com/django/django/blob/ec8552417df51df8482df61b8ad78a7002634011/django/db/backends/base/base.py#L727-L750
The function is NOT executed at this point; it's basically added to a list of callbacks (line 732):
```python
self.run_on_commit.append((set(self.savepoint_ids), func, robust))
```
This list is read and executed in `run_and_clear_commit_hooks` (defined below):
https://github.com/django/django/blob/ec8552417df51df8482df61b8ad78a7002634011/django/db/backends/base/base.py#L752C9-L769
That may happen in a completely different context than where you trigger the task.
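To make the mechanics concrete, here is a tiny standalone sketch of the same deferred-callback pattern (my own illustration, not Django's actual code):

```python
import functools

class FakeConnection:
    def __init__(self):
        self.run_on_commit = []  # callbacks queued until commit

    def on_commit(self, func):
        # Registration only: nothing runs yet and nothing useful is returned.
        self.run_on_commit.append(func)

    def commit(self):
        # Only now do the queued callbacks execute, possibly far away
        # (in code terms) from where they were registered.
        while self.run_on_commit:
            self.run_on_commit.pop(0)()

conn = FakeConnection()
conn.on_commit(functools.partial(print, "task published"))
conn.commit()  # prints: task published
```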
Let me know if anything is unclear; it would be good to incorporate explanations into the Celery docs to lift any doubts you may have...
> That kind of defeats the whole purpose of the `on_commit` utility, which is a mechanism to delay execution and move on without waiting. [...] This list is read and executed in `run_and_clear_commit_hooks` [...] That may happen in a completely different context than where you trigger the task.
So... it's a way to avoid waiting for the transaction to finish, correct?
In this `while` loop, it iterates and calls `func` for every function registered to run on commit:
https://github.com/django/django/blob/ec8552417df51df8482df61b8ad78a7002634011/django/db/backends/base/base.py#L756-L767 | 2024-04-27T11:49:14 |