Dataset schema:

- repo: string (856 distinct values)
- pull_number: int64 (3 to 127k)
- instance_id: string (length 12 to 58)
- issue_numbers: sequence of strings (length 1 to 5)
- base_commit: string (length 40)
- patch: string (length 67 to 1.54M)
- test_patch: string (length 0 to 107M)
- problem_statement: string (length 3 to 307k)
- hints_text: string (length 0 to 908k)
- created_at: timestamp[s]
microsoft/MLOS
663
microsoft__MLOS-663
[ "665" ]
9175d1885634d4a711a00918b617bbe351757e87
diff --git a/mlos_bench/mlos_bench/optimizers/convert_configspace.py b/mlos_bench/mlos_bench/optimizers/convert_configspace.py --- a/mlos_bench/mlos_bench/optimizers/convert_configspace.py +++ b/mlos_bench/mlos_bench/optimizers/convert_configspace.py @@ -8,15 +8,18 @@ import logging -from typing import Dict, List, Optional, Tuple +from typing import Dict, List, Optional, Tuple, Union from ConfigSpace import ( + Beta, CategoricalHyperparameter, Configuration, ConfigurationSpace, EqualsCondition, - UniformFloatHyperparameter, - UniformIntegerHyperparameter, + Float, + Integer, + Normal, + Uniform, ) from mlos_bench.tunables.tunable import Tunable, TunableValue from mlos_bench.tunables.tunable_groups import TunableGroups @@ -76,24 +79,51 @@ def _tunable_to_configspace( meta=meta) }) + distribution: Union[Uniform, Normal, Beta, None] = None + if tunable.distribution == "uniform": + distribution = Uniform() + elif tunable.distribution == "normal": + distribution = Normal( + mu=tunable.distribution_params["mu"], + sigma=tunable.distribution_params["sigma"] + ) + elif tunable.distribution == "beta": + distribution = Beta( + alpha=tunable.distribution_params["alpha"], + beta=tunable.distribution_params["beta"] + ) + elif tunable.distribution is not None: + raise TypeError(f"Invalid Distribution Type: {tunable.distribution}") + if tunable.type == "int": - hp_type = UniformIntegerHyperparameter + range_hp = Integer( + name=tunable.name, + bounds=(int(tunable.range[0]), int(tunable.range[1])), + log=bool(tunable.is_log), + q=int(tunable.quantization) if tunable.quantization is not None else None, + distribution=distribution, + default=(int(tunable.default) + if tunable.in_range(tunable.default) and tunable.default is not None + else None), + meta=meta + ) elif tunable.type == "float": - hp_type = UniformFloatHyperparameter + range_hp = Float( + name=tunable.name, + bounds=tunable.range, + log=bool(tunable.is_log), + q=tunable.quantization, # type: ignore[arg-type] + distribution=distribution, # type: ignore[arg-type] + default=(float(tunable.default) + if tunable.in_range(tunable.default) and tunable.default is not None + else None), + meta=meta + ) else: - raise TypeError(f"Undefined Parameter Type: {tunable.type}") + raise TypeError(f"Invalid Parameter Type: {tunable.type}") if not tunable.special: - return ConfigurationSpace({ - tunable.name: hp_type( - name=tunable.name, - lower=tunable.range[0], - upper=tunable.range[1], - log=tunable.is_log, - q=tunable.quantization, - default_value=tunable.default if tunable.in_range(tunable.default) else None, - meta=meta) - }) + return ConfigurationSpace({tunable.name: range_hp}) # Compute the probabilities of switching between regular and special values. special_weights: Optional[List[float]] = None @@ -106,15 +136,7 @@ def _tunable_to_configspace( # one for special values, and one to choose between the two. 
(special_name, type_name) = special_param_names(tunable.name) conf_space = ConfigurationSpace({ - tunable.name: hp_type( - name=tunable.name, - lower=tunable.range[0], - upper=tunable.range[1], - log=tunable.is_log, - q=tunable.quantization, - default_value=tunable.default if tunable.in_range(tunable.default) else None, - meta=meta - ), + tunable.name: range_hp, special_name: CategoricalHyperparameter( name=special_name, choices=tunable.special, diff --git a/mlos_bench/mlos_bench/tunables/tunable.py b/mlos_bench/mlos_bench/tunables/tunable.py --- a/mlos_bench/mlos_bench/tunables/tunable.py +++ b/mlos_bench/mlos_bench/tunables/tunable.py @@ -9,7 +9,7 @@ import collections import logging -from typing import Any, Dict, List, Optional, Sequence, Tuple, Type, TypedDict, Union +from typing import Any, Dict, List, Literal, Optional, Sequence, Tuple, Type, TypedDict, Union _LOG = logging.getLogger(__name__) @@ -17,6 +17,17 @@ """A tunable parameter value type alias.""" TunableValue = Union[int, float, Optional[str]] +DistributionName = Literal["uniform", "normal", "beta"] + + +class DistributionDict(TypedDict, total=False): + """ + A typed dict for tunable parameters' distributions. + """ + + type: DistributionName + params: Optional[Dict[str, float]] + class TunableDict(TypedDict, total=False): """ @@ -34,6 +45,7 @@ class TunableDict(TypedDict, total=False): range: Optional[Union[Sequence[int], Sequence[float]]] quantization: Optional[Union[int, float]] log: Optional[bool] + distribution: Optional[DistributionDict] special: Optional[Union[List[int], List[float]]] values_weights: Optional[List[float]] special_weights: Optional[List[float]] @@ -41,7 +53,7 @@ class TunableDict(TypedDict, total=False): meta: Dict[str, Any] -class Tunable: # pylint: disable=too-many-instance-attributes +class Tunable: # pylint: disable=too-many-instance-attributes,too-many-public-methods """ A tunable parameter definition and its current value. """ @@ -64,7 +76,7 @@ def __init__(self, name: str, config: TunableDict): config : dict Python dict that represents a Tunable (e.g., deserialized from JSON) """ - if '!' in name: # TODO: Use a regex here and in JSON schema + if not isinstance(name, str) or '!' in name: # TODO: Use a regex here and in JSON schema raise ValueError(f"Invalid name of the tunable: {name}") self._name = name self._type = config["type"] # required @@ -80,6 +92,12 @@ def __init__(self, name: str, config: TunableDict): self._range: Optional[Union[Tuple[int, int], Tuple[float, float]]] = None self._quantization: Optional[Union[int, float]] = config.get("quantization") self._log: Optional[bool] = config.get("log") + self._distribution: Optional[DistributionName] = None + self._distribution_params: Dict[str, float] = {} + distr = config.get("distribution") + if distr: + self._distribution = distr["type"] # required + self._distribution_params = distr.get("params") or {} config_range = config.get("range") if config_range is not None: assert len(config_range) == 2, f"Invalid range: {config_range}" @@ -95,60 +113,81 @@ def __init__(self, name: str, config: TunableDict): self.value = self._default def _sanity_check(self) -> None: - # pylint: disable=too-complex,too-many-branches """ Check if the status of the Tunable is valid, and throw ValueError if it is not. 
""" if self.is_categorical: - if not (self._values and isinstance(self._values, collections.abc.Iterable)): - raise ValueError(f"Must specify values for the categorical type tunable {self}") - if self._range is not None: - raise ValueError(f"Range must be None for the categorical type tunable {self}") - if len(set(self._values)) != len(self._values): - raise ValueError(f"Values must be unique for the categorical type tunable {self}") - if self._special: - raise ValueError(f"Categorical tunable cannot have special values: {self}") - if self._range_weight is not None: - raise ValueError(f"Categorical tunable cannot have range_weight: {self}") - if self._log is not None: - raise ValueError(f"Categorical tunable cannot have log parameter: {self}") - if self._quantization is not None: - raise ValueError(f"Categorical tunable cannot have quantization parameter: {self}") - if self._weights: - if len(self._weights) != len(self._values): - raise ValueError(f"Must specify weights for all values: {self}") - if any(w < 0 for w in self._weights): - raise ValueError(f"All weights must be non-negative: {self}") + self._sanity_check_categorical() elif self.is_numerical: - if self._values is not None: - raise ValueError(f"Values must be None for the numerical type tunable {self}") - if not self._range or len(self._range) != 2 or self._range[0] >= self._range[1]: - raise ValueError(f"Invalid range for tunable {self}: {self._range}") - if self._quantization is not None: - if self.dtype == int: - if not isinstance(self._quantization, int): - raise ValueError(f"Quantization of a int param should be an int: {self}") - if self._quantization <= 1: - raise ValueError(f"Number of quantization points is <= 1: {self}") - if self.dtype == float: - if not isinstance(self._quantization, (float, int)): - raise ValueError(f"Quantization of a float param should be a float or int: {self}") - if self._quantization <= 0: - raise ValueError(f"Number of quantization points is <= 0: {self}") - if self._weights: - if self._range_weight is None: - raise ValueError(f"Must specify weight for the range: {self}") - if len(self._weights) != len(self._special): - raise ValueError("Must specify weights for all special values {self}") - if any(w < 0 for w in self._weights + [self._range_weight]): - raise ValueError(f"All weights must be non-negative: {self}") - elif self._range_weight is not None: - raise ValueError(f"Must specify both weights and range_weight or none: {self}") + self._sanity_check_numerical() else: raise ValueError(f"Invalid parameter type for tunable {self}: {self._type}") if not self.is_valid(self.default): raise ValueError(f"Invalid default value for tunable {self}: {self.default}") + def _sanity_check_categorical(self) -> None: + """ + Check if the status of the categorical Tunable is valid, and throw ValueError if it is not. 
+ """ + # pylint: disable=too-complex + assert self.is_categorical + if not (self._values and isinstance(self._values, collections.abc.Iterable)): + raise ValueError(f"Must specify values for the categorical type tunable {self}") + if self._range is not None: + raise ValueError(f"Range must be None for the categorical type tunable {self}") + if len(set(self._values)) != len(self._values): + raise ValueError(f"Values must be unique for the categorical type tunable {self}") + if self._special: + raise ValueError(f"Categorical tunable cannot have special values: {self}") + if self._range_weight is not None: + raise ValueError(f"Categorical tunable cannot have range_weight: {self}") + if self._log is not None: + raise ValueError(f"Categorical tunable cannot have log parameter: {self}") + if self._quantization is not None: + raise ValueError(f"Categorical tunable cannot have quantization parameter: {self}") + if self._distribution is not None: + raise ValueError(f"Categorical parameters do not support `distribution`: {self}") + if self._weights: + if len(self._weights) != len(self._values): + raise ValueError(f"Must specify weights for all values: {self}") + if any(w < 0 for w in self._weights): + raise ValueError(f"All weights must be non-negative: {self}") + + def _sanity_check_numerical(self) -> None: + """ + Check if the status of the numerical Tunable is valid, and throw ValueError if it is not. + """ + # pylint: disable=too-complex,too-many-branches + assert self.is_numerical + if self._values is not None: + raise ValueError(f"Values must be None for the numerical type tunable {self}") + if not self._range or len(self._range) != 2 or self._range[0] >= self._range[1]: + raise ValueError(f"Invalid range for tunable {self}: {self._range}") + if self._quantization is not None: + if self.dtype == int: + if not isinstance(self._quantization, int): + raise ValueError(f"Quantization of a int param should be an int: {self}") + if self._quantization <= 1: + raise ValueError(f"Number of quantization points is <= 1: {self}") + if self.dtype == float: + if not isinstance(self._quantization, (float, int)): + raise ValueError(f"Quantization of a float param should be a float or int: {self}") + if self._quantization <= 0: + raise ValueError(f"Number of quantization points is <= 0: {self}") + if self._distribution is not None and self._distribution not in {"uniform", "normal", "beta"}: + raise ValueError(f"Invalid distribution: {self}") + if self._distribution_params and self._distribution is None: + raise ValueError(f"Must specify the distribution: {self}") + if self._weights: + if self._range_weight is None: + raise ValueError(f"Must specify weight for the range: {self}") + if len(self._weights) != len(self._special): + raise ValueError("Must specify weights for all special values {self}") + if any(w < 0 for w in self._weights + [self._range_weight]): + raise ValueError(f"All weights must be non-negative: {self}") + elif self._range_weight is not None: + raise ValueError(f"Must specify both weights and range_weight or none: {self}") + def __repr__(self) -> str: """ Produce a human-readable version of the Tunable (mostly for logging). @@ -533,6 +572,31 @@ def is_log(self) -> Optional[bool]: assert self.is_numerical return self._log + @property + def distribution(self) -> Optional[DistributionName]: + """ + Get the name of the distribution (uniform, normal, or beta) if specified. + + Returns + ------- + distribution : str + Name of the distribution (uniform, normal, or beta) or None. 
+ """ + return self._distribution + + @property + def distribution_params(self) -> Dict[str, float]: + """ + Get the parameters of the distribution, if specified. + + Returns + ------- + distribution_params : Dict[str, float] + Parameters of the distribution or None. + """ + assert self._distribution is not None + return self._distribution_params + @property def categories(self) -> List[Optional[str]]: """
diff --git a/mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/good/full/full-tunable-params-test.jsonc b/mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/good/full/full-tunable-params-test.jsonc --- a/mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/good/full/full-tunable-params-test.jsonc +++ b/mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/good/full/full-tunable-params-test.jsonc @@ -3,7 +3,7 @@ "cost": 1, "description": "Covariant group description", "params": { - "int": { + "beta-int": { "description": "Int", "type": "int", "default": 10, @@ -13,9 +13,35 @@ "special_weights": [0.1], "range_weight": 0.9, "quantization": 50, + "distribution": { + "type": "beta", + "params": { + "alpha": 0.1, + "beta": 0.1 + } + }, "log": true }, - "float": { + "normal-int": { + "description": "Int", + "type": "int", + "default": 10, + "range": [1, 500], + "meta": {"suffix": "MB"}, + "special": [-1], + "special_weights": [0.1], + "range_weight": 0.9, + "quantization": 50, + "distribution": { + "type": "normal", + "params": { + "mu": 0, + "sigma": 0.1 + } + }, + "log": true + }, + "uniform-float": { "description": "Float", "type": "float", "default": 10.1, @@ -23,6 +49,9 @@ "range": [1.1, 111.1], "special": [-1.1], "quantization": 10, + "distribution": { + "type": "uniform" + }, "log": false }, "cat": { diff --git a/mlos_bench/mlos_bench/tests/tunables/tunable_distributions_test.py b/mlos_bench/mlos_bench/tests/tunables/tunable_distributions_test.py new file mode 100644 --- /dev/null +++ b/mlos_bench/mlos_bench/tests/tunables/tunable_distributions_test.py @@ -0,0 +1,110 @@ +# +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# +""" +Unit tests for checking tunable parameters' distributions. +""" + +import json5 as json +import pytest + +from mlos_bench.tunables.tunable import Tunable + + +def test_categorical_distribution() -> None: + """ + Try to instantiate a categorical tunable with distribution specified. + """ + with pytest.raises(ValueError): + Tunable(name='test', config={ + "type": "categorical", + "values": ["foo", "bar", "baz"], + "distribution": { + "type": "uniform" + }, + "default": "foo" + }) + + [email protected]("tunable_type", ["int", "float"]) +def test_numerical_distribution_uniform(tunable_type: str) -> None: + """ + Create a numeric Tunable with explicit uniform distribution. + """ + tunable = Tunable(name="test", config={ + "type": tunable_type, + "range": [0, 10], + "distribution": { + "type": "uniform" + }, + "default": 0 + }) + assert tunable.is_numerical + assert tunable.distribution == "uniform" + assert not tunable.distribution_params + + [email protected]("tunable_type", ["int", "float"]) +def test_numerical_distribution_normal(tunable_type: str) -> None: + """ + Create a numeric Tunable with explicit Gaussian distribution specified. + """ + tunable = Tunable(name="test", config={ + "type": tunable_type, + "range": [0, 10], + "distribution": { + "type": "normal", + "params": { + "mu": 0, + "sigma": 1.0 + } + }, + "default": 0 + }) + assert tunable.distribution == "normal" + assert tunable.distribution_params == {"mu": 0, "sigma": 1.0} + + [email protected]("tunable_type", ["int", "float"]) +def test_numerical_distribution_beta(tunable_type: str) -> None: + """ + Create a numeric Tunable with explicit Beta distribution specified. 
+ """ + tunable = Tunable(name="test", config={ + "type": tunable_type, + "range": [0, 10], + "distribution": { + "type": "beta", + "params": { + "alpha": 2, + "beta": 5 + } + }, + "default": 0 + }) + assert tunable.distribution == "beta" + assert tunable.distribution_params == {"alpha": 2, "beta": 5} + + [email protected]("tunable_type", ["int", "float"]) +def test_numerical_distribution_unsupported(tunable_type: str) -> None: + """ + Create a numeric Tunable with unsupported distribution. + """ + json_config = f""" + {{ + "type": "{tunable_type}", + "range": [0, 10], + "distribution": {{ + "type": "poisson", + "params": {{ + "lambda": 1.0 + }} + }}, + "default": 0 + }} + """ + config = json.loads(json_config) + with pytest.raises(ValueError): + Tunable(name="test", config=config) diff --git a/mlos_bench/mlos_bench/tests/tunables/tunable_to_configspace_distr_test.py b/mlos_bench/mlos_bench/tests/tunables/tunable_to_configspace_distr_test.py new file mode 100644 --- /dev/null +++ b/mlos_bench/mlos_bench/tests/tunables/tunable_to_configspace_distr_test.py @@ -0,0 +1,88 @@ +# +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# +""" +Unit tests for converting tunable parameters with explicitly +specified distributions to ConfigSpace. +""" + +import pytest + +from ConfigSpace import ( + CategoricalHyperparameter, + BetaFloatHyperparameter, + BetaIntegerHyperparameter, + NormalFloatHyperparameter, + NormalIntegerHyperparameter, + UniformFloatHyperparameter, + UniformIntegerHyperparameter, +) + +from mlos_bench.tunables.tunable import DistributionName +from mlos_bench.tunables.tunable_groups import TunableGroups +from mlos_bench.optimizers.convert_configspace import ( + special_param_names, + tunable_groups_to_configspace, +) + + +_CS_HYPERPARAMETER = { + ("float", "beta"): BetaFloatHyperparameter, + ("int", "beta"): BetaIntegerHyperparameter, + ("float", "normal"): NormalFloatHyperparameter, + ("int", "normal"): NormalIntegerHyperparameter, + ("float", "uniform"): UniformFloatHyperparameter, + ("int", "uniform"): UniformIntegerHyperparameter, +} + + [email protected]("param_type", ["int", "float"]) [email protected]("distr_name,distr_params", [ + ("normal", {"mu": 0.0, "sigma": 1.0}), + ("beta", {"alpha": 2, "beta": 5}), + ("uniform", {}), +]) +def test_convert_numerical_distributions(param_type: str, + distr_name: DistributionName, + distr_params: dict) -> None: + """ + Convert a numerical Tunable with explicit distribution to ConfigSpace. 
+ """ + tunable_name = "x" + tunable_groups = TunableGroups({ + "tunable_group": { + "cost": 1, + "params": { + tunable_name: { + "type": param_type, + "range": [0, 100], + "special": [-1, 0], + "special_weights": [0.1, 0.2], + "range_weight": 0.7, + "distribution": { + "type": distr_name, + "params": distr_params + }, + "default": 0 + } + } + } + }) + + (tunable, _group) = tunable_groups.get_tunable(tunable_name) + assert tunable.distribution == distr_name + assert tunable.distribution_params == distr_params + + space = tunable_groups_to_configspace(tunable_groups) + + (tunable_special, tunable_type) = special_param_names(tunable_name) + assert set(space.keys()) == {tunable_name, tunable_type, tunable_special} + + assert isinstance(space[tunable_special], CategoricalHyperparameter) + assert isinstance(space[tunable_type], CategoricalHyperparameter) + + cs_param = space[tunable_name] + assert isinstance(cs_param, _CS_HYPERPARAMETER[param_type, distr_name]) + for (key, val) in distr_params.items(): + assert getattr(cs_param, key) == val
Specify prior distributions for the Tunable parameters

Currently we assume that all numeric tunables have a uniform distribution of values within the specified range. We need to give the user the ability to specify other prior distributions and their parameters. We need to support at least the Beta and Normal distributions.
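For reference, here is a minimal sketch of the config format this introduces, modeled directly on the unit tests in the patch above (the name, range, and parameter values are illustrative only):

```python
from mlos_bench.tunables.tunable import Tunable

# A numeric tunable with an explicit Beta prior; "test" and the range
# are placeholder values taken from the patch's unit tests.
tunable = Tunable(name="test", config={
    "type": "float",
    "range": [0, 10],
    "distribution": {
        "type": "beta",
        "params": {"alpha": 2, "beta": 5}
    },
    "default": 0
})
assert tunable.distribution == "beta"
assert tunable.distribution_params == {"alpha": 2, "beta": 5}
```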
2024-02-02T01:37:23
microsoft/MLOS
678
microsoft__MLOS-678
[ "677" ]
28644bcca62c5882b53a595f1c86dccfc76d108e
diff --git a/mlos_bench/mlos_bench/storage/sql/schema.py b/mlos_bench/mlos_bench/storage/sql/schema.py --- a/mlos_bench/mlos_bench/storage/sql/schema.py +++ b/mlos_bench/mlos_bench/storage/sql/schema.py @@ -17,10 +17,6 @@ _LOG = logging.getLogger(__name__) -# This class is internal to SqlStorage and is mostly a struct -# for all DB tables, so it's ok to disable the warnings. -# pylint: disable=too-many-instance-attributes - class _DDL: """ @@ -46,19 +42,29 @@ class DbSchema: A class to define and create the DB schema. """ + # This class is internal to SqlStorage and is mostly a struct + # for all DB tables, so it's ok to disable the warnings. + # pylint: disable=too-many-instance-attributes + + # Common string column sizes. + _ID_LEN = 512 + _PARAM_VALUE_LEN = 1024 + _METRIC_VALUE_LEN = 255 + _STATUS_LEN = 16 + def __init__(self, engine: Engine): """ Declare the SQLAlchemy schema for the database. """ _LOG.info("Create the DB schema for: %s", engine) self._engine = engine - # TODO: bind for automatic schema updates? + # TODO: bind for automatic schema updates? (#649) self._meta = MetaData() self.experiment = Table( "experiment", self._meta, - Column("exp_id", String(255), nullable=False), + Column("exp_id", String(self._ID_LEN), nullable=False), Column("description", String(1024)), Column("root_env_config", String(1024), nullable=False), Column("git_repo", String(1024), nullable=False), @@ -71,7 +77,7 @@ def __init__(self, engine: Engine): "objectives", self._meta, Column("exp_id"), - Column("optimization_target", String(1024), nullable=False), + Column("optimization_target", String(self._ID_LEN), nullable=False), Column("optimization_direction", String(4), nullable=False), # TODO: Note: weight is not fully supported yet as currently # multi-objective is expected to explore each objective equally. 
@@ -103,13 +109,13 @@ def __init__(self, engine: Engine): self.trial = Table( "trial", self._meta, - Column("exp_id", String(255), nullable=False), + Column("exp_id", String(self._ID_LEN), nullable=False), Column("trial_id", Integer, nullable=False), Column("config_id", Integer, nullable=False), Column("ts_start", DateTime, nullable=False), Column("ts_end", DateTime), # Should match the text IDs of `mlos_bench.environments.Status` enum: - Column("status", String(16), nullable=False), + Column("status", String(self._STATUS_LEN), nullable=False), PrimaryKeyConstraint("exp_id", "trial_id"), ForeignKeyConstraint(["exp_id"], [self.experiment.c.exp_id]), @@ -122,8 +128,8 @@ def __init__(self, engine: Engine): "config_param", self._meta, Column("config_id", Integer, nullable=False), - Column("param_id", String(255), nullable=False), - Column("param_value", String(255)), + Column("param_id", String(self._ID_LEN), nullable=False), + Column("param_value", String(self._PARAM_VALUE_LEN)), PrimaryKeyConstraint("config_id", "param_id"), ForeignKeyConstraint(["config_id"], [self.config.c.config_id]), @@ -134,10 +140,10 @@ def __init__(self, engine: Engine): self.trial_param = Table( "trial_param", self._meta, - Column("exp_id", String(255), nullable=False), + Column("exp_id", String(self._ID_LEN), nullable=False), Column("trial_id", Integer, nullable=False), - Column("param_id", String(255), nullable=False), - Column("param_value", String(255)), + Column("param_id", String(self._ID_LEN), nullable=False), + Column("param_value", String(self._PARAM_VALUE_LEN)), PrimaryKeyConstraint("exp_id", "trial_id", "param_id"), ForeignKeyConstraint(["exp_id", "trial_id"], @@ -147,10 +153,10 @@ def __init__(self, engine: Engine): self.trial_status = Table( "trial_status", self._meta, - Column("exp_id", String(255), nullable=False), + Column("exp_id", String(self._ID_LEN), nullable=False), Column("trial_id", Integer, nullable=False), Column("ts", DateTime, nullable=False, default="now"), - Column("status", String(16), nullable=False), + Column("status", String(self._STATUS_LEN), nullable=False), UniqueConstraint("exp_id", "trial_id", "ts"), ForeignKeyConstraint(["exp_id", "trial_id"], @@ -160,10 +166,10 @@ def __init__(self, engine: Engine): self.trial_result = Table( "trial_result", self._meta, - Column("exp_id", String(255), nullable=False), + Column("exp_id", String(self._ID_LEN), nullable=False), Column("trial_id", Integer, nullable=False), - Column("metric_id", String(255), nullable=False), - Column("metric_value", String(255)), + Column("metric_id", String(self._ID_LEN), nullable=False), + Column("metric_value", String(self._METRIC_VALUE_LEN)), PrimaryKeyConstraint("exp_id", "trial_id", "metric_id"), ForeignKeyConstraint(["exp_id", "trial_id"], @@ -173,11 +179,11 @@ def __init__(self, engine: Engine): self.trial_telemetry = Table( "trial_telemetry", self._meta, - Column("exp_id", String(255), nullable=False), + Column("exp_id", String(self._ID_LEN), nullable=False), Column("trial_id", Integer, nullable=False), Column("ts", DateTime, nullable=False, default="now"), - Column("metric_id", String(255), nullable=False), - Column("metric_value", String(255)), + Column("metric_id", String(self._ID_LEN), nullable=False), + Column("metric_value", String(self._METRIC_VALUE_LEN)), UniqueConstraint("exp_id", "trial_id", "ts", "metric_id"), ForeignKeyConstraint(["exp_id", "trial_id"],
`objectives.optimization_target` column is too long to be in the primary key for MySQL

From @eujing:

> during the CREATE TABLE that SQLAlchemy issues, it can't allocate the column VARCHAR(1024) because primary keys internally cannot be more than 3072 bytes
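A back-of-the-envelope check of the failure, assuming utf8mb4's worst case of 4 bytes per character against InnoDB's 3072-byte index key limit (the patch accordingly shrinks the ID columns to 512 characters):

```python
# VARCHAR(n) in a utf8mb4 primary key reserves up to 4 bytes per character;
# InnoDB caps index keys at 3072 bytes, so 1024 characters cannot fit.
INNODB_MAX_KEY_BYTES = 3072
BYTES_PER_CHAR = 4  # utf8mb4 worst case

for n_chars in (1024, 512):
    n_bytes = n_chars * BYTES_PER_CHAR
    fits = "fits" if n_bytes <= INNODB_MAX_KEY_BYTES else "too long"
    print(f"VARCHAR({n_chars}) -> {n_bytes} bytes: {fits}")
# VARCHAR(1024) -> 4096 bytes: too long
# VARCHAR(512)  -> 2048 bytes: fits
```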
2024-02-14T22:17:49
microsoft/MLOS
686
microsoft__MLOS-686
[ "613" ]
7cf00b3a79630039118c1f19483d0e3510616a1d
diff --git a/mlos_bench/mlos_bench/storage/sql/experiment.py b/mlos_bench/mlos_bench/storage/sql/experiment.py --- a/mlos_bench/mlos_bench/storage/sql/experiment.py +++ b/mlos_bench/mlos_bench/storage/sql/experiment.py @@ -168,6 +168,8 @@ def _get_params(conn: Connection, table: Table, **kwargs: Any) -> Dict[str, Any] @staticmethod def _save_params(conn: Connection, table: Table, params: Dict[str, Any], **kwargs: Any) -> None: + if not params: + return conn.execute(table.insert(), [ { **kwargs, diff --git a/mlos_bench/mlos_bench/tunables/tunable_groups.py b/mlos_bench/mlos_bench/tunables/tunable_groups.py --- a/mlos_bench/mlos_bench/tunables/tunable_groups.py +++ b/mlos_bench/mlos_bench/tunables/tunable_groups.py @@ -36,6 +36,9 @@ def __init__(self, config: Optional[dict] = None): for (name, group_config) in config.items(): self._add_group(CovariantTunableGroup(name, group_config)) + def __bool__(self) -> bool: + return bool(self._index) + def __eq__(self, other: object) -> bool: """ Check if two TunableGroups are equal.
diff --git a/mlos_bench/mlos_bench/tests/storage/conftest.py b/mlos_bench/mlos_bench/tests/storage/conftest.py --- a/mlos_bench/mlos_bench/tests/storage/conftest.py +++ b/mlos_bench/mlos_bench/tests/storage/conftest.py @@ -15,6 +15,7 @@ # Expose some of those as local names so they can be picked up as fixtures by pytest. storage = sql_storage_fixtures.storage exp_storage = sql_storage_fixtures.exp_storage +exp_storage_no_tunables = sql_storage_fixtures.exp_storage_no_tunables mixed_numerics_exp_storage = sql_storage_fixtures.mixed_numerics_exp_storage exp_storage_with_trials = sql_storage_fixtures.exp_storage_with_trials mixed_numerics_exp_storage_with_trials = sql_storage_fixtures.mixed_numerics_exp_storage_with_trials diff --git a/mlos_bench/mlos_bench/tests/storage/sql/fixtures.py b/mlos_bench/mlos_bench/tests/storage/sql/fixtures.py --- a/mlos_bench/mlos_bench/tests/storage/sql/fixtures.py +++ b/mlos_bench/mlos_bench/tests/storage/sql/fixtures.py @@ -63,6 +63,31 @@ def exp_storage( assert not exp._in_context [email protected] +def exp_storage_no_tunables( + storage: SqlStorage, +) -> Generator[SqlStorage.Experiment, None, None]: + """ + Test fixture for Experiment using in-memory SQLite3 storage. + Note: It has already entered the context upon return. + """ + opt_target = "score" + opt_direction = "min" + empty_config: dict = {} + with storage.experiment( + experiment_id="Test-001", + trial_id=1, + root_env_config="environment.jsonc", + description="pytest experiment", + tunables=TunableGroups(empty_config), + opt_target=opt_target, + opt_direction=opt_direction, + ) as exp: + yield exp + # pylint: disable=protected-access + assert not exp._in_context + + @pytest.fixture def mixed_numerics_exp_storage( storage: SqlStorage, diff --git a/mlos_bench/mlos_bench/tests/storage/trial_config_test.py b/mlos_bench/mlos_bench/tests/storage/trial_config_test.py --- a/mlos_bench/mlos_bench/tests/storage/trial_config_test.py +++ b/mlos_bench/mlos_bench/tests/storage/trial_config_test.py @@ -62,3 +62,19 @@ def test_exp_trial_configs(exp_storage: Storage.Experiment, assert len(pending_ids) == 6 assert len(set(pending_ids)) == 2 assert set(pending_ids) == {trials1[0].tunable_config_id, trials2[0].tunable_config_id} + + +def test_exp_trial_no_config(exp_storage_no_tunables: Storage.Experiment) -> None: + """ + Schedule a trial that has an empty tunable groups config. + """ + empty_config: dict = {} + tunable_groups = TunableGroups(config=empty_config) + trial = exp_storage_no_tunables.new_trial(tunable_groups, config=empty_config) + (pending,) = exp_storage_no_tunables.pending_trials(datetime.utcnow(), running=True) + assert pending.trial_id == trial.trial_id + assert pending.tunables == tunable_groups + assert pending.config() == { + "experiment_id": "Test-001", + "trial_id": trial.trial_id, + } diff --git a/mlos_bench/mlos_bench/tests/tunables/test_empty_tunable_group.py b/mlos_bench/mlos_bench/tests/tunables/test_empty_tunable_group.py new file mode 100644 --- /dev/null +++ b/mlos_bench/mlos_bench/tests/tunables/test_empty_tunable_group.py @@ -0,0 +1,24 @@ +# +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# +""" +Unit tests for empty tunable groups. +""" + +from mlos_bench.tunables.tunable_groups import TunableGroups + + +def test_empty_tunable_group() -> None: + """ + Test __nonzero__ property of tunable groups. 
+ """ + tunable_groups = TunableGroups(config={}) + assert not tunable_groups + + +def test_non_empty_tunable_group(tunable_groups: TunableGroups) -> None: + """ + Test __nonzero__ property of tunable groups. + """ + assert tunable_groups
Storage schema doesn't allow an empty config

In some cases it's useful to test deployment and other scripts without a config (e.g., a VNet deployment with no tunable params). Currently, a combination of the storage schema and the way we serialize and hash the config representation to create a config ID prevents us from doing this. It would be nice if we handled that as an "empty" config somehow as well.
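A small sketch of the behavior the patch adds: an empty `TunableGroups` becomes falsy, which is what lets `_save_params` return early instead of issuing an INSERT with no rows (see the diff above):

```python
from mlos_bench.tunables.tunable_groups import TunableGroups

empty = TunableGroups(config={})
# The __bool__ override from the patch makes an empty group falsy:
assert not empty
# Its parameter dict is empty too, so storage code can skip persisting it:
assert empty.get_param_values() == {}
```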
@motus, could you please take a look at this? commit 428cd2280d4e67fa52878a2364a85d9481ef9fd6 from #606 adds a small workaround for this for now
2024-02-23T21:33:59
microsoft/MLOS
690
microsoft__MLOS-690
[ "688" ]
28b07b192fefe9b9f33c2d3e03bb699a19399ce6
diff --git a/mlos_bench/mlos_bench/optimizers/grid_search_optimizer.py b/mlos_bench/mlos_bench/optimizers/grid_search_optimizer.py new file mode 100644 --- /dev/null +++ b/mlos_bench/mlos_bench/optimizers/grid_search_optimizer.py @@ -0,0 +1,188 @@ +# +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# +""" +Grid search optimizer for mlos_bench. +""" + +import logging + +from typing import Dict, Iterable, Set, Optional, Sequence, Tuple, Union + +import numpy as np +import ConfigSpace +from ConfigSpace.util import generate_grid + +from mlos_bench.environments.status import Status +from mlos_bench.tunables.tunable import TunableValue +from mlos_bench.tunables.tunable_groups import TunableGroups +from mlos_bench.optimizers.base_optimizer import Optimizer +from mlos_bench.optimizers.convert_configspace import configspace_data_to_tunable_values +from mlos_bench.services.base_service import Service + +_LOG = logging.getLogger(__name__) + + +class GridSearchOptimizer(Optimizer): + """ + Grid search optimizer. + """ + + def __init__(self, + tunables: TunableGroups, + config: dict, + global_config: Optional[dict] = None, + service: Optional[Service] = None): + super().__init__(tunables, config, global_config, service) + + self._best_config: Optional[TunableGroups] = None + self._best_score: Optional[float] = None + + # Track the grid as a set of tuples of tunable values and reconstruct the + # dicts as necessary. + # Note: this is not the most effecient way to do this, but avoids + # introducing a new data structure for hashable dicts. + # See https://github.com/microsoft/MLOS/pull/690 for further discussion. + + self._sanity_check() + # The ordered set of pending configs that have not yet been suggested. + self._config_keys, self._pending_configs = self._get_grid() + assert self._pending_configs + # A set of suggested configs that have not yet been registered. + self._suggested_configs: Set[Tuple[TunableValue, ...]] = set() + + def _sanity_check(self) -> None: + size = np.prod([tunable.cardinality for (tunable, _group) in self._tunables]) + if size == np.inf: + raise ValueError(f"Unquantized tunables are not supported for grid search: {self._tunables}") + if size > 10000: + _LOG.warning("Large number %d of config points requested for grid search: %s", size, self._tunables) + if size > self._max_iter: + _LOG.warning("Grid search size %d, is greater than max iterations %d", size, self._max_iter) + + def _get_grid(self) -> Tuple[Tuple[str, ...], Dict[Tuple[TunableValue, ...], None]]: + """ + Gets a grid of configs to try. + + Order is given by ConfigSpace, but preserved by dict ordering semantics. + """ + # Since we are using ConfigSpace to generate the grid, but only tracking the + # values as (ordered) tuples, we also need to use its ordering on column + # names instead of the order given by TunableGroups. + configs = [ + configspace_data_to_tunable_values(dict(config)) + for config in + generate_grid(self.config_space, { + tunable.name: int(tunable.cardinality) + for (tunable, _group) in self._tunables + if tunable.quantization or tunable.type == "int" + }) + ] + names = set(tuple(configs.keys()) for configs in configs) + assert len(names) == 1 + return names.pop(), {tuple(configs.values()): None for configs in configs} + + @property + def pending_configs(self) -> Iterable[Dict[str, TunableValue]]: + """ + Gets the set of pending configs in this grid search optimizer. + + Returns + ------- + Iterable[Dict[str, TunableValue]] + """ + # See NOTEs above. 
+ return (dict(zip(self._config_keys, config)) for config in self._pending_configs.keys()) + + @property + def suggested_configs(self) -> Iterable[Dict[str, TunableValue]]: + """ + Gets the set of configs that have been suggested but not yet registered. + + Returns + ------- + Iterable[Dict[str, TunableValue]] + """ + # See NOTEs above. + return (dict(zip(self._config_keys, config)) for config in self._suggested_configs) + + def bulk_register(self, configs: Sequence[dict], scores: Sequence[Optional[float]], + status: Optional[Sequence[Status]] = None, is_warm_up: bool = False) -> bool: + if not super().bulk_register(configs, scores, status, is_warm_up): + return False + if status is None: + status = [Status.SUCCEEDED] * len(configs) + for (params, score, trial_status) in zip(configs, scores, status): + tunables = self._tunables.copy().assign(params) + self.register(tunables, trial_status, None if score is None else float(score)) + if is_warm_up: + # Do not advance the iteration counter during warm-up. + self._iter -= 1 + if _LOG.isEnabledFor(logging.DEBUG): + (score, _) = self.get_best_observation() + _LOG.debug("%s end: %s = %s", "Warm-up" if is_warm_up else "Update", self.target, score) + return True + + def suggest(self) -> TunableGroups: + """ + Generate the next grid search suggestion. + """ + tunables = self._tunables.copy() + if self._start_with_defaults: + _LOG.info("Use default values for the first trial") + self._start_with_defaults = False + tunables = tunables.restore_defaults() + # Need to index based on ConfigSpace dict ordering. + default_config = dict(self.config_space.get_default_configuration()) + assert tunables.get_param_values() == default_config + # Move the default from the pending to the suggested set. + default_config_values = tuple(default_config.values()) + del self._pending_configs[default_config_values] + self._suggested_configs.add(default_config_values) + else: + # Select the first item from the pending configs. + if not self._pending_configs and self._iter <= self._max_iter: + _LOG.info("No more pending configs to suggest. Restarting grid.") + self._config_keys, self._pending_configs = self._get_grid() + try: + next_config_values = next(iter(self._pending_configs.keys())) + except StopIteration as exc: + raise ValueError("No more pending configs to suggest.") from exc + next_config = dict(zip(self._config_keys, next_config_values)) + tunables.assign(next_config) + # Move it to the suggested set. + self._suggested_configs.add(next_config_values) + del self._pending_configs[next_config_values] + _LOG.info("Iteration %d :: Suggest: %s", self._iter, tunables) + return tunables + + def register(self, tunables: TunableGroups, status: Status, + score: Optional[Union[float, dict]] = None) -> Optional[float]: + registered_score = super().register(tunables, status, score) + if status.is_succeeded() and ( + self._best_score is None or (registered_score is not None and registered_score < self._best_score) + ): + self._best_score = registered_score + self._best_config = tunables.copy() + self._iter += 1 + try: + config = dict(ConfigSpace.Configuration(self.config_space, values=tunables.get_param_values())) + self._suggested_configs.remove(tuple(config.values())) + except KeyError: + _LOG.warning("Attempted to remove missing config (previously registered?) 
from suggested set: %s", tunables) + return registered_score + + def not_converged(self) -> bool: + if self._iter > self._max_iter: + if bool(self._pending_configs): + _LOG.warning("Exceeded max iterations, but still have %d pending configs: %s", + len(self._pending_configs), list(self._pending_configs.keys())) + return False + return bool(self._pending_configs) + + def get_best_observation(self) -> Union[Tuple[float, TunableGroups], Tuple[None, None]]: + if self._best_score is None: + return (None, None) + assert self._best_config is not None + return (self._best_score * self._opt_sign, self._best_config) diff --git a/mlos_bench/mlos_bench/tunables/tunable_groups.py b/mlos_bench/mlos_bench/tunables/tunable_groups.py --- a/mlos_bench/mlos_bench/tunables/tunable_groups.py +++ b/mlos_bench/mlos_bench/tunables/tunable_groups.py @@ -39,6 +39,9 @@ def __init__(self, config: Optional[dict] = None): def __bool__(self) -> bool: return bool(self._index) + def __len__(self) -> int: + return len(self._index) + def __eq__(self, other: object) -> bool: """ Check if two TunableGroups are equal.
diff --git a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/bad/invalid/grid_search_opt_invalid_max_iterations.jsonc b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/bad/invalid/grid_search_opt_invalid_max_iterations.jsonc new file mode 100644 --- /dev/null +++ b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/bad/invalid/grid_search_opt_invalid_max_iterations.jsonc @@ -0,0 +1,6 @@ +{ + "class": "mlos_bench.optimizers.grid_search_optimizer.GridSearchOptimizer", + "config": { + "max_iterations": null, + } +} \ No newline at end of file diff --git a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/bad/unhandled/grid_search_opt_extra.jsonc b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/bad/unhandled/grid_search_opt_extra.jsonc new file mode 100644 --- /dev/null +++ b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/bad/unhandled/grid_search_opt_extra.jsonc @@ -0,0 +1,6 @@ +{ + "class": "mlos_bench.optimizers.grid_search_optimizer.GridSearchOptimizer", + "config": { + "extra": "unhandled" + } +} \ No newline at end of file diff --git a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/grid_search_opt_full.jsonc b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/grid_search_opt_full.jsonc new file mode 100644 --- /dev/null +++ b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/grid_search_opt_full.jsonc @@ -0,0 +1,14 @@ +{ + "class": "mlos_bench.optimizers.grid_search_optimizer.GridSearchOptimizer", + "description": "GridSearchOptimizer", + "include_tunables": [ + "some/path/to/tunables.jsonc" + ], + "config": { + "max_iterations": 100, + "optimization_direction": "max", + "optimization_target": "score", + "seed": 12345, + "start_with_defaults": true + } +} \ No newline at end of file diff --git a/mlos_bench/mlos_bench/tests/optimizers/grid_search_optimizer_test.py b/mlos_bench/mlos_bench/tests/optimizers/grid_search_optimizer_test.py new file mode 100644 --- /dev/null +++ b/mlos_bench/mlos_bench/tests/optimizers/grid_search_optimizer_test.py @@ -0,0 +1,250 @@ +# +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# +""" +Unit tests for grid search mlos_bench optimizer. +""" + +from typing import Dict, List + +import itertools +import math +import random + +import pytest + +from mlos_bench.environments.status import Status +from mlos_bench.optimizers.grid_search_optimizer import GridSearchOptimizer +from mlos_bench.tunables.tunable import TunableValue +from mlos_bench.tunables.tunable_groups import TunableGroups + + +# pylint: disable=redefined-outer-name + [email protected] +def grid_search_tunables_config() -> dict: + """ + Test fixture for grid search optimizer tunables config. + """ + return { + "grid": { + "cost": 1, + "params": { + "cat": { + "type": "categorical", + "values": ["a", "b", "c"], + "default": "a", + }, + "int": { + "type": "int", + "range": [1, 3], + "default": 2, + }, + "float": { + "type": "float", + "range": [0, 1], + "default": 0.5, + "quantization": 0.25, + }, + }, + }, + } + + [email protected] +def grid_search_tunables_grid(grid_search_tunables: TunableGroups) -> List[Dict[str, TunableValue]]: + """ + Test fixture for grid from tunable groups. + Used to check that the grids are the same (ignoring order). 
+ """ + tunables_params_values = [tunable.values for tunable, _group in grid_search_tunables if tunable.values is not None] + tunable_names = tuple(tunable.name for tunable, _group in grid_search_tunables if tunable.values is not None) + return list(dict(zip(tunable_names, combo)) for combo in itertools.product(*tunables_params_values)) + + [email protected] +def grid_search_tunables(grid_search_tunables_config: dict) -> TunableGroups: + """ + Test fixture for grid search optimizer tunables. + """ + return TunableGroups(grid_search_tunables_config) + + [email protected] +def grid_search_opt(grid_search_tunables: TunableGroups, + grid_search_tunables_grid: List[Dict[str, TunableValue]]) -> GridSearchOptimizer: + """ + Test fixture for grid search optimizer. + """ + assert len(grid_search_tunables) == 3 + # Test the convergence logic by controlling the number of iterations to be not a + # multiple of the number of elements in the grid. + max_iterations = len(grid_search_tunables_grid) * 2 - 3 + return GridSearchOptimizer(tunables=grid_search_tunables, config={ + "max_iterations": max_iterations, + "optimization_direction": "max", + }) + + +def test_grid_search_grid(grid_search_opt: GridSearchOptimizer, + grid_search_tunables: TunableGroups, + grid_search_tunables_grid: List[Dict[str, TunableValue]]) -> None: + """ + Make sure that grid search optimizer initializes and works correctly. + """ + # Check the size. + expected_grid_size = math.prod(tunable.cardinality for tunable, _group in grid_search_tunables) + assert expected_grid_size > len(grid_search_tunables) + assert len(grid_search_tunables_grid) == expected_grid_size + # Check for specific example configs inclusion. + expected_config_example: Dict[str, TunableValue] = { + "cat": "a", + "int": 2, + "float": 0.75, + } + grid_search_opt_pending_configs = list(grid_search_opt.pending_configs) + assert expected_config_example in grid_search_tunables_grid + assert expected_config_example in grid_search_opt_pending_configs + # Check the rest of the contents. + # Note: ConfigSpace param name vs TunableGroup parameter name order is not + # consistent, so we need to full dict comparison. + assert len(grid_search_opt_pending_configs) == expected_grid_size + assert all(config in grid_search_tunables_grid for config in grid_search_opt_pending_configs) + assert all(config in grid_search_opt_pending_configs for config in grid_search_tunables_grid) + # Order is less relevant to us, so we'll just check that the sets are the same. + # assert grid_search_opt.pending_configs == grid_search_tunables_grid + + +def test_grid_search(grid_search_opt: GridSearchOptimizer, + grid_search_tunables: TunableGroups, + grid_search_tunables_grid: List[Dict[str, TunableValue]]) -> None: + """ + Make sure that grid search optimizer initializes and works correctly. + """ + score = 1.0 + status = Status.SUCCEEDED + suggestion = grid_search_opt.suggest() + suggestion_dict = suggestion.get_param_values() + default_config = grid_search_tunables.restore_defaults().get_param_values() + + # First suggestion should be the defaults. + assert suggestion.get_param_values() == default_config + # But that shouldn't be the first element in the grid search. + assert suggestion_dict != next(iter(grid_search_tunables_grid)) + # The suggestion should no longer be in the pending_configs. + assert suggestion_dict not in grid_search_opt.pending_configs + # But it should be in the suggested_configs now (and the only one). 
+ assert list(grid_search_opt.suggested_configs) == [default_config] + + # Register a score for that suggestion. + grid_search_opt.register(suggestion, status, score) + # Now it shouldn't be in the suggested_configs. + assert len(list(grid_search_opt.suggested_configs)) == 0 + + grid_search_tunables_grid.remove(default_config) + assert default_config not in grid_search_opt.pending_configs + assert all(config in grid_search_tunables_grid for config in grid_search_opt.pending_configs) + assert all(config in list(grid_search_opt.pending_configs) for config in grid_search_tunables_grid) + + # The next suggestion should be a different element in the grid search. + suggestion = grid_search_opt.suggest() + suggestion_dict = suggestion.get_param_values() + assert suggestion_dict != default_config + assert suggestion_dict not in grid_search_opt.pending_configs + assert suggestion_dict in grid_search_opt.suggested_configs + grid_search_opt.register(suggestion, status, score) + assert suggestion_dict not in grid_search_opt.pending_configs + assert suggestion_dict not in grid_search_opt.suggested_configs + + grid_search_tunables_grid.remove(suggestion.get_param_values()) + assert all(config in grid_search_tunables_grid for config in grid_search_opt.pending_configs) + assert all(config in list(grid_search_opt.pending_configs) for config in grid_search_tunables_grid) + + # FIXME: Should we consider not_converged as the "max_iterations", an empty grid, or both? + + # Try to empty the rest of the grid. + while grid_search_opt.not_converged(): + suggestion = grid_search_opt.suggest() + grid_search_opt.register(suggestion, status, score) + + # The grid search should be empty now. + assert not list(grid_search_opt.pending_configs) + assert not list(grid_search_opt.suggested_configs) + assert not grid_search_opt.not_converged() + + # But if we still have iterations left, we should be able to suggest again by refilling the grid. + assert grid_search_opt.current_iteration < grid_search_opt.max_iterations + assert grid_search_opt.suggest() + assert list(grid_search_opt.pending_configs) + assert list(grid_search_opt.suggested_configs) + assert grid_search_opt.not_converged() + + # Try to finish the rest of our iterations by repeating the grid. + while grid_search_opt.not_converged(): + suggestion = grid_search_opt.suggest() + grid_search_opt.register(suggestion, status, score) + assert not grid_search_opt.not_converged() + assert grid_search_opt.current_iteration >= grid_search_opt.max_iterations + assert list(grid_search_opt.pending_configs) + assert list(grid_search_opt.suggested_configs) + + +def test_grid_search_async_order(grid_search_opt: GridSearchOptimizer) -> None: + """ + Make sure that grid search optimizer works correctly when suggest and register + are called out of order. + """ + score = 1.0 + status = Status.SUCCEEDED + suggest_count = 10 + suggested = [grid_search_opt.suggest() for _ in range(suggest_count)] + suggested_shuffled = suggested.copy() + # Try to ensure the shuffled list is different. 
+ for _ in range(3): + random.shuffle(suggested_shuffled) + if suggested_shuffled != suggested: + break + assert suggested != suggested_shuffled + + for suggestion in suggested_shuffled: + suggestion_dict = suggestion.get_param_values() + assert suggestion_dict not in grid_search_opt.pending_configs + assert suggestion_dict in grid_search_opt.suggested_configs + grid_search_opt.register(suggestion, status, score) + assert suggestion_dict not in grid_search_opt.suggested_configs + + best_score, best_config = grid_search_opt.get_best_observation() + assert best_score == score + + # test re-register with higher score + best_suggestion = suggested_shuffled[0] + best_suggestion_dict = best_suggestion.get_param_values() + assert best_suggestion_dict not in grid_search_opt.pending_configs + assert best_suggestion_dict not in grid_search_opt.suggested_configs + best_suggestion_score = score - 1 if grid_search_opt.direction == "min" else score + 1 + grid_search_opt.register(best_suggestion, status, best_suggestion_score) + assert best_suggestion_dict not in grid_search_opt.suggested_configs + + best_score, best_config = grid_search_opt.get_best_observation() + assert best_score == best_suggestion_score + assert best_config == best_suggestion + + # Check bulk register + suggested = [grid_search_opt.suggest() for _ in range(suggest_count)] + assert all(suggestion.get_param_values() not in grid_search_opt.pending_configs for suggestion in suggested) + assert all(suggestion.get_param_values() in grid_search_opt.suggested_configs for suggestion in suggested) + + # Those new suggestions also shouldn't be in the set of previously suggested configs. + assert all(suggestion.get_param_values() not in suggested_shuffled for suggestion in suggested) + + grid_search_opt.bulk_register([suggestion.get_param_values() for suggestion in suggested], + [score] * len(suggested), + [status] * len(suggested)) + + assert all(suggestion.get_param_values() not in grid_search_opt.pending_configs for suggestion in suggested) + assert all(suggestion.get_param_values() not in grid_search_opt.suggested_configs for suggestion in suggested) + + best_score, best_config = grid_search_opt.get_best_observation() + assert best_score == best_suggestion_score + assert best_config == best_suggestion
mlos_bench: grid search support

In benchmarking-only mode, we sometimes do want to use grid search to evaluate all possibilities for comparison's sake. This can also be useful when comparing the performance of more efficient optimizer search strategies. We should be able to implement this as an additional mlos_bench optimizer, in combination with the storage backend, to check which configurations have already been evaluated.
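The optimizer in the patch builds its grid with ConfigSpace's `generate_grid` helper; a rough standalone sketch of that core step, with made-up parameter names and ranges, assuming a recent ConfigSpace version that accepts the dict shorthand:

```python
from ConfigSpace import ConfigurationSpace
from ConfigSpace.util import generate_grid

# Enumerate the full cross-product of a small space: categorical values are
# used as-is, and numeric dimensions get an explicit number of grid steps.
space = ConfigurationSpace({
    "cat": ["a", "b", "c"],  # categorical hyperparameter
    "int": (1, 3),           # integer range
})
grid = generate_grid(space, {"int": 3})  # 3 grid points for "int"
print(len(grid))  # 3 categories x 3 integer values = 9 configurations
```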
This btw would be a great place for parallel trial execution (#380)
2024-02-26T23:00:07
platformsh/platformsh-docs
14
platformsh__platformsh-docs-14
[ "13" ]
dd2ed5381a21e5ec27da60977d69df2359cf69aa
diff --git a/_ext/github.py b/_ext/github.py new file mode 100644 --- /dev/null +++ b/_ext/github.py @@ -0,0 +1,31 @@ +import os +import warnings + +# Loosely based on https://gist.github.com/mgedmin/6052926 (BSD) + +def get_github_url(app, view, path): + return 'https://github.com/{project}/{view}/{branch}/{path}'.format( + project=app.config.github_project, + view=view, + branch=app.config.github_branch, + path=path, + ) + +def html_page_context(app, pagename, templatename, context, doctree): + if templatename != 'page.html': + return + + if not app.config.github_project: + return + + path = os.path.relpath(doctree.get('source'), app.builder.srcdir) + show_url = get_github_url(app, 'blob', path) + edit_url = get_github_url(app, 'edit', path) + + context['github_show_url'] = show_url + context['github_edit_url'] = edit_url + +def setup(app): + app.add_config_value('github_project', '', True) + app.add_config_value('github_branch', 'master', True) + app.connect('html-page-context', html_page_context) diff --git a/conf.py b/conf.py --- a/conf.py +++ b/conf.py @@ -2,6 +2,7 @@ import sys, os +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '_ext')) # -- General configuration ----------------------------------------------------- @@ -10,7 +11,7 @@ # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = ['sphinx.ext.viewcode', 'sphinx.ext.todo'] +extensions = ['sphinx.ext.viewcode', 'sphinx.ext.todo', 'github'] # Set this to False if you don't want to display the todo. todo_include_todos = False @@ -31,6 +32,9 @@ project = u'Documentation' copyright = u'2014, Commerce Guys' +github_project = 'platformsh/platformsh-docs' +github_branch = 'dev' + # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. @@ -128,7 +132,7 @@ # Custom sidebar templates, maps document names to template names. html_sidebars = { - '**': ['searchbox.html', 'localtoc.html', 'support.html'], + '**': ['searchbox.html', 'localtoc.html', 'support.html', 'sourcelink.html'], } # Additional templates that should be rendered to pages, maps page names to @@ -145,7 +149,7 @@ #html_split_index = False # If true, links to the reST sources are added to the pages. -html_show_sourcelink = False +html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. html_show_sphinx = False
Document the file location for build hooks

It turns out that when you run a build hook, the files are stored in `/app/out`. Example hook to manually move a `settings.php` for a profile:

```
build: |
    PROFILE=my_profile;
    ROOT=/app/out/public;
    rm $ROOT/sites/default/settings.php;
    mv $ROOT/profiles/$PROFILE/settings.php $ROOT/sites/default/;
```
2014-09-28T11:14:14
platformsh/platformsh-docs
1859
platformsh__platformsh-docs-1859
[ "1665" ]
d7e191aaae7595a1a9acb980671890b143cd3727
diff --git a/search/main.py b/search/main.py --- a/search/main.py +++ b/search/main.py @@ -23,7 +23,7 @@ def __init__(self): # Data available to the dropdown React app in docs, used to fill out autocomplete results. self.displayed_attributes = ['title', 'text', 'url', 'site', 'section'] # Data actually searchable by our queries. - self.searchable_attributes = ['title', 'text', 'section'] + self.searchable_attributes = ['title', 'text', 'url', 'section'] # Show results for one query with the listed pages, when they by default would not show up as best results. Note: these # are not automatically two-way, so that's why they all appear to be defined twice.
Poor search results

If I search on https://docs.platform.sh for "routes", I get the following results:

* https://docs.platform.sh/administration/web/configure-environment.html#routes
* https://docs.platform.sh/gettingstarted/introduction/own-code/routes-configuration.html
* https://docs.platform.sh/configuration/app/upgrading.html#platformroutesyaml
* https://docs.platform.sh/guides/gatsby/headless/drupal.html#platformroutesyaml
* https://docs.platform.sh/guides/gatsby/headless/strapi.html#platformroutesyaml
* https://docs.platform.sh/guides/gatsby/headless/wordpress.html#platformroutesyaml
* https://docs.platform.sh/guides/drupal9/deploy/configure.html#requests-configuration-routesyaml

Note the absence of the main resource on this topic: https://docs.platform.sh/configuration/routes.html

I've also tried "routes.yaml" and "configure routes", neither of which returns the main routing configuration page. The same issue appears to afflict these pages as well, which seem near-impossible to locate by search:

* https://docs.platform.sh/configuration/services.html
* https://docs.platform.sh/configuration/app.html

Doing a bit of digging: if I make the same search query with a 1000-result limit, the routes URL does not show up in the results, so perhaps it's not being indexed.
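The one-line fix in the patch above adds `url` to the searchable attributes, so a page like `/configuration/routes.html` can match the query "routes" through its URL. A hedged sketch of applying that setting with the Meilisearch Python client (host, API key, and index name are placeholders):

```python
import meilisearch

client = meilisearch.Client("http://127.0.0.1:7700", "masterKey")  # placeholders
index = client.index("docs")  # hypothetical index name

# Mirror the patched searchable_attributes: with 'url' searchable, a query
# like "routes" can match /configuration/routes.html by its URL alone.
index.update_searchable_attributes(["title", "text", "url", "section"])
```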
2021-08-16T18:45:39
platformsh/platformsh-docs
2079
platformsh__platformsh-docs-2079
[ "2061" ]
6cf494ec20a90df094a65d6fbef58d5e18042db9
diff --git a/search/main.py b/search/main.py --- a/search/main.py +++ b/search/main.py @@ -21,9 +21,9 @@ def __init__(self): # Below are Platform.sh custom settings for how the search engine functions. # Data available to the dropdown React app in docs, used to fill out autocomplete results. - self.displayed_attributes = ['title', 'text', 'url', 'site', 'section'] + self.displayed_attributes = ['keywords', 'title', 'text', 'url', 'site', 'section'] # Data actually searchable by our queries. - self.searchable_attributes = ['title', 'pageUrl', 'section', 'url', 'text'] + self.searchable_attributes = ['keywords', 'title', 'pageUrl', 'section', 'text', 'url'] # Show results for one query with the listed pages, when they by default would not show up as best results. # Note: these aren't automatically two-way, which is why they're all defined twice.
Add keywords for search ### Where on docs.platform.sh should be changed? /configuration/app/app-reference.html ### What exactly should be updated? We'd like specific pages to be findable by searching for specific words. For example, the app reference should be found when searching for `.platform.app.yaml` (this may also involve a problem with escaping characters like `.`). Add keywords or other metadata to make these pages findable. ### Additional context _No response_
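Illustrative only — a hypothetical document shape carrying the `keywords` field the patch makes searchable, so an exact string like `.platform.app.yaml` can match a page that never spells it out in its body text (only the field names follow the patch; the record contents are assumptions):

```python
# Hypothetical indexed document; 'keywords' is the newly searchable field.
doc = {
    "documentId": "app-reference",
    "pageUrl": "/configuration/app/app-reference.html",
    "title": "App reference",
    "section": "configuration",
    "keywords": [".platform.app.yaml"],
    "text": "...",
}
# index.add_documents([doc])  # with a meilisearch Index as in the earlier sketch
```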
2022-01-07T12:52:01
platformsh/platformsh-docs
2105
platformsh__platformsh-docs-2105
[ "2084" ]
13fe0cb2c146c4622eccfddd089699cc48e8e7fb
diff --git a/search/main.py b/search/main.py --- a/search/main.py +++ b/search/main.py @@ -119,10 +119,14 @@ def update(self): # Delete previous index if len(client.get_indexes()): - client.get_index(self.docs_index).delete() + client.index(self.docs_index).delete() # Create a new index - index = client.create_index(uid=self.docs_index, options={'primaryKey': self.primaryKey, 'uid': self.index_name}) + create_index_task = client.create_index(uid=self.docs_index, options={'primaryKey': self.primaryKey, 'uid': self.index_name}) + + client.wait_for_task(create_index_task['uid']) + + index = client.get_index(create_index_task['indexUid']) # Add synonyms for the index index.update_synonyms(self.synonyms)
:arrow_up: Update MeiliSearch ### What needs to be documented? We should update MeiliSearch to version 0.25. This means we have to change how we manage API keys. It does mean we can create more keys for monitoring and the like. ### Additional context [Blog post on version](https://blog.meilisearch.com/whats-new-in-v0-25/) [Additional context](https://github.com/orgs/platformsh/projects/3)
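A hedged sketch of the task-based flow Meilisearch introduced around v0.25, which the patch above adapts to: index creation became asynchronous, so the client must wait for the creation task before the index can be fetched and configured (assumes `client` is a `meilisearch.Client` for a v0.25+ server; uid and synonyms are illustrative):

```python
task = client.create_index(uid='docs', options={'primaryKey': 'documentId'})
client.wait_for_task(task['uid'])            # block until creation completes
index = client.get_index(task['indexUid'])   # only now is the index fetchable
index.update_synonyms({"app": ["application"]})  # example follow-up settings call
```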
2022-01-26T08:13:27
platformsh/platformsh-docs
2432
platformsh__platformsh-docs-2432
[ "2423" ]
79dd36ffbd0d1a1ef5b8390552ac3d03d6aef60c
diff --git a/search/main.py b/search/main.py --- a/search/main.py +++ b/search/main.py @@ -59,7 +59,7 @@ def __init__(self): # - sort: queries are sorted at query time # - exactness: similarity of matched words in document with query - self.ranking_rules = ["rank:asc", "attribute", "typo", "words", "proximity", "exactness"] + self.ranking_rules = ["rank:asc", "exactness", "attribute", "proximity", "typo", "words"] self.updated_settings = { "rankingRules": self.ranking_rules,
🐛 Allow searching for code strings with separators ### Where on docs.platform.sh should be changed? The search ### What exactly should be updated? Searching for strings with separators like `X-Frame-Options` and `memory_ratio` doesn't return results for pages that contain those strings directly. Putting quotes around the strings doesn't help. We'd like people to be able to get info on specific properties and strings, so the search should return these results. ### Additional context _No response_
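Not part of the PR body — a sketch of applying the reordered ranking rules from the patch; promoting "exactness" is what lets literal matches like `X-Frame-Options` outrank fuzzier typo/word matches (assumes `index` is a meilisearch Index object as in the earlier sketches):

```python
# Rule order is significance order: earlier rules dominate the ranking.
ranking_rules = ["rank:asc", "exactness", "attribute", "proximity", "typo", "words"]
index.update_settings({"rankingRules": ranking_rules})
```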
2022-06-20T19:43:48
platformsh/platformsh-docs
2485
platformsh__platformsh-docs-2485
[ "2478" ]
455a331267c9adc2820fecdb7e8dc9038ace37cd
diff --git a/search/docs/middlewares.py b/search/docs/middlewares.py --- a/search/docs/middlewares.py +++ b/search/docs/middlewares.py @@ -6,6 +6,7 @@ # https://docs.scrapy.org/en/latest/topics/spider-middleware.html from scrapy import signals +from scrapy.exceptions import IgnoreRequest class DocsSpiderMiddleware(object): @@ -61,13 +62,6 @@ class DocsDownloaderMiddleware(object): # scrapy acts as if the downloader middleware does not modify the # passed objects. - @classmethod - def from_crawler(cls, crawler): - # This method is used by Scrapy to create your spiders. - s = cls() - crawler.signals.connect(s.spider_opened, signal=signals.spider_opened) - return s - def process_request(self, request, spider): # Called for each request that goes through the downloader # middleware. @@ -78,26 +72,7 @@ def process_request(self, request, spider): # - or return a Request object # - or raise IgnoreRequest: process_exception() methods of # installed downloader middleware will be called - return None - - def process_response(self, request, response, spider): - # Called with the response returned from the downloader. + if request.url.endswith(".png") or request.url.endswith(".jpg"): + raise IgnoreRequest - # Must either; - # - return a Response object - # - return a Request object - # - or raise IgnoreRequest - return response - - def process_exception(self, request, exception, spider): - # Called when a download handler or a process_request() - # (from other downloader middleware) raises an exception. - - # Must either: - # - return None: continue processing this exception - # - return a Response object: stops process_exception() chain - # - return a Request object: stops process_exception() chain - pass - - def spider_opened(self, spider): - spider.logger.info('Spider opened: %s' % spider.name) + return None diff --git a/search/docs/settings.py b/search/docs/settings.py --- a/search/docs/settings.py +++ b/search/docs/settings.py @@ -52,9 +52,9 @@ # Enable or disable downloader middlewares # See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html -#DOWNLOADER_MIDDLEWARES = { -# 'docs.middlewares.DocsDownloaderMiddleware': 543, -#} +DOWNLOADER_MIDDLEWARES = { + 'docs.middlewares.DocsDownloaderMiddleware': 543, +} # Enable or disable extensions # See https://docs.scrapy.org/en/latest/topics/extensions.html
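A minimal standalone version of the downloader-middleware pattern the patch above uses (the class name here is mine, for illustration): raising `IgnoreRequest` drops image URLs before they are ever downloaded:

```python
from scrapy.exceptions import IgnoreRequest

class SkipImagesMiddleware:  # hypothetical name, same logic as the patch
    def process_request(self, request, spider):
        # Called for each request that goes through the downloader middleware.
        if request.url.endswith((".png", ".jpg")):
            raise IgnoreRequest  # drop the request; no download happens
        return None  # continue normal processing
```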
Bump scrapy from 2.6.1 to 2.6.2 in /search Bumps [scrapy](https://github.com/scrapy/scrapy) from 2.6.1 to 2.6.2. <details> <summary>Release notes</summary> <p><em>Sourced from <a href="https://github.com/scrapy/scrapy/releases">scrapy's releases</a>.</em></p> <blockquote> <h2>2.6.2</h2> <p>Fixes a <strong>security issue</strong> around HTTP proxy usage, and addresses a few regressions introduced in Scrapy 2.6.0.</p> <p>See the <strong><a href="https://docs.scrapy.org/en/2.6/news.html#scrapy-2-6-2-2022-07-25">changelog</a></strong>.</p> </blockquote> </details> <details> <summary>Changelog</summary> <p><em>Sourced from <a href="https://github.com/scrapy/scrapy/blob/master/docs/news.rst">scrapy's changelog</a>.</em></p> <blockquote> <h2>Scrapy 2.6.2 (2022-07-25)</h2> <p><strong>Security bug fix:</strong></p> <ul> <li> <p>When :class:<code>~scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware</code> processes a request with :reqmeta:<code>proxy</code> metadata, and that :reqmeta:<code>proxy</code> metadata includes proxy credentials, :class:<code>~scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware</code> sets the <code>Proxy-Authentication</code> header, but only if that header is not already set.</p> <p>There are third-party proxy-rotation downloader middlewares that set different :reqmeta:<code>proxy</code> metadata every time they process a request.</p> <p>Because of request retries and redirects, the same request can be processed by downloader middlewares more than once, including both :class:<code>~scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware</code> and any third-party proxy-rotation downloader middleware.</p> <p>These third-party proxy-rotation downloader middlewares could change the :reqmeta:<code>proxy</code> metadata of a request to a new value, but fail to remove the <code>Proxy-Authentication</code> header from the previous value of the :reqmeta:<code>proxy</code> metadata, causing the credentials of one proxy to be sent to a different proxy.</p> <p>To prevent the unintended leaking of proxy credentials, the behavior of :class:<code>~scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware</code> is now as follows when processing a request:</p> <ul> <li> <p>If the request being processed defines :reqmeta:<code>proxy</code> metadata that includes credentials, the <code>Proxy-Authorization</code> header is always updated to feature those credentials.</p> </li> <li> <p>If the request being processed defines :reqmeta:<code>proxy</code> metadata without credentials, the <code>Proxy-Authorization</code> header is removed <em>unless</em> it was originally defined for the same proxy URL.</p> <p>To remove proxy credentials while keeping the same proxy URL, remove the <code>Proxy-Authorization</code> header.</p> </li> <li> <p>If the request has no :reqmeta:<code>proxy</code> metadata, or that metadata is a falsy value (e.g. <code>None</code>), the <code>Proxy-Authorization</code> header is removed.</p> <p>It is no longer possible to set a proxy URL through the :reqmeta:<code>proxy</code> metadata but set the credentials through the <code>Proxy-Authorization</code> header. Set proxy credentials through the :reqmeta:<code>proxy</code> metadata instead.</p> </li> </ul> </li> </ul> <!-- raw HTML omitted --> </blockquote> <p>... 
(truncated)</p> </details> <details> <summary>Commits</summary> <ul> <li><a href="https://github.com/scrapy/scrapy/commit/aecbccbaa567b07694141a4503e9abf1bb2c919f"><code>aecbccb</code></a> Bump version: 2.6.1 → 2.6.2</li> <li><a href="https://github.com/scrapy/scrapy/commit/af7dd16d8ded3e6cb2946603688f4f4a5212e80f"><code>af7dd16</code></a> Merge pull request from GHSA-9x8m-2xpf-crp3</li> <li><a href="https://github.com/scrapy/scrapy/commit/42056090516bb0cc5d349e232298c711ec452bc5"><code>4205609</code></a> Fixed intersphinx references</li> <li><a href="https://github.com/scrapy/scrapy/commit/e3e69d1209407c72a6478936bdbfd32cc22e9432"><code>e3e69d1</code></a> Pin documentation requirements (<a href="https://github-redirect.dependabot.com/scrapy/scrapy/issues/5536">#5536</a>)</li> <li><a href="https://github.com/scrapy/scrapy/commit/54bfb9649bdec565f9798cc41643ed1bae25bd67"><code>54bfb96</code></a> Cover <a href="https://github-redirect.dependabot.com/scrapy/scrapy/issues/5525">#5525</a> in the 2.6.2 release notes (<a href="https://github-redirect.dependabot.com/scrapy/scrapy/issues/5535">#5535</a>)</li> <li><a href="https://github.com/scrapy/scrapy/commit/4ef71829b22b7362d08d6897090595138107852f"><code>4ef7182</code></a> If TWISTED_REACTOR is None, reuse any pre-installed reactor (<a href="https://github-redirect.dependabot.com/scrapy/scrapy/issues/5528">#5528</a>)</li> <li><a href="https://github.com/scrapy/scrapy/commit/1c1cd5d8eae48eade88560426499846d217a555f"><code>1c1cd5d</code></a> Update the 2.6.2 release notes</li> <li><a href="https://github.com/scrapy/scrapy/commit/84c29a286f6b9bc94a5318ca68cd4fc21244443a"><code>84c29a2</code></a> Unset the release date of still-unreleased 2.6.2 (<a href="https://github-redirect.dependabot.com/scrapy/scrapy/issues/5503">#5503</a>)</li> <li><a href="https://github.com/scrapy/scrapy/commit/b9b9422bb1025d00c5113994490ac71a162d775f"><code>b9b9422</code></a> Merge pull request <a href="https://github-redirect.dependabot.com/scrapy/scrapy/issues/5482">#5482</a> from alexpdev/parse_help_msg</li> <li><a href="https://github.com/scrapy/scrapy/commit/915c288205e2b9a0bdbbe18cc67cd23ba5bb4de3"><code>915c288</code></a> edit</li> <li>Additional commits viewable in <a href="https://github.com/scrapy/scrapy/compare/2.6.1...2.6.2">compare view</a></li> </ul> </details> <br /> [![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=scrapy&package-manager=pip&previous-version=2.6.1&new-version=2.6.2)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) --- <details> <summary>Dependabot commands and options</summary> <br /> You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. 
You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself) - `@dependabot use these labels` will set the current labels as the default for future PRs for this repo and language - `@dependabot use these reviewers` will set the current reviewers as the default for future PRs for this repo and language - `@dependabot use these assignees` will set the current assignees as the default for future PRs for this repo and language - `@dependabot use this milestone` will set the current milestone as the default for future PRs for this repo and language You can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/platformsh/platformsh-docs/network/alerts). </details>
2022-08-04T07:34:27
interactions-py/interactions.py
86
interactions-py__interactions.py-86
[ "84" ]
d40d98c48a6349bbd1088aedf3755f27a4a48892
diff --git a/discord_slash/error.py b/discord_slash/error.py --- a/discord_slash/error.py +++ b/discord_slash/error.py @@ -48,3 +48,8 @@ class CheckFailure(SlashCommandError): """ Command check has failed. """ + +class IncorrectType(SlashCommandError): + """ + Type passed was incorrect + """ diff --git a/discord_slash/model.py b/discord_slash/model.py --- a/discord_slash/model.py +++ b/discord_slash/model.py @@ -176,8 +176,9 @@ def from_type(cls, t: type): :return: :class:`.model.SlashCommandOptionType` or ``None`` """ if issubclass(t, str): return cls.STRING - if issubclass(t, int): return cls.INTEGER if issubclass(t, bool): return cls.BOOLEAN + # The check for bool MUST be above the check for integers as booleans subclass integers + if issubclass(t, int): return cls.INTEGER if issubclass(t, discord.abc.User): return cls.USER if issubclass(t, discord.abc.GuildChannel): return cls.CHANNEL if issubclass(t, discord.abc.Role): return cls.ROLE diff --git a/discord_slash/utils/manage_commands.py b/discord_slash/utils/manage_commands.py --- a/discord_slash/utils/manage_commands.py +++ b/discord_slash/utils/manage_commands.py @@ -2,7 +2,7 @@ import inspect import asyncio import aiohttp -from ..error import RequestFailure +from ..error import RequestFailure, IncorrectType from ..model import SlashCommandOptionType from collections.abc import Callable @@ -144,7 +144,7 @@ async def remove_all_commands_in(bot_id, def create_option(name: str, description: str, - option_type: int, + option_type: typing.Union[int, type], required: bool, choices: list = None) -> dict: """ @@ -157,6 +157,11 @@ def create_option(name: str, :param choices: Choices of the option. Can be empty. :return: dict """ + if not isinstance(option_type, int) or isinstance(option_type, bool): #Bool values are a subclass of int + original_type = option_type + option_type = SlashCommandOptionType.from_type(original_type) + if option_type is None: + raise IncorrectType(f"The type {original_type} is not recognized as a type that Discord accepts for slash commands.") return { "name": name, "description": description,
Allow passing of types to discord_slash.utils.manage_commands.create_option It would be nice to be able to pass a type, such as `str` or `discord.Role`, to the `option_type` argument of the function `discord_slash.utils.manage_commands.create_option`. There may be other functions within that module which this could apply to; I haven't checked in depth. All it would need to do is map the type to an integer. For example: `str` would be `3`
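A sketch of the usage the PR enables (parameter values are illustrative):

```python
from discord_slash.utils.manage_commands import create_option

# Previously option_type had to be Discord's raw integer (4 for INTEGER);
# with the PR, a plain Python type is mapped via SlashCommandOptionType.from_type.
opt = create_option(
    name="amount",
    description="How many messages to delete",
    option_type=int,
    required=True,
)
```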
This will be implemented in the next release. Correct me if I'm wrong, but as far as I know getting options from the type hints of a function is planned. They are talking about `discord_slash.utils.manage_commands.create_option`, which currently doesn't accept `str, int, discord.[...] etc` as the `option_type` argument on the dev branch. I haven't seen anything planned *specifically* about this: `model.SlashCommandOptionType.from_type(str)` would return the string type, which can then be set in `create_option`. Building on this, I wish `model.SlashCommandOptionType` had a shorter alias such as `OptionType`. It's pretty redundant to repeat something already known in the model script when it's an enum type. I don't know if @eunwoo1104 wants to change that. Alternatively, you could just do `from discord_slash.model import SlashCommandOptionType as OptionType`. Would integrating `SlashCommandOptionType` into `create_option` be a possible idea, so that you don't have to do `create_option(option_type=SlashCommandOptionType.from_type(type), [...])`? > This will be implemented in the next release. This is not a planned feature. > Would integrating `SlashCommandOptionType` into `create_option` be a possible idea? > so that you don't have to do `create_option(option_type=SlashCommandOptionType.from_type(type), [...])` This seems to be a good idea. I'll implement this when I get some time.
2021-02-03T11:02:31
interactions-py/interactions.py
89
interactions-py__interactions.py-89
[ "88" ]
6295b4c76c2f2d978c25b325b1b77d592fdaf3d3
diff --git a/discord_slash/client.py b/discord_slash/client.py --- a/discord_slash/client.py +++ b/discord_slash/client.py @@ -425,8 +425,8 @@ def add_subcommand(self, name = name.lower() description = description or getdoc(cmd) - if name in self.commands: - tgt = self.commands[name] + if base in self.commands: + tgt = self.commands[base] for x in tgt.allowed_guild_ids: if x not in guild_ids: guild_ids.append(x)
Subcommands are registered to guilds where they are not allowed I've noticed that when I use `guild_ids` on a subcommand of a command which is shared between multiple guilds, every subcommand is registered to all guilds where any of that command's subcommands is allowed. ## Steps 1. Register a subcommand `s1` for command `c` for **Guild A**: ```python @slash.subcommand( base="c", name="s1", guild_ids=[GUILD_A_ID], ) async def _handle(ctx): # .... ``` 2. Register a subcommand `s2` for command `c` for **Guild B**: ```python @slash.subcommand( base="c", name="s2", guild_ids=[GUILD_B_ID], ) async def _handle(ctx): # .... ``` ## Expected behavior **Guild A** has the `/c s1` command only and **Guild B** has the `/c s2` command only. ## Actual behavior **Guild A** has `/c s1` and `/c s2` but can only use `/c s1`, and **Guild B** has `/c s1` and `/c s2` but can only use `/c s2`.
I need to fix this ASAP. And I think I can do it by fixing [`Client#to_dict`](https://github.com/eunwoo1104/discord-py-slash-command/blob/master/discord_slash/client.py#L173) so that it makes copies of the commands with the appropriate subcommands for each guild.
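A hypothetical sketch of the per-guild payload the fix should produce, i.e. what `SlashCommand.to_dict()` ought to return for the scenario in the issue (keys follow the library's command format shown in the patches; the guild ids are placeholders):

```python
GUILD_A_ID, GUILD_B_ID = 111, 222  # placeholder snowflakes

expected = {
    "global": [],
    "guild": {
        GUILD_A_ID: [{"name": "c", "description": "No Description.",
                      "options": [{"name": "s1", "description": "No Description.",
                                   "type": 1, "options": []}]}],  # type 1 = SUB_COMMAND
        GUILD_B_ID: [{"name": "c", "description": "No Description.",
                      "options": [{"name": "s2", "description": "No Description.",
                                   "type": 1, "options": []}]}],
    },
}
```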
2021-02-06T04:02:09
interactions-py/interactions.py
96
interactions-py__interactions.py-96
[ "88" ]
e3bf04887c40665b87283d078cd8eb708cc5aa23
diff --git a/discord_slash/client.py b/discord_slash/client.py --- a/discord_slash/client.py +++ b/discord_slash/client.py @@ -1,3 +1,4 @@ +import copy import logging import typing import discord @@ -104,16 +105,16 @@ def get_cog_commands(self, cog: commands.Cog): self.commands[x.name] = x else: if x.base in self.commands: - for i in self.commands[x.base].allowed_guild_ids: - if i not in x.allowed_guild_ids: - x.allowed_guild_ids.append(i) + for i in x.allowed_guild_ids: + if i not in self.commands[x.base].allowed_guild_ids: + self.commands[x.base].allowed_guild_ids.append(i) self.commands[x.base].has_subcommands = True else: _cmd = { "func": None, "description": x.base_description, "auto_convert": {}, - "guild_ids": x.allowed_guild_ids, + "guild_ids": x.allowed_guild_ids.copy(), "api_options": [], "has_subcommands": True } @@ -188,65 +189,88 @@ async def to_dict(self): Commands are in the format specified by discord `here <https://discord.com/developers/docs/interactions/slash-commands#applicationcommand>`_ """ await self._discord.wait_until_ready() # In case commands are still not registered to SlashCommand. + all_guild_ids = [] + for x in self.commands: + for i in self.commands[x].allowed_guild_ids: + if i not in all_guild_ids: + all_guild_ids.append(i) cmds = { "global": [], - "guild": {} + "guild": {x: [] for x in all_guild_ids} } + wait = {} # Before merging to return dict, let's first put commands to temporary dict. for x in self.commands: selected = self.commands[x] - if selected.has_subcommands and selected.func: - # Registering both subcommand and command with same base name / name - # will result in only subcommand being registered, - # so we will warn this at registering subcommands. - self.logger.warning(f"Detected command name with same subcommand base name! " - f"This command will only have subcommand: {x}") - - options = [] - if selected.has_subcommands: - tgt = self.subcommands[x] - for y in tgt: - sub = tgt[y] - if isinstance(sub, model.SubcommandObject): - _dict = { - "name": sub.name, - "description": sub.description or "No Description.", - "type": model.SlashCommandOptionType.SUB_COMMAND, - "options": sub.options or [] - } - options.append(_dict) - else: - base_dict = { - "name": y, - "description": "No Description.", - "type": model.SlashCommandOptionType.SUB_COMMAND_GROUP, - "options": [] - } - for z in sub: - sub_sub = sub[z] - _dict = { - "name": sub_sub.name, - "description": sub_sub.description or "No Description.", - "type": model.SlashCommandOptionType.SUB_COMMAND, - "options": sub_sub.options or [] - } - base_dict["options"].append(_dict) - if sub_sub.subcommand_group_description: - base_dict["description"] = sub_sub.subcommand_group_description - options.append(base_dict) - command_dict = { "name": x, "description": selected.description or "No Description.", - "options": selected.options if not options else options + "options": selected.options or [] } if selected.allowed_guild_ids: for y in selected.allowed_guild_ids: - try: - cmds["guild"][y].append(command_dict) - except KeyError: - cmds["guild"][y] = [command_dict] + if y not in wait: + wait[y] = {} + wait[y][x] = copy.deepcopy(command_dict) + else: + if "global" not in wait: + wait["global"] = {} + wait["global"][x] = copy.deepcopy(command_dict) + + # Separated normal command add and subcommand add not to + # merge subcommands to one. 
More info at Issue #88 + # https://github.com/eunwoo1104/discord-py-slash-command/issues/88 + + for x in self.commands: + if not self.commands[x].has_subcommands: + continue + tgt = self.subcommands[x] + for y in tgt: + sub = tgt[y] + if isinstance(sub, model.SubcommandObject): + _dict = { + "name": sub.name, + "description": sub.description or "No Description.", + "type": model.SlashCommandOptionType.SUB_COMMAND, + "options": sub.options or [] + } + if sub.allowed_guild_ids: + for z in sub.allowed_guild_ids: + wait[z][x]["options"].append(_dict) + else: + wait["global"][x]["options"].append(_dict) + else: + queue = {} + base_dict = { + "name": y, + "description": "No Description.", + "type": model.SlashCommandOptionType.SUB_COMMAND_GROUP, + "options": [] + } + for z in sub: + sub_sub = sub[z] + _dict = { + "name": sub_sub.name, + "description": sub_sub.description or "No Description.", + "type": model.SlashCommandOptionType.SUB_COMMAND, + "options": sub_sub.options or [] + } + if sub_sub.allowed_guild_ids: + for i in sub_sub.allowed_guild_ids: + if i not in queue: + queue[i] = copy.deepcopy(base_dict) + queue[i]["options"].append(_dict) + else: + if "global" not in queue: + queue["global"] = copy.deepcopy(base_dict) + queue["global"]["options"].append(_dict) + for i in queue: + wait[i][x]["options"].append(queue[i]) + + for x in wait: + if x == "global": + [cmds["global"].append(n) for n in wait["global"].values()] else: - cmds["global"].append(command_dict) + [cmds["guild"][x].append(n) for n in wait[x].values()] return cmds @@ -326,8 +350,10 @@ def add_slash_command(self, "connector": connector or {}, "has_subcommands": has_subcommands } - self.commands[name] = model.CommandObject(name, _cmd) + obj = model.CommandObject(name, _cmd) + self.commands[name] = obj self.logger.debug(f"Added command `{name}`") + return obj def add_subcommand(self, cmd, @@ -371,10 +397,9 @@ def add_subcommand(self, description = description or getdoc(cmd) if base in self.commands: - tgt = self.commands[base] - for x in tgt.allowed_guild_ids: - if x not in guild_ids: - guild_ids.append(x) + for x in guild_ids: + if x not in self.commands[base].allowed_guild_ids: + self.commands[base].allowed_guild_ids.append(x) if options is None: options = manage_commands.generate_options(cmd, description) @@ -382,7 +407,7 @@ def add_subcommand(self, _cmd = { "func": None, "description": base_description, - "guild_ids": guild_ids, + "guild_ids": guild_ids.copy(), "api_options": [], "connector": {}, "has_subcommands": True @@ -401,7 +426,6 @@ def add_subcommand(self, self.commands[base] = model.CommandObject(base, _cmd) else: self.commands[base].has_subcommands = True - self.commands[base].allowed_guild_ids = guild_ids if self.commands[base].description: _cmd["description"] = self.commands[base].description if base not in self.subcommands: @@ -411,12 +435,15 @@ def add_subcommand(self, self.subcommands[base][subcommand_group] = {} if name in self.subcommands[base][subcommand_group]: raise error.DuplicateCommand(f"{base} {subcommand_group} {name}") - self.subcommands[base][subcommand_group][name] = model.SubcommandObject(_sub, base, name, subcommand_group) + obj = model.SubcommandObject(_sub, base, name, subcommand_group) + self.subcommands[base][subcommand_group][name] = obj else: if name in self.subcommands[base]: raise error.DuplicateCommand(f"{base} {name}") - self.subcommands[base][name] = model.SubcommandObject(_sub, base, name) + obj = model.SubcommandObject(_sub, base, name) + self.subcommands[base][name] = obj 
self.logger.debug(f"Added subcommand `{base} {subcommand_group or ''} {name or cmd.__name__}`") + return obj def slash(self, *, @@ -485,8 +512,8 @@ async def _pick(ctx, choice1, choice2): # Command with 1 or more args. guild_ids = [guild_id] def wrapper(cmd): - self.add_slash_command(cmd, name, description, guild_ids, options, connector) - return cmd + obj = self.add_slash_command(cmd, name, description, guild_ids, options, connector) + return obj return wrapper @@ -557,8 +584,8 @@ async def _group_kick_user(ctx, user): subcommand_group_description = subcommand_group_description or sub_group_desc def wrapper(cmd): - self.add_subcommand(cmd, base, subcommand_group, name, description, base_description, subcommand_group_description, guild_ids, options, connector) - return cmd + obj = self.add_subcommand(cmd, base, subcommand_group, name, description, base_description, subcommand_group_description, guild_ids, options, connector) + return obj return wrapper
Subcommands are registered to guilds where they are not allowed I've noticed that when I use `guild_ids` on a subcommand of a command which is shared between multiple guilds, every subcommand is registered to all guilds where any of that command's subcommands is allowed. ## Steps 1. Register a subcommand `s1` for command `c` for **Guild A**: ```python @slash.subcommand( base="c", name="s1", guild_ids=[GUILD_A_ID], ) async def _handle(ctx): # .... ``` 2. Register a subcommand `s2` for command `c` for **Guild B**: ```python @slash.subcommand( base="c", name="s2", guild_ids=[GUILD_B_ID], ) async def _handle(ctx): # .... ``` ## Expected behavior **Guild A** has the `/c s1` command only and **Guild B** has the `/c s2` command only. ## Actual behavior **Guild A** has `/c s1` and `/c s2` but can only use `/c s1`, and **Guild B** has `/c s1` and `/c s2` but can only use `/c s2`.
I need to fix this ASAP. And I think I can do it by fixing [`Client#to_dict`](https://github.com/eunwoo1104/discord-py-slash-command/blob/master/discord_slash/client.py#L173) so that it makes copies of the commands with the appropriate subcommands for each guild. This happens due to merging the subcommands into one command body when requesting the API. If done properly, fixing this will require a complete redesign of auto-registering. ~~Unfortunately I'm not sure if it will work since subcommands are actually one of the option types of a command.~~ Nevermind, I just realized this was about separate guild commands. @eunwoo1104 after a few hours of digging I realized that this really needs a complete redesign of the current merging. There are many corner cases, for example: 1. One subcommand is a guild command and another is not. 2. One subcommand in a subgroup is a guild command and another is not. So anytime there are a guild subcommand/subgroup and a global subcommand/subgroup, you have to split the command into several: a command with the global subcommands plus the guild-specific subcommands, repeated for every guild with guild commands. With the current implementation this increases the number of requests and memory usage exponentially. Also, it's [unclear what Discord will do with guild commands](https://github.com/discord/discord-api-docs/issues/2336#issuecomment-761147187) and the [permissions model](https://github.com/discord/discord-api-docs/issues/2315#issuecomment-761131184) in general. I still prefer this issue to stay open just for other members to be aware of it. It may make sense to add a warning not to use different guild ids for subcommands. I've been thinking about this and there are even more problems when you take into consideration that commands in different guilds *could* also have different descriptions, options, etc. This is not going to be a simple fix, possibly requiring a rewrite of how commands are created by the user. I wouldn't worry about this increasing the amount of requests made though; it already makes at least one request per guild, and I don't think this would actually increase the amount at all.
2021-02-12T15:20:51
interactions-py/interactions.py
127
interactions-py__interactions.py-127
[ "115" ]
4d974f544dd47585c1af67a1c78b87f2b51e2e31
diff --git a/discord_slash/client.py b/discord_slash/client.py --- a/discord_slash/client.py +++ b/discord_slash/client.py @@ -691,12 +691,15 @@ async def invoke_command(self, func, ctx, args): try: not_kwargs = False if isinstance(args, dict): + ctx.kwargs = args + ctx.args = list(args.values()) try: await func.invoke(ctx, **args) except TypeError: args = list(args.values()) not_kwargs = True else: + ctx.args = args not_kwargs = True if not_kwargs: await func.invoke(ctx, *args) diff --git a/discord_slash/context.py b/discord_slash/context.py --- a/discord_slash/context.py +++ b/discord_slash/context.py @@ -18,6 +18,8 @@ class SlashContext: :ivar message: Message that invoked the slash command. :ivar name: Name of the command. + :ivar args: List of processed arguments invoked with the command. + :ivar kwargs: Dictionary of processed arguments invoked with the command. :ivar subcommand_name: Subcommand of the command. :ivar subcommand_group: Subcommand group of the command. :ivar interaction_id: Interaction ID of the command message. @@ -40,6 +42,8 @@ def __init__(self, self.__token = _json["token"] self.message = None # Should be set later. self.name = self.command = self.invoked_with = _json["data"]["name"] + self.args = [] + self.kwargs = {} self.subcommand_name = self.invoked_subcommand = self.subcommand_passed = None self.subcommand_group = self.invoked_subcommand_group = self.subcommand_group_passed = None self.interaction_id = _json["id"]
Add parsed args to SlashContext Regular command contexts have [args/kwargs attributes](https://discordpy.readthedocs.io/en/latest/ext/commands/api.html#discord.ext.commands.Context.args), this is useful for debugging command errors as the user input can be easily accessed by command error events, however, SlashContext doesn't have this (despite it being arguably more important, as there's no message to log). Would be a nice little enhancement.
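A sketch of what the new attributes are useful for, assuming the library's `on_slash_command_error` event and an already-configured `bot` (both assumptions, not shown in the record):

```python
@bot.event
async def on_slash_command_error(ctx, ex):
    # There is no invoking message to log for slash commands, so the parsed
    # input stored on the context is the only record of what the user sent.
    print(f"/{ctx.name} failed with args={ctx.args} kwargs={ctx.kwargs}: {ex}")
```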
2021-03-25T11:09:21
interactions-py/interactions.py
152
interactions-py__interactions.py-152
[ "151" ]
9550980fdc095769713c6168290551f61b1791f9
diff --git a/discord_slash/client.py b/discord_slash/client.py --- a/discord_slash/client.py +++ b/discord_slash/client.py @@ -349,7 +349,7 @@ async def sync_all_commands(self, delete_from_unused_guilds=False): for guild in other_guilds: with suppress(discord.Forbidden): - existing = self.req.get_all_commands(guild_id = guild) + existing = await self.req.get_all_commands(guild_id = guild) if len(existing) != 0: await self.req.put_slash_commands(slash_commands=[], guild_id=guild)
Missing await in sync_all_commands ``` Task exception was never retrieved future: <Task finished name='Task-26' coro=<SlashCommand.sync_all_commands() done, defined at C:\Users\henry\AppData\Local\Programs\Python\Python39\lib\site-packages\discord_slash\client.py:294> exception=TypeError("object of type 'coroutine' has no len()")> Traceback (most recent call last): File "C:\Users\henry\AppData\Local\Programs\Python\Python39\lib\site-packages\discord_slash\client.py", line 353, in sync_all_commands if len(existing) != 0: TypeError: object of type 'coroutine' has no len() C:\Users\henry\AppData\Local\Programs\Python\Python39\lib\asyncio\base_events.py:1891: RuntimeWarning: coroutine 'HTTPClient.request' was never awaited handle = None # Needed to break cycles when an exception occurs. RuntimeWarning: Enable tracemalloc to get the object allocation traceback ``` There is a missing `await` [here](https://github.com/eunwoo1104/discord-py-slash-command/blob/cc62eeb22718857ca1603205554834b8f3ac7d76/discord_slash/client.py#L352), due to [`SlashCommandRequest.get_all_commands`](https://github.com/eunwoo1104/discord-py-slash-command/blob/cc62eeb22718857ca1603205554834b8f3ac7d76/discord_slash/http.py#L48) calling [`self.command_request`](https://github.com/eunwoo1104/discord-py-slash-command/blob/cc62eeb22718857ca1603205554834b8f3ac7d76/discord_slash/http.py#L72), both of which are functions; `command_request` returns the result of calling [`self._discord.http.request`](https://github.com/Rapptz/discord.py/blob/8517f1e085df27acd5191d0d0cb2363242be0c29/discord/http.py#L134), which is/returns a coroutine. `sync_all_commands` needs an `await`, but `get_all_commands` and `command_request` should probably be asynchronous and themselves `await` that which they call.
From looking at the source, changing `command_request` would require a lot of functions to be changed to coroutines, but it would make the code much clearer; alternatively, type annotations could help clarify (i.e. `def f() -> typing.Coroutine:` or `collections.abc.Coroutine`).
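The bug pattern in miniature (a standalone sketch, not the repo's code):

```python
import asyncio

async def get_all_commands():  # stand-in for the coroutine-returning helper
    return []

async def broken():
    existing = get_all_commands()   # missing await -> a coroutine object
    return len(existing)            # TypeError: object of type 'coroutine' has no len()

async def fixed():
    existing = await get_all_commands()
    return len(existing)            # 0

print(asyncio.run(fixed()))
```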
2021-04-13T18:05:42
interactions-py/interactions.py
168
interactions-py__interactions.py-168
[ "167" ]
d5cba045a60aa5f9e3e16d16e7a4520fbdd31e3c
diff --git a/discord_slash/context.py b/discord_slash/context.py --- a/discord_slash/context.py +++ b/discord_slash/context.py @@ -134,9 +134,6 @@ async def send(self, """ Sends response of the slash command. - .. note:: - - Param ``hidden`` doesn't support embed and file. - .. warning:: - Since Release 1.0.9, this is completely changed. If you are migrating from older version, please make sure to fix the usage. - You can't use both ``embed`` and ``embeds`` at the same time, also applies to ``file`` and ``files``. @@ -186,8 +183,6 @@ async def send(self, else self.bot.allowed_mentions.to_dict() if self.bot.allowed_mentions else {} } if hidden: - if embeds or files: - self._logger.warning("Embed/File is not supported for `hidden`!") base["flags"] = 64 initial_message = False
Ephemeral (hidden) Messages now support embeds Whenever an embed is sent with `hidden=True`, a warning is printed: `Embed/File is not supported for hidden!`. But ephemeral messages now support embeds.
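Usage the change permits (a sketch mirroring the library's usual setup; the command name and embed are illustrative):

```python
import discord
from discord.ext import commands
from discord_slash import SlashCommand, SlashContext

bot = commands.Bot(command_prefix="!", intents=discord.Intents.default())
slash = SlashCommand(bot)

@slash.slash(name="secret")
async def _secret(ctx: SlashContext):
    embed = discord.Embed(title="Only you can see this")
    await ctx.send(embeds=[embed], hidden=True)  # ephemeral response with an embed
```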
2021-05-03T17:53:58
interactions-py/interactions.py
174
interactions-py__interactions.py-174
[ "173" ]
d5cba045a60aa5f9e3e16d16e7a4520fbdd31e3c
diff --git a/discord_slash/model.py b/discord_slash/model.py --- a/discord_slash/model.py +++ b/discord_slash/model.py @@ -336,7 +336,7 @@ async def _slash_edit(self, **fields): async def edit(self, **fields): """Refer :meth:`discord.Message.edit`.""" - if "file" in fields or "files" in fields: + if "file" in fields or "files" in fields or "embeds" in fields: await self._slash_edit(**fields) else: try:
Editing SlashMessage should support multiple embeds Calling `discord_slash.model.SlashMessage.edit` with an `embeds` kwarg does not work. However the official Discord API documentation says that it should be possible [when editing interaction messages](https://discord.com/developers/docs/interactions/slash-commands#edit-original-interaction-response).
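A fragment meant for inside an async slash command handler with a `SlashContext` named `ctx` (sketch only; embed contents are illustrative):

```python
first = discord.Embed(title="Page 1")
second = discord.Embed(title="Page 2")
msg = await ctx.send(embeds=[first])
# With the patch, an `embeds` edit is routed through the interaction
# endpoint, which supports multiple embeds per message.
await msg.edit(embeds=[first, second])
```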
2021-05-09T19:43:50
interactions-py/interactions.py
191
interactions-py__interactions.py-191
[ "190" ]
83c009b932fa66d2935cbe5437e792461451c75f
diff --git a/discord_slash/client.py b/discord_slash/client.py --- a/discord_slash/client.py +++ b/discord_slash/client.py @@ -887,6 +887,9 @@ async def on_socket_response(self, msg): to_use = msg["d"] + if to_use["type"] not in (1, 2): + return # to only process ack and slash-commands and exclude other interactions like buttons + if to_use["data"]["name"] in self.commands: ctx = context.SlashContext(self.req, to_use, self._discord, self.logger)
Pressing a Button sent by the current bot causes an exception in on_socket_response When your bot sends a message with the new Discord buttons, pressing a button raises the following exception in the console (not critical, but annoying and suboptimal): ``` Ignoring exception in on_socket_response Traceback (most recent call last): File "***\venv\lib\site-packages\discord\client.py", line 343, in _run_event await coro(*args, **kwargs) File "***\venv\lib\site-packages\discord_slash\client.py", line 762, in on_socket_response if to_use["data"]["name"] in self.commands: KeyError: 'name' ``` Steps to reproduce: 1) Have your bot send a message containing components (buttons) (for example, sent by another Python script with the discord-components library) 2) Launch your bot with discord-py-slash-command 3) Press a button in the message 4) Exception Reproducible with the example from the readme ```py import discord from discord.ext import commands from discord_slash import SlashCommand, SlashContext bot = commands.Bot(command_prefix="!", intents=discord.Intents.all()) slash = SlashCommand(bot) @slash.slash(name="test") async def _test(ctx: SlashContext): embed = discord.Embed(title="embed test") await ctx.send(content="test", embeds=[embed]) bot.run("token") ``` discord-py-slash-command version: 1.1.2 from PyPI
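The guard from the patch in isolation. Per Discord's interaction types, 1 is a ping/ack and 2 is an application command; a button press is type 3 and carries no `["data"]["name"]`, hence the KeyError:

```python
def should_handle(payload: dict) -> bool:
    # Only ack (1) and slash command (2) interactions have the fields
    # on_socket_response expects; component presses are type 3.
    return payload["type"] in (1, 2)
```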
2021-05-26T19:26:31
interactions-py/interactions.py
197
interactions-py__interactions.py-197
[ "195" ]
af5c23e0af2e47ae872ff7058301bb01885f89d1
diff --git a/discord_slash/model.py b/discord_slash/model.py --- a/discord_slash/model.py +++ b/discord_slash/model.py @@ -70,16 +70,18 @@ class CommandData: :ivar name: Name of the command. :ivar description: Description of the command. + :ivar default_permission: Indicates whether users should have permissions to run this command by default. :ivar options: List of :class:`OptionData`. :ivar id: Command id, this is received from discord so may not be present :ivar application_id: The application id of the bot, required only when the application id and bot id are different. (old bots) """ def __init__( - self, name, description, options=None, id=None, application_id=None, version=None, **kwargs + self, name, description, options=None, default_permission=True, id=None, application_id=None, version=None, **kwargs ): self.name = name self.description = description + self.default_permission = default_permission self.id = id self.application_id = application_id self.version = version @@ -96,6 +98,7 @@ def __eq__(self, other): self.name == other.name and self.description == other.description and self.options == other.options + and self.default_permission == other.default_permission ) else: return False
default_permission doesn't have any effect Hi, Thanks a lot for this project! I am trying to restrict the use of a command to a specific role using the following options, though it does not seem to have any effect: ``` "default_permission": False, "permissions": { GUILD_ID : [ create_permission(ROLE_ID, SlashCommandPermissionType.ROLE, True) ] }, ``` Other options I've tried that don't work: - Only setting one "positive" permission for the role - Setting one "negative" permission for the role `@everyone` and one positive permission for one role (the negative permission has no effect) - Setting default_permission=False without anything else also has no effect. The only way I can prevent a role/user from using the command is by explicitly adding a negative permission for them, meaning that for admin commands I would need to add a negative permission for all the guild users (because some users don't have roles). I believe that's not the intended behavior and is a bug, right?
I used this when I was testing the permission PR, and only the user with id 281300452270931969 was able to run the command. That little code snippet from you isn't enough to pinpoint any issue with your code, though. ```py @slash.slash(name="test_guild", description="test_guild", default_permission=False, guild_ids=[750556940127436880, 823454213089787914], options=[create_option("test", "lol", SlashCommandOptionType.INTEGER, True)]) @slash.permission(823454213089787914, generate_permissions(allowed_users=[281300452270931969])) async def test_guild(ctx, test): print(test) await ctx.send("test_guild") ``` So I investigated further. It looks like creating a command without `default_permission=False` the first time it is created and then adding it later has no effect. I was able to work around the issue by temporarily renaming the command, adding the option, and renaming it back to its original name. Here is roughly the script I have, to give you more context, though I'm not sure if that helps. ```python slash = SlashCommand(bot, sync_commands=True, delete_from_unused_guilds=True) slash.add_slash_command( name=BASE_COMMAND_NAME, cmd=_base_cmd, guild_ids=[GUILD_ID], options=[], default_permission=False, permissions={ GUILD_ID: [ create_permission(RESTRICTED_COMMANDS_ROLE_ID, SlashCommandPermissionType.ROLE, True) ] }) slash.add_subcommand(base=BASE_COMMAND_NAME, **{ "cmd": _some_function, "description": "description", "guild_ids": [GUILD_ID], "connector": { "utilisateur": "member" }, "options": [ { 'name': 'utilisateur', 'description': "User to whom the welcome message will be sent", 'type': SlashCommandOptionType.USER, 'required': True } ] }) ```
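Why renaming the command "fixed" it, in miniature — a sketch using the patched `CommandData` from discord_slash/model.py. Before the patch, `__eq__` ignored `default_permission`, so flipping it never registered as a change during sync:

```python
from discord_slash.model import CommandData

local = CommandData("admin", "desc", options=[], default_permission=False)
remote = CommandData("admin", "desc", options=[], default_permission=True)
assert local != remote  # holds only once default_permission is part of __eq__
```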
2021-05-31T16:17:24
interactions-py/interactions.py
226
interactions-py__interactions.py-226
[ "225" ]
a7f468b66137726a2535a9d62177f6e25a8f8d2f
diff --git a/discord_slash/client.py b/discord_slash/client.py --- a/discord_slash/client.py +++ b/discord_slash/client.py @@ -535,6 +535,7 @@ def add_slash_command( """ name = name or cmd.__name__ name = name.lower() + guild_ids = guild_ids if guild_ids else [] if not all(isinstance(item, int) for item in guild_ids): raise error.IncorrectGuildIDType( f"The snowflake IDs {guild_ids} given are not a list of integers. Because of discord.py convention, please use integer IDs instead. Furthermore, the command '{name}' will be deactivated and broken until fixed." @@ -616,7 +617,8 @@ def add_subcommand( name = name or cmd.__name__ name = name.lower() description = description or getdoc(cmd) - if guild_ids and not all(isinstance(item, int) for item in guild_ids): + guild_ids = guild_ids if guild_ids else [] + if not all(isinstance(item, int) for item in guild_ids): raise error.IncorrectGuildIDType( f"The snowflake IDs {guild_ids} given are not a list of integers. Because of discord.py convention, please use integer IDs instead. Furthermore, the command '{name}' will be deactivated and broken until fixed." ) diff --git a/discord_slash/cog_ext.py b/discord_slash/cog_ext.py --- a/discord_slash/cog_ext.py +++ b/discord_slash/cog_ext.py @@ -58,11 +58,10 @@ def wrapper(cmd): else: opts = options - if guild_ids is not None: - if not all(isinstance(item, int) for item in guild_ids): - raise IncorrectGuildIDType( - f"The snowflake IDs {guild_ids} given are not a list of integers. Because of discord.py convention, please use integer IDs instead. Furthermore, the command '{name or cmd.__name__}' will be deactivated and broken until fixed." - ) + if guild_ids and not all(isinstance(item, int) for item in guild_ids): + raise IncorrectGuildIDType( + f"The snowflake IDs {guild_ids} given are not a list of integers. Because of discord.py convention, please use integer IDs instead. Furthermore, the command '{name or cmd.__name__}' will be deactivated and broken until fixed." + ) _cmd = { "func": cmd, @@ -147,11 +146,10 @@ def wrapper(cmd): else: opts = options - if guild_ids is not None: - if not all(isinstance(item, int) for item in guild_ids): - raise IncorrectGuildIDType( - f"The snowflake IDs {guild_ids} given are not a list of integers. Because of discord.py convention, please use integer IDs instead. Furthermore, the command '{name or cmd.__name__}' will be deactivated and broken until fixed." - ) + if guild_ids and not all(isinstance(item, int) for item in guild_ids): + raise IncorrectGuildIDType( + f"The snowflake IDs {guild_ids} given are not a list of integers. Because of discord.py convention, please use integer IDs instead. Furthermore, the command '{name or cmd.__name__}' will be deactivated and broken until fixed." + ) _cmd = { "func": None,
Traceback from library on basic function Traceback occurs on version 2.0.1; on 1.1.0 it works as expected. Python 3.8.5. ### Traceback ``` Traceback (most recent call last): File "bot.py", line 28, in <module> async def purge(ctx, number_of_messages : int): File "/home/solumath/.local/lib/python3.8/site-packages/discord_slash/client.py", line 760, in wrapper obj = self.add_slash_command( File "/home/solumath/.local/lib/python3.8/site-packages/discord_slash/client.py", line 538, in add_slash_command if not all(isinstance(item, int) for item in guild_ids): TypeError: 'NoneType' object is not iterable ``` ### Code ```py @slash.slash(name="purge", description="delete number of messages") @commands.has_permissions(manage_messages=True) async def purge(ctx, number_of_messages : int): await ctx.channel.purge(limit=number_of_messages) await ctx.send("What is real? How do you define real?", delete_after=5) ``` The problem is probably in the library, as discussed on the Discord server.
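The failure and the patch's normalisation, in isolation:

```python
guild_ids = None  # the default when a command has no guild restriction

try:
    all(isinstance(item, int) for item in guild_ids)
except TypeError as ex:
    print(ex)  # 'NoneType' object is not iterable

guild_ids = guild_ids if guild_ids else []  # normalise first, as the patch does
assert all(isinstance(item, int) for item in guild_ids)
```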
2021-06-23T12:30:46
interactions-py/interactions.py
230
interactions-py__interactions.py-230
[ "228" ]
83937fefc297c3062c018381ef5c7751225b5d9b
diff --git a/discord_slash/context.py b/discord_slash/context.py --- a/discord_slash/context.py +++ b/discord_slash/context.py @@ -373,33 +373,57 @@ async def edit_origin(self, **fields): """ _resp = {} - content = fields.get("content") - if content: - _resp["content"] = str(content) + try: + content = fields["content"] + except KeyError: + pass + else: + if content is not None: + content = str(content) + _resp["content"] = content + + try: + components = fields["components"] + except KeyError: + pass + else: + if components is None: + _resp["components"] = [] + else: + _resp["components"] = components + + try: + embeds = fields["embeds"] + except KeyError: + # Nope + pass + else: + if not isinstance(embeds, list): + raise error.IncorrectFormat("Provide a list of embeds.") + if len(embeds) > 10: + raise error.IncorrectFormat("Do not provide more than 10 embeds.") + _resp["embeds"] = [e.to_dict() for e in embeds] + + try: + embed = fields["embed"] + except KeyError: + pass + else: + if "embeds" in _resp: + raise error.IncorrectFormat("You can't use both `embed` and `embeds`!") + + if embed is None: + _resp["embeds"] = [] + else: + _resp["embeds"] = [embed.to_dict()] - embed = fields.get("embed") - embeds = fields.get("embeds") file = fields.get("file") files = fields.get("files") - components = fields.get("components") - if components: - _resp["components"] = components - - if embed and embeds: - raise error.IncorrectFormat("You can't use both `embed` and `embeds`!") - if file and files: + if files is not None and file is not None: raise error.IncorrectFormat("You can't use both `file` and `files`!") if file: files = [file] - if embed: - embeds = [embed] - if embeds: - if not isinstance(embeds, list): - raise error.IncorrectFormat("Provide a list of embeds.") - elif len(embeds) > 10: - raise error.IncorrectFormat("Do not provide more than 10 embeds.") - _resp["embeds"] = [x.to_dict() for x in embeds] allowed_mentions = fields.get("allowed_mentions") _resp["allowed_mentions"] = ( diff --git a/discord_slash/model.py b/discord_slash/model.py --- a/discord_slash/model.py +++ b/discord_slash/model.py @@ -495,33 +495,57 @@ async def _slash_edit(self, **fields): """ _resp = {} - content = fields.get("content") - if content: - _resp["content"] = str(content) + try: + content = fields["content"] + except KeyError: + pass + else: + if content is not None: + content = str(content) + _resp["content"] = content + + try: + components = fields["components"] + except KeyError: + pass + else: + if components is None: + _resp["components"] = [] + else: + _resp["components"] = components + + try: + embeds = fields["embeds"] + except KeyError: + # Nope + pass + else: + if not isinstance(embeds, list): + raise error.IncorrectFormat("Provide a list of embeds.") + if len(embeds) > 10: + raise error.IncorrectFormat("Do not provide more than 10 embeds.") + _resp["embeds"] = [e.to_dict() for e in embeds] + + try: + embed = fields["embed"] + except KeyError: + pass + else: + if "embeds" in _resp: + raise error.IncorrectFormat("You can't use both `embed` and `embeds`!") + + if embed is None: + _resp["embeds"] = [] + else: + _resp["embeds"] = [embed.to_dict()] - embed = fields.get("embed") - embeds = fields.get("embeds") file = fields.get("file") files = fields.get("files") - components = fields.get("components") - if components: - _resp["components"] = components - - if embed and embeds: - raise error.IncorrectFormat("You can't use both `embed` and `embeds`!") - if file and files: + if files is not None and file is 
not None: raise error.IncorrectFormat("You can't use both `file` and `files`!") if file: files = [file] - if embed: - embeds = [embed] - if embeds: - if not isinstance(embeds, list): - raise error.IncorrectFormat("Provide a list of embeds.") - elif len(embeds) > 10: - raise error.IncorrectFormat("Do not provide more than 10 embeds.") - _resp["embeds"] = [x.to_dict() for x in embeds] allowed_mentions = fields.get("allowed_mentions") _resp["allowed_mentions"] = (
Cannot remove embeds or components from a message [`ComponentContext.edit_origin`](https://github.com/discord-py-slash-commands/discord-py-interactions/blob/83937fefc297c3062c018381ef5c7751225b5d9b/discord_slash/context.py#L369) and `SlashMessage.edit` ([`SlashMessage._slash_edit`](https://github.com/discord-py-slash-commands/discord-py-interactions/blob/a7f468b66137726a2535a9d62177f6e25a8f8d2f/discord_slash/model.py#L492)) do not support passing an empty list `[]` for the `embeds` or `components` keyword argument, which prevents removing them from a message.
Going to add onto this - this seems to be due to how the library checks for equality and the existence of kwargs. Empty lists are falsy values in Python, and since the library does `if kwargs.get("kwarg")` to see whether a kwarg exists, even when the kwarg does exist the library mistakenly treats a falsy `[]` as absent. This can be solved simply by using `is not None` for those checks instead. Might make the PR myself to fix this, who knows.
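The falsy-check pitfall from the discussion, in isolation:

```python
fields = {"components": []}  # caller asks for components to be removed

if fields.get("components"):              # [] is falsy -> branch skipped, nothing removed
    print("would update components")

if fields.get("components") is not None:  # correct: [] means "set to empty"
    print("components updated to []")
```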
2021-06-24T03:07:32
interactions-py/interactions.py
231
interactions-py__interactions.py-231
[ "229" ]
83937fefc297c3062c018381ef5c7751225b5d9b
diff --git a/discord_slash/context.py b/discord_slash/context.py --- a/discord_slash/context.py +++ b/discord_slash/context.py @@ -188,15 +188,22 @@ async def send( "The top level of the components list must be made of ActionRows!" ) + if allowed_mentions is not None: + if self.bot.allowed_mentions is not None: + allowed_mentions = self.bot.allowed_mentions.merge(allowed_mentions).to_dict() + else: + allowed_mentions = allowed_mentions.to_dict() + else: + if self.bot.allowed_mentions is not None: + allowed_mentions = self.bot.allowed_mentions.to_dict() + else: + allowed_mentions = {} + base = { "content": content, "tts": tts, "embeds": [x.to_dict() for x in embeds] if embeds else [], - "allowed_mentions": allowed_mentions.to_dict() - if allowed_mentions - else self.bot.allowed_mentions.to_dict() - if self.bot.allowed_mentions - else {}, + "allowed_mentions": allowed_mentions, "components": components or [], } if hidden: @@ -402,13 +409,18 @@ async def edit_origin(self, **fields): _resp["embeds"] = [x.to_dict() for x in embeds] allowed_mentions = fields.get("allowed_mentions") - _resp["allowed_mentions"] = ( - allowed_mentions.to_dict() - if allowed_mentions - else self.bot.allowed_mentions.to_dict() - if self.bot.allowed_mentions - else {} - ) + if allowed_mentions is not None: + if self.bot.allowed_mentions is not None: + _resp["allowed_mentions"] = self.bot.allowed_mentions.merge( + allowed_mentions + ).to_dict() + else: + _resp["allowed_mentions"] = allowed_mentions.to_dict() + else: + if self.bot.allowed_mentions is not None: + _resp["allowed_mentions"] = self.bot.allowed_mentions.to_dict() + else: + _resp["allowed_mentions"] = {} if not self.responded: if files and not self.deferred: diff --git a/discord_slash/model.py b/discord_slash/model.py --- a/discord_slash/model.py +++ b/discord_slash/model.py @@ -524,13 +524,18 @@ async def _slash_edit(self, **fields): _resp["embeds"] = [x.to_dict() for x in embeds] allowed_mentions = fields.get("allowed_mentions") - _resp["allowed_mentions"] = ( - allowed_mentions.to_dict() - if allowed_mentions - else self._state.allowed_mentions.to_dict() - if self._state.allowed_mentions - else {} - ) + if allowed_mentions is not None: + if self.bot.allowed_mentions is not None: + _resp["allowed_mentions"] = self.bot.allowed_mentions.merge( + allowed_mentions + ).to_dict() + else: + _resp["allowed_mentions"] = allowed_mentions.to_dict() + else: + if self.bot.allowed_mentions is not None: + _resp["allowed_mentions"] = self.bot.allowed_mentions.to_dict() + else: + _resp["allowed_mentions"] = {} await self._http.edit(_resp, self.__interaction_token, self.id, files=files)
allowed_mentions differ from discord.py discord.py [merges](https://github.com/Rapptz/discord.py/blob/d30fea5b0dcba9cd130026b56ec01e78bd788aff/discord/abc.py#L1262) the local `allowed_mentions` into the global `allowed_mentions`, whereas discord-py-slash-commands uses the global `allowed_mentions` as a default value if the local `allowed_mentions` isn't set. ([InteractionContext.send](https://github.com/discord-py-slash-commands/discord-py-interactions/blob/859beb65c2744a5a4d3a422d647268dbe6502669/discord_slash/context.py#L195), [ComponentContext.edit_origin](https://github.com/discord-py-slash-commands/discord-py-interactions/blob/859beb65c2744a5a4d3a422d647268dbe6502669/discord_slash/context.py#L406), SlashMessage.edit [[SlashMessage._slash_edit](https://github.com/discord-py-slash-commands/discord-py-interactions/blob/a7f468b66137726a2535a9d62177f6e25a8f8d2f/discord_slash/model.py#L528)])
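The behavioural difference in miniature — discord.py's `AllowedMentions.merge` layers per-call settings over the bot default instead of replacing it (values here are illustrative):

```python
import discord

bot_default = discord.AllowedMentions(everyone=False, roles=False)
per_call = discord.AllowedMentions(roles=True)

merged = bot_default.merge(per_call)
# everyone stays False (kept from the default); roles becomes True (per call).
```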
2021-06-24T22:02:48
interactions-py/interactions.py
252
interactions-py__interactions.py-252
[ "251" ]
38bbde4f1643fe83a09cfa9a44c6e0e5fcbb132d
diff --git a/discord_slash/model.py b/discord_slash/model.py --- a/discord_slash/model.py +++ b/discord_slash/model.py @@ -549,15 +549,15 @@ async def _slash_edit(self, **fields): allowed_mentions = fields.get("allowed_mentions") if allowed_mentions is not None: - if self.bot.allowed_mentions is not None: - _resp["allowed_mentions"] = self.bot.allowed_mentions.merge( + if self._state.allowed_mentions is not None: + _resp["allowed_mentions"] = self._state.allowed_mentions.merge( allowed_mentions ).to_dict() else: _resp["allowed_mentions"] = allowed_mentions.to_dict() else: - if self.bot.allowed_mentions is not None: - _resp["allowed_mentions"] = self.bot.allowed_mentions.to_dict() + if self._state.allowed_mentions is not None: + _resp["allowed_mentions"] = self._state.allowed_mentions.to_dict() else: _resp["allowed_mentions"] = {}
`SlashMessage.edit` references non-existing attribute `bot`
These [conditionals](https://github.com/discord-py-slash-commands/discord-py-interactions/blob/38bbde4f1643fe83a09cfa9a44c6e0e5fcbb132d/discord_slash/model.py#L552-L560) cause an AttributeError when running `SlashMessage.edit`. Reading through the code, `SlashMessage` does not appear to inherit this attribute from anywhere. The [equivalent discord.py code](https://github.com/Rapptz/discord.py/blob/d7ed88459341527a69e8d8a7a77ad1d9c5e2832c/discord/message.py#L1228-L1236) uses the attribute `_state` instead.
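A stripped-down illustration of the failure mode (class and attribute contents are made up for the sketch):

```py
class SlashMessageLike:
    # Stands in for SlashMessage, which gets `_state` from discord.py's Message.
    def __init__(self, state):
        self._state = state

    def edit_broken(self):
        # AttributeError: 'SlashMessageLike' object has no attribute 'bot'
        return self.bot.allowed_mentions

    def edit_fixed(self):
        # `_state` is the attribute that actually exists on message objects
        return self._state.allowed_mentions
```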
2021-07-03T02:04:16
interactions-py/interactions.py
440
interactions-py__interactions.py-440
[ "398" ]
e82b8a08a71a592db113498720d7e1920b027717
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -12,14 +12,18 @@ with open(path.join(HERE, PACKAGE_NAME, "base.py"), encoding="utf-8") as fp: VERSION = re.search('__version__ = "([^"]+)"', fp.read()).group(1) + +def read_requirements(filename): + with open(filename, "r", encoding="utf-8") as fp: + return fp.read().strip().splitlines() + + extras = { - "lint": ["black", "flake8", "isort"], - "readthedocs": ["sphinx", "karma-sphinx-theme"], + "lint": read_requirements("requirements-lint.txt"), + "readthedocs": read_requirements("requirements-docs.txt"), } -extras["lint"] += extras["readthedocs"] extras["dev"] = extras["lint"] + extras["readthedocs"] - -requirements = open("requirements.txt").read().split("\n")[:-1] +requirements = read_requirements("requirements.txt") setup( name="discord-py-interactions",
[BUG] Development dependencies are included in install_requires
**Describe the bug**
Installing this library also installs all libraries used for local development (list below). This makes it impossible to use different versions of these tools in development. It also requires installing all of these tools in production for the bot to function, which unnecessarily bloats the installation size by over 80MB (on Windows).
**Reproducing steps**
Install the library as directed in the documentation, `pip install discord-py-interactions`
**What's normally expected**
The library itself and only the code necessary for running the bot are downloaded and installed.
**What actually happened**
Development tools for this project are also installed. If pip is used, other local tools are overwritten with the specific versions used in this project. If poetry is used, the tools are either downgraded to the version pinned in this library, or an error is generated when there is a version conflict. Either way, devs using this library cannot use their own tooling.
Output from pip:
```
Successfully installed Jinja2-3.0.3 MarkupSafe-2.0.1 Pygments-2.10.0 Sphinx-4.1.2 alabaster-0.7.12 babel-2.9.1 backports.entry-points-selectable-1.1.1 black-21.11b1 certifi-2021.10.8 cfgv-3.3.1 charset-normalizer-2.0.9 click-8.0.3 colorama-0.4.4 discord-py-interactions-4.0.1 distlib-0.3.4 docutils-0.17.1 filelock-3.4.0 identify-2.4.0 imagesize-1.3.0 isort-5.9.3 nodeenv-1.6.0 orjson-3.6.3 packaging-21.3 pathspec-0.9.0 platformdirs-2.4.0 pre-commit-2.16.0 pyparsing-3.0.6 regex-2021.11.10 requests-2.26.0 six-1.16.0 snowballstemmer-2.2.0 sphinx-hoverxref-1.0.0 sphinxcontrib-applehelp-1.0.2 sphinxcontrib-devhelp-1.0.2 sphinxcontrib-htmlhelp-2.0.0 sphinxcontrib-jsmath-1.0.1 sphinxcontrib-qthelp-1.0.3 sphinxcontrib-serializinghtml-1.1.5 tomli-1.2.3 urllib3-1.26.7 virtualenv-20.10.0
```
pipdeptree shows that only `aiohttp` and `orjson` are necessary for this package to function; the rest should be removed and instead placed in an extra dependency (e.g.
`pip install discord-interactions[dev]`): ``` discord-py-interactions==4.0.1 - aiohttp [required: ==3.7.4.post0, installed: 3.7.4.post0] - async-timeout [required: >=3.0,<4.0, installed: 3.0.1] - attrs [required: >=17.3.0, installed: 21.2.0] - chardet [required: >=2.0,<5.0, installed: 4.0.0] - multidict [required: >=4.5,<7.0, installed: 5.2.0] - typing-extensions [required: >=3.6.5, installed: 3.10.0.2] - yarl [required: >=1.0,<2.0, installed: 1.6.3] - idna [required: >=2.0, installed: 3.2] - multidict [required: >=4.0, installed: 5.2.0] - black [required: ==21.11b1, installed: 21.11b1] - click [required: >=7.1.2, installed: 8.0.3] - colorama [required: Any, installed: 0.4.4] - mypy-extensions [required: >=0.4.3, installed: 0.4.3] - pathspec [required: >=0.9.0,<1, installed: 0.9.0] - platformdirs [required: >=2, installed: 2.4.0] - regex [required: >=2021.4.4, installed: 2021.11.10] - tomli [required: >=0.2.6,<2.0.0, installed: 1.2.3] - typing-extensions [required: >=3.10.0.0, installed: 3.10.0.2] - typing-extensions [required: !=3.10.0.1, installed: 3.10.0.2] - colorama [required: ==0.4.4, installed: 0.4.4] - flake8 [required: ==3.9.2, installed: 3.9.2] - mccabe [required: >=0.6.0,<0.7.0, installed: 0.6.1] - pycodestyle [required: >=2.7.0,<2.8.0, installed: 2.7.0] - pyflakes [required: >=2.3.0,<2.4.0, installed: 2.3.1] - isort [required: ==5.9.3, installed: 5.9.3] - orjson [required: ==3.6.3, installed: 3.6.3] - pre-commit [required: ==2.16.0, installed: 2.16.0] - cfgv [required: >=2.0.0, installed: 3.3.1] - identify [required: >=1.0.0, installed: 2.4.0] - nodeenv [required: >=0.11.1, installed: 1.6.0] - pyyaml [required: >=5.1, installed: 5.4.1] - toml [required: Any, installed: 0.10.2] - virtualenv [required: >=20.0.8, installed: 20.10.0] - backports.entry-points-selectable [required: >=1.0.4, installed: 1.1.1] - distlib [required: >=0.3.1,<1, installed: 0.3.4] - filelock [required: >=3.2,<4, installed: 3.4.0] - platformdirs [required: >=2,<3, installed: 2.4.0] - six [required: >=1.9.0,<2, installed: 1.16.0] - Sphinx [required: ==4.1.2, installed: 4.1.2] - alabaster [required: >=0.7,<0.8, installed: 0.7.12] - babel [required: >=1.3, installed: 2.9.1] - pytz [required: >=2015.7, installed: 2021.3] - colorama [required: >=0.3.5, installed: 0.4.4] - docutils [required: >=0.14,<0.18, installed: 0.17.1] - imagesize [required: Any, installed: 1.3.0] - Jinja2 [required: >=2.3, installed: 3.0.3] - MarkupSafe [required: >=2.0, installed: 2.0.1] - packaging [required: Any, installed: 21.3] - pyparsing [required: >=2.0.2,!=3.0.5, installed: 3.0.6] - Pygments [required: >=2.0, installed: 2.10.0] - requests [required: >=2.5.0, installed: 2.26.0] - certifi [required: >=2017.4.17, installed: 2021.10.8] - charset-normalizer [required: ~=2.0.0, installed: 2.0.9] - idna [required: >=2.5,<4, installed: 3.2] - urllib3 [required: >=1.21.1,<1.27, installed: 1.26.7] - setuptools [required: Any, installed: 58.3.0] - snowballstemmer [required: >=1.1, installed: 2.2.0] - sphinxcontrib-applehelp [required: Any, installed: 1.0.2] - sphinxcontrib-devhelp [required: Any, installed: 1.0.2] - sphinxcontrib-htmlhelp [required: >=2.0.0, installed: 2.0.0] - sphinxcontrib-jsmath [required: Any, installed: 1.0.1] - sphinxcontrib-qthelp [required: Any, installed: 1.0.3] - sphinxcontrib-serializinghtml [required: >=1.1.5, installed: 1.1.5] - sphinx-hoverxref [required: ==1.0.0, installed: 1.0.0] ``` **Versions** - [ ] I am using discord.py versions 1.7 and below with my code. 
- [ ] I am using 2.0 or higher, or a modified fork.
- [ ] I am using dis-snek with my code.
- [x] I am not using any of the listed above and am using the library code alone.
This isn't really a bug. All of the tools you see in our requirements file are simply ported over to `install_requires`. These tools **are** needed to run the most minimal bot possible. These tools are also downloaded in case anyone would wish to help contribute to our architecture without needing to manually install them on their own. We also have hard-locked versions of the tools that we download exactly for that problem: having multiple versions of the same tools for development. If you have your virtual environment configured to have these tools install to that, you should have no issues with multiple versions at all.
> These tools are needed to run the most minimal bot possible.

This is absolutely not true. Why would I need to install a dozen sphinx plugins and flake8 for a bot to function? It's not like the bot itself needs to generate documentation or lint code. In fact, the minimal bot example on this page still functions without a hitch after I manually deleted all those library files.
> These tools are also downloaded in case anyone would wish to help contribute to our architecture without needing to manually install them on their own.

Developers who wish to contribute to this library are free to install these tools using an extra, which is common practice. Force-installing them for everyone who installs the library "in case they want to contribute" seems like a bad idea.
> We also have hard-locked versions of the tools that we download exactly for that problem: having multiple versions of the same tools for development. If you have your virtual environment configured to have these tools install to that, you should have no issues with multiple versions at all.

Are you suggesting that bots should be developed in a *separate* virtualenv where this library is not installed? How would the bot even function in that case? To make it clear: you are force-installing pinned versions of dev tools **into** the bot that depends on this library. This means they also must be downloaded not only during testing but in production as well. For some perspective, the bot that I have running on a (monkeypatched) v3 of this library totals about 70MB after installation. That's including this library and a dozen others, including major ones like database interfaces with binaries. It's appalling that this library force-installs more than the size of everything else combined in unnecessary downloads.
> Why would I need to install a dozen sphinx plugins and flake8 for a bot to function?

This really falls under the "helps people easily begin to contribute."
> Developers who wish to contribute to this library are free to install these tools using an extra, which is common practice.

We can remove some of the dev tools from `requirements.txt`, but we already use the extra requirement path for setup and configuration. This is also somewhat of a moot point since a vast majority of contributors will not use extras, which is ironic since it's a "common practice." But, just to give benefit of the doubt, we can also update our `CONTRIBUTING` file to include a better download example to target specifically the extra requirements. The fact of the matter is, that's a **convenience** preference. It's perfectly fine to also have them in the regular requirements.txt file. What hurts is seeing this:
> Are you suggesting that bots should be developed in a separate virtualenv where this library is not installed?

Yes, I am suggesting that.
If you're so worried about specific versions of our requirements potentially tampering with the versions that you use in your project, you should really reconsider how you're installing it. Virtual environments were created with handling something like this in mind. It's up to you if you want to handle it on a global scale, but bots *should* be developed based off of their current modules/dependencies in their env/venv.
> How would the bot even function in that case?

By specifically importing the modules from that virtualenv. IDEs like PyCharm can easily do this; I'm not sure what might handicap other IDEs, or even a text editor such as VSCode, from doing this. For reference, I use my own venv in VSCode to handle this.
> This means they also must be downloaded not only during testing but in production as well.

This is probably the best point that I can see being raised in your concern of "dev-only" dependencies being needed in the main project requirements.
> This really falls under the "helps people easily begin to contribute."

This falls more under "help people easily contribute but make it hard to use". An analogy would be PCs/phones that come preloaded with 200 pieces of software that cannot be uninstalled or replaced with something people prefer because "it helps people easily get started".
> but bots should be developed based off of their current modules/dependencies in their env/venv.

I don't think you understand the point of what a "library" is then. When developing a bot, this library is included as a dependency in their `install_requires`. What would you do if, for example, `aiohttp` one day requires you to download 80MB of extra code and stops functioning unless you downgrade most of your tooling?
> Virtual environments were created with handling something like this in mind.

Virtual environments are created so that separate projects can have their own environments managed separately. They do not separate a particular project from the code that it depends on. What you are actually suggesting is that I use your library in a bot, but at the same time I should not install this library at all. Are you developing this library in a virtualenv where `aiohttp` is not installed at all?
> IDEs like PyCharm can easily do this

Again, that's not how virtualenvs work. Yes, IDEs help create and manage virtualenvs, but you are suggesting that I place the code I write for my project in a SEPARATE virtualenv from the dependencies that I install (but do not develop for).
> An analogy would be PCs/phones that come preloaded with 200 pieces of software that cannot be uninstalled or replaced with something people prefer because "it helps people easily get started".

This is an incorrect analogy. As you mentioned, you can manually delete these. These aren't "uninstallable" parts of the project setup. We'll move forward with moving them to the `extras` installation list for setup.
> What would you do if, for example, aiohttp one day requires you to download 80MB of extra code and stops functioning unless you downgrade most of your tooling?

We have specific versions being downloaded, so this isn't applicable. I see what you're getting at, but this is already covered by hard-locking our installations. `aiohttp`, for all intents and purposes, should never have to be constantly upgraded to the very latest; that would be a horrible approach.
To put this in perspective, discord.py only allows installation of a specific range of that module as a dependency due to Python limitations on how far back the `async`/`await` iterators are allowed to go.
> They do not separate a particular project from the code that it depends on.

Yes, they do. Although, you would be using `pipenv` and not regular `pip` for this. [Read this article](https://www.activestate.com/resources/quick-reads/how-to-manage-python-dependencies-with-virtual-environments/) for more information on this.
> But you are suggesting that I place the code I write for my project in a SEPARATE virtualenv from the dependencies that I install

That's not what I'm suggesting. If I was suggesting anything, it's *targeting* your dependencies to be installed into that venv.

Here is the key point I've taken from this Issue so far:

- We'll take all of our development-related dependencies and put them into `extras` for setup so people may run the command line `pip install -e .[extra]` easily when reading our [contribution requirements](https://github.com/goverfl0w/discord-interactions/#how-can-i-contribute).

If I am missing any, please feel free to mention and correct me.
> This is an incorrect analogy. As you mentioned, you can manually delete these.

That's a quick manual test I performed to make sure things still work without them. In practice, there will be fewer contributors to this project than there are users of it. It's unrealistic to expect all non-contributors to run 50 uninstall commands after every install to remove stuff they do not want.
> We have specific versions being downloaded, so this isn't applicable.

That is not the point of the discussion. Pinning is the right thing to do; I'm pinned to v3 of this library for now as well. By this logic, I should never upgrade to v4 because it's forcing me to use its dev toolset with no option to choose my own.
> Although, you would be using pipenv and not regular pip for this. Read this article for more information on this.

This does not solve the problem at hand; pipenv, poetry, etc. are still working on top of virtualenv. Installing this library, regardless of which tool is used, either overwrites any local tools installed or causes version conflicts. I'm using poetry myself, which helpfully errors out and points out the conflict as originating from this library, but other tools may not. It is also naive to expect/require all consumers of a library to use a particular dependency management system when multiple popular ones exist.
> Here is the key point I've taken from this Issue so far:

Thank you; this is all I'm after, since this issue is making it impossible for me to update to v4 in any of the code bases I help maintain without rebuilding their dev tooling.
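For reference, a minimal sketch of the extras split the thread converges on; the package and dependency names are taken from the report and the patch above, with version pins omitted for brevity:

```py
# setup.py sketch: runtime deps in install_requires, tooling behind extras.
from setuptools import setup

extras = {
    "lint": ["black", "flake8", "isort"],
    "readthedocs": ["sphinx", "karma-sphinx-theme"],
}
extras["dev"] = extras["lint"] + extras["readthedocs"]

setup(
    name="discord-py-interactions",
    install_requires=["aiohttp", "orjson"],  # the only runtime deps per pipdeptree
    extras_require=extras,
)
```

End users then get only the runtime dependencies with `pip install discord-py-interactions`, while contributors opt into the tooling with `pip install -e .[dev]`.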
2022-01-19T05:47:08
interactions-py/interactions.py
444
interactions-py__interactions.py-444
[ "443" ]
e82b8a08a71a592db113498720d7e1920b027717
diff --git a/interactions/client.py b/interactions/client.py --- a/interactions/client.py +++ b/interactions/client.py @@ -269,8 +269,8 @@ async def create(data: ApplicationCommand) -> None: command.get("name") for command in cached_commands if command.get("name") ] - print(cached_commands) - print(cached_command_names) + log.debug(f"Cached commands: {cached_commands}") + log.debug(f"Cached command names: {cached_command_names}") if cached_commands: for command in commands:
[BUG] Leftover debug prints in synchronize()
**Describe the bug**
Starting the bot always prints out the internal list of commands. These are presumably prints used during debugging that should have been removed or changed to proper logging. These lists can be very long if there are many registered commands with many parameters specified.
https://github.com/goverfl0w/interactions.py/blob/unstable/interactions/client.py#L272-L273
**Reproducing steps**
Start any valid bot.
**What's normally expected**
The bot starts up without unnecessary debug output.
**What actually happened**
The bot prints out two lists of application commands regardless of log level.
**Versions**
- [ ] I am using discord.py versions 1.7 and below with my code.
- [ ] I am using 2.0 or higher, or a modified fork.
- [ ] I am using dis-snek with my code.
- [x] I am not using any of the listed above and am using the library code alone.
~~Should have been fixed in unstable with https://github.com/goverfl0w/interactions.py/pull/421~~ Edit: Oh sorry, different logging issue. You can open a PR to change it to a `log.debug`. Will do, thanks
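For context, the patch above is the standard print-to-logger swap; a generic sketch (the logger name and setup are made up) of why `log.debug` respects the configured level while `print` does not:

```py
import logging

logging.basicConfig(level=logging.WARNING)  # a typical bot default
log = logging.getLogger("interactions")

cached_commands = [{"name": "ping"}, {"name": "help"}]

print(cached_commands)                            # always printed
log.debug(f"Cached commands: {cached_commands}")  # suppressed unless level <= DEBUG
```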
2022-01-19T07:54:30
interactions-py/interactions.py
559
interactions-py__interactions.py-559
[ "558" ]
862280c4d6aea89184408bcf588efa79d05d48b8
diff --git a/interactions/api/dispatch.py b/interactions/api/dispatch.py --- a/interactions/api/dispatch.py +++ b/interactions/api/dispatch.py @@ -33,6 +33,7 @@ def dispatch(self, __name: str, *args, **kwargs) -> None: :type \**kwargs: dict """ for event in self.events.get(__name, []): + self.loop.create_task(event(*args, **kwargs)) log.debug(f"DISPATCH: {event}") diff --git a/interactions/api/gateway.py b/interactions/api/gateway.py --- a/interactions/api/gateway.py +++ b/interactions/api/gateway.py @@ -310,17 +310,23 @@ def _dispatch_event(self, event: str, data: dict) -> None: if _context.data._json.get("options"): for option in _context.data.options: - __kwargs.update(self.__sub_command_context(option)) - __kwargs.update( - self.__option_type_context( - _context, - ( - option["type"] - if isinstance(option, dict) - else option.type.value - ), - ) + _type = self.__option_type_context( + _context, + ( + option["type"] + if isinstance(option, dict) + else option.type.value + ), ) + if _type: + if isinstance(option, dict): + _type[option["value"]]._client = self._http + option.update({"value": _type[option["value"]]}) + else: + _type[option.value]._client = self._http + option._json.update({"value": _type[option.value]}) + _option = self.__sub_command_context(option, _context) + __kwargs.update(_option) self._dispatch.dispatch("on_command", _context) elif data["type"] == InteractionType.MESSAGE_COMPONENT: @@ -390,7 +396,9 @@ def __contextualize(self, data: dict) -> object: context: object = getattr(__import__("interactions.context"), _context) return context(**data) - def __sub_command_context(self, data: Union[dict, Option]) -> Union[Tuple[str], dict]: + def __sub_command_context( + self, data: Union[dict, Option], _context: Optional[object] = MISSING + ) -> Union[Tuple[str], dict]: """ Checks if an application command schema has sub commands needed for argument collection. 
@@ -404,23 +412,62 @@ def __sub_command_context(self, data: Union[dict, Option]) -> Union[Tuple[str], _data: dict = data._json if isinstance(data, Option) else data def _check_auto(option: dict) -> Optional[Tuple[str]]: - if option.get("focused"): - return (option["name"], option["value"]) + try: + if option.get("focused"): + return (option["name"], option["value"]) + except AttributeError: + return False x = _check_auto(_data) if x: return x if _data.get("options"): - for option in _data["options"]: - if option["type"] == OptionType.SUB_COMMAND: - for sub_option in _data["options"]: + if _data["type"] == OptionType.SUB_COMMAND: + __kwargs["sub_command"] = _data["name"] + for sub_option in _data["options"]: + _check_auto(sub_option) + _option_context = self.__option_type_context( + _context, + ( + sub_option["type"] + if isinstance(sub_option, dict) + else sub_option.type.value + ), + ) + if _option_context: + if isinstance(sub_option, dict): + _option_context[sub_option["value"]]._client = self._http + sub_option.update({"value": _option_context[sub_option["value"]]}) + else: + _option_context[sub_option.value]._client = self._http + sub_option._json.update({"value": _option_context[sub_option.value]}) + __kwargs[sub_option["name"]] = sub_option["value"] + elif _data["type"] == OptionType.SUB_COMMAND_GROUP: + __kwargs["sub_command_group"] = _data["name"] + for _group_option in _data["options"]: + _check_auto(_group_option) + __kwargs["sub_command"] = _group_option["name"] + for sub_option in _group_option["options"]: _check_auto(sub_option) + _option_context = self.__option_type_context( + _context, + ( + sub_option["type"] + if isinstance(sub_option, dict) + else sub_option.type.value + ), + ) + if _option_context: + if isinstance(sub_option, dict): + _option_context[sub_option["value"]]._client = self._http + sub_option.update({"value": _option_context[sub_option["value"]]}) + else: + _option_context[sub_option.value]._client = self._http + sub_option._json.update( + {"value": _option_context[sub_option.value]} + ) __kwargs[sub_option["name"]] = sub_option["value"] - else: - for group in _data["options"]: - for _group_option in group: - _check_auto(_group_option) - __kwargs[_group_option["name"]] = _group_option["value"] + elif _data.get("value") and _data.get("name"): __kwargs[_data["name"]] = _data["value"] diff --git a/interactions/api/models/gw.py b/interactions/api/models/gw.py --- a/interactions/api/models/gw.py +++ b/interactions/api/models/gw.py @@ -389,7 +389,16 @@ class MessageReaction(DictSerializerMixin): :ivar Optional[Emoji] emoji?: The emoji of the event. """ - __slots__ = ("_json", "user_id", "channel_id", "message_id", "guild_id", "member", "emoji") + __slots__ = ( + "_json", + "_client", + "user_id", + "channel_id", + "message_id", + "guild_id", + "member", + "emoji", + ) def __init__(self, **kwargs): super().__init__(**kwargs) @@ -416,7 +425,7 @@ class ReactionRemove(MessageReaction): :ivar Optional[Emoji] emoji?: The emoji of the event. """ - __slots__ = ("_json", "user_id", "channel_id", "message_id", "guild_id", "emoji") + __slots__ = ("_json", "_client", "user_id", "channel_id", "message_id", "guild_id", "emoji") def __init__(self, **kwargs): super().__init__(**kwargs)
[BUG] command dispatching in the gateway is not functioning properly
Command arguments are not dispatched correctly, and sub_command(_group)s are not dispatched correctly.
2022-02-22T17:57:28
interactions-py/interactions.py
600
interactions-py__interactions.py-600
[ "599" ]
29889fe6ecc20121c96c4876c1c0c5ada7565c17
diff --git a/interactions/api/models/channel.py b/interactions/api/models/channel.py --- a/interactions/api/models/channel.py +++ b/interactions/api/models/channel.py @@ -857,29 +857,10 @@ async def create_thread( return Channel(**res, _client=self._client) - @classmethod - async def get( - cls, - channel: Union[int, str], - client: "HTTPClient", # noqa - ) -> "Channel": - """ - Gets a channel based of its URL or its id. - - :param channel: The URL to the channel or the id of the channel - :type channel: Union[int, str] - :param client: The HTTPClient of your bot. Set as ``bot._http`` - :type client: HTTPClient - """ - - channel_id = channel if isinstance(channel, int) else int(channel.split(sep="/")[-1]) - - res = await client.get_channel(channel_id) - return cls(**res, _client=client) - @property def url(self) -> str: - return f"https://discord.com/channels/{self.guild_id}/{self.id}" if self.guild_id else None + _guild_id = "@me" if not isinstance(self.guild_id, int) else self.guild_id + return f"https://discord.com/channels/{_guild_id}/{self.id}" async def create_invite( self, diff --git a/interactions/api/models/guild.py b/interactions/api/models/guild.py --- a/interactions/api/models/guild.py +++ b/interactions/api/models/guild.py @@ -54,6 +54,13 @@ class EventStatus(IntEnum): CANCELED = 4 +class InviteTargetType(IntEnum): + """An enumerable object representing the different invite target types""" + + STREAM = 1 + EMBEDDED_APPLICATION = 2 + + class WelcomeChannels(DictSerializerMixin): """ A class object representing a welcome channel on the welcome screen. @@ -1755,6 +1762,14 @@ def __init__(self, **kwargs): else None ) + async def delete(self) -> None: + """Deletes the invite""" + + if not self._client: + raise AttributeError("HTTPClient not found!") + + await self._client.delete_invite(self.code) + class GuildTemplate(DictSerializerMixin): """
[REQUEST] implement channel invite and channel.get classmethod ^
2022-03-01T17:29:19
interactions-py/interactions.py
611
interactions-py__interactions.py-611
[ "610" ]
7489f6ff22b7f68249d50c2d5b582ff219ff1cc8
diff --git a/interactions/api/models/misc.py b/interactions/api/models/misc.py --- a/interactions/api/models/misc.py +++ b/interactions/api/models/misc.py @@ -171,54 +171,55 @@ def __hash__(self): # but end users might. -class Format: +class Color(object): """ - This object is used to respectively format markdown strings - provided by the WYSIWYG text editor for ease-of-accessibility - and simple implementations into bots. + An object representing Discord branding colors. .. note:: - All base strings are given brackets before being f-string - parsable to make conversion simplified. - - .. warning:: - the ``stylize()`` method must be used if you're actually - looking to give a **str** specific result. + This object only intends to cover the branding colors + and no others. The main reason behind this is due to + the current accepted standard of using hex codes or other + custom-defined colors. """ - USER = "<@%s>" - USER_NICK = "<@!%s>" - CHANNEL = "<#%s>" - ROLE = "<@&%s>" - EMOJI = "<:%s:%d>" - EMOJI_ANIMATED = "<a:%s:%d>" - TIMESTAMP = "<t:%s>" - TIMESTAMP_SHORT_T = "<t:%s:t>" - TIMESTAMP_LONG_T = "<t:%s:T>" - TIMESTAMP_SHORT_D = "<t:%s:d>" - TIMESTAMP_LONG_D = "<t:%s:D>" - TIMESTAMP_SHORT_DT = TIMESTAMP - TIMESTAMP_LONG_DT = "<t:%s:F>" - TIMESTAMP_RELATIVE = "<t:%s:R>" - - @classmethod - def stylize(cls, format: str, **kwargs) -> str: - r""" - This takes a format style from the object and - converts it into a usable string for ease. - - :param format: The format string to use. - :type format: str - :param \**kwargs: Multiple key-word arguments to use, where key=value is format=value. - :type \**kwargs: dict - :return: The formatted string. - :rtype: str - """ - new: str = f"" # noqa: F541 - for kwarg in kwargs: - if format == kwarg: - new %= format - return new + @property + def blurple(self) -> hex: + """Returns a hexadecimal value of the blurple color.""" + return 0x5865F2 + + @property + def green(self) -> hex: + """Returns a hexadecimal value of the green color.""" + return 0x57F287 + + @property + def yellow(self) -> hex: + """Returns a hexadecimal value of the yellow color.""" + return 0xFEE75C + + @property + def fuchsia(self) -> hex: + """Returns a hexadecimal value of the fuchsia color.""" + return 0xEB459E + + @property + def red(self) -> hex: + """Returns a hexadecimal value of the red color.""" + return 0xED4245 + + # I can't imagine any bot developers actually using these. + # If they don't know white is ff and black is 00, something's seriously + # wrong. + + @property + def white(self) -> hex: + """Returns a hexadecimal value of the white color.""" + return 0xFFFFFF + + @property + def black(self) -> hex: + """Returns a hexadecimal value of the black color.""" + return 0x000000 class MISSING:
[REQUEST] Add a basic `Color` object.
### Describe the feature.
People may want to be able to easily call upon colors. Since it should be at least an accepted concept, I want to add a basic `Color` object that includes the official branding colors. Please note that other custom-defined colors are ideally frowned upon for this idea, since the core library is about keeping implementations strict to what Discord offers, which the branding colors arguably satisfy.
### Code of Conduct
- [X] I agree to follow the contribution requirements.
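A short usage sketch of the object added in the patch above; the deep import path follows the patched file (`interactions/api/models/misc.py`), and whether the class is re-exported at the top level is left as an assumption:

```py
from interactions.api.models.misc import Color

# Branding colors are exposed as instance properties returning ints.
color = Color()
print(hex(color.blurple))  # 0x5865f2
print(hex(color.green))    # 0x57f287

# They can then be fed anywhere a color integer is expected, e.g. an embed:
# embed = interactions.Embed(title="Status", color=color.green)
```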
2022-03-02T06:55:06
interactions-py/interactions.py
659
interactions-py__interactions.py-659
[ "582" ]
e456a9e2324b39e8087725aa94cace5455119192
diff --git a/interactions/client.py b/interactions/client.py --- a/interactions/client.py +++ b/interactions/client.py @@ -849,7 +849,7 @@ async def autocomplete_choice_list(ctx, user_input: str = ""): elif isinstance(command, str): _command_obj: ApplicationCommand = self._http.cache.interactions.get(command) if not _command_obj or not _command_obj.id: - if getattr(_command_obj, "guild_id", None) or self._automate_sync: + if getattr(_command_obj, "guild_id", None) or not self._automate_sync: _application_commands = self._loop.run_until_complete( self._http.get_application_commands( application_id=self.me.id,
[BUG] NoneType exception when using extension_autocomplete and disable_sync=True ### Describe the bug. When attempting to register an autocomplete callback inside an extension with `disable_sync=True`, a NoneType exception occurs ### List the steps. Define a command with autocomplete option and an autocomplete callback in an extension: ```py @interactions.extension_command( name="testauto", description="test autocomplete", options=[ Option( type=OptionType.STRING, name="message", description="test", autocomplete=True ) ] ) async def testauto_cmd(self, ctx: interactions.CommandContext, message: str = None): await ctx.send("You sent: {}".format(message)) @interactions.extension_autocomplete(command="test", name="message") async def do_autocomplete(self, ctx): await ctx.populate([ Choice( name="One", value="1" ), Choice( name="Two", value="2" ) ]) ``` ### What you expected. To register a callback successfully ### What you saw. ```py Traceback (most recent call last): File "/Users/ventus/GitRepos/discord-py-interactions_boilerplate/main.py", line 75, in <module> client.load("cogs." + cog) File "/Users/ventus/GitRepos/discord-py-interactions_boilerplate/env/lib/python3.9/site-packages/interactions/client.py", line 937, in load raise error from error File "/Users/ventus/GitRepos/discord-py-interactions_boilerplate/env/lib/python3.9/site-packages/interactions/client.py", line 933, in load extension = setup(self, *args, **kwargs) File "/Users/ventus/GitRepos/discord-py-interactions_boilerplate/cogs/helloworld.py", line 188, in setup HelloWorld(client) File "/Users/ventus/GitRepos/discord-py-interactions_boilerplate/env/lib/python3.9/site-packages/interactions/client.py", line 1143, in __new__ func = client.autocomplete(*args, **kwargs)(func) File "/Users/ventus/GitRepos/discord-py-interactions_boilerplate/env/lib/python3.9/site-packages/interactions/client.py", line 850, in autocomplete _command: Union[Snowflake, int] = int(_command_obj.id) AttributeError: 'NoneType' object has no attribute 'id' ``` ### What version of the library did you use? stable ### Code of Conduct - [X] I agree to follow the contribution requirements.
In #477, I implemented logic to append the scope, whenever the `Client.command` decorator was called, to an internal set inside `Client`. This allows autocomplete to see if a command is limited to a scope and fetch that command successfully. The `_scope` set does not seem to get populated in `extension_command`; therefore `_command_obj` never gets populated with ApplicationCommand data. Will try to submit a PR with fixes.
Here: https://github.com/interactions-py/library/blob/6cc6333a26d9686a900c2119f94ef14f5e7503bc/interactions/client.py#L832
Inverting the `self._automate_sync` check fixes this.
> Here: https://github.com/interactions-py/library/blob/6cc6333a26d9686a900c2119f94ef14f5e7503bc/interactions/client.py#L832
> Inverting the `self._automate_sync` check fixes this.

As in `if not self._automate_sync or ...`?
> As in `if not self._automate_sync or ...`?

Yes, I'm pretty certain that's what I used to resolve it in my testing.
2022-03-19T17:19:51
interactions-py/interactions.py
900
interactions-py__interactions.py-900
[ "896" ]
2ed99feff0ebe8924576d23203236c194717f0d1
diff --git a/interactions/api/error.py b/interactions/api/error.py --- a/interactions/api/error.py +++ b/interactions/api/error.py @@ -269,7 +269,7 @@ def lookup(code: int) -> str: 180002: "Failed to create stage needed for stage event", }.get(code, f"Unknown error: {code}") - def __init__(self, message: str = None, code: int = 0, severity: int = 0, **kwargs): + def __init__(self, code: int = 0, message: str = None, severity: int = 0, **kwargs): self.code: int = code self.severity: int = severity self.data: dict = kwargs.pop("data", None)
[BUG] message and code are swapped in LibraryException ### Describe the bug. in `LibraryException` this is the `__init__`: ```py def __init__(self, message: str = None, code: int = 0, severity: int = 0, **kwargs): ``` however, the library uses errors like: ```py raise LibraryException(11, message="Your command must have a name.") ``` which when tried in a terminal results in ```py >>> raise LibraryException(11, message="Your command must have a name.") Traceback (most recent call last): File "<stdin>", line 1, in <module> TypeError: __init__() got multiple values for argument 'message' ``` so the `__init__` should instead be ```py def __init__(self, code: int = 0, message: str = None, severity: int = 0, **kwargs): ``` ### List the steps. ```py >>> from interactions import LibraryException >>> raise LibraryException(11, message="Your command must have a name.") Traceback (most recent call last): File "<stdin>", line 1, in <module> TypeError: __init__() got multiple values for argument 'message' ``` ### What you expected. ```py Traceback (most recent call last): File "<stdin>", line 1, in <module> interactions.api.error.LibraryException: An error occurred: Your command must have a name., with code '11' and severity '0' ``` ### What you saw. ```py Traceback (most recent call last): File "<stdin>", line 1, in <module> TypeError: __init__() got multiple values for argument 'message' ``` ### What version of the library did you use? unstable ### Version specification _No response_ ### Code of Conduct - [X] I agree to follow the contribution requirements.
"intended feature 🤓" jokes aside this is a breaking bug when an interaction exception occurs anywhere as our running process only throws it and keeps the loop ongoing
2022-06-30T15:04:41
interactions-py/interactions.py
908
interactions-py__interactions.py-908
[ "870" ]
1a55229f3ae67d514c66693b98a2359bd0f86b16
diff --git a/interactions/client/context.py b/interactions/client/context.py --- a/interactions/client/context.py +++ b/interactions/client/context.py @@ -352,35 +352,53 @@ async def edit(self, content: Optional[str] = MISSING, **kwargs) -> Message: msg = None if self.deferred: - if hasattr(self.message, "id") and self.message.id is not None: - res = await self._client.edit_message( - int(self.channel_id), int(self.message.id), payload=payload - ) - self.message = msg = Message(**res, _client=self._client) - else: - res = await self._client.edit_interaction_response( - token=self.token, - application_id=str(self.id), - data={"type": self.callback.value, "data": payload}, - message_id=self.message.id if self.message else "@original", - ) - if res["flags"] == 64: - log.warning("You can't edit hidden messages.") + if ( + hasattr(self.message, "id") + and self.message.id is not None + and self.message.flags != 64 + ): + try: + res = await self._client.edit_message( + int(self.channel_id), int(self.message.id), payload=payload + ) + except LibraryException as e: + if e.code in {10015, 10018}: + log.warning(f"You can't edit hidden messages." f"({e.message}).") + else: + # if its not ephemeral or some other thing. + raise e from e else: - await self._client.edit_message( - int(self.channel_id), res["id"], payload=payload + self.message = msg = Message(**res, _client=self._client) + else: + try: + res = await self._client.edit_interaction_response( + token=self.token, + application_id=str(self.id), + data=payload, + message_id=self.message.id + if self.message and self.message.flags != 64 + else "@original", ) + except LibraryException as e: + if e.code in {10015, 10018}: + log.warning(f"You can't edit hidden messages." f"({e.message}).") + else: + # if its not ephemeral or some other thing. + raise e from e + else: self.message = msg = Message(**res, _client=self._client) else: - res = await self._client.edit_interaction_response( - token=self.token, - application_id=str(self.application_id), - data={"type": self.callback.value, "data": payload}, - ) - if res["flags"] == 64: - log.warning("You can't edit hidden messages.") + try: + res = await self._client.edit_interaction_response( + token=self.token, application_id=str(self.application_id), data=payload + ) + except LibraryException as e: + if e.code in {10015, 10018}: + log.warning(f"You can't edit hidden messages." f"({e.message}).") + else: + # if its not ephemeral or some other thing. + raise e from e else: - await self._client.edit_message(int(self.channel_id), res["id"], payload=payload) self.message = msg = Message(**res, _client=self._client) if msg is not None: @@ -395,15 +413,18 @@ async def defer(self, ephemeral: Optional[bool] = False) -> None: :param ephemeral?: Whether the deferred state is hidden or not. 
:type ephemeral: Optional[bool] """ - self.deferred = True - _ephemeral: int = (1 << 6) if ephemeral else 0 - self.callback = InteractionCallbackType.DEFERRED_CHANNEL_MESSAGE_WITH_SOURCE + if not self.responded: + self.deferred = True + _ephemeral: int = (1 << 6) if ephemeral else 0 + self.callback = InteractionCallbackType.DEFERRED_CHANNEL_MESSAGE_WITH_SOURCE - await self._client.create_interaction_response( - token=self.token, - application_id=int(self.id), - data={"type": self.callback.value, "data": {"flags": _ephemeral}}, - ) + await self._client.create_interaction_response( + token=self.token, + application_id=int(self.id), + data={"type": self.callback.value, "data": {"flags": _ephemeral}}, + ) + + self.responded = True async def send(self, content: Optional[str] = MISSING, **kwargs) -> Message: payload = await super().send(content, **kwargs) @@ -414,20 +435,12 @@ async def send(self, content: Optional[str] = MISSING, **kwargs) -> Message: _payload: dict = {"type": self.callback.value, "data": payload} msg = None - if self.responded or self.deferred: - if self.deferred: - res = await self._client.edit_interaction_response( - data=payload, - token=self.token, - application_id=str(self.application_id), - ) - self.responded = True - else: - res = await self._client._post_followup( - data=payload, - token=self.token, - application_id=str(self.application_id), - ) + if self.responded: + res = await self._client._post_followup( + data=payload, + token=self.token, + application_id=str(self.application_id), + ) self.message = msg = Message(**res, _client=self._client) else: await self._client.create_interaction_response( @@ -435,19 +448,20 @@ async def send(self, content: Optional[str] = MISSING, **kwargs) -> Message: application_id=int(self.id), data=_payload, ) - __newdata = await self._client.edit_interaction_response( - data={}, - token=self.token, - application_id=str(self.application_id), - ) - if not __newdata.get("code"): - # if sending message fails somehow - msg = Message(**__newdata, _client=self._client) - self.message = msg + + try: + _msg = await self._client.get_original_interaction_response( + self.token, str(self.application_id) + ) + except LibraryException: + pass + else: + self.message = msg = Message(**_msg, _client=self._client) + self.responded = True + if msg is not None: return msg - return Message( **payload, _client=self._client, @@ -531,6 +545,7 @@ class ComponentContext(_Context): async def edit(self, content: Optional[str] = MISSING, **kwargs) -> Message: payload = await super().edit(content, **kwargs) + msg = None if not self.deferred: self.callback = InteractionCallbackType.UPDATE_MESSAGE @@ -539,20 +554,23 @@ async def edit(self, content: Optional[str] = MISSING, **kwargs) -> Message: token=self.token, application_id=int(self.id), ) - payload = Message(**payload, _client=self._client) - for attr in payload.__slots__: - if getattr(self.message, attr, None) and not getattr(payload, attr, None): - setattr(payload, attr, getattr(self.message, attr)) - payload._json[attr] = self.message._json[attr] - self.message = payload + + try: + _msg = await self._client.get_original_interaction_response( + self.token, str(self.application_id) + ) + except LibraryException: + pass + else: + self.message = msg = Message(**_msg, _client=self._client) + self.responded = True elif self.callback != InteractionCallbackType.DEFERRED_UPDATE_MESSAGE: - res = await self._client._post_followup( + await self._client._post_followup( data=payload, token=self.token, 
application_id=str(self.application_id), ) - self.message = Message(**res, _client=self._client) else: res = await self._client.edit_interaction_response( data=payload, @@ -560,12 +578,12 @@ async def edit(self, content: Optional[str] = MISSING, **kwargs) -> Message: application_id=str(self.application_id), ) self.responded = True - self.message = Message(**res, _client=self._client) + self.message = msg = Message(**res, _client=self._client) - if self.message is None: - self.message = Message(**payload, _client=self._client) + if msg is not None: + return msg - return self.message + return Message(**payload, _client=self._client) async def send(self, content: Optional[str] = MISSING, **kwargs) -> Message: payload = await super().send(content, **kwargs) @@ -576,46 +594,34 @@ async def send(self, content: Optional[str] = MISSING, **kwargs) -> Message: _payload: dict = {"type": self.callback.value, "data": payload} msg = None - if ( - self.responded - or self.deferred - or self.callback == InteractionCallbackType.DEFERRED_UPDATE_MESSAGE - ): - if self.deferred: - res = await self._client.edit_interaction_response( - data=payload, - token=self.token, - application_id=str(self.application_id), - ) - self.responded = True - else: - res = await self._client._post_followup( - data=payload, - token=self.token, - application_id=str(self.application_id), - ) + if self.responded: + res = await self._client._post_followup( + data=payload, + token=self.token, + application_id=str(self.application_id), + ) self.message = msg = Message(**res, _client=self._client) - else: await self._client.create_interaction_response( token=self.token, application_id=int(self.id), data=_payload, ) - __newdata = await self._client.edit_interaction_response( - data={}, - token=self.token, - application_id=str(self.application_id), - ) - if not __newdata.get("code"): - # if sending message fails somehow - msg = Message(**__newdata, _client=self._client) - self.message = msg + + try: + _msg = await self._client.get_original_interaction_response( + self.token, str(self.application_id) + ) + except LibraryException: + pass + else: + self.message = msg = Message(**_msg, _client=self._client) + self.responded = True if msg is not None: return msg - return Message(**payload) + return Message(**payload, _client=self._client) async def defer( self, ephemeral: Optional[bool] = False, edit_origin: Optional[bool] = False @@ -629,20 +635,24 @@ async def defer( :param edit_origin?: Whether you want to edit the original message or send a followup message :type edit_origin: Optional[bool] """ - self.deferred = True - _ephemeral: int = (1 << 6) if bool(ephemeral) else 0 + if not self.responded: - # ephemeral doesn't change callback typings. just data json - if edit_origin: - self.callback = InteractionCallbackType.DEFERRED_UPDATE_MESSAGE - else: - self.callback = InteractionCallbackType.DEFERRED_CHANNEL_MESSAGE_WITH_SOURCE + self.deferred = True + _ephemeral: int = (1 << 6) if bool(ephemeral) else 0 - await self._client.create_interaction_response( - token=self.token, - application_id=int(self.id), - data={"type": self.callback.value, "data": {"flags": _ephemeral}}, - ) + # ephemeral doesn't change callback typings. 
just data json + if edit_origin: + self.callback = InteractionCallbackType.DEFERRED_UPDATE_MESSAGE + else: + self.callback = InteractionCallbackType.DEFERRED_CHANNEL_MESSAGE_WITH_SOURCE + + await self._client.create_interaction_response( + token=self.token, + application_id=int(self.id), + data={"type": self.callback.value, "data": {"flags": _ephemeral}}, + ) + + self.responded = True @property def custom_id(self) -> Optional[str]:
[BUG] Sometimes missing channel id and message id in message when ctx.send/edit
### Describe the bug.
When trying to send a message, edit an interaction, or edit a message, the message ID and channel ID are sometimes missing. They are usually missing when using ctx.send, but sometimes missing when editing, as reported by paginator ext users.
### List the steps.
1. Make a command
2. Try to send and edit:
```py
msg = await ctx.send("test")
print(msg.id, msg.channel_id)
msg = await ctx.edit("edit")
print(msg.id, msg.channel_id)
await msg.edit("test")
print(msg.id, msg.channel_id)
```
### What you expected.
Message should always have message and channel IDs
```py
msg = await ctx.send("test")
print(msg.id, msg.channel_id)
msg = await ctx.edit("edit")
print(msg.id, msg.channel_id)
await msg.edit("test")
print(msg.id, msg.channel_id)
```
### What you saw.
Instead this happens
```py
None None
987906535692849182 862150987288412170
987906535692849182 862150987288412170
```
Sometimes it also occurs when using ctx.edit or msg.edit, per reports from paginator ext users.
### What version of the library did you use?
unstable
### Version specification
Happens in stable, in unstable, and in 4.3 beta 1
### Code of Conduct
- [X] I agree to follow the contribution requirements.
Narrowed down the scope of this bug due to the fact that neither I nor Delta can reproduce it. It seems to trigger 100% of the time on some devices and near 0% of the time on others. This is very likely due to how `interactions.py` tries to get the message object when sending without deferring.
2022-07-09T16:48:18
interactions-py/interactions.py
1035
interactions-py__interactions.py-1035
[ "1034" ]
e90a5fd50dc149506b7dbbb928235a124d9e422a
diff --git a/interactions/api/models/member.py b/interactions/api/models/member.py --- a/interactions/api/models/member.py +++ b/interactions/api/models/member.py @@ -490,7 +490,7 @@ def get_avatar_url( :return: URL of the members's avatar (None will be returned if no avatar is set) :rtype: str """ - if not self.avatar: + if not self.avatar or self.avatar == self.user.avatar: return None if guild_id is MISSING:
[BUG] Member.get_avatar_url returns wrong URL when a member doesn't have a guild avatar.
### Describe the bug.
When using `Member.get_avatar_url`, the method returns a URL that's built as follows:
```
f"https://cdn.discordapp.com/guilds/{_guild_id}/users/{int(self.user.id)}/avatars/{self.avatar}"
```
The returned URL is correct if the member object has a server-specific profile picture. However, when it doesn't, a malformed CDN URL is returned which points to nothing. This is caused by the `avatar` property returning the hash of the global profile picture. I propose checking if `Member.user.avatar` and `Member.avatar` match, and if so, returning `None`. A PR will be created shortly after this issue if you believe this change is appropriate.
### List the steps.
1. Create a dummy command with a member option
2. Have the bot send both `Member.get_avatar_url()` & `Member.user.avatar_url`
3.1 Input a user with a server profile picture
4.1 Notice how both CDN URLs work and point to valid images
3.2 Input a user without a server profile picture
4.2 A server profile picture link is sent
4.3 Notice how it points to a nonexistent image
### What you expected.
-
### What you saw.
-
### What version of the library did you use?
unstable
### Version specification
_No response_
### Code of Conduct
- [X] I agree to follow the contribution requirements.
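A minimal sketch of the proposed guard, mirroring the one-line patch above; the standalone helper is made up purely to isolate the logic:

```py
def guild_avatar_hash(member_avatar, user_avatar):
    # A member hash equal to the user's global hash means there is no
    # guild-specific avatar, so no guild CDN URL should be built.
    if not member_avatar or member_avatar == user_avatar:
        return None
    return member_avatar

assert guild_avatar_hash("abc123", "abc123") is None      # global avatar only
assert guild_avatar_hash("abc123", "def456") == "abc123"  # real guild avatar
assert guild_avatar_hash(None, "def456") is None          # no avatar at all
```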
2022-08-22T16:01:28
interactions-py/interactions.py
1051
interactions-py__interactions.py-1051
[ "1048" ]
ae31788dc46f3d5420443dc2ba5716a68661444b
diff --git a/interactions/api/models/message.py b/interactions/api/models/message.py --- a/interactions/api/models/message.py +++ b/interactions/api/models/message.py @@ -477,7 +477,7 @@ def insert_field_at( """ Inserts a field in the embed at the specified index - :param index: The new field's index + :param index: The field's index to insert :type index: int :param name: The name of the field :type name: str @@ -501,7 +501,7 @@ def set_field_at( """ Overwrites the field in the embed at the specified index - :param index: The new field's index + :param index: The field's index to overwrite :type index: int :param name: The name of the field :type name: str @@ -526,7 +526,7 @@ def remove_field(self, index: int) -> None: """ Remove field at the specified index - :param index: The new field's index + :param index: The field's index to remove :type index: int """
[BUG] Docs: Incorrect parameter description on Embed.remove_field() ### Describe the bug. In the docs, the description of the `index` parameter to the `Embed.remove_field()` method says it's the index of the new field. This doesn't make sense, since you pass the index of the field to be removed. ### List the steps. 1. Open https://interactionspy.readthedocs.io/en/latest/api.models.message.html#interactions.api.models.message.Embed.remove_field 2. see description of `remove_field`'s `index` parameter ### What you expected. `Index of the field to be removed` ### What you saw. `The new field’s index` ### What version of the library did you use? stable ### Version specification _No response_ ### Code of Conduct - [X] I agree to follow the contribution requirements.
2022-08-31T15:18:37
interactions-py/interactions.py
1068
interactions-py__interactions.py-1068
[ "1065" ]
d0efc84f234b2218e5e57850b4d7b9c7c0655414
diff --git a/interactions/utils/get.py b/interactions/utils/get.py --- a/interactions/utils/get.py +++ b/interactions/utils/get.py @@ -194,8 +194,9 @@ def _check(): _objects: List[Union[_obj, Coroutine]] = [] kwarg_name += "s" - force_cache = kwargs.pop("force", None) == "cache" - force_http = kwargs.pop("force", None) == "http" + force_arg = kwargs.pop("force", None) + force_cache = force_arg == "cache" + force_http = force_arg == "http" if not force_http: _objects = _get_cache(_obj, client, kwarg_name, _list=True, **kwargs) @@ -230,8 +231,10 @@ def _check(): _obj: Optional[_T] = None - force_cache = kwargs.pop("force", None) == "cache" - force_http = kwargs.pop("force", None) == "http" + force_arg = kwargs.pop("force", None) + force_cache = force_arg == "cache" + force_http = force_arg == "http" + if not force_http: _obj = _get_cache(obj, client, kwarg_name, **kwargs)
[BUG] forcing http in get() not working
### Describe the bug.
Forcing `http` in the `get()` function returns the cached object if there is one.
### List the steps.
1. Get a member: `member: interactions.Member = await di.get(client=bot, obj=interactions.Member, parent_id=ctx.guild_id, object_id=ctx.author.id._snowflake, force="http")`
2. Add a role to the member via Discord
3. Get the member again
4. Compare `member.roles` before and after
### What you expected.
The role lists before/after should differ.
### What you saw.
Instead, the role lists are the same.
### What version of the library did you use?
release
### Version specification
_No response_
### Code of Conduct
- [X] I agree to follow the contribution requirements.
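The root cause is visible in the patch above: `kwargs.pop("force", None)` is evaluated twice, and the second call can only ever see the fallback. A library-independent sketch:

```py
kwargs = {"force": "http"}

# Buggy version: the first pop consumes the key, so the second
# comparison is always None == "http", i.e. False.
force_cache = kwargs.pop("force", None) == "cache"  # pops "http" -> False
force_http = kwargs.pop("force", None) == "http"    # key is gone -> False

# Fixed version: pop once, then compare the stored value.
kwargs = {"force": "http"}
force_arg = kwargs.pop("force", None)
force_cache = force_arg == "cache"  # False
force_http = force_arg == "http"    # True
```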
2022-09-03T11:49:23
interactions-py/interactions.py
1076
interactions-py__interactions.py-1076
[ "1075", "1075" ]
497c7a5de399ca5f088fc26da6a141ca96d26cc4
diff --git a/interactions/client/bot.py b/interactions/client/bot.py --- a/interactions/client/bot.py +++ b/interactions/client/bot.py @@ -460,13 +460,13 @@ async def __get_all_commands(self) -> None: # responsible for checking if a command is in the cache but not a coro -> allowing removal for _id in _guild_ids: - _cmds = await self._http.get_application_commands( - application_id=self.me.id, guild_id=_id, with_localizations=True - ) - - if isinstance(_cmds, dict) and _cmds.get("code"): - if int(_cmds.get("code")) != 50001: - raise LibraryException(_cmds["code"], message=f'{_cmds["message"]} |') + try: + _cmds = await self._http.get_application_commands( + application_id=self.me.id, guild_id=_id, with_localizations=True + ) + except LibraryException as e: + if int(e.code) != 50001: + raise LibraryException(code=e.code, message=e.message) log.warning( f"Your bot is missing access to guild with corresponding id {_id}! "
[BUG] Error when syncing to guild without app.cmds scope with disable_sync
### Describe the bug.
*Everything I describe is based off other people's reports and theoretical guesswork. This may be inaccurate.*

Many big bots are in a large number of guilds, and so it makes sense for them to enable `disable_sync`. Besides the obvious, this has another behavior, however. Instead of erroring out if a guild has the bot without the `application.commands` scope (which is possible under certain contexts, and happens especially often for bigger bots), having `disable_sync` will instead simply warn the user: we don't need to sync to the bot, so the `application.commands` scope isn't *needed*, but it is a thing to note. However, in 4.3.1 *at least*, the bot has been erroring out when seeing a guild without the scope instead of simply warning.
### List the steps.
1. Have a bot be in a server (at least one) where it doesn't have the `application.commands` scope.
2. Start a bot with `disable_sync=True`.
### What you expected.
For interactions.py to warn the user, but not necessarily stop them.
### What you saw.
```python
interactions.api.error.LibraryException: An error occurred: Missing Access, with code '50001' and severity '40'
Could not prepare the client:
Traceback (most recent call last):
  File "/usr/local/lib/python3.8/dist-packages/interactions/ext/autosharder/dummy.py", line 47, in _ready
    await self._Client__get_all_commands()
  File "/usr/local/lib/python3.8/dist-packages/interactions/client/bot.py", line 426, in __get_all_commands
    _cmds = await self._http.get_application_commands(
  File "/usr/local/lib/python3.8/dist-packages/interactions/api/http/interaction.py", line 45, in get_application_commands
    return await self._req.request(
  File "/usr/local/lib/python3.8/dist-packages/interactions/api/http/request.py", line 174, in request
    raise LibraryException(
interactions.api.error.LibraryException: An error occurred:
```
### What version of the library did you use?
stable
### Version specification
4.3.1
### Code of Conduct
- [X] I agree to follow the contribution requirements.
As for what causes it, my theory is that we now detect and throw an error in our HTTP requests when we previously didn't. Take a look at [these lines that get invoked with `disable_sync=True`.](https://github.com/interactions-py/library/blob/7819ffc0d525033d9a03cd6f177b6b5f2639b0e4/interactions/client/bot.py#L426-L439). You can clearly see that triggering the warning depends on the response being a dict with a field for the "code", which represents the response returning an error. However, lately the HTTP handler has been throwing proper errors instead of returning the dict with said errors, as part of improving the error UX of interactions.py. This is good, but it means this legacy code doesn't account for that. This likely can be solved by changing that whole section of code to try-except the `LibraryException` and propagate the warning from there while still suppressing the actual error. Fun fact: #868 does the fix needed, but for syncing, not for here.
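To make that suggestion concrete, here is a minimal, library-agnostic sketch of the try/except pattern (the diff above shows the actual fix). `ApiError`, `http`, and `fetch_guild_commands_safely` are hypothetical stand-ins for illustration, not interactions.py APIs; the real code catches `LibraryException` and inspects its `code` attribute.

```python
import logging

log = logging.getLogger(__name__)


class ApiError(Exception):
    """Stand-in for the library's LibraryException."""

    def __init__(self, code: int, message: str) -> None:
        super().__init__(f"{message} (code {code})")
        self.code = code
        self.message = message


async def fetch_guild_commands_safely(http, guild_id: int):
    """Warn and skip on 'Missing Access' (code 50001); re-raise anything else."""
    try:
        return await http.get_application_commands(guild_id=guild_id)
    except ApiError as e:
        if e.code != 50001:
            raise
        log.warning("Bot is missing access to guild %s; skipping.", guild_id)
        return None
```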
2022-09-08T03:01:09
interactions-py/interactions.py
1,145
interactions-py__interactions.py-1145
[ "1139" ]
1cb98f28780a6fa99dc5807bd0885ca85e356ebd
diff --git a/interactions/api/models/guild.py b/interactions/api/models/guild.py --- a/interactions/api/models/guild.py +++ b/interactions/api/models/guild.py @@ -2701,7 +2701,7 @@ async def get_audit_logs( """ _user_id = ( - (user_id.id if isinstance(user_id, User) else user_id) + int(user_id.id if isinstance(user_id, User) else user_id) if user_id is not MISSING else None ) @@ -2745,7 +2745,7 @@ async def get_latest_audit_log_action( "is the user ID and the second is the action type!", ) - _user = of[0].id if isinstance(of[0], (Member, User)) else of[0] + _user = int(of[0].id if isinstance(of[0], (Member, User)) else of[0]) res = await self._client.get_guild_auditlog( guild_id=int(self.id), user_id=_user, action_type=of[1] ) @@ -2758,7 +2758,9 @@ async def get_latest_audit_log_action( else: if isinstance(of, (Member, User)): of = of.id - res = await self._client.get_guild_auditlog(guild_id=int(self.id), user_id=of, limit=1) + res = await self._client.get_guild_auditlog( + guild_id=int(self.id), user_id=int(of), limit=1 + ) return AuditLogs(**res) @@ -2780,7 +2782,7 @@ async def get_full_audit_logs( _action_type = action_type if action_type is not MISSING else None _user_id = ( - (user_id.id if isinstance(user_id, User) else user_id) + int(user_id.id if isinstance(user_id, User) else user_id) if user_id is not MISSING else None ) diff --git a/interactions/api/models/member.py b/interactions/api/models/member.py --- a/interactions/api/models/member.py +++ b/interactions/api/models/member.py @@ -91,7 +91,7 @@ def guild_id(self) -> Optional[Union[Snowflake, LibraryException]]: if not self._client: raise LibraryException(code=13) - + from .guild import Guild if self.roles:
[BUG] Guild.get_audit_logs fails when passed a User or Snowflake to the user id ### Describe the bug. `Guild.get_audit_logs` accepts a User, Snowflake, int, or None in the `user_id` parameter. However, it fails to convert the first two into ints, which results in an error. This likely also applies to other audit log functions, as a quick glance shows that no conversion to ints happens. ### List the steps. (Image taken from a help thread) ![image](https://user-images.githubusercontent.com/84055084/198934164-6fd92bec-82d2-48e1-8ac5-8ff3a08135c3.png) ### What you expected. The code to run successfully ### What you saw. (Image taken from a help thread) ![image](https://user-images.githubusercontent.com/84055084/198934202-127d980b-d311-4713-b623-73f3507ca6db.png) ### What version of the library did you use? release ### Version specification _No response_ ### Code of Conduct - [X] I agree to follow the contribution requirements.
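A short sketch of the normalization the patch applies: whatever the caller passes - a User-like object, a Snowflake-like wrapper, or a plain int - the HTTP layer should always receive an int. `UserLike` and `Snowflake` here are minimal illustrative stand-ins, not the library's real classes.

```python
from typing import Optional, Union


class Snowflake:
    """Wrapper around a Discord ID that supports int()."""

    def __init__(self, value: int) -> None:
        self._value = value

    def __int__(self) -> int:
        return self._value


class UserLike:
    """Anything with an `.id` attribute holding a Snowflake."""

    def __init__(self, user_id: Snowflake) -> None:
        self.id = user_id


def normalize_user_id(user_id: Union[UserLike, Snowflake, int, None]) -> Optional[int]:
    if user_id is None:
        return None
    # unwrap a user object first, then force everything through int()
    return int(user_id.id if isinstance(user_id, UserLike) else user_id)


assert normalize_user_id(UserLike(Snowflake(123))) == 123
assert normalize_user_id(Snowflake(456)) == 456
assert normalize_user_id(789) == 789
assert normalize_user_id(None) is None
```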
2022-11-03T13:15:49
interactions-py/interactions.py
1,146
interactions-py__interactions.py-1146
[ "1107" ]
050b2d377c1d3cb6201590d024c8ae8406dd5532
diff --git a/interactions/client/bot.py b/interactions/client/bot.py --- a/interactions/client/bot.py +++ b/interactions/client/bot.py @@ -1806,38 +1806,40 @@ def __new__(cls, client: Client, *args, **kwargs) -> "Extension": for name, func in getmembers(self, predicate=iscoroutinefunction): # TODO we can make these all share the same list, might make it easier to load/unload if hasattr(func, "__listener_name__"): # set by extension_listener - func = client.event( - func, name=func.__listener_name__ - ) # capture the return value for friendlier ext-ing + all_listener_names: List[str] = func.__listener_name__ + for listener_name in all_listener_names: + func = client.event( + func, name=listener_name + ) # capture the return value for friendlier ext-ing - listeners = self._listeners.get(func.__listener_name__, []) - listeners.append(func) - self._listeners[func.__listener_name__] = listeners + listeners = self._listeners.get(listener_name, []) + listeners.append(func) + self._listeners[listener_name] = listeners if hasattr(func, "__component_data__"): - args, kwargs = func.__component_data__ - func = client.component(*args, **kwargs)(func) - - component = kwargs.get("component") or args[0] - comp_name = ( - _component(component).custom_id - if isinstance(component, (Button, SelectMenu)) - else component - ) - comp_name = f"component_{comp_name}" + all_component_data: List[Tuple[tuple, dict]] = func.__component_data__ + for args, kwargs in all_component_data: + func = client.component(*args, **kwargs)(func) + + component = kwargs.get("component") or args[0] + comp_name = ( + _component(component).custom_id + if isinstance(component, (Button, SelectMenu)) + else component + ) + comp_name = f"component_{comp_name}" - listeners = self._listeners.get(comp_name, []) - listeners.append(func) - self._listeners[comp_name] = listeners + listeners = self._listeners.get(comp_name, []) + listeners.append(func) + self._listeners[comp_name] = listeners if hasattr(func, "__autocomplete_data__"): all_args_kwargs = func.__autocomplete_data__ - for _ in all_args_kwargs: - args, kwargs = _[0], _[1] + for args, kwargs in all_args_kwargs: func = client.autocomplete(*args, **kwargs)(func) - name = kwargs.get("name") or args[0] - _command = kwargs.get("command") or args[1] + _command = kwargs.get("command") or args[0] + name = kwargs.get("name") or args[1] _command: Union[Snowflake, int] = ( _command.id if isinstance(_command, ApplicationCommand) else _command @@ -1850,16 +1852,17 @@ def __new__(cls, client: Client, *args, **kwargs) -> "Extension": self._listeners[auto_name] = listeners if hasattr(func, "__modal_data__"): - args, kwargs = func.__modal_data__ - func = client.modal(*args, **kwargs)(func) + all_modal_data: List[Tuple[tuple, dict]] = func.__modal_data__ + for args, kwargs in all_modal_data: + func = client.modal(*args, **kwargs)(func) - modal = kwargs.get("modal") or args[0] - _modal_id: str = modal.custom_id if isinstance(modal, Modal) else modal - modal_name = f"modal_{_modal_id}" + modal = kwargs.get("modal") or args[0] + _modal_id: str = modal.custom_id if isinstance(modal, Modal) else modal + modal_name = f"modal_{_modal_id}" - listeners = self._listeners.get(modal_name, []) - listeners.append(func) - self._listeners[modal_name] = listeners + listeners = self._listeners.get(modal_name, []) + listeners.append(func) + self._listeners[modal_name] = listeners for _, cmd in getmembers(self, predicate=lambda command: isinstance(command, Command)): cmd: Command @@ -1923,14 +1926,15 @@ def decorator(coro) 
-> Command: def extension_listener(func: Optional[Coroutine] = None, name: Optional[str] = None): def decorator(func: Coroutine): - func.__listener_name__ = name or func.__name__ + if not hasattr(func, "__listener_name__"): + func.__listener_name__ = [] + func.__listener_name__.append(name or func.__name__) return func if func: # allows omitting `()` on `@listener` - func.__listener_name__ = name or func.__name__ - return func + return decorator(func) return decorator @@ -1938,7 +1942,10 @@ def decorator(func: Coroutine): @wraps(Client.component) def extension_component(*args, **kwargs): def decorator(func): - func.__component_data__ = (args, kwargs) + if not hasattr(func, "__component_data__"): + func.__component_data__ = [] + func.__component_data__.append((args, kwargs)) + return func return decorator @@ -1947,13 +1954,11 @@ def decorator(func): @wraps(Client.autocomplete) def extension_autocomplete(*args, **kwargs): def decorator(func): - try: - if getattr(func, "__autocomplete_data__"): - func.__autocomplete_data__.append((args, kwargs)) - except AttributeError: - func.__autocomplete_data__ = [(args, kwargs)] - finally: - return func + if not hasattr(func, "__autocomplete_data__"): + func.__autocomplete_data__ = [] + func.__autocomplete_data__.append((args, kwargs)) + + return func return decorator @@ -1961,7 +1966,10 @@ def decorator(func): @wraps(Client.modal) def extension_modal(*args, **kwargs): def decorator(func): - func.__modal_data__ = (args, kwargs) + if not hasattr(func, "__modal_data__"): + func.__modal_data__ = [] + func.__modal_data__.append((args, kwargs)) + return func return decorator
[BUG] stacking extension_component() not working ### Describe the bug. When trying to stack extension_component decorators, only the first one reacts. Stacking @bot.component(..) with the same custom_ids works as expected. This doesn't work in either latest or unstable. Code Main: ``` import interactions bot = interactions.Client(...) bot.load("responsefilename") if __name__ == "__main__": bot.start() ``` Code Ext: ``` import interactions class Response(interactions.Extension): def __init__(self, client: interactions.Client) -> None: self.client=client @interactions.extension_component("resp_1") @interactions.extension_component("resp_2") .... @interactions.extension_component("resp_9") async def boost_col_response(self, ctx: interactions.ComponentContext): #dostuff def setup(client): Response(client) ``` ### List the steps. 1. import interactions in external file and main 2. load ext 3. stack interactions.extension_component("") ### What you expected. It should react to all custom_ids defined in the decorators. ### What you saw. It only reacts to the custom_id of the very first decorator. ### What version of the library did you use? release ### Version specification _No response_ ### Code of Conduct - [X] I agree to follow the contribution requirements.
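The root cause is that each decorator application overwrote a single `__component_data__` attribute, so only the outermost (first-listed) decorator survived. The patch above switches to accumulating the data in a list; here is a minimal, self-contained sketch of that pattern with illustrative names:

```python
from typing import Callable


def component(custom_id: str) -> Callable:
    def decorator(func: Callable) -> Callable:
        # append instead of overwrite, so stacked decorators all survive
        if not hasattr(func, "__component_data__"):
            func.__component_data__ = []
        func.__component_data__.append(custom_id)
        return func

    return decorator


@component("resp_1")
@component("resp_2")
def handler() -> None:
    ...


# decorators apply bottom-up, so "resp_2" is recorded first
assert handler.__component_data__ == ["resp_2", "resp_1"]
```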
2022-11-03T15:08:26
interactions-py/interactions.py
1,169
interactions-py__interactions.py-1169
[ "1162", "1162" ]
fcf76f6f9a5ef3712fbb643808507cb7bd0dcf03
diff --git a/interactions/utils/dict_caches.py b/interactions/utils/dict_caches.py --- a/interactions/utils/dict_caches.py +++ b/interactions/utils/dict_caches.py @@ -1,6 +1,8 @@ from collections import OrderedDict from typing import Generic, TypeVar +from .missing import MISSING + __all__ = ("FIFODict", "LRUDict") _KT = TypeVar("_KT") @@ -45,3 +47,14 @@ def __setitem__(self, key: _KT, value: _VT): # Prevent buildup over time while len(self) > self._max_items: del self[next(iter(self))] + + __marker = object() + + def pop(self, key: _KT, default: _VT = __marker) -> _VT: + if key in self: + result = self[key] + del self[key] + return result + if default is MISSING: + raise KeyError(key) + return default
[BUG] Cannot pop from Cache during dispatch. ### Describe the bug. When running functions leading to delete events, for example `channel.purge`, the cache encounters a KeyError and kills the whole process. This is caused by the implementation of #482 ### List the steps. n/A ### What you expected. n/A ### What you saw. ``` Websocket have raised an exception, closing. Traceback (most recent call last): File "C:\Users\\Desktop\PycharmProjects\library\interactions\client\bot.py", line 440, in _login await self._websocket.run() File "C:\Users\\Desktop\PycharmProjects\library\interactions\api\gateway\client.py", line 279, in run await self._handle_stream(msg) File "C:\Users\\Desktop\PycharmProjects\library\interactions\api\gateway\client.py", line 332, in _handle_stream self._dispatch_event(event, data) File "C:\Users\\Desktop\PycharmProjects\library\interactions\api\gateway\client.py", line 562, in _dispatch_event _message_cache.pop(message_id) File "C:\Users\\Desktop\PycharmProjects\library\interactions\api\cache.py", line 131, in pop return self.values.pop(key, default) File "C:\Users\\Desktop\PycharmProjects\library\interactions\utils\dict_caches.py", line 39, in __getitem__ self.move_to_end(key) KeyError: Snowflake(1040316644695756912) Process finished with exit code 0 ``` ### What version of the library did you use? unstable ### Version specification The unstable version ### Code of Conduct - [X] I agree to follow the contribution requirements.
This would explain *way too much.* Glad the problem's been identified. @EdVraz Can you give some steps to consistently reproduce the bug? I'm trying to fix it, but using `channel.purge` doesn't trigger it for me 1. Send a message 2. Remove that message Or Remove not cached message(any object which can be cached) > @EdVraz Can you give some steps to consistently reproduce the bug? I'm trying to fix it, but using `channel.purge` doesn't trigger it for me @Catalyst4222 https://github.com/interactions-py/library/blob/unstable/interactions/api/gateway/client.py#L562 change this to `.pop(id)` and then join and leave a vc. This should trigger it. At least it did for me
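For context, the `pop` added in the patch follows the classic sentinel-default pattern: a private marker object distinguishes "no default supplied" from "the default is None", so a bare `pop(key)` on a missing key raises `KeyError` while `pop(key, None)` does not. A minimal self-contained sketch, using one sentinel for both the default and the comparison:

```python
from collections import OrderedDict

_MISSING = object()  # private sentinel: no caller will ever pass this


class LRUDict(OrderedDict):
    def pop(self, key, default=_MISSING):
        if key in self:
            value = self[key]
            del self[key]
            return value
        if default is _MISSING:  # caller gave no fallback
            raise KeyError(key)
        return default


cache = LRUDict(a=1)
assert cache.pop("a") == 1
assert cache.pop("a", None) is None  # missing key with a default: no error
```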
2022-11-15T20:11:53
interactions-py/interactions.py
1,199
interactions-py__interactions.py-1199
[ "1180", "1180" ]
fa03bc2b74a2502ddaf4502ab055241861f81ed1
diff --git a/interactions/api/gateway/client.py b/interactions/api/gateway/client.py --- a/interactions/api/gateway/client.py +++ b/interactions/api/gateway/client.py @@ -946,6 +946,12 @@ async def _reconnect(self, to_resume: bool, code: Optional[int] = 1012) -> None: if self.__heartbeat_event.is_set(): self.__heartbeat_event.clear() # Because we're hardresetting the process + self._dispatch.dispatch( + "on_disconnect" + ) # will be followed by the on_ready event after reconnection + # reconnection happens whenever it disconnects either with or without a resume prompt + # as this is called whenever the WS client closes + if not to_resume: url = self.ws_url if self.ws_url else await self._http.get_gateway() else:
[REQUEST] Add event listener for bot disconnect ### Describe the feature. I am currently developing my own Discord bot using interactions.py, however due to my internet not being the best, the bot tends to go offline randomly. I would like to know when the bot goes offline, and adding an event listener to listen for bot disconnects can help with this. Discord.py has an on_disconnect function, so it would make sense to have one here for interactions.py, as interactions.py was built to be an extension of Discord.py, according to the FAQ section on the documentation website. ### Code of Conduct - [X] I agree to follow the contribution requirements.
This would be amazing.
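A hedged usage sketch of the new event: with the patch, the gateway client dispatches `on_disconnect` whenever the websocket closes, just before reconnecting (an `on_ready` follows once the connection is back). This assumes the v4 `Client.event` decorator registers listeners by function name, as the extension machinery elsewhere in this document suggests; the token is a placeholder.

```python
import interactions

bot = interactions.Client(token="YOUR_TOKEN")  # placeholder token


@bot.event
async def on_disconnect():
    # fires on every websocket drop, resumed or not
    print("Gateway connection dropped; a reconnect attempt follows.")


# bot.start() would run the client; omitted here.
```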
2022-12-14T01:48:53
interactions-py/interactions.py
1,217
interactions-py__interactions.py-1217
[ "1214", "1214" ]
6cc7648b8f0d4573bea9af482fc3236261708f40
diff --git a/interactions/client/bot.py b/interactions/client/bot.py --- a/interactions/client/bot.py +++ b/interactions/client/bot.py @@ -593,11 +593,10 @@ def __resolve_commands(self) -> None: # sourcery skip: low-code-quality cmd.listener = self._websocket._dispatch if cmd.default_scope and self._default_scope: - cmd.scope = ( + if isinstance(cmd.scope, list): cmd.scope.extend(self._default_scope) - if isinstance(cmd.scope, list) - else self._default_scope - ) + else: + cmd.scope = self._default_scope data: Union[dict, List[dict]] = cmd.full_data coro = cmd.dispatcher
[BUG] Error starting when using scope param in command decorator ### Describe the bug. When starting the bot with a command that has a scope specified as a param, it results in a KeyError: ```py Could not prepare the client: Traceback (most recent call last): File "----/venv/lib/python3.10/site-packages/interactions/client/bot.py", line 404, in _ready await self.__sync() File "----/venv/lib/python3.10/site-packages/interactions/client/bot.py", line 621, in __sync if _guild_command["name"] not in __check_guild_commands[_guild_id]: KeyError: None ``` ### List the steps. 1. Create a bot: ```py ids = 8210301307------ bot = interactions.Client(token="", default_scope=ids) ``` 2. Create a command with a scope param: ```py @bot.command( name="test", description="test", scope=ids ) async def _test(ctx): await ctx.send("test") ``` 3. run the bot 4. see Traceback ### What you expected. The command should be correctly registered for the guilds provided in the default_scope and command scope. In file client/bot.py, line 517, in __resolve_commands: ```py if cmd.default_scope and self._default_scope: cmd.scope = ( cmd.scope.extend(self._default_scope) if isinstance(cmd.scope, list) else self._default_scope ) ``` ``cmd.scope.extend(self._default_scope)`` extends cmd.scope, but then sets cmd.scope to its return value, which is None. This overrides the scope, resulting in None being passed as the scope to the command in full_data. This passes None as the guild_id to the ApplicationCommand, which is used when getting a guild command's _guild_id => KeyError here ``if _guild_command["name"] not in __check_guild_commands[_guild_id]:`` replace ```py if cmd.default_scope and self._default_scope: cmd.scope = ( cmd.scope.extend(self._default_scope) if isinstance(cmd.scope, list) else self._default_scope ) ``` with ```py if cmd.default_scope and self._default_scope: if isinstance(cmd.scope, list): cmd.scope.extend(self._default_scope) else: cmd.scope = self._default_scope ``` ### What you saw. Instead, I received this traceback error given from my Python terminal: ```py Could not prepare the client: Traceback (most recent call last): File "----/venv/lib/python3.10/site-packages/interactions/client/bot.py", line 404, in _ready await self.__sync() File "----/venv/lib/python3.10/site-packages/interactions/client/bot.py", line 621, in __sync if _guild_command["name"] not in __check_guild_commands[_guild_id]: KeyError: None ``` ### What version of the library did you use? stable ### Version specification 4.3.4 ### Code of Conduct - [X] I agree to follow the contribution requirements.
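The root cause is a common Python pitfall worth isolating: `list.extend` mutates in place and returns `None`, so assigning its result silently replaces the list. A minimal demonstration:

```python
scope = [111]
scope = scope.extend([222])  # looks reasonable, but...
assert scope is None         # ...extend() returns None

scope = [111]
scope.extend([222])          # mutate without reassigning
assert scope == [111, 222]
```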
2023-01-08T19:04:56
interactions-py/interactions.py
1,243
interactions-py__interactions.py-1243
[ "1133", "1133" ]
9810c5b8b9d31455c372b59512cad98eb123886a
diff --git a/interactions/api/gateway/client.py b/interactions/api/gateway/client.py --- a/interactions/api/gateway/client.py +++ b/interactions/api/gateway/client.py @@ -404,7 +404,6 @@ def _dispatch_interaction_event(self, data: dict) -> None: _option = self.__sub_command_context(option, _context) __kwargs.update(_option) - self._dispatch.dispatch("on_command", _context) elif data["type"] == InteractionType.MESSAGE_COMPONENT: _name = f"component_{_context.data.custom_id}" diff --git a/interactions/client/models/command.py b/interactions/client/models/command.py --- a/interactions/client/models/command.py +++ b/interactions/client/models/command.py @@ -897,6 +897,8 @@ async def wrapper(ctx: "CommandContext", *args, **kwargs): ctx.command = self ctx.extension = self.extension + self.listener.dispatch("on_command", ctx) + try: if self.extension: return await coro(self.extension, ctx, *args, **kwargs)
[BUG] Context is missing `command` attribute in the `on_component` event ### Describe the bug. Taken from [`#AttributeError 'NoneType' object has no attribute 'name'`](https://canary.discord.com/channels/789032594456576001/1033886778165305424/1033886778165305424) in the help forum In the `on_command` event, there is a possibility that `ctx.command` is None instead of the associated Command instance. This is due to a race condition where some attribute processing for Context is done when the command is being invoked. This issue likely applies to other attributes as well, though that has not been tested yet A workaround is to call `asyncio.sleep` with a short delay, but users likely won't find that solution on their own ### List the steps. Set up the client with a command and a listener for the `on_command` event. In the event, access `ctx.command` ### What you expected. `ctx.command` to be a Command instance ### What you saw. `ctx.command` is None ### What version of the library did you use? release ### Version specification 4.3.2 ### Code of Conduct - [X] I agree to follow the contribution requirements.
I see only one solution. Remove line `self._dispatch.dispatch("on_command", _context)` from `WebSocketClient._dispatch_interaction_event` and paste it into `Command.__wrap_coro`. It wont be breaking *I think*
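A runnable sketch of that idea: fire the event from inside the command wrapper, after `ctx.command` has been assigned, rather than from the raw gateway handler where the context is still half-built. All class names here are illustrative stand-ins for the library's internals.

```python
import asyncio


class Dispatcher:
    """Stand-in for the library's event dispatcher."""

    def dispatch(self, name: str, ctx: "Ctx") -> None:
        # by the time listeners see the event, ctx.command is already set
        print(f"{name}: ctx.command = {ctx.command.name!r}")


class Ctx:
    command = None


class Command:
    def __init__(self, name: str, coro, listener: Dispatcher) -> None:
        self.name = name
        self.coro = coro
        self.listener = listener

    async def __call__(self, ctx: Ctx):
        ctx.command = self                          # assign first...
        self.listener.dispatch("on_command", ctx)   # ...then dispatch
        return await self.coro(ctx)


async def main() -> None:
    async def hello(ctx: Ctx) -> str:
        return "hi"

    await Command("hello", hello, Dispatcher())(Ctx())


asyncio.run(main())
```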
2023-01-23T12:41:14
interactions-py/interactions.py
1,326
interactions-py__interactions.py-1326
[ "1325" ]
a6d8e223ef6481de6aef3a74149bc5c5286eef2f
diff --git a/interactions/models/internal/tasks/task.py b/interactions/models/internal/tasks/task.py --- a/interactions/models/internal/tasks/task.py +++ b/interactions/models/internal/tasks/task.py @@ -111,6 +111,7 @@ async def _task_loop(self) -> None: def start(self) -> None: """Start this task.""" try: + self.trigger.reschedule() self._stop.clear() self.task = asyncio.create_task(self._task_loop()) except RuntimeError: diff --git a/interactions/models/internal/tasks/triggers.py b/interactions/models/internal/tasks/triggers.py --- a/interactions/models/internal/tasks/triggers.py +++ b/interactions/models/internal/tasks/triggers.py @@ -15,6 +15,10 @@ def __new__(cls, *args, **kwargs) -> "BaseTrigger": def __or__(self, other: "BaseTrigger") -> "OrTrigger": return OrTrigger(self, other) + def reschedule(self) -> None: + """Update the last call time to now""" + self.last_call_time = datetime.now() + @abstractmethod def next_fire(self) -> datetime | None: """
[BUG] inconsistencies and bugs in tasks ### Describe the bug. In the latest 5.x, tasks don't work properly. Basically, there are 2 things going wrong: 1. If the task is not started immediately after the bot turns on, it silently keeps accumulating scheduled calls. When the task is started, say 10 seconds later, the 10 iterations that were supposed to happen but never ran are all called immediately, with no delay between each other, which should not happen in the first place. 2. When the task is stopped, it isn't really stopped. Sure, the code doesn't get executed, but when you restart the task, it may run many times with no delay, which would probably break whatever code you wanted to run at an interval. ### List the steps. Here is my code to reproduce this bug: ```py import interactions from asyncio import sleep from time import time bot = interactions.Client() increment = 0 last_call = time() @interactions.Task.create(interactions.IntervalTrigger(seconds=1)) async def my_task(): global increment, last_call increment += 1 print(increment, time() - last_call) last_call = time() @interactions.listen() async def on_startup(): print("Bot is ready!") @interactions.slash_command("test_task") async def test_task(ctx: interactions.SlashContext): await ctx.send("Watch the console for inconsistencies") print("# starting task") my_task.start() print("# waiting 10 seconds") await sleep(10) print("# stopping task") my_task.stop() print("# waiting 10 seconds") await sleep(10) print("# starting task again") my_task.start() print("# waiting 5 seconds") await sleep(5) print("# stopping task again") my_task.stop() bot.start("token") ``` ### What you expected. When the task is started in the command, it should only call it once, not many times because it wasn't started immediately. Whenever the task is stopped, it should actually stop, instead of having a backlog of callbacks that get called immediately when you start it again. ### What you saw. Here is the output of the above program: ```py Bot is ready! # starting task # waiting 10 seconds 1 9.517415285110474 2 0.0 3 0.0 4 0.0 5 0.0 6 0.0010004043579101562 7 0.0010004043579101562 8 0.0 9 0.0 10 0.4808993339538574 11 0.9963068962097168 12 1.0010671615600586 13 0.9928460121154785 14 0.9866108894348145 15 1.0092473030090332 16 0.9739875793457031 17 0.9932007789611816 18 0.9856703281402588 19 0.9991569519042969 # stopping task # waiting 10 seconds # starting task again # waiting 5 seconds 20 10.533191442489624 21 0.0009999275207519531 22 0.0 23 0.0 24 0.0010006427764892578 25 0.0 26 0.0 27 0.0 28 0.00400233268737793 29 0.0 30 0.464735746383667 31 0.9912123680114746 32 1.0092799663543701 33 0.9981844425201416 34 0.9897663593292236 # stopping task again ``` ### What version of the library did you use? unstable ### Version specification 5.x ### Code of Conduct - [X] I agree to follow the contribution requirements.
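The patch above addresses the backlog by resetting the trigger's reference time whenever the task (re)starts. A minimal sketch of that mechanism with stand-in classes, not the library's real implementations:

```python
from datetime import datetime, timedelta


class IntervalTrigger:
    def __init__(self, seconds: float) -> None:
        self.delta = timedelta(seconds=seconds)
        self.last_call_time = datetime.now()

    def reschedule(self) -> None:
        # forget any fire times "missed" while the task was stopped
        self.last_call_time = datetime.now()

    def next_fire(self) -> datetime:
        return self.last_call_time + self.delta


trigger = IntervalTrigger(seconds=1)
# ... imagine the task sat stopped for ten seconds here ...
trigger.reschedule()  # what Task.start() now calls in the patch
assert trigger.next_fire() > datetime.now()  # the next fire is in the future
```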
2023-04-07T06:24:12
interactions-py/interactions.py
1,394
interactions-py__interactions.py-1394
[ "1393" ]
8963704ce6af61aac7ea9a4f69e9d56e3e49dd67
diff --git a/interactions/api/http/http_requests/guild.py b/interactions/api/http/http_requests/guild.py --- a/interactions/api/http/http_requests/guild.py +++ b/interactions/api/http/http_requests/guild.py @@ -54,8 +54,8 @@ async def get_guild(self, guild_id: "Snowflake_Type", with_counts: bool = True) a guild object """ - params = {"guild_id": guild_id, "with_counts": int(with_counts)} - result = await self.request(Route("GET", "/guilds/{guild_id}"), params=params) + params = {"with_counts": int(with_counts)} + result = await self.request(Route("GET", "/guilds/{guild_id}", guild_id=guild_id), params=params) return cast(discord_typings.GuildData, result) async def get_guild_preview(self, guild_id: "Snowflake_Type") -> discord_typings.GuildPreviewData:
[BUG] fetch_guild() method not working. ### Library Version 5.2.0 ### Describe the Bug When calling Client.fetch_guild(_id_, force=True) the traceback reports: ``` Traceback (most recent call last): File "/home/ubuntu/discord/bot-venv/lib/python3.10/site-packages/interactions/client/client.py", line 1810, in __dispatch_interaction response = await callback File "/home/ubuntu/discord/bot-venv/lib/python3.10/site-packages/interactions/client/client.py", line 1693, in _run_slash_command return await command(ctx, **ctx.kwargs) File "/home/ubuntu/discord/bot-venv/lib/python3.10/site-packages/interactions/models/internal/command.py", line 132, in __call__ await self.call_callback(self.callback, context) File "/home/ubuntu/discord/bot-venv/lib/python3.10/site-packages/interactions/models/internal/application_commands.py", line 770, in call_callback return await self.call_with_binding(callback, ctx) File "/home/ubuntu/discord/bot-venv/lib/python3.10/site-packages/interactions/models/internal/callback.py", line 43, in call_with_binding return await callback(*args, **kwargs) File "/home/ubuntu/discord/bot.py", line 148, in debug guild = await bot.fetch_guild(1234, force=True) File "/home/ubuntu/discord/bot-venv/lib/python3.10/site-packages/interactions/client/client.py", line 2021, in fetch_guild return await self.cache.fetch_guild(guild_id, force=force) File "/home/ubuntu/discord/bot-venv/lib/python3.10/site-packages/interactions/client/smart_cache.py", line 594, in fetch_guild data = await self._client.http.get_guild(guild_id) File "/home/ubuntu/discord/bot-venv/lib/python3.10/site-packages/interactions/api/http/http_requests/guild.py", line 58, in get_guild result = await self.request(Route("GET", "/guilds/{guild_id}"), params=params) File "/home/ubuntu/discord/bot-venv/lib/python3.10/site-packages/interactions/api/http/http_client.py", line 387, in request async with self.__session.request(route.method, route.url, **kwargs) as response: File "/home/ubuntu/discord/bot-venv/lib/python3.10/site-packages/interactions/api/http/route.py", line 90, in url return f"{self.BASE}{self.resolved_path}" File "/home/ubuntu/discord/bot-venv/lib/python3.10/site-packages/interactions/api/http/route.py", line 71, in resolved_path return self.path.format_map({k: _uriquote(v) if isinstance(v, str) else v for k, v in self.params.items()}) KeyError: 'guild_id' ``` ### Steps to Reproduce Use the method as documented. ### Expected Results A guild object returned from the API. ### Minimal Reproducible Code _No response_ ### Traceback _No response_ ### Checklist - [X] I have searched the open issues for duplicates. - [X] I have shown the entire traceback, if possible. - [X] I have removed my token from display, if visible. - [X] I have attempted to debug this myself, and I believe this issue is with the library ### Additional Information _No response_
This is replicable. A strange error too - we'll look into it.
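The failure is easy to isolate: the route's path template needs `guild_id` supplied as a path parameter so `str.format_map` can resolve the placeholder, but the buggy call passed it only in the query-string params. A minimal reproduction:

```python
path = "/guilds/{guild_id}"

try:
    path.format_map({})  # what the buggy Route construction effectively did
except KeyError as err:
    print(f"KeyError: {err}")  # KeyError: 'guild_id'

print(path.format_map({"guild_id": 1234}))  # /guilds/1234
```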
2023-05-07T16:45:26
interactions-py/interactions.py
1,421
interactions-py__interactions.py-1421
[ "1420" ]
1e3685f72dbee64ab0f663c45a16e54c617cb484
diff --git a/interactions/models/discord/guild.py b/interactions/models/discord/guild.py --- a/interactions/models/discord/guild.py +++ b/interactions/models/discord/guild.py @@ -1457,7 +1457,7 @@ async def create_role( if name: payload["name"] = name - if permissions: + if permissions is not MISSING and permissions is not None: payload["permissions"] = str(int(permissions)) if colour := colour or color:
[BUG] Unable to create new `Role` with `Permissions.NONE` ### Library Version 5.5.1 ### Describe the Bug When attempting to create a new guild role with permissions set to `Permissions.NONE` (i.e. the role should have all permissions disabled), the role is instead created with Discord's default set of permissions for new guild roles. ### Steps to Reproduce Call the function `create_role` on a `interactions.Guild` object, and attempt to create a new role, passing in `permissions=interactions.Permissions.NONE`. Then through the Discord UI, observe the permissions enabled for the role. Notice that the default permissions for guild roles are all enabled. ### Expected Results When calling `interactions.Guild.create_role` with `permissions=interactions.Permissions.NONE`, the role created should have all guild permissions disabled on it. ### Minimal Reproducible Code ```python import interactions as ipy bot = ipy.Client(intents=ipy.Intents.DEFAULT, debug_scope=<your_guild_id>) @ipy.slash_command() async def create_role(ctx: ipy.SlashContext): role = await ctx.guild.create_role(name="test role", permissions=ipy.Permissions.NONE) await ctx.send(f"role's new permissions: {role.permissions}") if __name__ == "__main__": bot.start("<token>") ``` ### Traceback _No response_ ### Checklist - [X] I have searched the open issues for duplicates. - [X] I have shown the entire traceback, if possible. - [X] I have removed my token from display, if visible. - [X] I have attempted to debug this myself, and I believe this issue is with the library ### Additional Information I think the bug is just an improper falsey check on the `permissions` argument.
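The reporter's diagnosis is right: a truthiness check drops zero-valued flags, because an `IntFlag` member with value 0 is falsy. A self-contained demonstration with a stand-in `Permissions` enum (the real one has many more members):

```python
from enum import IntFlag


class Permissions(IntFlag):
    NONE = 0
    KICK_MEMBERS = 1 << 1


payload = {}
permissions = Permissions.NONE

if permissions:  # buggy: NONE is falsy, so the field is never set
    payload["permissions"] = str(int(permissions))
assert "permissions" not in payload

if permissions is not None:  # fixed: an explicit zero survives
    payload["permissions"] = str(int(permissions))
assert payload["permissions"] == "0"
```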
2023-05-24T05:18:15
interactions-py/interactions.py
1,467
interactions-py__interactions.py-1467
[ "1465" ]
1d5a99fb3aef45fbf145736dd02c738e0e6f93d3
diff --git a/interactions/client/mixins/send.py b/interactions/client/mixins/send.py --- a/interactions/client/mixins/send.py +++ b/interactions/client/mixins/send.py @@ -111,5 +111,8 @@ async def send( if message_data: message = self.client.cache.place_message_data(message_data) if delete_after: - await message.delete(delay=delete_after) + if kwargs.get("pass_self_into_delete"): # hack to pass in interaction/hybrid context + await message.delete(delay=delete_after, context=self) + else: + await message.delete(delay=delete_after) return message diff --git a/interactions/ext/hybrid_commands/context.py b/interactions/ext/hybrid_commands/context.py --- a/interactions/ext/hybrid_commands/context.py +++ b/interactions/ext/hybrid_commands/context.py @@ -311,6 +311,7 @@ async def send( tts=tts, flags=flags, delete_after=delete_after, + pass_self_into_delete=bool(self._slash_ctx), **kwargs, ) diff --git a/interactions/models/internal/context.py b/interactions/models/internal/context.py --- a/interactions/models/internal/context.py +++ b/interactions/models/internal/context.py @@ -523,6 +523,7 @@ async def send( tts=tts, flags=flags, delete_after=delete_after, + pass_self_into_delete=True, **kwargs, )
[BUG] Fix ctx.send("", delete_after) for ephemeral message ### Library Version 5.7.0 ### Describe the Bug ``` ValueError("Cannot delete ephemeral message without interaction context parameter") ``` ### Steps to Reproduce ctx is SlashContext ```py await ctx.send("some", delete_after=2) ``` ### Expected Results Delete the interaction message after 2 seconds. ### Minimal Reproducible Code ```python await ctx.send("some", delete_after=2) ``` ### Traceback ``` ValueError("Cannot delete ephemeral message without interaction context parameter") ``` ### Checklist - [X] I have searched the open issues for duplicates. - [X] I have shown the entire traceback, if possible. - [X] I have removed my token from display, if visible. - [X] I have attempted to debug this myself, and I believe this issue is with the library ### Additional Information I think it can be fixed if a context is passed. Perhaps add a way to pass context or something.
@BigChungus888 are you using `ctx.defer(ephemeral=True)` before this code? Yeah, I do. But deleting an ephemeral message should not be a problem, right? `ctx.delete()` is able to do that.
2023-07-05T04:11:59
interactions-py/interactions.py
1,469
interactions-py__interactions.py-1469
[ "1464" ]
1d5a99fb3aef45fbf145736dd02c738e0e6f93d3
diff --git a/interactions/models/discord/guild.py b/interactions/models/discord/guild.py --- a/interactions/models/discord/guild.py +++ b/interactions/models/discord/guild.py @@ -1476,7 +1476,7 @@ async def create_role( payload["permissions"] = str(int(permissions)) if colour := colour or color: - payload["color"] = colour.value + payload["color"] = colour if isinstance(colour, int) else colour.value if hoist: payload["hoist"] = True
[BUG] color in create_role() doesn't allow int ### Library Version 5.7.0 ### Describe the Bug The colour and color options in guild.create_role() include int in their typehint. By passing an integer you will get an AttributeError: "'int' object has no attribute 'value'". For code reference: interactions.py > interactions > models > discord > guild.py, lines 1445, 1446, 1478 ### Steps to Reproduce ```python color = int("E67E22", 16) new_role = await guild.create_role(name="rolename", color=color) ``` ### Expected Results Creating a role with the given color ### Minimal Reproducible Code _No response_ ### Traceback ``` AttributeError: 'int' object has no attribute 'value' File "interactions/client/client.py", line 1890, in __dispatch_interaction response = await callback File "interactions/models/internal/command.py", line 132, in __call__ await self.call_callback(self.callback, context) File "interactions/models/internal/command.py", line 198, in call_callback await self.call_with_binding(callback, context, **context.kwargs) # type: ignore File "interactions/models/internal/callback.py", line 43, in call_with_binding return await callback(*args, **kwargs) File "/home/Discord/***.py", line 418, in create_button new_role = await guild.create_role(name=name, color=color) File "interactions/models/discord/guild.py", line 1478, in create_role payload["color"] = colour.value ``` ### Checklist - [X] I have searched the open issues for duplicates. - [X] I have shown the entire traceback, if possible. - [X] I have removed my token from display, if visible. - [X] I have attempted to debug this myself, and I believe this issue is with the library ### Additional Information I'm not sure if "RGB integer" means something else that would work. By creating a Color object like `interactions.Color(int("E67E22", 16))` and passing it as color, it works as expected. If this is the way to go, the typehint is wrong, I guess
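A sketch of the one-line normalization the patch applies: accept either a plain RGB integer or a Color-like object exposing `.value`, and hand the payload an int either way. `Color` here is a minimal stand-in for the library's class:

```python
class Color:
    """Stand-in: the real class wraps an RGB integer in .value."""

    def __init__(self, value: int) -> None:
        self.value = value


def to_color_int(colour) -> int:
    # plain ints pass through; wrapper objects are unwrapped
    return colour if isinstance(colour, int) else colour.value


assert to_color_int(int("E67E22", 16)) == 0xE67E22
assert to_color_int(Color(0xE67E22)) == 0xE67E22
```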
2023-07-05T04:22:09
interactions-py/interactions.py
1,480
interactions-py__interactions.py-1480
[ "1466" ]
61f34d8b9fbef3936f63eb4efb1e20b6ef3dc772
diff --git a/interactions/models/discord/message.py b/interactions/models/discord/message.py --- a/interactions/models/discord/message.py +++ b/interactions/models/discord/message.py @@ -24,7 +24,7 @@ from interactions.client.utils.attr_converters import timestamp_converter from interactions.client.utils.serializer import dict_filter_none from interactions.client.utils.text_utils import mentions -from interactions.models.discord.channel import BaseChannel +from interactions.models.discord.channel import BaseChannel, GuildChannel from interactions.models.discord.emoji import process_emoji_req_format from interactions.models.discord.file import UPLOADABLE_TYPE from interactions.models.discord.embed import process_embeds @@ -109,7 +109,7 @@ def _process_dict(cls, data: Dict[str, Any], _) -> Dict[str, Any]: @attrs.define(eq=False, order=False, hash=False, kw_only=True) class ChannelMention(DiscordObject): - guild_id: "Snowflake_Type" = attrs.field( + guild_id: "Snowflake_Type | None" = attrs.field( repr=False, ) """id of the guild containing the channel""" @@ -466,7 +466,7 @@ def _process_dict(cls, data: dict, client: "Client") -> dict: # noqa: C901 if channel_id not in found_ids and (channel := client.get_channel(channel_id)): channel_data = { "id": channel.id, - "guild_id": channel._guild_id, + "guild_id": channel._guild_id if isinstance(channel, GuildChannel) else None, "type": channel.type, "name": channel.name, }
[BUG] Unsupported channel type ### Library Version Release 5.7.0 ### Describe the Bug Got this error when starting the discord bot; it's been reported in the discord server [here](https://discord.com/channels/789032594456576001/1124813096742039582) and [here](https://discord.com/channels/789032594456576001/1124719972560666656), so I might as well put it here like it says. ``` Unsupported channel type for {'version': 1688246476947, 'type': 16, 'topic': None, 'template': '', 'rate_limit_per_user': 0, 'position': 0, 'permission_overwrites': [{'type': 0, 'id': '874709931612315678', 'deny': '0', 'allow': '377957182528'}, {'type': 0, 'id': '1106563044248657971', 'deny': '128641584856913', 'allow': '0'}, {'type': 0, 'id': '875786221018841181', 'deny': '17180008448', 'allow': '515396455489'}, {'type': 0, 'id': '1091834304776130581', 'deny': '0', 'allow': '68609'}, {'type': 0, 'id': '905542278192439346', 'deny': '377959221312', 'allow': '66560'}, {'type': 0, 'id': '864557936068395018', 'deny': '533381774416', 'allow': '2150629377'}], 'parent_id': '890301488260341800', 'nsfw': False, 'name': 'new-media', 'last_message_id': None, 'id': '1124811992167874763', 'hashes': {'version': 1, 'roles': {'hash': 'Cw77Dg'}, 'metadata': {'hash': '/8VcZg'}, 'channels': {'hash': 'E6X/kQ'}}, 'guild_id': '864557936068395018', 'guild_hashes': {'version': 1, 'roles': {'hash': 'Cw77Dg'}, 'metadata': {'hash': '/8VcZg'}, 'channels': {'hash': 'E6X/kQ'}}, 'flags': 0, 'default_sort_order': None, 'default_reaction_emoji': None, 'default_forum_layout': 2, 'available_tags': []} (16). ``` ### Steps to Reproduce * Start your discord bot up, and look at the console. ### Expected Results For this not to pop up, and for this channel type to be implemented. ### Minimal Reproducible Code ```python * Start your discord bot up, and look at the console. ``` ### Traceback ``` Unsupported channel type for {'version': 1688246476947, 'type': 16, 'topic': None, 'template': '', 'rate_limit_per_user': 0, 'position': 0, 'permission_overwrites': [{'type': 0, 'id': '874709931612315678', 'deny': '0', 'allow': '377957182528'}, {'type': 0, 'id': '1106563044248657971', 'deny': '128641584856913', 'allow': '0'}, {'type': 0, 'id': '875786221018841181', 'deny': '17180008448', 'allow': '515396455489'}, {'type': 0, 'id': '1091834304776130581', 'deny': '0', 'allow': '68609'}, {'type': 0, 'id': '905542278192439346', 'deny': '377959221312', 'allow': '66560'}, {'type': 0, 'id': '864557936068395018', 'deny': '533381774416', 'allow': '2150629377'}], 'parent_id': '890301488260341800', 'nsfw': False, 'name': 'new-media', 'last_message_id': None, 'id': '1124811992167874763', 'hashes': {'version': 1, 'roles': {'hash': 'Cw77Dg'}, 'metadata': {'hash': '/8VcZg'}, 'channels': {'hash': 'E6X/kQ'}}, 'guild_id': '864557936068395018', 'guild_hashes': {'version': 1, 'roles': {'hash': 'Cw77Dg'}, 'metadata': {'hash': '/8VcZg'}, 'channels': {'hash': 'E6X/kQ'}}, 'flags': 0, 'default_sort_order': None, 'default_reaction_emoji': None, 'default_forum_layout': 2, 'available_tags': []} (16). ``` ### Checklist - [X] I have searched the open issues for duplicates. - [X] I have shown the entire traceback, if possible. - [X] I have removed my token from display, if visible. - [X] I have attempted to debug this myself, and I believe this issue is with the library ### Additional Information _No response_
#1458 will close this. Do you want me to close this issue? Also, what's the ETA of this PR? No harm in keeping the issue, but no idea when the PR will be finished. We're a bit slow right now for various reasons, but I'd imagine it'll be finished sooner rather than later. Alright, thank you. When I mention a media channel in any channel, it comes up with ``` Task exception was never retrieved future: <Task finished name='Task-253' coro=<MessageEvents._on_raw_message_create() done, defined at /config/.local/lib/python3.10/site-packages/interactions/api/events/processors/message_events.py:15> exception=AttributeError("'BaseChannel' object has no attribute '_guild_id'")> Traceback (most recent call last): File "/config/.local/lib/python3.10/site-packages/interactions/api/events/processors/message_events.py", line 24, in _on_raw_message_create msg = self.cache.place_message_data(event.data) File "/config/.local/lib/python3.10/site-packages/interactions/client/smart_cache.py", line 421, in place_message_data message = Message.from_dict(data, self._client) File "/config/.local/lib/python3.10/site-packages/interactions/models/discord/base.py", line 36, in from_dict data = cls._process_dict(data, client) File "/config/.local/lib/python3.10/site-packages/interactions/models/discord/message.py", line 468, in _process_dict "guild_id": channel._guild_id, AttributeError: 'BaseChannel' object has no attribute '_guild_id' Task exception was never retrieved future: <Task finished name='Task-258' coro=<ReactionEvents._on_raw_message_reaction_add() done, defined at /config/.local/lib/python3.10/site-packages/interactions/api/events/processors/reaction_events.py:66> exception=AttributeError("'BaseChannel' object has no attribute '_guild_id'")> Traceback (most recent call last): File "/config/.local/lib/python3.10/site-packages/interactions/api/events/processors/reaction_events.py", line 68, in _on_raw_message_reaction_add await self._handle_message_reaction_change(event, add=True) File "/config/.local/lib/python3.10/site-packages/interactions/api/events/processors/reaction_events.py", line 56, in _handle_message_reaction_change message = await self.cache.fetch_message(event.data.get("channel_id"), event.data.get("message_id")) File "/config/.local/lib/python3.10/site-packages/interactions/client/smart_cache.py", line 384, in fetch_message message = self.place_message_data(data) File "/config/.local/lib/python3.10/site-packages/interactions/client/smart_cache.py", line 421, in place_message_data message = Message.from_dict(data, self._client) File "/config/.local/lib/python3.10/site-packages/interactions/models/discord/base.py", line 36, in from_dict data = cls._process_dict(data, client) File "/config/.local/lib/python3.10/site-packages/interactions/models/discord/message.py", line 468, in _process_dict "guild_id": channel._guild_id, AttributeError: 'BaseChannel' object has no attribute '_guild_id' ``` this error. I do believe it will be solved in #1458, but I'm not sure. That's a bug with base channels. Nothing to do with your issue. Re the media channel PR: it's functional now; it's only a draft because media channels were in early beta when I made it. They're more widespread now - assuming unpacking goes as planned, I'll be able to retest and merge tomorrow > That's a bug with base channels. Nothing to do with your issue Do you want me to re-open an issue in Discord or Git Hub for this issue?
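The follow-up `_guild_id` crash comes down to touching a guild-only attribute on a `BaseChannel`, which is exactly what the patch guards against with an isinstance check before falling back to None. A minimal sketch (class bodies are illustrative):

```python
from typing import Optional


class BaseChannel:
    def __init__(self, channel_id: int) -> None:
        self.id = channel_id


class GuildChannel(BaseChannel):
    def __init__(self, channel_id: int, guild_id: int) -> None:
        super().__init__(channel_id)
        self._guild_id = guild_id  # only guild-bound channels carry this


def guild_id_of(channel: BaseChannel) -> Optional[int]:
    # guard the attribute access instead of assuming every channel has it
    return channel._guild_id if isinstance(channel, GuildChannel) else None


assert guild_id_of(GuildChannel(1, 2)) == 2
assert guild_id_of(BaseChannel(3)) is None
```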
2023-07-13T19:38:59
interactions-py/interactions.py
1,500
interactions-py__interactions.py-1500
[ "1498" ]
b33ab89c8c79f4447c4a5d5a89f06912e2c92cae
diff --git a/interactions/models/discord/invite.py b/interactions/models/discord/invite.py --- a/interactions/models/discord/invite.py +++ b/interactions/models/discord/invite.py @@ -15,7 +15,7 @@ if TYPE_CHECKING: from interactions.client import Client - from interactions.models import TYPE_GUILD_CHANNEL + from interactions.models import TYPE_GUILD_CHANNEL, Guild from interactions.models.discord.user import User from interactions.models.discord.snowflake import Snowflake_Type @@ -25,54 +25,60 @@ @attrs.define(eq=False, order=False, hash=False, kw_only=True) class Invite(ClientObject): code: str = attrs.field(repr=True) - """the invite code (unique ID)""" + """The invite code (unique ID)""" # metadata uses: int = attrs.field(default=0, repr=True) - """the guild this invite is for""" + """How many times this invite has been used""" max_uses: int = attrs.field(repr=False, default=0) - """max number of times this invite can be used""" + """Max number of times this invite can be used""" max_age: int = attrs.field(repr=False, default=0) - """duration (in seconds) after which the invite expires""" + """Duration (in seconds) after which the invite expires""" created_at: Timestamp = attrs.field(default=MISSING, converter=optional_c(timestamp_converter), repr=True) - """when this invite was created""" + """When this invite was created""" temporary: bool = attrs.field(default=False, repr=True) - """whether this invite only grants temporary membership""" + """Whether this invite only grants temporary membership""" # target data target_type: Optional[Union[InviteTargetType, int]] = attrs.field( default=None, converter=optional_c(InviteTargetType), repr=True ) - """the type of target for this voice channel invite""" + """The type of target for this voice channel invite""" approximate_presence_count: Optional[int] = attrs.field(repr=False, default=MISSING) - """approximate count of online members, returned from the `GET /invites/<code>` endpoint when `with_counts` is `True`""" + """Approximate count of online members, returned when fetching invites with `with_counts` set as `True`""" approximate_member_count: Optional[int] = attrs.field(repr=False, default=MISSING) - """approximate count of total members, returned from the `GET /invites/<code>` endpoint when `with_counts` is `True`""" + """Approximate count of total members, returned when fetching invites with `with_counts` set as `True`""" scheduled_event: Optional["Snowflake_Type"] = attrs.field( default=None, converter=optional_c(to_snowflake), repr=True ) - """guild scheduled event data, only included if `guild_scheduled_event_id` contains a valid guild scheduled event id""" + """Guild scheduled event data, only included if `guild_scheduled_event_id` contains a valid guild scheduled event id""" expires_at: Optional[Timestamp] = attrs.field(default=None, converter=optional_c(timestamp_converter), repr=True) - """the expiration date of this invite, returned from the `GET /invites/<code>` endpoint when `with_expiration` is `True`""" + """The expiration date of this invite, returned when fetching invites with `with_expiration` set as `True`""" stage_instance: Optional[StageInstance] = attrs.field(repr=False, default=None) - """stage instance data if there is a public Stage instance in the Stage channel this invite is for (deprecated)""" + """Stage instance data if there is a public Stage instance in the Stage channel this invite is for (deprecated)""" target_application: Optional[dict] = attrs.field(repr=False, default=None) - """the embedded 
application to open for this voice channel embedded application invite""" + """The embedded application to open for this voice channel embedded application invite""" guild_preview: Optional[GuildPreview] = attrs.field(repr=False, default=MISSING) - """the guild this invite is for""" + """The guild this invite is for - not given in invite events""" # internal for props _channel_id: "Snowflake_Type" = attrs.field(converter=to_snowflake, repr=True) + _guild_id: Optional["Snowflake_Type"] = attrs.field(default=None, converter=optional_c(to_snowflake), repr=True) _inviter_id: Optional["Snowflake_Type"] = attrs.field(default=None, converter=optional_c(to_snowflake), repr=True) _target_user_id: Optional["Snowflake_Type"] = attrs.field( repr=False, default=None, converter=optional_c(to_snowflake) ) @property - def channel(self) -> "TYPE_GUILD_CHANNEL": - """The channel the invite is for.""" + def channel(self) -> Optional["TYPE_GUILD_CHANNEL"]: + """The cached channel the invite is for.""" return self._client.cache.get_channel(self._channel_id) + @property + def guild(self) -> Optional["Guild"]: + """The cached guild the invite is.""" + return self._client.cache.get_guild(self._guild_id) if self._guild_id else None + @property def inviter(self) -> Optional["User"]: """The user that created the invite or None.""" @@ -95,16 +101,23 @@ def _process_dict(cls, data: Dict[str, Any], client: "Client") -> Dict[str, Any] data["scheduled_event"] = data["target_event_id"] if channel := data.pop("channel", None): - # invite metadata does not contain enough info to create a channel object + client.cache.place_channel_data(channel) data["channel_id"] = channel["id"] if guild := data.pop("guild", None): data["guild_preview"] = GuildPreview.from_dict(guild, client) + data["guild_id"] = guild["id"] + elif guild_id := data.pop("guild_id", None): + data["guild_id"] = guild_id if inviter := data.pop("inviter", None): inviter = client.cache.place_user_data(inviter) data["inviter_id"] = inviter.id + if target_user := data.pop("target_user", None): + target_user = client.cache.place_user_data(target_user) + data["target_user_id"] = target_user.id + return data def __str__(self) -> str:
[BUG] Invites have no guild parameter for create/delete event ### Library Version 5.8.0 ### Describe the Bug Invites have no guild_id parameter or guild object ### Steps to Reproduce Try to find out which server the event comes from when creating/deleting an invite ### Expected Results That the guild parameter exists if the invite is from a server ### Minimal Reproducible Code _No response_ ### Traceback _No response_ ### Checklist - [X] I have searched the open issues for duplicates. - [X] I have shown the entire traceback, if possible. - [X] I have removed my token from display, if visible. - [X] I have attempted to debug this myself, and I believe this issue is with the library ### Additional Information _No response_
It's under `guild_preview` as Discord only gives partial guild data. The `guild_preview` is missing in the invite create/delete events... How do I get access to the guild ID shown in the screenshot below? ![image](https://github.com/interactions-py/interactions.py/assets/51890392/08910bc3-fa80-46ad-82ac-d5d338c65107) Oh lord, Discord really gives a whole different payload for invite creations and deletions. I guess it's not the first time. Reopening.
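A minimal sketch of what the patch above enables, assuming interactions.py 5.x naming (`InviteCreate` event, `Intents.GUILD_INVITES`) and that the guild is in the cache:

```python
from interactions import Client, Intents, listen
from interactions.api.events import InviteCreate

bot = Client(intents=Intents.DEFAULT | Intents.GUILD_INVITES)

@listen()
async def on_invite_create(event: InviteCreate) -> None:
    # invite.guild resolves through the new _guild_id field; None if uncached
    guild = event.invite.guild
    name = guild.name if guild else "an uncached guild"
    print(f"Invite {event.invite.code} created in {name}")
```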
2023-07-21T21:43:42
interactions-py/interactions.py
1555
interactions-py__interactions.py-1555
[ "1554" ]
6592f8268d8bd174ddb81aacc75b169db32267ff
diff --git a/interactions/models/internal/application_commands.py b/interactions/models/internal/application_commands.py --- a/interactions/models/internal/application_commands.py +++ b/interactions/models/internal/application_commands.py @@ -609,8 +609,8 @@ def _add_option_from_anno_method(self, name: str, option: SlashCommandOption) -> if not self.options: self.options = [] - if option.name is None: - option.name = name + if option.name.default is None: + option.name = LocalisedName.converter(name) else: option.argument_name = name
[BUG] slash_user_option (and probably others) fail to register if name is not explicitly provided ### Library Version 5.10.0 ### Describe the Bug slash_user_option's docstring says: ```py """ Annotates an argument as a user type slash command option. Args: description: The description of your option required: Is this option required? autocomplete: Use autocomplete for this option name: The name of the option. Defaults to the name of the argument """ ``` > Defaults to the name of the argument However, using an argument like: ```py async def my_command( self, ctx: SlashContext, user: slash_user_option("Some user", required=True) ): ``` results in a gateway error and the command not being registered: ``` bot.internal: INFO Overwriting 0 with 1 application commands bot.internal: ERROR PUT::https://discord.com/api/v10/applications/BOT_ID/commands: 400 bot.internal: ERROR Error in command `do_something`: options->None->name APPLICATION_COMMAND_INVALID_NAME: Command name is invalid ``` This also happens outside of extensions (see minimal reproducible code). Not sure if this is a code bug or a documentation bug, but it feels like a code bug. ### Steps to Reproduce See minimal reproducible code ### Expected Results A slash command `do_something` is created with a single argument named `user` ### Minimal Reproducible Code ```python import logging from interactions import Client, Intents, Permissions, SlashContext, slash_command, slash_user_option logger = logging.getLogger("bot.internal") console = logging.StreamHandler() console.setLevel(logging.INFO) formatter = logging.Formatter("%(name)-12s: %(levelname)-8s %(message)s") console.setFormatter(formatter) global_logger = logging.getLogger("bot") global_logger.setLevel(logging.INFO) global_logger.addHandler(console) bot = Client(intents=Intents.DEFAULT, logger=logger) @slash_command( description="Do something.", default_member_permissions=Permissions.ADMINISTRATOR ) async def do_something( ctx: SlashContext, user: slash_user_option("The user to do something with.", required=True) ): # note there's no name="user" provided! await ctx.respond(f"Hello {user.mention}!") bot.start("token") ``` ### Traceback None, unless the gateway error counts. With logging enabled: ``` bot.internal: INFO Overwriting 0 with 1 application commands bot.internal: ERROR PUT::https://discord.com/api/v10/applications/BOT_ID/commands: 400 bot.internal: ERROR Error in command `do_something`: options->None->name APPLICATION_COMMAND_INVALID_NAME: Command name is invalid ``` ### Checklist - [X] I have searched the open issues for duplicates. - [X] I have shown the entire traceback, if possible. - [X] I have removed my token from display, if visible. - [X] I have attempted to debug this myself, and I believe this issue is with the library ### Additional Information _No response_
Huh, wack. I swore this was working when I first added the feature, but evidently it's not? I'm not sure how that's possible looking at the code that handles it, but regardless, I'll take a look at this later today. I wonder if there's something in my venv that breaks this handling, but I don't think that would be possible at all. Will try on a clean venv later
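For reference, a sketch of the behavior the fix restores, reusing the reporter's own snippet; once `option.name.default` is checked, the option name falls back to the argument name:

```python
from interactions import SlashContext, slash_command, slash_user_option

@slash_command(name="do_something", description="Do something.")
async def do_something(
    ctx: SlashContext,
    user: slash_user_option("The user to do something with.", required=True),
):
    # With the patch, this registers as an option named "user"
    await ctx.send(f"Hello {user.mention}!")
```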
2023-09-22T15:25:14
interactions-py/interactions.py
1593
interactions-py__interactions.py-1593
[ "1552" ]
6566dda05a0bc22722330c1fdfc9de7731727f6b
diff --git a/interactions/client/client.py b/interactions/client/client.py --- a/interactions/client/client.py +++ b/interactions/client/client.py @@ -2012,6 +2012,7 @@ def load_extensions( self, *packages: str, recursive: bool = False, + **load_kwargs: Any, ) -> None: """ Load multiple extensions at once. @@ -2035,7 +2036,7 @@ def load_extensions( extensions = [f.replace(os.path.sep, ".").replace(".py", "") for f in glob.glob(pattern, recursive=True)] for ext in extensions: - self.load_extension(ext) + self.load_extension(ext, **load_kwargs) def unload_extension( self, name: str, package: str | None = None, force: bool = False, **unload_kwargs: Any
[FEAT] Add the ability to pass arguments to all packages in load_extensions method ### Problem Description The load_extensions method was recently added; however, it does not have the ability to pass arguments through to the setup function. ### Proposed Solution Change the recursive keyword into **kwargs and use kwargs.get(); remove it, then pass the rest of the kwargs through to the setup function. ### Alternatives Considered Change *packages: str into packages: tuple[str] and pass *args through to the setup function. ### Additional Information _No response_ ### Code of Conduct - [X] I agree to follow the contribution requirements.
I want to make sure I understand your request right: what exactly are you wanting a `**kwargs` signature to pass? We only use the `recursive` argument to handle pattern matching when we check your path/cwd for extension files. I don't think it's wise for us to remove it because it's important for parsing extension import resolutions, *especially* since we're handling numerous extensions. If you don't mind extending off of this example, it would help me: ```py def load_extensions( self, *packages: str, **kwargs ): for (...): ... extensions = [ f.replace(os.path.sep, ".").replace(".py", "") for f in glob.glob(pattern, recursive=kwargs.get("recursive", False)) ] ``` The only thing I can think of is replicating the existing logic we use for `.load_extension()`. To my knowledge, `setup()` should already allow for this [as seen here](https://interactions-py.github.io/interactions.py/Guides/20%20Extensions/#passing-arguments-to-extensions). `load_extension("filename")` supports arbitrary kwargs. `load_extensions("folder")` does not. This issue is asking for feature parity between the two.
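A sketch of the parity the patch above provides; the `exts` package name, the `database_url` argument, and the `Example` extension are illustrative only:

```python
from interactions import Client, Extension, Intents

bot = Client(intents=Intents.DEFAULT)
# `recursive` is consumed by load_extensions itself; any other keyword
# arguments are now forwarded to each extension's setup():
bot.load_extensions("exts", recursive=True, database_url="sqlite:///bot.db")

# exts/example.py (hypothetical extension module)
class Example(Extension):
    def __init__(self, bot, database_url=None):
        self.database_url = database_url

def setup(bot, **kwargs):
    Example(bot, **kwargs)
```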
2023-12-10T03:07:29
interactions-py/interactions.py
1611
interactions-py__interactions.py-1611
[ "1592" ]
9f2fda788812adbc42b499fb2c45fc7fd1fd96e0
diff --git a/interactions/models/discord/channel.py b/interactions/models/discord/channel.py --- a/interactions/models/discord/channel.py +++ b/interactions/models/discord/channel.py @@ -2389,7 +2389,7 @@ async def close_stage(self, reason: Absent[Optional[str]] = MISSING) -> None: @attrs.define(eq=False, order=False, hash=False, kw_only=True) -class GuildForum(GuildChannel): +class GuildForum(GuildChannel, InvitableMixin): available_tags: List[ThreadTag] = attrs.field(repr=False, factory=list) """A list of tags available to assign to threads""" default_reaction_emoji: Optional[DefaultReaction] = attrs.field(repr=False, default=None)
[FEAT] Add create_invite method to GuildForum ### Library Version 5.11.0 ### Describe the Bug `AttributeError: 'GuildForum' object has no attribute 'create_invite'` ![image](https://github.com/interactions-py/interactions.py/assets/51890392/6ab266eb-d073-4663-a856-770755bab2b4) ### Steps to Reproduce Try creating an invitation for a forum channel. ### Expected Results That you can create one. ### Minimal Reproducible Code _No response_ ### Traceback _No response_ ### Checklist - [X] I have searched the open issues for duplicates. - [X] I have shown the entire traceback, if possible. - [X] I have removed my token from display, if visible. - [X] I have attempted to debug this myself, and I believe this issue is with the library ### Additional Information _No response_
This is a feature request, I suggest submitting a PR (you or anyone else for that matter) for implementing this.
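With the one-line patch above, `GuildForum` picks up `create_invite` from `InvitableMixin`; a sketch of the resulting usage (argument values are illustrative):

```python
from interactions import GuildForum

async def invite_to_forum(forum: GuildForum) -> str:
    # Previously raised AttributeError; now inherited from InvitableMixin
    invite = await forum.create_invite(max_age=3600, max_uses=5, reason="event signup")
    return invite.code
```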
2024-02-21T05:40:34
interactions-py/interactions.py
1612
interactions-py__interactions.py-1612
[ "1591" ]
b41c47e8cdd6fc9b1d0c1400b3db8f217030db92
diff --git a/interactions/api/http/http_requests/channels.py b/interactions/api/http/http_requests/channels.py --- a/interactions/api/http/http_requests/channels.py +++ b/interactions/api/http/http_requests/channels.py @@ -585,6 +585,7 @@ async def create_tag( name: str, emoji_id: Optional["Snowflake_Type"] = None, emoji_name: Optional[str] = None, + moderated: bool = False, ) -> discord_typings.ChannelData: """ Create a new tag. @@ -594,6 +595,7 @@ async def create_tag( name: The name of the tag emoji_id: The ID of the emoji to use for the tag emoji_name: The name of the emoji to use for the tag + moderated: whether this tag can only be added to or removed from threads by a member with the MANAGE_THREADS permission !!! note Can either have an `emoji_id` or an `emoji_name`, but not both. @@ -603,6 +605,7 @@ async def create_tag( "name": name, "emoji_id": int(emoji_id) if emoji_id else None, "emoji_name": emoji_name or None, + "moderated": moderated, } payload = dict_filter_none(payload) diff --git a/interactions/models/discord/channel.py b/interactions/models/discord/channel.py --- a/interactions/models/discord/channel.py +++ b/interactions/models/discord/channel.py @@ -2581,13 +2581,16 @@ def predicate(tag: ThreadTag) -> Optional["ThreadTag"]: return next((tag for tag in self.available_tags if predicate(tag)), None) - async def create_tag(self, name: str, emoji: Union["models.PartialEmoji", dict, str, None] = None) -> "ThreadTag": + async def create_tag( + self, name: str, emoji: Union["models.PartialEmoji", dict, str, None] = None, moderated: bool = False + ) -> "ThreadTag": """ Create a tag for this forum. Args: name: The name of the tag emoji: The emoji to use for the tag + moderated: whether this tag can only be added to or removed from threads by a member with the MANAGE_THREADS permission !!! note If the emoji is a custom emoji, it must be from the same guild as the channel. @@ -2596,7 +2599,7 @@ async def create_tag(self, name: str, emoji: Union["models.PartialEmoji", dict, The created tag object. """ - payload = {"channel_id": self.id, "name": name} + payload = {"channel_id": self.id, "name": name, "moderated": moderated} if emoji: if isinstance(emoji, str):
[BUG] Moderated argument missing for tags ### Library Version 5.11.0 ### Describe the Bug While creating a new tag for a forum, you can set the moderated value for it. ![image](https://github.com/interactions-py/interactions.py/assets/51890392/af101b91-e144-4b91-967d-c8e71b5129b2) ![image](https://github.com/interactions-py/interactions.py/assets/51890392/131b8810-ba25-4b13-9f92-36a5695011a3) https://discord.com/developers/docs/resources/channel#forum-tag-object-forum-tag-structure ### Steps to Reproduce Try to create a mod-only tag for a forum channel. ### Expected Results That you can make the tag mod-only ### Minimal Reproducible Code _No response_ ### Traceback _No response_ ### Checklist - [X] I have searched the open issues for duplicates. - [X] I have shown the entire traceback, if possible. - [X] I have removed my token from display, if visible. - [X] I have attempted to debug this myself, and I believe this issue is with the library ### Additional Information _No response_
Is this only an issue for `.create_tag()`? This also appears to be a feature request since it's missing functionality.
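A sketch of creating a moderator-only tag once the `moderated` parameter exists; `forum` stands in for any `GuildForum` the bot can manage:

```python
from interactions import GuildForum

async def make_staff_tag(forum: GuildForum) -> None:
    # moderated=True means only members with MANAGE_THREADS can apply/remove it
    tag = await forum.create_tag("staff-only", emoji="🔒", moderated=True)
    print(tag.name)
```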
2024-02-21T05:47:24
interactions-py/interactions.py
1616
interactions-py__interactions.py-1616
[ "1614" ]
9f2fda788812adbc42b499fb2c45fc7fd1fd96e0
diff --git a/interactions/client/mixins/send.py b/interactions/client/mixins/send.py --- a/interactions/client/mixins/send.py +++ b/interactions/client/mixins/send.py @@ -47,6 +47,8 @@ async def send( silent: bool = False, flags: Optional[Union[int, "MessageFlags"]] = None, delete_after: Optional[float] = None, + nonce: Optional[str | int] = None, + enforce_nonce: bool = False, **kwargs: Any, ) -> "Message": """ @@ -67,6 +69,10 @@ async def send( silent: Should this message be sent without triggering a notification. flags: Message flags to apply. delete_after: Delete message after this many seconds. + nonce: Used to verify a message was sent. Can be up to 25 characters. + enforce_nonce: If enabled and nonce is present, it will be checked for uniqueness in the past few minutes. \ + If another message was created by the same author with the same nonce, that message will be returned \ + and no new message will be created. Returns: New message object that was sent. @@ -95,6 +101,9 @@ async def send( "Attachments are not files. Attachments only contain metadata about the file, not the file itself - to send an attachment, you need to download it first. Check Attachment.url" ) + if enforce_nonce and not nonce: + raise ValueError("You must provide a nonce to use enforce_nonce.") + message_payload = models.discord.message.process_message_payload( content=content, embeds=embeds or embed, @@ -104,6 +113,8 @@ async def send( reply_to=reply_to, tts=tts, flags=flags, + nonce=nonce, + enforce_nonce=enforce_nonce, **kwargs, ) diff --git a/interactions/models/discord/message.py b/interactions/models/discord/message.py --- a/interactions/models/discord/message.py +++ b/interactions/models/discord/message.py @@ -926,6 +926,8 @@ def process_message_payload( attachments: Optional[List[Union[Attachment, dict]]] = None, tts: bool = False, flags: Optional[Union[int, MessageFlags]] = None, + nonce: Optional[str | int] = None, + enforce_nonce: bool = False, **kwargs, ) -> dict: """ @@ -941,6 +943,10 @@ def process_message_payload( attachments: The attachments to keep, only used when editing message. tts: Should this message use Text To Speech. flags: Message flags to apply. + nonce: Used to verify a message was sent. + enforce_nonce: If enabled and nonce is present, it will be checked for uniqueness in the past few minutes. \ + If another message was created by the same author with the same nonce, that message will be returned \ + and no new message will be created. Returns: Dictionary @@ -969,6 +975,8 @@ def process_message_payload( "attachments": attachments, "tts": tts, "flags": flags, + "nonce": nonce, + "enforce_nonce": enforce_nonce, **kwargs, } )
[FEAT] Add in `nonce` and `enforce_nonce` to `SendMixin` ### Problem Description `nonce` has always been a thing when sending messages - it's a weird field that's rarely used. However, with [`enforce_nonce`](https://github.com/discord/discord-api-docs/commit/d8effe15d56a12eec0bb977a5bf4487b9ebffcad) being added, it suddenly has a lot more uses - at the very least, it can be used as an easy way of making sure duplicate messages don't get sent. However, `SendMixin` does not have native support for either `nonce` or `enforce_nonce`. While it *can* still be done thanks to `kwargs`, it would be nice if it were more properly supported. ### Proposed Solution Add in `nonce` and `enforce_nonce` to `SendMixin`, with a check to make sure `nonce` was given if `enforce_nonce` is enabled. ### Alternatives Considered _No response_ ### Additional Information _No response_ ### Code of Conduct - [X] I agree to follow the contribution requirements.
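A sketch of the deduplication this enables, assuming the parameters land as in the patch above; `channel` is any object using `SendMixin` and the nonce value is arbitrary:

```python
from interactions import GuildText

async def send_once(channel: GuildText) -> None:
    nonce = "1234567890"  # Discord allows up to 25 characters
    first = await channel.send("hello", nonce=nonce, enforce_nonce=True)
    # A resend with the same nonce within a few minutes returns the original
    # message instead of creating a duplicate:
    second = await channel.send("hello", nonce=nonce, enforce_nonce=True)
    assert first.id == second.id
    # Per the patch, enforce_nonce without a nonce raises ValueError client-side.
```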
2024-02-21T18:25:37
interactions-py/interactions.py
1619
interactions-py__interactions.py-1619
[ "1517", "1517" ]
f2becee080cd2be027f13fd02b3f033a601c5321
diff --git a/interactions/client/client.py b/interactions/client/client.py --- a/interactions/client/client.py +++ b/interactions/client/client.py @@ -1353,6 +1353,17 @@ def add_modal_callback(self, command: ModalCommand) -> None: command: The command to add """ + # test for parameters that arent the ctx (or self) + if command.has_binding: + callback = functools.partial(command.callback, None, None) + else: + callback = functools.partial(command.callback, None) + + if not inspect.signature(callback).parameters: + # if there are none, notify the command to just pass the ctx and not kwargs + # TODO: just make modal callbacks not pass kwargs at all (breaking) + command._just_ctx = True + for listener in command.listeners: if isinstance(listener, re.Pattern): if listener in self._regex_component_callbacks.keys(): diff --git a/interactions/models/internal/application_commands.py b/interactions/models/internal/application_commands.py --- a/interactions/models/internal/application_commands.py +++ b/interactions/models/internal/application_commands.py @@ -845,7 +845,13 @@ class ComponentCommand(InteractionCommand): @attrs.define(eq=False, order=False, hash=False, kw_only=True) -class ModalCommand(ComponentCommand): ... +class ModalCommand(ComponentCommand): + _just_ctx: bool = attrs.field(repr=False, default=False) + + async def call_callback(self, callback: Callable, context: "BaseContext") -> None: + if self._just_ctx: + return await self.call_with_binding(callback, context) + return await super().call_callback(callback, context) def _unpack_helper(iterable: typing.Iterable[str]) -> list[str]:
[BUG] modal_callback ### Library Version 5.8.0 ### Describe the Bug When using the @modal_callback I seem to be getting an error related to the ID or Custom ID of an InputText field. ### Minimal Reproducible Code ```python from interactions import Client, InputText, Modal, SlashContext, TextStyles, modal_callback, slash_command bot = Client() @slash_command(name="trigger_modal") async def trigger_modal(ctx: SlashContext): modal = Modal( InputText( style=TextStyles.PARAGRAPH, label="URL (glitchii or discohook)", required=True, min_length=5, ), title="Embed Builder", custom_id="embed_builder_modal", ) await ctx.send_modal(modal) @modal_callback("embed_builder_modal") async def embed_builder_modal(ctx: SlashContext): await ctx.send("this is placeholder text") bot.start("token") ``` ### Traceback ```[2023-08-07 05:07:40] [ERROR ] Ignoring exception in Modal Callback for custom_id embed_builder_modal: Traceback (most recent call last): File "/home/aiden/tfc-bot/.venv/lib/python3.11/site-packages/interactions/client/client.py", line 1890, in __dispatch_interaction response = await callback ^^^^^^^^^^^^^^ File "/home/aiden/tfc-bot/.venv/lib/python3.11/site-packages/interactions/models/internal/command.py", line 132, in __call__ await self.call_callback(self.callback, context) File "/home/aiden/tfc-bot/.venv/lib/python3.11/site-packages/interactions/models/internal/command.py", line 198, in call_callback await self.call_with_binding(callback, context, **context.kwargs) # type: ignore ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/home/aiden/tfc-bot/.venv/lib/python3.11/site-packages/interactions/models/internal/callback.py", line 43, in call_with_binding return await callback(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^ TypeError: EmbedBuilder.embed_builder_modal() got an unexpected keyword argument 'bb430638-9361-464d-b570-f4304e2f25d7' ``` ### Checklist - [X] I have searched the open issues for duplicates. - [X] I have shown the entire traceback, if possible. - [X] I have removed my token from display, if visible. - [X] I have attempted to debug this myself, and I believe this issue is with the library ### Additional Information _No response_
This happens because `BaseContext` passes along `**ctx.kwargs`, which for modals is basically an alias to `ctx.responses`, to the raw callback itself. Unfortunately, it's not possible to fix this without a breaking change. I think I might have found a way of fixing this without any breaking changes by detecting how many arguments the callback has and adjusting behavior from there. If it works, I'll PR it in a bit.
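A sketch of both callback shapes the patch supports; the second modal's identifiers (`link_modal`, `url`) are hypothetical:

```python
from interactions import ModalContext, modal_callback

@modal_callback("embed_builder_modal")
async def just_ctx(ctx: ModalContext):
    # No parameters beyond ctx: the dispatcher now passes only the context,
    # so a randomly generated InputText custom_id no longer crashes.
    await ctx.send("this is placeholder text")

@modal_callback("link_modal")
async def with_fields(ctx: ModalContext, url: str):
    # Named parameters are still filled from matching InputText custom_ids.
    await ctx.send(url)
```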
2024-02-21T23:44:48
interactions-py/interactions.py
1624
interactions-py__interactions.py-1624
[ "1623" ]
6c12971d36960feff3a8d5207cd73996e351cb4c
diff --git a/interactions/models/internal/context.py b/interactions/models/internal/context.py --- a/interactions/models/internal/context.py +++ b/interactions/models/internal/context.py @@ -35,6 +35,7 @@ ) from interactions.models.discord.snowflake import Snowflake, Snowflake_Type, to_snowflake, to_optional_snowflake from interactions.models.discord.embed import Embed +from interactions.models.discord.timestamp import Timestamp from interactions.models.internal.application_commands import ( OptionType, CallbackType, @@ -329,16 +330,16 @@ def command(self) -> InteractionCommand: return self.client._interaction_lookup[self._command_name] @property - def expires_at(self) -> typing.Optional[datetime.datetime]: + def expires_at(self) -> Timestamp: """The time at which the interaction expires.""" - if self.responded: + if self.responded or self.deferred: return self.id.created_at + datetime.timedelta(minutes=15) return self.id.created_at + datetime.timedelta(seconds=3) @property def expired(self) -> bool: """Whether the interaction has expired.""" - return datetime.datetime.utcnow() > self.expires_at + return Timestamp.utcnow() > self.expires_at @property def invoke_target(self) -> str:
[BUG] interactions.SlashContext.expired: TypeError: can't compare offset-naive and offset-aware datetimes ### Library Version 5.11.0 ### Describe the Bug When I access the `interactions.SlashContext.expired` property on a `ctx: interactions.SlashContext` object, it raises an error ``` Lib\site-packages\interactions\models\internal\context.py", line 341, in expired return datetime.datetime.utcnow() > self.expires_at ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ TypeError: can't compare offset-naive and offset-aware datetimes ``` ### Steps to Reproduce Simply access this property on a ctx object ### Expected Results It should return a bool: True if it's expired and False if it's not ### Minimal Reproducible Code ```python import interactions @interactions.slash_command(name="ping",description=f"Test if the bot is responding") async def ping_test(ctx: interactions.SlashContext): if ctx.expired: print('uh oh ive expired!') bot = interactions.Client() bot.start("token_here") ``` ### Traceback Ignoring exception in cmd `/ping`: Traceback (most recent call last): File "C:\Users\Gamer\AppData\Local\Programs\Python\Python311\Lib\site-packages\interactions\client\client.py", line 1900, in __dispatch_interaction response = await callback ^^^^^^^^^^^^^^ File "C:\Users\Gamer\AppData\Local\Programs\Python\Python311\Lib\site-packages\interactions\client\client.py", line 1771, in _run_slash_command return await command(ctx, **ctx.kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "C:\Users\Gamer\AppData\Local\Programs\Python\Python311\Lib\site-packages\interactions\models\internal\command.py", line 132, in __call__ await self.call_callback(self.callback, context) File "C:\Users\Gamer\AppData\Local\Programs\Python\Python311\Lib\site-packages\interactions\models\internal\application_commands.py", line 802, in call_callback return await self.call_with_binding(callback, ctx) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "C:\Users\Gamer\AppData\Local\Programs\Python\Python311\Lib\site-packages\interactions\models\internal\callback.py", line 43, in call_with_binding return await callback(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "C:\coding_projects\Python_projects\eZwizard3-bot\main.py", line 5, in ping_test if ctx.expired: ^^^^^^^^^^^ File "C:\Users\Gamer\AppData\Local\Programs\Python\Python311\Lib\site-packages\interactions\models\internal\context.py", line 341, in expired return datetime.datetime.utcnow() > self.expires_at ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ TypeError: can't compare offset-naive and offset-aware datetimes ### Checklist - [X] I have searched the open issues for duplicates. - [X] I have shown the entire traceback, if possible. - [X] I have removed my token from display, if visible. - [X] I have attempted to debug this myself, and I believe this issue is with the library ### Additional Information On line 341 in interactions\models\internal\context.py, you could try replacing the line with ```py return datetime.datetime.utcnow().timestamp() > self.expires_at.timestamp() ``` since I don't think the timezone matters here (EDIT: this does not work; it always returns True after 3 seconds)
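A minimal, library-independent reproduction of the underlying error: snowflake-derived timestamps carry UTC tzinfo, while `datetime.utcnow()` is naive.

```python
import datetime

aware = datetime.datetime.now(datetime.timezone.utc)  # like Snowflake.created_at
naive = datetime.datetime.utcnow()                    # what the old check used
try:
    print(naive > aware)
except TypeError as exc:
    print(exc)  # can't compare offset-naive and offset-aware datetimes
# The patch compares against the library's timezone-aware Timestamp.utcnow() instead.
```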
2024-03-01T23:19:05
interactions-py/interactions.py
1638
interactions-py__interactions.py-1638
[ "1635" ]
05cbcc8d07d27c27eba19bb2b7b0d2dae202b85e
diff --git a/interactions/models/discord/user.py b/interactions/models/discord/user.py --- a/interactions/models/discord/user.py +++ b/interactions/models/discord/user.py @@ -239,12 +239,18 @@ def guilds(self) -> List["Guild"]: """The guilds the user is in.""" return list(filter(None, (self._client.cache.get_guild(guild_id) for guild_id in self._guild_ids))) - async def edit(self, *, username: Absent[str] = MISSING, avatar: Absent[UPLOADABLE_TYPE] = MISSING) -> None: + async def edit( + self, + *, + username: Absent[str] = MISSING, + avatar: Absent[UPLOADABLE_TYPE] = MISSING, + banner: Absent[UPLOADABLE_TYPE] = MISSING, + ) -> None: """ Edit the client's user. - You can either change the username, or avatar, or both at once. - `avatar` may be set to `None` to remove your bot's avatar + You can change the username, avatar, and banner, or any combination of the three. + `avatar` and `banner` may be set to `None` to remove your bot's avatar/banner ??? Hint "Example Usage:" ```python @@ -258,6 +264,7 @@ async def edit(self, *, username: Absent[str] = MISSING, avatar: Absent[UPLOADAB Args: username: The username you want to use avatar: The avatar to use. Can be a image file, path, or `bytes` (see example) + banner: The banner to use. Can be a image file, path, or `bytes` Raises: TooManyChanges: If you change the profile too many times @@ -270,6 +277,10 @@ async def edit(self, *, username: Absent[str] = MISSING, avatar: Absent[UPLOADAB payload["avatar"] = to_image_data(avatar) elif avatar is None: payload["avatar"] = None + if banner: + payload["banner"] = to_image_data(banner) + elif banner is None: + payload["banner"] = None try: resp = await self._client.http.modify_client_user(payload)
[FEAT] Allow for changing bot banner for current bot ### Problem Description Bots recently gained support for banners. Needless to say, you should be able to set a bot's banner through interactions.py. ### Proposed Solution Add a `banner` attribute to `ClientUser.edit` and its respective functions. This should be relatively simple, since it's similar to `avatar`. ### Alternatives Considered _No response_ ### Additional Information Discord docs technically don't reflect this, but it's in [discord.py](https://github.com/Rapptz/discord.py/commit/82d13e7b497ac8983a3567a715d5c528b2a895ba) already, and has been publicly announced in the DDevs server. ### Code of Conduct - [X] I agree to follow the contribution requirements.
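A sketch of the API added by the patch above, mirroring the existing avatar handling; `banner.png` is a placeholder path:

```python
from interactions import Client

async def refresh_branding(bot: Client) -> None:
    await bot.user.edit(banner="banner.png")  # set the bot's banner from a file
    await bot.user.edit(banner=None)          # remove it again
```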
2024-03-19T04:27:26
interactions-py/interactions.py
1643
interactions-py__interactions.py-1643
[ "1642" ]
57e4f771cd74e96fd1d882783ba4def710004f1b
diff --git a/interactions/api/voice/recorder.py b/interactions/api/voice/recorder.py --- a/interactions/api/voice/recorder.py +++ b/interactions/api/voice/recorder.py @@ -217,10 +217,10 @@ def process_data(self, raw_audio: RawInputAudio) -> None: if raw_audio.ssrc not in self.user_timestamps: if last_timestamp := self.audio.last_timestamps.get(raw_audio.user_id, None): - diff = raw_audio.timestamp - last_timestamp - silence = int(diff * decoder.sample_rate) + silence = raw_audio.timestamp - last_timestamp + frames = int(silence * decoder.sample_rate) log.debug( - f"{self.state.channel.id}::{raw_audio.user_id} - User rejoined, adding {silence} silence frames ({diff} seconds)" + f"{self.state.channel.id}::{raw_audio.user_id} - User rejoined, adding {frames} silence frames ({silence} seconds)" ) else: silence = 0
[BUG] Recorder Memory Usage ### Library Version 5.11.0 ### Describe the Bug Leaving and rejoining a voice channel with a bot recording causes enormous RAM usage and the bot recorder to stop functioning properly or the bot to crash completely (from out-of-memory). This is because the calculation for how many silence frames to insert is wrong (it multiplies by the sample rate twice). ### Steps to Reproduce Use whatever you prefer and start monitoring your system memory usage. Join a voice channel and have a bot join it to record. Talk and then disconnect from the voice channel. Wait 3-5 seconds and rejoin the voice channel. Memory usage will spike into the 10-20 GB range or the bot will crash. The exact outcome and how long to wait before rejoining the voice channel really depend on the amount of memory available on your system. If you wait too long and don't have enough memory, you likely will see "Error while recording: " in the logs because Python was smart enough to not let you try to allocate that much memory. I have 64 GB of RAM, of which 32 GB is available to the bot, and it will spike up to 25 GB consistently or just crash. When it crashes, you will see "Killed" in the terminal and the exit code will be 137, which is an out-of-memory exit code. ### Expected Results The memory will not have a substantial spike and the bot will not crash. ### Minimal Reproducible Code _No response_ ### Traceback _No response_ ### Checklist - [X] I have searched the open issues for duplicates. - [X] I have shown the entire traceback, if possible. - [X] I have removed my token from display, if visible. - [X] I have attempted to debug this myself, and I believe this issue is with the library ### Additional Information _No response_
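Back-of-the-envelope arithmetic for the bug; per the report, the silence gap was effectively multiplied by the sample rate twice (the values below are illustrative):

```python
sample_rate = 48_000  # samples per second, typical for Discord voice
gap_seconds = 5.0     # how long the user was disconnected

correct_frames = int(gap_seconds * sample_rate)              # 240,000
buggy_frames = int(gap_seconds * sample_rate * sample_rate)  # 11,520,000,000

# At 2 bytes per 16-bit mono sample, the buggy count implies roughly 23 GB
# of generated silence, in line with the reported 10-25 GB spikes.
print(f"{correct_frames:,} frames vs {buggy_frames:,} frames")
```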
2024-03-25T22:15:58
interactions-py/interactions.py
1648
interactions-py__interactions.py-1648
[ "1644" ]
b96eecec3c90ff3e07578aec6598bbaa491e62a2
diff --git a/interactions/client/smart_cache.py b/interactions/client/smart_cache.py --- a/interactions/client/smart_cache.py +++ b/interactions/client/smart_cache.py @@ -794,7 +794,7 @@ async def place_voice_state_data( if old_state := self.get_voice_state(user_id): # noinspection PyProtectedMember - if user_id in old_state.channel._voice_member_ids: + if old_state.channel is not None and user_id in old_state.channel._voice_member_ids: # noinspection PyProtectedMember old_state.channel._voice_member_ids.remove(user_id)
[BUG] Occasionally VoiceChannel is None ### Library Version 5.11.0 ### Describe the Bug Occasionally, the built-in cache receives None for the VoiceChannel when trying to access `_voice_member_ids`. I believe this has to do with not having the `VOICE_STATE_UPDATE` intent. ### Steps to Reproduce Run an interactions.py bot with Sentry enabled and inspect the error log occasionally. ### Expected Results Ideally, the library isn't sending errors to Sentry :) ### Minimal Reproducible Code _No response_ ### Traceback ```py AttributeError: 'NoneType' object has no attribute '_voice_member_ids' File "interactions/client/smart_cache.py", line 767, in place_voice_state_data if user_id in old_state.channel._voice_member_ids: ``` ### Checklist - [X] I have searched the open issues for duplicates. - [X] I have shown the entire traceback, if possible. - [X] I have removed my token from display, if visible. - [X] I have attempted to debug this myself, and I believe this issue is with the library ### Additional Information _No response_
Ah, I see what's happening here. Shouldn't be too hard to fix.
2024-03-31T12:49:00
interactions-py/interactions.py
1654
interactions-py__interactions.py-1654
[ "1653" ]
b96eecec3c90ff3e07578aec6598bbaa491e62a2
diff --git a/interactions/models/discord/components.py b/interactions/models/discord/components.py --- a/interactions/models/discord/components.py +++ b/interactions/models/discord/components.py @@ -351,13 +351,13 @@ def from_object(cls, obj: DiscordObject) -> "SelectDefaultValues": """Create a default value from a discord object.""" match obj: case d_models.User(): - return cls(id=obj.id, type="user") + return cls(client=obj._client, id=obj.id, type="user") case d_models.Member(): - return cls(id=obj.id, type="user") + return cls(client=obj._client, id=obj.id, type="user") case d_models.BaseChannel(): - return cls(id=obj.id, type="channel") + return cls(client=obj._client, id=obj.id, type="channel") case d_models.Role(): - return cls(id=obj.id, type="role") + return cls(client=obj._client, id=obj.id, type="role") case _: raise TypeError( f"Cannot convert {obj} of type {type(obj)} to a SelectDefaultValues - Expected User, Channel, Member, or Role"
[BUG] SelectDefaultValues.from_object needs client positional argument ### Library Version 5.11.0 ### Describe the Bug When I try to create the `SelectDefaultValues` with the class method `from_object`, the init function is missing a positional argument `client`, which needs to be passed in. ### Steps to Reproduce 1. Use the following code: ```python interactions.models.discord.components.SelectDefaultValues.from_object(ctx.author) ``` 2. It will raise the error immediately ### Expected Results It will raise an error to indicate that the `client` positional argument is missing. ### Minimal Reproducible Code ```python import interactions bot = interactions.Client(intents=Intents.DEFAULT) @interactions.slash_command( name="test", description="test" ) async def test_cmd(ctx: interactions.SlashContext): component = interactions.UserSelectMenu(max_values=3, default_values=[interactions.models.discord.components.SelectDefaultValues.from_object(ctx.author)]) # Or the following code is the same as above # component = interactions.UserSelectMenu(max_values=3, default_values=[ctx.author]) await ctx.send("Test", components=[component]) bot.start("TOKEN") ``` ### Traceback ``` Traceback (most recent call last): File "/venv/lib/python3.11/site-packages/interactions/client/client.py", line 1900, in __dispatch_interaction response = await callback ^^^^^^^^^^^^^^ File "/venv/lib/python3.11/site-packages/interactions/client/client.py", line 1771, in _run_slash_command return await command(ctx, **ctx.kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/venv/lib/python3.11/site-packages/interactions/models/internal/command.py", line 132, in __call__ await self.call_callback(self.callback, context) File "/venv/lib/python3.11/site-packages/interactions/models/internal/application_commands.py", line 833, in call_callback return await self.call_with_binding(callback, ctx, *new_args, **new_kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/venv/lib/python3.11/site-packages/interactions/models/internal/callback.py", line 43, in call_with_binding return await callback(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/extensions/template.py", line 22, in test_cmd ) File "/venv/lib/python3.11/site-packages/interactions/models/discord/components.py", line 351, in from_object return cls(id=obj.id, type="user") ^^^^^^^^^^^^^^^^^^^^^^^^^^^ TypeError: SelectDefaultValues.__init__() missing 1 required positional argument: 'client' ``` ### Checklist - [X] I have searched the open issues for duplicates. - [X] I have shown the entire traceback, if possible. - [X] I have removed my token from display, if visible. - [X] I have attempted to debug this myself, and I believe this issue is with the library ### Additional Information I did a very simple hack in the library but it has very limited usage: ```python @attrs.define(eq=False, order=False, hash=False, slots=False) class SelectDefaultValues(DiscordObject): id: Snowflake """ID of a user, role, or channel""" type: str """Type of value that id represents. 
Either "user", "role", or "channel""" @classmethod def from_object(cls, obj: DiscordObject, client) -> "SelectDefaultValues": """Create a default value from a discord object.""" match obj: case d_models.User(): return cls(client=client, id=obj.id, type="user") case d_models.Member(): return cls(client=client, id=obj.id, type="user") case d_models.BaseChannel(): return cls(client=client, id=obj.id, type="channel") case d_models.Role(): return cls(client=client, id=obj.id, type="role") case _: raise TypeError( f"Cannot convert {obj} of type {type(obj)} to a SelectDefaultValues - Expected User, Channel, Member, or Role" ) ``` Then I can use it as: ```python import interactions bot = interactions.Client(intents=Intents.DEFAULT) @interactions.slash_command( name="test", description="test" ) async def test_cmd(ctx: interactions.SlashContext): component = interactions.UserSelectMenu(max_values=3, default_values=[interactions.models.discord.components.SelectDefaultValues.from_object(ctx.author, client)]) bot.start("TOKEN") ```
2024-04-07T01:12:03
interactions-py/interactions.py
1659
interactions-py__interactions.py-1659
[ "1657" ]
b96eecec3c90ff3e07578aec6598bbaa491e62a2
diff --git a/interactions/models/discord/enums.py b/interactions/models/discord/enums.py --- a/interactions/models/discord/enums.py +++ b/interactions/models/discord/enums.py @@ -103,9 +103,10 @@ def __iter__(cls) -> Iterator: def __call__(cls, value, names=None, *, module=None, qualname=None, type=None, start=1) -> "DistinctFlag": # To automatically convert string values into ints (eg for permissions) + kwargs = {"names": names} if names else {} try: int_value = int(value) - return super().__call__(int_value, names, module=module, qualname=qualname, type=type, start=start) + return super().__call__(int_value, module=module, qualname=qualname, type=type, start=start, **kwargs) except (TypeError, ValueError): return _return_cursed_enum(cls, value)
[BUG] Class `Intents` received an invalid and unexpected value (Python 3.12.3) ### Library Version 5.11.0 ### Describe the Bug Since yesterday, my console when running the bot is filled with these messages. I am using Python 3.12.3 on MacOS 14.4.1 Class `Intents` received an invalid and unexpected value `33026`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues ### Steps to Reproduce 1. Install Python 3.12.3 2. Run your interactions.py bot ### Expected Results The bot should function normally and my console isn't polluted with all these errors ### Minimal Reproducible Code _No response_ ### Traceback ```bash Class `Intents` received an invalid and unexpected value `-33027`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `1`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `-33028`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `0`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `4`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `-33032`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `8`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `-33040`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `16`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `-33056`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `32`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `-33088`, a new enum item will be created to represent this value. 
Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `64`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `-33152`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `128`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `-33280`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `0`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `512`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `-33792`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `1024`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `-34816`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `2048`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `-36864`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `4096`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `-40960`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `8192`, a new enum item will be created to represent this value. 
Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `-49152`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `16384`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `-65536`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `0`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `65536`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `-131072`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `1048576`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `-1179648`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `2097152`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `-3276800`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `3145728`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `3211264`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `3227648`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `3235840`, a new enum item will be created to represent this value. 
Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `3239936`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `3241984`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `3243008`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `3243520`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `3243648`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `3243712`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `3243744`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `3243760`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `3243768`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `3243772`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `3243773`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues
Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `3243648`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `3243712`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `3243744`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `3243760`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `3243768`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `3243772`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `3243773`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `0`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `Intents` received an invalid and unexpected value `32768`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues GUILD intent has not been enabled; this is very likely to cause errors Starting bot and loading exts.. All extensions are loaded! 5/5 Successfully loaded: credits, instagram, tiktok, twitter, youtube Class `UserFlags` received an invalid and unexpected value `0`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `UserFlags` received an invalid and unexpected value `0`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `UserFlags` received an invalid and unexpected value `0`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `UserFlags` received an invalid and unexpected value `4194432`, a new enum item will be created to represent this value. 
Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues Class `ApplicationFlags` received an invalid and unexpected value `8953856`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues ``` ### Checklist - [X] I have searched the open issues for duplicates. - [X] I have shown the entire traceback, if possible. - [X] I have removed my token from display, if visible. - [X] I have attempted to debug this myself, and I believe this issue is with the library ### Additional Information _No response_
The temporary workaround I am using for now is switching back to Python 3.11.

I put a traceback on it:

```py
Traceback (most recent call last):
  File "D:\Scratch\Documents\Git\June\Lib\site-packages\interactions\models\discord\enums.py", line 112, in __call__
    return super().__call__(int_value, names, module=module, qualname=qualname, type=type, start=start)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "C:\Users\...\AppData\Local\Programs\Python\Python312\Lib\enum.py", line 757, in __call__
    return cls.__new__(cls, value)
           ^^^^^^^^^^^^^^^^^^^^^^^
  File "C:\Users\...\AppData\Local\Programs\Python\Python312\Lib\enum.py", line 1179, in __new__
    raise exc
  File "C:\Users\...\AppData\Local\Programs\Python\Python312\Lib\enum.py", line 1156, in __new__
    result = cls._missing_(value)
             ^^^^^^^^^^^^^^^^^^^^
  File "C:\Users\...\AppData\Local\Programs\Python\Python312\Lib\enum.py", line 1422, in _missing_
    raise ValueError(
ValueError: (0, None) is not a valid Intents
Class `Intents` received an invalid and unexpected value `0`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues
```

There are some migration issues with Python 3.12.x; I suggest working with 3.11.x for now.

I have also found problems on my end using the newer version.

This seems to be an issue with 3.12.3 specifically. We're looking into it.

Reproduced on 3.12.3, elevating issue.

Will be fixed by #1658. Use Python 3.12.2 or a 3.11 version in the meantime.
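For context on the failure mode: since Python 3.11 the stdlib `enum` module exposes `FlagBoundary`, which lets a flag class keep unknown bits instead of raising, roughly the behaviour the library's warnings describe. A minimal sketch of that idea, illustrative only and not the library's actual `CursedIntEnum`:

```python
from enum import IntFlag, FlagBoundary

class Intents(IntFlag, boundary=FlagBoundary.KEEP):
    # Only two real members for brevity; with KEEP, unknown bits such as
    # 3243773 are retained as pseudo-members instead of raising ValueError.
    GUILDS = 1 << 0
    GUILD_MEMBERS = 1 << 1

unknown = Intents(3243773)  # kept rather than rejected
print(int(unknown))         # 3243773
```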
2024-04-18T02:42:28
interactions-py/interactions.py
1670
interactions-py__interactions.py-1670
[ "1596" ]
7e01223cd7600378c405b99ffe35d48b1fa5ed84
diff --git a/interactions/models/discord/enums.py b/interactions/models/discord/enums.py --- a/interactions/models/discord/enums.py +++ b/interactions/models/discord/enums.py @@ -1017,6 +1017,12 @@ class AuditLogEventType(CursedIntEnum): BLOCKED_PHISHING_LINK = 180 SERVER_GUIDE_CREATE = 190 SERVER_GUIDE_UPDATE = 191 + VOICE_CHANNEL_STATUS_CREATE = 192 + VOICE_CHANNEL_STATUS_DELETE = 193 + CLYDE_AI_PROFILE_UPDATE = 194 + GUILD_SCHEDULED_EVENT_EXCEPTION_CREATE = 200 + GUILD_SCHEDULED_EVENT_EXCEPTION_UPDATE = 201 + GUILD_SCHEDULED_EVENT_EXCEPTION_DELETE = 202 class AutoModTriggerType(CursedIntEnum):
[BUG] AuditLogEventType 192 missing

### Library Version

stable

### Describe the Bug

Seeing the following error in bot logs:

Class `AuditLogEventType` received an invalid and unexpected value `192`, a new enum item will be created to represent this value. Please update interactions.py or report this issue on GitHub - https://github.com/interactions-py/interactions.py/issues

### Steps to Reproduce

TBD, trying to determine which audit log event corresponds to ID 192 so that the enums can be updated.

### Expected Results

No AuditLogEventType event code errors shown in bot log output.

### Minimal Reproducible Code

_No response_

### Traceback

_No response_

### Checklist

- [X] I have searched the open issues for duplicates.
- [X] I have shown the entire traceback, if possible.
- [X] I have removed my token from display, if visible.
- [X] I have attempted to debug this myself, and I believe this issue is with the library

### Additional Information

Enum needs to be set, but need to first identify what event 192 is. Discord Docs don't have anything listed, either.

https://discord.com/developers/docs/resources/audit-log#audit-log-change-object-audit-log-change-exceptions
https://github.com/interactions-py/interactions.py/blob/stable/interactions/models/discord/enums.py#L992
Opening this issue to track -- I'm trying to identify which event 192 corresponds to and will open an MR with the fix once identified.

@ryandiamond23 192 = channel status added/changed, 193 = channel status removed

![image](https://github.com/interactions-py/interactions.py/assets/51890392/acc48e84-f42d-4a66-8244-8bec59eac940)

Here is what's in the canary dist that isn't covered by the enum, if someone would like to contribute it. The names may change in the future like they have in the past (we don't match very accurately anymore):

```js
{
  VOICE_CHANNEL_STATUS_CREATE: 192,
  VOICE_CHANNEL_STATUS_DELETE: 193,
  CLYDE_AI_PROFILE_UPDATE: 194,
  GUILD_SCHEDULED_EVENT_EXCEPTION_CREATE: 200,
  GUILD_SCHEDULED_EVENT_EXCEPTION_UPDATE: 201,
  GUILD_SCHEDULED_EVENT_EXCEPTION_DELETE: 202
}
```
2024-04-30T09:07:38
celery/celery
423
celery__celery-423
[ "422" ]
523598f39fcd546359a5176ed462858d47ff8297
diff --git a/celery/backends/base.py b/celery/backends/base.py --- a/celery/backends/base.py +++ b/celery/backends/base.py @@ -1,5 +1,6 @@ """celery.backends.base""" import time +import sys from datetime import timedelta @@ -8,8 +9,9 @@ from celery.utils import timeutils from celery.utils.serialization import pickle, get_pickled_exception from celery.utils.serialization import get_pickleable_exception +from celery.utils.serialization import create_exception_cls from celery.datastructures import LocalCache - +from celery.app import app_or_default class BaseBackend(object): """Base backend class.""" @@ -33,7 +35,7 @@ def prepare_expires(self, value, type=None): return value def encode_result(self, result, status): - if status in self.EXCEPTION_STATES: + if status in self.EXCEPTION_STATES and isinstance(result, Exception): return self.prepare_exception(result) else: return self.prepare_value(result) @@ -68,11 +70,18 @@ def mark_as_revoked(self, task_id): def prepare_exception(self, exc): """Prepare exception for serialization.""" - return get_pickleable_exception(exc) + if (app_or_default().conf["CELERY_RESULT_SERIALIZER"] in ("pickle", "yaml")): + return get_pickleable_exception(exc) + return { + "exc_type": type(exc).__name__, + "exc_message": str(exc), + } def exception_to_python(self, exc): """Convert serialized exception to Python exception.""" - return get_pickled_exception(exc) + if (app_or_default().conf["CELERY_RESULT_SERIALIZER"] in ("pickle", "yaml")): + return get_pickled_exception(exc) + return create_exception_cls(exc["exc_type"].encode("utf-8"), sys.modules[__name__]) def prepare_value(self, result): """Prepare value for storage."""
Json task result serializer + Exception = Fail When a task raises an exception, celeryd gets an exception in kombu: ``` [2011-06-28 18:34:31,540: ERROR/MainProcess] Task tasks.error[5c493298-b7e9-4535-a697-e271298b8b18] raised exception: TypeError('Exception() is not JSON serializable',) Traceback (most recent call last): File "<virtualenv>/lib/python2.7/site-packages/celery/worker/job.py", line 109, in execute_safe return self.execute(*args, **kwargs) File "<virtualenv>/lib/python2.7/site-packages/celery/worker/job.py", line 127, in execute return super(WorkerTaskTrace, self).execute() File "<virtualenv>/lib/python2.7/site-packages/celery/execute/trace.py", line 76, in execute retval = self._trace() File "<virtualenv>/lib/python2.7/site-packages/celery/execute/trace.py", line 90, in _trace r = handler(trace.retval, trace.exc_type, trace.tb, trace.strtb) File "<virtualenv>/lib/python2.7/site-packages/celery/worker/job.py", line 155, in handle_failure exc = self.task.backend.mark_as_failure(self.task_id, exc, strtb) File "<virtualenv>/lib/python2.7/site-packages/celery/backends/base.py", line 45, in mark_as_failure traceback=traceback) File "<virtualenv>/lib/python2.7/site-packages/celery/backends/base.py", line 157, in store_result return self._store_result(task_id, result, status, traceback, **kwargs) File "<virtualenv>/lib/python2.7/site-packages/celery/backends/amqp.py", line 128, in _store_result "traceback": traceback}) File "<virtualenv>/lib/python2.7/site-packages/kombu/connection.py", line 230, in _insured return fun(*args, **kwargs) File "<virtualenv>/lib/python2.7/site-packages/celery/backends/amqp.py", line 101, in _publish_result self._create_producer(task_id, channel).publish(meta) File "<virtualenv>/lib/python2.7/site-packages/kombu/messaging.py", line 124, in publish compression, headers) File "<virtualenv>/lib/python2.7/site-packages/kombu/messaging.py", line 147, in _prepare body) = encode(body, serializer=serializer) File "<virtualenv>/lib/python2.7/site-packages/kombu/serialization.py", line 119, in encode payload = encoder(data) File "<virtualenv>/lib/python2.7/site-packages/anyjson/__init__.py", line 129, in <lambda> serialize = lambda value: implementation.serialize(value) File "<virtualenv>/lib/python2.7/site-packages/anyjson/__init__.py", line 91, in serialize raise TypeError(*exc.args) TypeError: Exception() is not JSON serializable ```
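The failure is easy to reproduce outside celery: exception instances are not JSON-serializable, while a plain dict of their type and message is, which is the shape the patch above stores. A minimal sketch (variable names here are illustrative):

```python
import json

exc = ValueError('go away')
try:
    json.dumps(exc)  # fails: exception objects are not JSON types
except TypeError as err:
    print(err)

# The JSON-safe representation the fix stores instead:
print(json.dumps({'exc_type': type(exc).__name__, 'exc_message': str(exc)}))
```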
2011-06-29T14:53:04
celery/celery
450
celery__celery-450
[ "450" ]
9497342cfa5f01674402415346d6d4d39517711f
diff --git a/celery/concurrency/processes/__init__.py b/celery/concurrency/processes/__init__.py --- a/celery/concurrency/processes/__init__.py +++ b/celery/concurrency/processes/__init__.py @@ -6,8 +6,6 @@ import platform import signal as _signal -from os import kill as _kill - from celery.concurrency.base import BasePool from celery.concurrency.processes.pool import Pool, RUN @@ -17,6 +15,8 @@ # *and its children* (if any). from celery.concurrency.processes import _win _kill = _win.kill_processtree # noqa +else: + from os import kill as _kill class TaskPool(BasePool):
os.kill is not available in windows before python 2.7 As per the topic, the current celery implementation (>=2.3.0) crashes on windows using python 2.5 and 2.6, because it uses os.kill which is not available in windows before python 2.7
Thanks! What is the traceback?

Sorry I don't have it readily available, but I think it crashed on attempted import in concurrency. I'll try to get a traceback for ya.

```
Traceback (most recent call last):
  File "run_celeryd.py", line 9, in <module>
    celery.bin.celeryd.main()
  File "c:\python25\lib\site-packages\celery-2.3.1-py2.5.egg\celery\bin\celeryd.py", line 187, in main
    worker.execute_from_commandline()
  File "c:\python25\lib\site-packages\celery-2.3.1-py2.5.egg\celery\bin\base.py", line 72, in execute_from_commandline
    return self.handle_argv(prog_name, argv[1:])
  File "c:\python25\lib\site-packages\celery-2.3.1-py2.5.egg\celery\bin\base.py", line 100, in handle_argv
    return self.run(*args, **vars(options))
  File "c:\python25\lib\site-packages\celery-2.3.1-py2.5.egg\celery\bin\celeryd.py", line 95, in run
    kwargs.get("pool") or self.app.conf.CELERYD_POOL)
  File "c:\python25\lib\site-packages\celery-2.3.1-py2.5.egg\celery\concurrency\__init__.py", line 13, in get_implementation
    return get_cls_by_name(cls, ALIASES)
  File "c:\python25\lib\site-packages\celery-2.3.1-py2.5.egg\celery\utils\__init__.py", line 313, in get_cls_by_name
    module = imp(module_name)
  File "c:\python25\lib\site-packages\importlib-1.0.2-py2.5.egg\importlib\__init__.py", line 37, in import_module
    __import__(name)
  File "c:\python25\lib\site-packages\celery-2.3.1-py2.5.egg\celery\concurrency\processes\__init__.py", line 9, in <module>
    from os import kill as _kill
ImportError: cannot import name kill
```
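The guard applied by the patch above is the usual pattern for platform-dependent imports: resolve the Windows fallback first and only touch `os.kill` on POSIX. A sketch of the shape, reconstructed from the diff with a plausible platform check:

```python
import platform

if platform.system() == 'Windows':
    # os.kill does not exist on Windows before Python 2.7, so use the
    # project's Windows helper (which also kills child processes).
    from celery.concurrency.processes import _win
    _kill = _win.kill_processtree
else:
    from os import kill as _kill
```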
2011-08-27T21:57:56
celery/celery
1206
celery__celery-1206
[ "1203" ]
707fdb39d057bfc5179626113fb77aeca56a6ac6
diff --git a/celery/backends/mongodb.py b/celery/backends/mongodb.py --- a/celery/backends/mongodb.py +++ b/celery/backends/mongodb.py @@ -96,7 +96,10 @@ def _get_connection(self): # This enables the use of replica sets and sharding. # See pymongo.Connection() for more info. args = [self.mongodb_host] - kwargs = {'max_pool_size': self.mongodb_max_pool_size} + kwargs = { + 'max_pool_size': self.mongodb_max_pool_size, + 'ssl': self.app.conf.BROKER_USE_SSL + } if isinstance(self.mongodb_host, string_t) \ and not self.mongodb_host.startswith('mongodb://'): args.append(self.mongodb_port)
MongoDB and BROKER_USE_SSL=True I've recently started with mongodb and BROKER_USE_SSL=True, this doesn't seem to work. Celery is trying to reconnect with Re-establishing connection message. BROKER_USE_SSL=False works well. ``` python [2013-02-21 14:57:45,708: DEBUG/MainProcess] consumer: Re-establishing connection to the broker... [2013-02-21 14:57:45,710: INFO/MainProcess] consumer: Connected to mongodb://localhost%3A27017%2Fdata/data. [2013-02-21 14:57:45,714: ERROR/MainProcess] consumer: Connection to broker lost. Trying to re-establish the connection... Traceback (most recent call last): File "/stuff/eggs/celery-3.0.13-py2.7.egg/celery/worker/consumer.py", line 392, in start self.reset_connection() File "/stuff/eggs/celery-3.0.13-py2.7.egg/celery/worker/consumer.py", line 741, in reset_connection self.connection, on_decode_error=self.on_decode_error, File "/stuff/eggs/celery-3.0.13-py2.7.egg/celery/app/amqp.py", line 291, in __init__ queues or self.app.amqp.queues.consume_from.values(), **kw File "/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/messaging.py", line 338, in __init__ self.revive(self.channel) File "/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/messaging.py", line 350, in revive self.declare() File "/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/messaging.py", line 360, in declare queue.declare() File "/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/entity.py", line 471, in declare self.queue_declare(nowait, passive=False) File "/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/entity.py", line 497, in queue_declare nowait=nowait) File "/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/transport/virtual/__init__.py", line 398, in queue_declare return queue, self._size(queue), 0 File "/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/transport/mongodb.py", line 76, in _size return self.client.messages.find({'queue': queue}).count() File "/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/transport/mongodb.py", line 204, in client self._client = self._open() File "/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/transport/mongodb.py", line 133, in _open mongoconn = Connection(host=hostname) File "/stuff/eggs/pymongo-2.4.2-py2.7-linux-x86_64.egg/pymongo/connection.py", line 180, in __init__ max_pool_size, document_class, tz_aware, _connect, **kwargs) File "/stuff/eggs/pymongo-2.4.2-py2.7-linux-x86_64.egg/pymongo/mongo_client.py", line 269, in __init__ raise ConnectionFailure(str(e)) ConnectionFailure: [Errno 104] Connection reset by peer [2013-02-21 14:57:45,716: DEBUG/MainProcess] consumer: Re-establishing connection to the broker... [2013-02-21 14:57:45,718: INFO/MainProcess] consumer: Connected to mongodb://localhost%3A27017%2Fdata/data. [2013-02-21 14:57:45,721: ERROR/MainProcess] consumer: Connection to broker lost. Trying to re-establish the connection... ``` Problem seems to be generated by this line https://github.com/celery/kombu/blob/master/kombu/transport/mongodb.py#L135 which should take ssl=True parameter for SSL connections. I know it's kombu component, but setting (BROKER_USE_SSL) which is leading to this problem is part of celery library.
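For reference, pymongo clients of that era accept an `ssl` flag directly, which is what the transport needs to forward when `BROKER_USE_SSL` is set. A hedged sketch of the idea, not kombu's actual connection code:

```python
from pymongo import MongoClient

use_ssl = True  # i.e. the value of BROKER_USE_SSL
client = MongoClient('localhost', 27017, ssl=use_ssl)
```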
2013-02-23T09:59:42
celery/celery
1769
celery__celery-1769
[ "1768" ]
3c4860d2208ae07fc1f5f07d7e9ae6c79919e9c4
diff --git a/celery/apps/worker.py b/celery/apps/worker.py --- a/celery/apps/worker.py +++ b/celery/apps/worker.py @@ -315,6 +315,9 @@ def on_SIGINT(worker): def _reload_current_worker(): + platforms.close_open_fds([ + sys.__stdin__, sys.__stdout__, sys.__stderr__, + ]) os.execv(sys.executable, [sys.executable] + sys.argv)
Sending SIGHUP leaks file handles When sending SIGHUP to the Celery master process, it leaks all of its previously open file handles when calling exec. This is a regression introduced in 3.1 by 118b300fcad4e6ffb0178fc00cf9fe26075101a5 (originally fixed in 803655b79ccb0403f47cfcd2cfa5a6ed66301cbc for #1270). This additionally causes Celery to crash after enough HUPs (if the open file limit is larger than 1024): ``` ERROR celery.bootsteps Error on stopping Pool: ValueError('filedescriptor out of range in select()',) Traceback (most recent call last): File "celery/bootsteps.py", line 155, in send_all fun(parent, *args) File "celery/bootsteps.py", line 377, in stop return self.obj.stop() File "celery/concurrency/base.py", line 119, in stop self.on_stop() File "celery/concurrency/prefork.py", line 140, in on_stop self._pool.join() File "billiard/pool.py", line 1523, in join stop_if_not_current(self._result_handler) File "billiard/pool.py", line 148, in stop_if_not_current thread.stop(timeout) File "billiard/pool.py", line 493, in stop self.on_stop_not_started() File "celery/concurrency/asynpool.py", line 301, in on_stop_not_started join_exited_workers(shutdown=True) File "billiard/pool.py", line 1109, in _join_exited_workers self.process_flush_queues(worker) File "celery/concurrency/asynpool.py", line 1082, in process_flush_queues readable, _, again = _select(fds, None, fds, timeout=0.01) File "celery/concurrency/asynpool.py", line 141, in _select r, w, e = select.select(readers, writers, err, timeout) ValueError: filedescriptor out of range in select() ```
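The eventual fix closes the inherited descriptors before `os.execv`. A generic POSIX variant of that idea, sketched here rather than celery's actual `platforms.close_open_fds` call:

```python
import os
import sys

def reexec_current_process():
    # Close everything above stderr so the exec'd image does not
    # inherit stale descriptors from the old worker.
    os.closerange(3, os.sysconf('SC_OPEN_MAX'))
    os.execv(sys.executable, [sys.executable] + sys.argv)
```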
2014-01-03T23:56:52
celery/celery
1834
celery__celery-1834
[ "1785" ]
59e44ae6300e5b39b3306bc2cdc76a0b85b3d418
diff --git a/celery/concurrency/asynpool.py b/celery/concurrency/asynpool.py --- a/celery/concurrency/asynpool.py +++ b/celery/concurrency/asynpool.py @@ -501,7 +501,7 @@ def verify_process_alive(proc): if proc._is_alive() and proc in waiting_to_start: assert proc.outqR_fd in fileno_to_outq assert fileno_to_outq[proc.outqR_fd] is proc - assert proc.outqR_fd in hub.readers + assert proc.outqR_fd in hub.readers, "%s.outqR_fd=%s not in hub.readers !" % (proc, proc.outqR_fd) error('Timed out waiting for UP message from %r', proc) os.kill(proc.pid, 9) @@ -570,6 +570,15 @@ def on_process_down(proc): if inq: busy_workers.discard(inq) hub_remove(proc.sentinel) + waiting_to_start.discard(proc) + self._active_writes.discard(proc.inqW_fd) + hub_remove(proc.inqW_fd) + hub_remove(proc.outqR_fd) + if proc.synqR_fd: + hub_remove(proc.synqR_fd) + if proc.synqW_fd: + self._active_writes.discard(proc.synqW_fd) + hub_remove(proc.synqW_fd) self.on_process_down = on_process_down def _create_write_handlers(self, hub, @@ -966,7 +975,7 @@ def on_process_alive(self, pid): try: proc = next(w for w in self._pool if w.pid == pid) except StopIteration: - # process already exited :( this will be handled elsewhere. + logger.warning("process with pid=%s already exited :( - handling this elsewhere ...", pid) return assert proc.inqW_fd not in self._fileno_to_inq assert proc.inqW_fd not in self._all_inqueues
"assert proc.outqR_fd in hub.readers" AssertionError ``` [2014-01-13 15:06:53,047] pid=33970/MainProcess - ERROR - celery.worker - Unrecoverable error: AssertionError() Traceback (most recent call last): File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/celery/worker/__init__.py", line 206, in start self.blueprint.start(self) File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/celery/bootsteps.py", line 123, in start step.start(parent) File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/celery/bootsteps.py", line 373, in start return self.obj.start() File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/celery/worker/consumer.py", line 270, in start blueprint.start(self) File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/celery/bootsteps.py", line 123, in start step.start(parent) File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/celery/worker/consumer.py", line 786, in start c.loop(*c.loop_args()) File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/celery/worker/loops.py", line 71, in asynloop next(loop) File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/kombu/async/hub.py", line 288, in create_loop poll_timeout = fire_timers(propagate=propagate) if scheduled else 1 File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/kombu/async/hub.py", line 151, in fire_timers entry() File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/kombu/async/timer.py", line 64, in __call__ return self.fun(*self.args, **self.kwargs) File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/celery/concurrency/asynpool.py", line 504, in verify_process_alive assert proc.outqR_fd in hub.readers AssertionError [2014-01-13 15:06:53,048] pid=33970/MainProcess - DEBUG - celery.bootsteps - | Worker: Closing Hub... [2014-01-13 15:06:53,048] pid=33970/MainProcess - DEBUG - celery.bootsteps - | Worker: Closing Pool... [2014-01-13 15:06:53,048] pid=33970/MainProcess - DEBUG - celery.bootsteps - | Worker: Closing Consumer... [2014-01-13 15:06:53,048] pid=33970/MainProcess - DEBUG - celery.bootsteps - | Worker: Stopping Consumer... [2014-01-13 15:06:53,048] pid=33970/MainProcess - DEBUG - celery.bootsteps - | Consumer: Closing Connection... [2014-01-13 15:06:53,049] pid=33970/MainProcess - DEBUG - celery.bootsteps - | Consumer: Closing Events... [2014-01-13 15:06:53,049] pid=33970/MainProcess - DEBUG - celery.bootsteps - | Consumer: Closing Mingle... [2014-01-13 15:06:53,049] pid=33970/MainProcess - DEBUG - celery.bootsteps - | Consumer: Closing Tasks... [2014-01-13 15:06:53,049] pid=33970/MainProcess - DEBUG - celery.bootsteps - | Consumer: Closing Control... [2014-01-13 15:06:53,049] pid=33970/MainProcess - DEBUG - celery.bootsteps - | Consumer: Closing Gossip... [2014-01-13 15:06:53,049] pid=33970/MainProcess - DEBUG - celery.bootsteps - | Consumer: Closing Heart... [2014-01-13 15:06:53,049] pid=33970/MainProcess - DEBUG - celery.bootsteps - | Consumer: Closing event loop... [2014-01-13 15:06:53,049] pid=33970/MainProcess - DEBUG - celery.bootsteps - | Consumer: Stopping event loop... [2014-01-13 15:06:53,049] pid=33970/MainProcess - DEBUG - celery.bootsteps - | Consumer: Stopping Heart... [2014-01-13 15:06:53,049] pid=33970/MainProcess - DEBUG - celery.bootsteps - | Consumer: Stopping Gossip... [2014-01-13 15:06:53,050] pid=33970/MainProcess - DEBUG - celery.bootsteps - | Consumer: Stopping Control... 
```

I don't have an isolated test case yet. Does the cause look like something obvious?
Tested with:

- celery/kombu@ffa90945bf06ba8b9269b4a36019baad0ac57793
- celery/billiard@c29c4f7adbd0f7f4544c05fb9777800616e89d2f
- celery/celery@ceaf7aba36eae78af852eb5ca703c81091b52f23

[I too](https://github.com/celery/kombu/issues/305) am getting a similar issue.

Which transport are you using?

It was redis. I will post more details when I get the chance.

I am getting this issue with rabbitmq/amqp.

I'm getting it too, on the Redis backend. Celery Beat continues, the worker restarts but seems to be idle (no output from tasks in the log, unlike when I stop the entire instance, do flushall on redis and start celery again; then it starts working again).

The problem code is in https://github.com/celery/celery/blob/master/celery/concurrency/asynpool.py#L501

Do you guys use CELERYD_MAX_TASKS_PER_CHILD with a low value?

Yes! In fact I set it to 1, otherwise I had massive memory leaks under heavy loads (lots of workers, big tasks, not releasing memory to the OS, causing memory use creeping up and then the OOM killer incoming) etc.

We have it set to 1000.

Yep, it is set to 2500 for us.
2014-01-30T13:06:46
celery/celery
1899
celery__celery-1899
[ "1786", "1786" ]
5254f9a75b3a2c677fd7165de10fd09bbfd48232
diff --git a/celery/backends/database/__init__.py b/celery/backends/database/__init__.py --- a/celery/backends/database/__init__.py +++ b/celery/backends/database/__init__.py @@ -8,17 +8,21 @@ """ from __future__ import absolute_import +import logging +from contextlib import contextmanager from functools import wraps from celery import states +from celery.backends.base import BaseBackend from celery.exceptions import ImproperlyConfigured from celery.five import range from celery.utils.timeutils import maybe_timedelta -from celery.backends.base import BaseBackend +from .models import Task +from .models import TaskSet +from .session import SessionManager -from .models import Task, TaskSet -from .session import ResultSession +logger = logging.getLogger(__name__) __all__ = ['DatabaseBackend'] @@ -33,7 +37,19 @@ def _sqlalchemy_installed(): return sqlalchemy _sqlalchemy_installed() -from sqlalchemy.exc import DatabaseError, OperationalError +from sqlalchemy.exc import DatabaseError, OperationalError, ResourceClosedError, InvalidRequestError, IntegrityError +from sqlalchemy.orm.exc import StaleDataError + + +@contextmanager +def session_cleanup(session): + try: + yield + except Exception: + session.rollback() + raise + finally: + session.close() def retry(fun): @@ -45,7 +61,15 @@ def _inner(*args, **kwargs): for retries in range(max_retries): try: return fun(*args, **kwargs) - except (DatabaseError, OperationalError): + except ( + DatabaseError, OperationalError, ResourceClosedError, StaleDataError, InvalidRequestError, + IntegrityError + ): + logger.warning( + "Failed operation %s. Retrying %s more times.", + fun.__name__, max_retries - retries - 1, + exc_info=True, + ) if retries + 1 >= max_retries: raise @@ -83,8 +107,8 @@ def __init__(self, dburi=None, expires=None, 'Missing connection string! 
Do you have ' 'CELERY_RESULT_DBURI set to a real value?') - def ResultSession(self): - return ResultSession( + def ResultSession(self, session_manager=SessionManager()): + return session_manager.session_factory( dburi=self.dburi, short_lived_sessions=self.short_lived_sessions, **self.engine_options @@ -95,8 +119,9 @@ def _store_result(self, task_id, result, status, traceback=None, max_retries=3, **kwargs): """Store return value and status of an executed task.""" session = self.ResultSession() - try: - task = session.query(Task).filter(Task.task_id == task_id).first() + with session_cleanup(session): + task = list(session.query(Task).filter(Task.task_id == task_id)) + task = task and task[0] if not task: task = Task(task_id) session.add(task) @@ -106,83 +131,70 @@ def _store_result(self, task_id, result, status, task.traceback = traceback session.commit() return result - finally: - session.close() @retry def _get_task_meta_for(self, task_id): """Get task metadata for a task by id.""" session = self.ResultSession() - try: - task = session.query(Task).filter(Task.task_id == task_id).first() - if task is None: + with session_cleanup(session): + task = list(session.query(Task).filter(Task.task_id == task_id)) + task = task and task[0] + if not task: task = Task(task_id) task.status = states.PENDING task.result = None return task.to_dict() - finally: - session.close() @retry def _save_group(self, group_id, result): """Store the result of an executed group.""" session = self.ResultSession() - try: + with session_cleanup(session): group = TaskSet(group_id, result) session.add(group) session.flush() session.commit() return result - finally: - session.close() @retry def _restore_group(self, group_id): """Get metadata for group by id.""" session = self.ResultSession() - try: + with session_cleanup(session): group = session.query(TaskSet).filter( TaskSet.taskset_id == group_id).first() if group: return group.to_dict() - finally: - session.close() @retry def _delete_group(self, group_id): """Delete metadata for group by id.""" session = self.ResultSession() - try: + with session_cleanup(session): session.query(TaskSet).filter( TaskSet.taskset_id == group_id).delete() session.flush() session.commit() - finally: - session.close() @retry def _forget(self, task_id): """Forget about result.""" session = self.ResultSession() - try: + with session_cleanup(session): session.query(Task).filter(Task.task_id == task_id).delete() session.commit() - finally: - session.close() def cleanup(self): """Delete expired metadata.""" session = self.ResultSession() expires = self.expires now = self.app.now() - try: + with session_cleanup(session): session.query(Task).filter( Task.date_done < (now - expires)).delete() session.query(TaskSet).filter( TaskSet.date_done < (now - expires)).delete() session.commit() - finally: - session.close() def __reduce__(self, args=(), kwargs={}): kwargs.update( diff --git a/celery/backends/database/session.py b/celery/backends/database/session.py --- a/celery/backends/database/session.py +++ b/celery/backends/database/session.py @@ -8,58 +8,55 @@ """ from __future__ import absolute_import -from collections import defaultdict -from multiprocessing.util import register_after_fork +from billiard.util import register_after_fork from sqlalchemy import create_engine -from sqlalchemy.orm import sessionmaker from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy.orm import sessionmaker +from sqlalchemy.pool import NullPool ResultModelBase = declarative_base() -_SETUP = 
defaultdict(lambda: False) -_ENGINES = {} -_SESSIONS = {} - -__all__ = ['ResultSession', 'get_engine', 'create_session'] - - -class _after_fork(object): - registered = False - - def __call__(self): - self.registered = False # child must reregister - for engine in list(_ENGINES.values()): - engine.dispose() - _ENGINES.clear() - _SESSIONS.clear() -after_fork = _after_fork() - - -def get_engine(dburi, **kwargs): - try: - return _ENGINES[dburi] - except KeyError: - engine = _ENGINES[dburi] = create_engine(dburi, **kwargs) - after_fork.registered = True - register_after_fork(after_fork, after_fork) - return engine - - -def create_session(dburi, short_lived_sessions=False, **kwargs): - engine = get_engine(dburi, **kwargs) - if short_lived_sessions or dburi not in _SESSIONS: - _SESSIONS[dburi] = sessionmaker(bind=engine) - return engine, _SESSIONS[dburi] - - -def setup_results(engine): - if not _SETUP['results']: - ResultModelBase.metadata.create_all(engine) - _SETUP['results'] = True - - -def ResultSession(dburi, **kwargs): - engine, session = create_session(dburi, **kwargs) - setup_results(engine) - return session() +__all__ = ['SessionManager'] + + +class SessionManager(object): + def __init__(self): + self._engines = {} + self._sessions = {} + self.forked = False + self.prepared = False + register_after_fork(self, self._after_fork) + + def _after_fork(self,): + self.forked = True + + def get_engine(self, dburi, **kwargs): + if self.forked: + try: + return self._engines[dburi] + except KeyError: + engine = self._engines[dburi] = create_engine(dburi, **kwargs) + return engine + else: + kwargs['poolclass'] = NullPool + return create_engine(dburi, **kwargs) + + def create_session(self, dburi, short_lived_sessions=False, **kwargs): + engine = self.get_engine(dburi, **kwargs) + if self.forked: + if short_lived_sessions or dburi not in self._sessions: + self._sessions[dburi] = sessionmaker(bind=engine) + return engine, self._sessions[dburi] + else: + return engine, sessionmaker(bind=engine) + + def prepare_models(self, engine): + if not self.prepared: + ResultModelBase.metadata.create_all(engine) + self.prepared = True + + def session_factory(self, dburi, **kwargs): + engine, session = self.create_session(dburi, **kwargs) + self.prepare_models(engine) + return session()
StaleDataError: UPDATE statement on table 'celery_taskmeta' expected to update 1 row(s); 0 were matched.

I get this

```
Traceback (most recent call last):
  File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/celery/app/trace.py", line 262, in trace_task
    uuid, retval, SUCCESS, request=task_request,
  File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/celery/backends/base.py", line 223, in store_result
    request=request, **kwargs)
  File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/celery/backends/database/__init__.py", line 47, in _inner
    return fun(*args, **kwargs)
  File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/celery/backends/database/__init__.py", line 99, in _store_result
    task = session.query(Task).filter(Task.task_id == task_id).first()
  File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/sqlalchemy/orm/query.py", line 2321, in first
    ret = list(self[0:1])
  File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/sqlalchemy/orm/query.py", line 2188, in __getitem__
    return list(res)
  File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/sqlalchemy/orm/loading.py", line 65, in instances
    fetch = cursor.fetchall()
  File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/sqlalchemy/engine/result.py", line 773, in fetchall
    self.cursor, self.context)
  File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/sqlalchemy/engine/base.py", line 1026, in _handle_dbapi_exception
    util.reraise(*exc_info)
  File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/sqlalchemy/engine/result.py", line 767, in fetchall
    l = self.process_rows(self._fetchall_impl())
  File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/sqlalchemy/engine/result.py", line 736, in _fetchall_impl
    self._non_result()
  File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/sqlalchemy/engine/result.py", line 741, in _non_result
    "This result object does not return rows. "
ResourceClosedError: This result object does not return rows. It has been closed automatically.
```

And then this:

```
[2014-01-13 15:16:26,279] pid=35232/Worker-144 - WARNING - py.warnings - /home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/celery/app/trace.py:343: RuntimeWarning: Exception raised outside body: StaleDataError("UPDATE statement on table 'celery_taskmeta' expected to update 1 row(s); 0 were matched.",):
Traceback (most recent call last):
  File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/celery/app/trace.py", line 262, in trace_task
    uuid, retval, SUCCESS, request=task_request,
  File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/celery/backends/base.py", line 223, in store_result
    request=request, **kwargs)
  File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/celery/backends/database/__init__.py", line 47, in _inner
    return fun(*args, **kwargs)
  File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/celery/backends/database/__init__.py", line 108, in _store_result
    session.commit()
  File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/sqlalchemy/orm/session.py", line 765, in commit
    self.transaction.commit()
  File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/sqlalchemy/orm/session.py", line 370, in commit
    self._prepare_impl()
  File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/sqlalchemy/orm/session.py", line 350, in _prepare_impl
    self.session.flush()
  File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/sqlalchemy/orm/session.py", line 1879, in flush
    self._flush(objects)
  File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/sqlalchemy/orm/session.py", line 1997, in _flush
    transaction.rollback(_capture_exception=True)
  File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/sqlalchemy/util/langhelpers.py", line 57, in __exit__
    compat.reraise(exc_type, exc_value, exc_tb)
  File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/sqlalchemy/orm/session.py", line 1961, in _flush
    flush_context.execute()
  File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/sqlalchemy/orm/unitofwork.py", line 370, in execute
    rec.execute(self)
  File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/sqlalchemy/orm/unitofwork.py", line 523, in execute
    uow
  File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/sqlalchemy/orm/persistence.py", line 59, in save_obj
    mapper, table, update)
  File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/sqlalchemy/orm/persistence.py", line 528, in _emit_update_statements
    (table.description, len(update), rows))
StaleDataError: UPDATE statement on table 'celery_taskmeta' expected to update 1 row(s); 0 were matched.
```
Tested with:

- celery/kombu@ffa90945bf06ba8b9269b4a36019baad0ac57793
- celery/billiard@c29c4f7adbd0f7f4544c05fb9777800616e89d2f
- celery/celery@ceaf7aba36eae78af852eb5ca703c81091b52f23

This seems more of a database configuration error/internal database error to me, possibly a problem with a driver or with sqlalchemy itself. Any hints about this? I seem to get this on both centos and ubuntu (mysql5.5 both).
2014-03-01T12:02:36
celery/celery
1970
celery__celery-1970
[ "1969" ]
b35569090c5cabfa784b00a68b55c7628fee813d
diff --git a/celery/app/task.py b/celery/app/task.py --- a/celery/app/task.py +++ b/celery/app/task.py @@ -313,7 +313,7 @@ class Task(object): #: :setting:`CELERY_ACKS_LATE` setting. acks_late = None - #: List/tuple of expected exceptions. + #: Tuple of expected exceptions. #: #: These are errors that are expected in normal operation #: and that should not be regarded as a real error by the worker.
Task.throws cannot be a list, misleading documentation

The check at https://github.com/celery/celery/blob/b35569090c5cabfa784b00a68b55c7628fee813d/celery/worker/job.py#L456 throws this error when `Task.throws` is a list:

```shell
TypeError: isinstance() arg 2 must be a class, type, or tuple of classes and types
```

The documentation on `Task.throws` is misleading because it says `throws` can be a `List/tuple`:
https://github.com/celery/celery/blob/b35569090c5cabfa784b00a68b55c7628fee813d/celery/app/task.py#L316-L322
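The underlying constraint is `isinstance()` itself, which accepts a tuple of classes but not a list, which is why the doc fix narrows `throws` to a tuple:

```python
isinstance(KeyError('x'), (KeyError, ValueError))  # fine: True
isinstance(KeyError('x'), [KeyError, ValueError])  # raises the TypeError above
```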
2014-04-09T09:09:03
celery/celery
2349
celery__celery-2349
[ "2348" ]
daf70d42ca1a65b68091e958f1e33053f9c9195b
diff --git a/celery/app/trace.py b/celery/app/trace.py --- a/celery/app/trace.py +++ b/celery/app/trace.py @@ -337,7 +337,8 @@ def trace_task(task, uuid, args, kwargs, request={}, **opts): def _trace_task_ret(name, uuid, args, kwargs, request={}, app=None, **opts): - return trace_task((app or current_app).tasks[name], + app = app or current_app + return trace_task(app.tasks[name], uuid, args, kwargs, request, app=app, **opts) trace_task_ret = _trace_task_ret
Internal error when embedding app Celery 3.1.13 and 3.1.16 (latest release as of this writing) I'm wrapping the celery app inside a utility class, which constructs the app and the worker: ``` python self.celery = celery.Celery() self.worker = self.celery.WorkController(pool_cls='solo', queues=[self.queue_name]) self.celery.task(self._receive_callback, name=self.callback_task_name) ``` The utility class has a start() method which starts the worker like this: ``` python t = threading.Thread(target=self.worker.start) # Starting the worker in a daemonic thread so that it doesn't keep the process # alive when the main thread exits t.setDaemon(True) t.start() ``` When the embedded app receives the task it crashes with the following traceback: ``` python CRITICAL:celery.worker.job:Task [my_task_name][cfe87fb7-373d-4082-a72c-0f44d265cc9f] INTERNAL ERROR: AttributeError("'NoneType' object has no attribute 'loader'",) Traceback (most recent call last): File "/virtualenvdir/lib/python2.7/site-packages/celery/app/trace.py", line 333, in trace_task task.__trace__ = build_tracer(task.name, task, **opts) File "/virtualenvdir/lib/python2.7/site-packages/celery/app/trace.py", line 160, in build_tracer loader = loader or app.loader AttributeError: 'NoneType' object has no attribute 'loader' ``` I printed the stack trace from the exception handler in celery.app.trace.trace_task right before report_internal error is called and the error seems to be triggered in _trace_task_ret: ``` python def _trace_task_ret(name, uuid, args, kwargs, request={}, app=None, **opts): return trace_task((app or current_app).tasks[name], uuid, args, kwargs, request, app=app, **opts) ```
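One possible workaround sketch until the fix lands (hedged; inferred from the traceback rather than documented behaviour): make the embedded app the current and default app before starting the worker, so the `current_app` fallback in `_trace_task_ret` resolves to it instead of an unfinalized placeholder:

```python
import celery

# Both calls exist on Celery 3.1 app instances; whether they are the right
# fix for a given embedding setup is an assumption, not a guarantee.
app = celery.Celery(set_as_current=True)
app.set_default()
```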
2014-10-31T10:03:34
celery/celery
2522
celery__celery-2522
[ "2528" ]
63f6c9826f22bc2757b4b7674b15838d4554c7f2
diff --git a/celery/contrib/batches.py b/celery/contrib/batches.py --- a/celery/contrib/batches.py +++ b/celery/contrib/batches.py @@ -197,20 +197,20 @@ def Strategy(self, task, app, consumer): flush_buffer = self._do_flush def task_message_handler(message, body, ack, reject, callbacks, **kw): - if body is None: 31513 ? S 125:09 /usr/bin/python -m celery worker --without-heartbeat -c 50 --pool=eventlet -n [email protected] --app=mai - body, headers, decoded, utc = ( n -Q rss --without-gossip --logfile=/home/logs/rss.log --pidfile=celery6.pid - message.body, message.headers, False, True, 31528 ? R 128:34 /usr/bin/python -m celery worker --without-heartbeat -c 50 --pool=eventlet -n [email protected] --app=mai - ) n -Q rss --without-gossip --logfile=/home/logs/rss.log --pidfile=celery7.pid - if not body_can_be_buffer: 31543 ? S 124:32 /usr/bin/python -m celery worker --without-heartbeat -c 50 --pool=eventlet -n [email protected] --app=mai - body = bytes(body) if isinstance(body, buffer_t) else body n -Q rss --without-gossip --logfile=/home/logs/rss.log --pidfile=celery8.pid - else: 26150 ? S 0:50 /usr/bin/python -m celery worker --without-heartbeat -c 2 --pool=eventlet -n [email protected] --app=main - body, headers, decoded, utc = proto1_to_proto2(message, body) -Q engines --without-gossip --logfile=/home/logs/engines.log --pidfile=/home/logs/pid-engines.pid - 22409 ? S 0:00 /usr/bin/python -m celery worker --without-heartbeat -c 1 -n [email protected] --app=m - request = Req( ain -Q elasticsearch_bulk_actions --without-gossip --logfile=/home/logs/elasticsearch_bulk_actions.log --pidfile=elasticsearch_bulk_actions.pid - message, 22459 ? S 0:00 \_ /usr/bin/python -m celery worker --without-heartbeat -c 1 -n [email protected] --a - on_ack=ack, on_reject=reject, app=app, hostname=hostname, pp=main -Q elasticsearch_bulk_actions --without-gossip --logfile=/home/logs/elasticsearch_bulk_actions.log --pidfile=elasticsearch_bulk_actions.pid - eventer=eventer, task=task, connection_errors=connection_errors, 22419 ? S 0:00 /usr/bin/python -m celery worker --without-heartbeat -c 1 -n [email protected] --app=main -Q elasticsearch - body=body, headers=headers, decoded=decoded, utc=utc, _bulk_actions --without-gossip --logfile=/home/logs/elasticsearch_bulk_actions.log --pidfile=celery.pid + if body is None: + body, headers, decoded, utc = ( + message.body, message.headers, False, True, + ) + if not body_can_be_buffer: + body = bytes(body) if isinstance(body, buffer_t) else body + else: + body, headers, decoded, utc = proto1_to_proto2(message, body) + + request = Req( + message, + on_ack=ack, on_reject=reject, app=app, hostname=hostname, + eventer=eventer, task=task, connection_errors=connection_errors, + body=body, headers=headers, decoded=decoded, utc=utc, ) put_buffer(request)
Syntax errors in contrib/batches.py There are syntax errors in contrib/batches.py in the master branch in lines 200-213. Pull request #2522 removes what appear to be copy/paste artifacts causing the errors.
2015-02-28T05:03:20
celery/celery
2,580
celery__celery-2580
[ "2579" ]
0cfacb7871137d4829d6ed660c00a56e18775925
diff --git a/celery/backends/mongodb.py b/celery/backends/mongodb.py --- a/celery/backends/mongodb.py +++ b/celery/backends/mongodb.py @@ -78,9 +78,14 @@ def __init__(self, app=None, url=None, **kwargs): self.url = url - # default options - self.options.setdefault('max_pool_size', self.max_pool_size) - self.options.setdefault('auto_start_request', False) + + # default options according to pymongo version + if pymongo.version_tuple >= (3,): + self.options.setdefault('maxPoolSize', self.max_pool_size) + else: + self.options.setdefault('max_pool_size', self.max_pool_size) + self.options.setdefault('auto_start_request', False) + # update conf with mongo uri data, only if uri was given if self.url:
pymongo rejects backend/mongodb.py default settings `self.options.setdefault('max_pool_size', self.max_pool_size)` `self.options.setdefault('auto_start_request', False)` These lines are rejected by `pymongo` and raise a `ConfigurationError` exception. By the way, the pymongo version is 3.0, and to use mongodb as a backend I had to remove those lines from mongodb.py.
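The patch above keys the default option names off the installed pymongo version. A standalone sketch of the same check (pymongo exposes `version_tuple`; the pool-size value here is just an example):

``` python
import pymongo

options = {}
if pymongo.version_tuple >= (3,):
    # pymongo 3.x renamed max_pool_size to maxPoolSize and removed
    # the auto_start_request option entirely.
    options.setdefault('maxPoolSize', 10)
else:
    options.setdefault('max_pool_size', 10)
    options.setdefault('auto_start_request', False)
```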
2015-04-16T11:36:11
celery/celery
2,598
celery__celery-2598
[ "2518" ]
6592ff64b6b024a4b68abcc53b151888fdf0dee3
diff --git a/celery/backends/amqp.py b/celery/backends/amqp.py --- a/celery/backends/amqp.py +++ b/celery/backends/amqp.py @@ -195,7 +195,7 @@ def drain_events(self, connection, consumer, def callback(meta, message): if meta['status'] in states.READY_STATES: - results[meta['task_id']] = meta + results[meta['task_id']] = self.meta_from_decoded(meta) consumer.callbacks[:] = [callback] time_start = now()
diff --git a/celery/tests/backends/test_amqp.py b/celery/tests/backends/test_amqp.py --- a/celery/tests/backends/test_amqp.py +++ b/celery/tests/backends/test_amqp.py @@ -13,6 +13,7 @@ from celery.backends.amqp import AMQPBackend from celery.exceptions import TimeoutError from celery.five import Empty, Queue, range +from celery.result import AsyncResult from celery.utils import uuid from celery.tests.case import ( @@ -246,10 +247,20 @@ def test_wait_for(self): with self.assertRaises(TimeoutError): b.wait_for(tid, timeout=0.01, cache=False) - def test_drain_events_remaining_timeouts(self): + def test_drain_events_decodes_exceptions_in_meta(self): + tid = uuid() + b = self.create_backend(serializer="json") + b.store_result(tid, RuntimeError("aap"), states.FAILURE) + result = AsyncResult(tid, backend=b) - class Connection(object): + with self.assertRaises(Exception) as cm: + result.get() + self.assertEqual(cm.exception.__class__.__name__, "RuntimeError") + self.assertEqual(str(cm.exception), "aap") + + def test_drain_events_remaining_timeouts(self): + class Connection(object): def drain_events(self, timeout=None): pass
CELERY_RESULT_SERIALIZER = 'json' breaks Exception marshaling Setting `CELERY_RESULT_SERIALIZER = json` and raising an exception in the worker leads to this:

```
/path/to/lib/python2.7/site-packages/celery/result.py in get(self, timeout, propagate, interval, no_ack, follow_parents, EXCEPTION_STATES, PROPAGATE_STATES)
    173         status = meta['status']
    174         if status in PROPAGATE_STATES and propagate:
--> 175             raise meta['result']
    176         return meta['result']
    177     wait = get  # deprecated alias to :meth:`get`.

TypeError: exceptions must be old-style classes or derived from BaseException, not dict
```

where the contents of `meta['result']` are (in my case):

```
{u'exc_message': u'unknown keys: nam', u'exc_type': u'ValueError'}
```

So it _looks_ like celery could convert the dict to a real exception before raising, but it does not currently. Changing back to `pickle` works as expected. The bug can be reproduced with the following:

``` python
# jsonresults.py
from celery.app.base import Celery

CELERY_RESULT_SERIALIZER = 'json'
CELERY_RESULT_BACKEND = 'amqp'

app = Celery(config_source=__name__)

@app.task
def hello():
    raise ValueError('go away')
```

worker:

```
# python -m celery --app=jsonresults:app worker
```

caller:

``` python
import jsonresults
jsonresults.hello.delay().get()
```
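The fix routes the decoded meta through `self.meta_from_decoded(meta)`, which rebuilds a real exception from the serialized dict. A rough sketch of the underlying idea, using a hypothetical helper rather than celery's actual implementation (which lives in the base backend's `exception_to_python`):

``` python
import exceptions  # Python 2; on Python 3 use the builtins module


def exception_from_dict(result):
    # Rebuild an exception instance from the JSON-serialized form;
    # real code would also need to handle non-builtin exception types.
    exc_type = getattr(exceptions, result['exc_type'], Exception)
    return exc_type(result['exc_message'])


exc = exception_from_dict({u'exc_type': u'ValueError',
                           u'exc_message': u'go away'})
assert isinstance(exc, ValueError)
```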
This is biting me as well. Any news?
2015-04-29T14:52:17
celery/celery
2,651
celery__celery-2651
[ "2648" ]
dffb61c4d99f1ce5817be267104e9810e88391ee
diff --git a/celery/app/base.py b/celery/app/base.py --- a/celery/app/base.py +++ b/celery/app/base.py @@ -465,6 +465,7 @@ def mail_admins(self, subject, body, fail_silently=False): timeout=conf.EMAIL_TIMEOUT, use_ssl=conf.EMAIL_USE_SSL, use_tls=conf.EMAIL_USE_TLS, + charset=conf.EMAIL_CHARSET, ) def select_queues(self, queues=None): diff --git a/celery/app/defaults.py b/celery/app/defaults.py --- a/celery/app/defaults.py +++ b/celery/app/defaults.py @@ -220,6 +220,7 @@ def __repr__(self): 'TIMEOUT': Option(2, type='float'), 'USE_SSL': Option(False, type='bool'), 'USE_TLS': Option(False, type='bool'), + 'CHARSET': Option('us-ascii'), }, 'SERVER_EMAIL': Option('celery@localhost'), 'ADMINS': Option((), type='tuple'), diff --git a/celery/loaders/base.py b/celery/loaders/base.py --- a/celery/loaders/base.py +++ b/celery/loaders/base.py @@ -224,10 +224,11 @@ def getarg(arg): def mail_admins(self, subject, body, fail_silently=False, sender=None, to=None, host=None, port=None, user=None, password=None, timeout=None, - use_ssl=False, use_tls=False): + use_ssl=False, use_tls=False, charset='us-ascii'): message = self.mail.Message(sender=sender, to=to, subject=safe_str(subject), - body=safe_str(body)) + body=safe_str(body), + charset=charset) mailer = self.mail.Mailer(host=host, port=port, user=user, password=password, timeout=timeout, use_ssl=use_ssl,
Celery can't send error email when a task has non-ascii arguments There is an issue with celery 3.1.18, python 3.4 and tasks with non-ascii string arguments. When a task raises an exception and CELERY_SEND_TASK_ERROR_EMAILS is set to True, celery can't send the mail because the email.mime.MimeText instance raises UnicodeDecodeError('ascii', ...). The exception is raised [here](https://github.com/celery/celery/blob/dffb61c4d99f1ce5817be267104e9810e88391ee/celery/utils/mail.py#L56). It looks like the Message constructor has a charset argument with 'us-ascii' as the default value, and it can't be changed without editing the celery source code. When the charset is changed to 'utf-8', the bug disappears. Example app:

``` python
from celery import Celery

app = Celery('app')
app.conf.CELERY_SEND_TASK_ERROR_EMAILS = True
app.conf.ADMINS = (
    ('Admin', '[email protected]'),
)

@app.task
def test(arg):
    raise Exception()

test.delay('Щ')
```
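With the patch above, the charset becomes a regular setting (options in the `EMAIL` namespace map to `EMAIL_*` setting names), so it can presumably be overridden per app like this:

``` python
from celery import Celery

app = Celery('app')
app.conf.CELERY_SEND_TASK_ERROR_EMAILS = True
# New option introduced by the patch; the default remains 'us-ascii'.
app.conf.EMAIL_CHARSET = 'utf-8'
```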
2015-06-12T18:06:06
celery/celery
2,666
celery__celery-2666
[ "943" ]
6bf4664e076c4d8b6d728190802124aa5c112c5d
diff --git a/celery/schedules.py b/celery/schedules.py --- a/celery/schedules.py +++ b/celery/schedules.py @@ -134,9 +134,7 @@ def is_due(self, last_run_at): return schedstate(is_due=False, next=remaining_s) def maybe_make_aware(self, dt): - if self.utc_enabled: - return maybe_make_aware(dt, self.tz) - return dt + return maybe_make_aware(dt, self.tz) def __repr__(self): return '<freq: {0.human_seconds}>'.format(self)
diff --git a/celery/tests/app/test_beat.py b/celery/tests/app/test_beat.py --- a/celery/tests/app/test_beat.py +++ b/celery/tests/app/test_beat.py @@ -521,7 +521,7 @@ def test_maybe_make_aware(self): self.assertTrue(d.tzinfo) x.utc_enabled = False d2 = x.maybe_make_aware(datetime.utcnow()) - self.assertIsNone(d2.tzinfo) + self.assertTrue(d2.tzinfo) def test_to_local(self): x = schedule(10, app=self.app)
Celerybeat runs periodic tasks every 5 seconds regardless of interval I recently upgraded to celery 3 and have been experiencing some strange behavior with celerybeat, which was acting normally before the upgrade. My environment:
- python 2.7.3
- django 1.4.1
- virtualenv
- django-celery 3.0.6
- celery 3.0.6

Regardless of what interval I define for my periodic tasks - which I adjust through the django admin - celerybeat fires off the periodic tasks every 5 seconds. Interestingly, however, when the periodic task is disabled, celerybeat stops sending tasks, so it must be responsive to the scheduler at some level.
Could you try upgrading to celery 3.0.7? Also please delete an existing `celerybeat-schedule` file if any. Thanks - will try that and update later today. I'm having the same problem on 3.0.7 (django-celery 3.0.6), redis broker.

```
from celery.task import periodic_task

@periodic_task(run_every=crontab(hour="*", minute="0", day_of_week="*"), ignore_result=True)
def my_test():
    pass
```

Just to confirm - I have installed celery 3.0.7 now (although django-celery is still 3.0.6) and am still having the same issues. I am also using Redis as a broker. I have even dropped the DB, in case it was something caused by my south migrations, yet the problem still persists. Please let me know if you'd like me to try anything else. I have upgraded to 3.0.8 and the problem still occurs. The issue should be reopened. Note, anyone using the database scheduler has to reset the `last_run_at` fields for each periodic task:

```
$ python manage.py shell
>>> from djcelery.models import PeriodicTask
>>> PeriodicTask.objects.update(last_run_at=None)
```

I have a task scheduled for 04:00 AM and found that it's executed 2151 times (every 5 seconds) from 07:00 AM to 10:00 AM and then it stops. Time here is GMT+3. Also: python 2.7.3, django 1.4.1 (USE_TZ = True), virtualenv, django-celery 3.0.6, celery 3.0.6, last_run_at is None. Does anyone have a similar problem? I have one, but in my case the task runs every 5 minutes instead of once a day.

```
Python3.4
celery==3.1.17
Django==1.8
```

settings.py:

```
from celery.schedules import crontab

USE_TZ = True
TIME_ZONE = 'Europe/Moscow'
CELERY_ENABLE_UTC = False
CELERY_TIMEZONE = TIME_ZONE

CELERYBEAT_SCHEDULE = {
    'every-day': {
        'task': 'app.tasks.every_day',
        'schedule': crontab(minute=30, hour=0),
    },
}
```

Celery works under supervisord:

```
celery worker -A proj -l info --concurrency=2 -Ofair
celery beat -A proj -l info
```

After 00:30 at midnight Celery sends the task for the first time and then keeps firing it until 03:30. I'm reopening this issue due to @monax's report. Can you please provide steps to reproduce? @thedrow Wait a couple of days, please. I'll try to make a mini project to reproduce the issue. Wonderful! thanks.
Install requirements.txt and run in two terminals: ``` celery worker -A celeryissue -l info --concurrency=2 -Ofair celery beat -A celeryissue -l info ``` For beat you'll see: ``` celery beat -A celeryissue -l info celery beat v3.1.17 (Cipater) is starting. __ - ... __ - _ Configuration -> . broker -> amqp://guest:**@localhost:5672// . loader -> celery.loaders.app.AppLoader . scheduler -> celery.beat.PersistentScheduler . db -> celerybeat-schedule . logfile -> [stderr]@%INFO . maxinterval -> now (0s) [2015-06-15 12:35:50,911: INFO/MainProcess] beat: Starting... [2015-06-15 12:36:00,000: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-15 12:41:00,103: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-15 12:46:00,193: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) ``` For worker you'll see something like that: ``` [2015-06-15 12:36:00,009: INFO/MainProcess] Received task: apptask.tasks.every_day[c23dd76b-1181-4115-9379-d04e69879c49] [2015-06-15 12:36:00,013: INFO/Worker-2] Fire: 2015-06-15 12:36:00.013325 [2015-06-15 12:36:00,015: INFO/MainProcess] Task apptask.tasks.every_day[c23dd76b-1181-4115-9379-d04e69879c49] succeeded in 0.0025641939992055995s: None [2015-06-15 12:41:00,106: INFO/MainProcess] Received task: apptask.tasks.every_day[2d746b9e-4b83-421b-8cde-a75f6a537fc1] [2015-06-15 12:41:00,108: INFO/Worker-1] Fire: 2015-06-15 12:41:00.108269 [2015-06-15 12:41:00,110: INFO/MainProcess] Task apptask.tasks.every_day[2d746b9e-4b83-421b-8cde-a75f6a537fc1] succeeded in 0.0021385390009527327s: None [2015-06-15 12:46:00,196: INFO/MainProcess] Received task: apptask.tasks.every_day[8487f014-98f2-444a-91da-36621a5e7edc] [2015-06-15 12:46:00,198: INFO/Worker-2] Fire: 2015-06-15 12:46:00.197949 [2015-06-15 12:46:00,198: INFO/MainProcess] Task apptask.tasks.every_day[8487f014-98f2-444a-91da-36621a5e7edc] succeeded in 0.0014146600005915388s: None ``` I'll run it and let you know. I can reproduce it with `celery beat -A celeryissue -l debug --max-interval=5` ``` $ celery beat -A celeryissue -l debug --max-interval=5 celery beat v3.1.17 (Cipater) is starting. __ - ... __ - _ Configuration -> . broker -> amqp://guest:**@localhost:5672// . loader -> celery.loaders.app.AppLoader . scheduler -> celery.beat.PersistentScheduler . db -> celerybeat-schedule . logfile -> [stderr]@%DEBUG . maxinterval -> 5.00 seconds (5.0s) [2015-06-16 20:48:56,236: DEBUG/MainProcess] Setting default socket timeout to 30 [2015-06-16 20:48:56,236: INFO/MainProcess] beat: Starting... [2015-06-16 20:48:56,249: DEBUG/MainProcess] Current schedule: <Entry: celery.backend_cleanup celery.backend_cleanup() <crontab: 0 4 * * * (m/h/d/dM/MY)> <Entry: every-day apptask.tasks.every_day() <crontab: 49 20 * * * (m/h/d/dM/MY)> [2015-06-16 20:48:56,249: DEBUG/MainProcess] beat: Ticking with max interval->5.00 seconds [2015-06-16 20:48:56,257: DEBUG/MainProcess] Start from server, version: 0.9, properties: {'capabilities': {'basic.nack': True, 'connection.blocked': True, 'publisher_confirms': True, 'per_consumer_qos': True, 'authentication_failure_close': True, 'consumer_priorities': True, 'consumer_cancel_notify': True, 'exchange_exchange_bindings': True}, 'version': '3.5.3', 'copyright': 'Copyright (C) 2007-2014 GoPivotal, Inc.', 'information': 'Licensed under the MPL. 
See http://www.rabbitmq.com/', 'cluster_name': 'rabbit@37cfe74621ef', 'platform': 'Erlang/OTP', 'product': 'RabbitMQ'}, mechanisms: ['AMQPLAIN', 'PLAIN'], locales: ['en_US'] [2015-06-16 20:48:56,258: DEBUG/MainProcess] Open OK! [2015-06-16 20:48:56,269: DEBUG/MainProcess] beat: Waking up in 3.72 seconds. [2015-06-16 20:49:00,000: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:49:00,001: DEBUG/MainProcess] using channel_id: 1 [2015-06-16 20:49:00,002: DEBUG/MainProcess] Channel open [2015-06-16 20:49:00,004: DEBUG/MainProcess] beat: Synchronizing schedule... [2015-06-16 20:49:00,012: DEBUG/MainProcess] apptask.tasks.every_day sent. id->640586be-bca2-49b4-ab5e-5f70d1477fe7 [2015-06-16 20:49:00,012: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:49:05,008: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:49:05,008: DEBUG/MainProcess] apptask.tasks.every_day sent. id->a415c0f0-a254-49b3-9272-754d7a3f283e [2015-06-16 20:49:05,008: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:49:10,004: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:49:10,005: DEBUG/MainProcess] apptask.tasks.every_day sent. id->3906d59d-2726-49b3-8f2e-fdf339945337 [2015-06-16 20:49:10,005: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:49:15,001: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:49:15,001: DEBUG/MainProcess] apptask.tasks.every_day sent. id->8b525ba8-c569-4650-a209-859568ef62b7 [2015-06-16 20:49:15,001: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:49:19,997: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:49:19,998: DEBUG/MainProcess] apptask.tasks.every_day sent. id->02d2547b-71c8-4efc-9bb3-8daa89d6f65b [2015-06-16 20:49:19,998: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:49:24,993: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:49:24,994: DEBUG/MainProcess] apptask.tasks.every_day sent. id->c29ef47e-d72e-4b95-a88d-fe9766229005 [2015-06-16 20:49:24,994: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:49:29,990: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:49:29,990: DEBUG/MainProcess] apptask.tasks.every_day sent. id->ba9f425c-623a-45de-9801-893e4b33f5ac [2015-06-16 20:49:29,991: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:49:34,986: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:49:34,987: DEBUG/MainProcess] apptask.tasks.every_day sent. id->ee62e647-12c3-40dd-9360-d6607f7497b2 [2015-06-16 20:49:34,987: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:49:39,983: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:49:39,984: DEBUG/MainProcess] apptask.tasks.every_day sent. id->45d9c85e-0463-45bb-9809-7475cf107186 [2015-06-16 20:49:39,984: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:49:44,979: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:49:44,980: DEBUG/MainProcess] apptask.tasks.every_day sent. id->8229bf8b-67d6-4dc2-89a0-ffef5e7f3699 [2015-06-16 20:49:44,980: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. 
[2015-06-16 20:49:49,976: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:49:49,977: DEBUG/MainProcess] apptask.tasks.every_day sent. id->a9c797f7-1c1b-4677-8fa6-135c6f036235 [2015-06-16 20:49:49,977: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:49:54,972: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:49:54,973: DEBUG/MainProcess] apptask.tasks.every_day sent. id->2215efa0-7a45-40cc-af68-00b0cac55f4a [2015-06-16 20:49:54,973: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:49:59,969: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:49:59,970: DEBUG/MainProcess] apptask.tasks.every_day sent. id->0bdeca69-b145-401c-9ef0-c7731c52ce6d [2015-06-16 20:49:59,970: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:50:04,965: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:50:04,966: DEBUG/MainProcess] apptask.tasks.every_day sent. id->bad1b088-367a-4875-8594-272d3d2e6231 [2015-06-16 20:50:04,966: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:50:09,958: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:50:09,959: DEBUG/MainProcess] apptask.tasks.every_day sent. id->57d744d0-e5f1-4f29-9c7c-47e6af70a21d [2015-06-16 20:50:09,959: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:50:14,954: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:50:14,955: DEBUG/MainProcess] apptask.tasks.every_day sent. id->6f510b48-6127-479b-ac08-3e435e0691bb [2015-06-16 20:50:14,955: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:50:19,950: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:50:19,951: DEBUG/MainProcess] apptask.tasks.every_day sent. id->061bee73-3504-453e-90ee-7588b14f4147 [2015-06-16 20:50:19,951: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:50:24,946: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:50:24,947: DEBUG/MainProcess] apptask.tasks.every_day sent. id->eb744c6a-793b-4f9d-9d31-59c6559d3a5a [2015-06-16 20:50:24,947: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:50:29,942: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:50:29,942: DEBUG/MainProcess] apptask.tasks.every_day sent. id->9cf3ab91-1318-42e5-b577-059437487a4d [2015-06-16 20:50:29,943: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:50:34,938: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:50:34,939: DEBUG/MainProcess] apptask.tasks.every_day sent. id->f92e235b-1796-4b4b-8d3a-d9e386dbbbe3 [2015-06-16 20:50:34,939: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:50:39,935: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:50:39,935: DEBUG/MainProcess] apptask.tasks.every_day sent. id->63f4cfb7-03d4-4e59-add7-adc7e2c9654a [2015-06-16 20:50:39,935: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:50:44,931: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:50:44,932: DEBUG/MainProcess] apptask.tasks.every_day sent. 
id->a9c2f85f-a383-4e6d-883a-bddba0e615c8 [2015-06-16 20:50:44,932: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:50:49,927: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:50:49,928: DEBUG/MainProcess] apptask.tasks.every_day sent. id->aa95f5b9-ff84-448f-93ac-eea659b12711 [2015-06-16 20:50:49,928: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:50:54,924: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:50:54,925: DEBUG/MainProcess] apptask.tasks.every_day sent. id->0ccd4878-4490-4254-ae8c-a29b7e2ddbd3 [2015-06-16 20:50:54,925: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:50:59,920: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:50:59,921: DEBUG/MainProcess] apptask.tasks.every_day sent. id->d5d8b0fd-11be-41a0-88e0-8ff04dad96f3 [2015-06-16 20:50:59,921: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:51:04,917: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:51:04,918: DEBUG/MainProcess] apptask.tasks.every_day sent. id->90129b78-cb02-4056-a523-1eff080962c3 [2015-06-16 20:51:04,918: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:51:09,909: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:51:09,910: DEBUG/MainProcess] apptask.tasks.every_day sent. id->57c50ee1-a8d1-413c-90eb-b81e1d1eba29 [2015-06-16 20:51:09,910: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:51:14,904: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:51:14,905: DEBUG/MainProcess] apptask.tasks.every_day sent. id->ec28d1f9-dd75-409e-ae0d-4eb65912586e [2015-06-16 20:51:14,905: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:51:19,896: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:51:19,897: DEBUG/MainProcess] apptask.tasks.every_day sent. id->adcc46af-3d32-4a7a-ae57-66ff1957db85 [2015-06-16 20:51:19,897: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:51:24,893: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:51:24,893: DEBUG/MainProcess] apptask.tasks.every_day sent. id->39fbe804-115e-4719-a50c-f35ead13fb54 [2015-06-16 20:51:24,894: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:51:29,889: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:51:29,889: DEBUG/MainProcess] apptask.tasks.every_day sent. id->a918ed3d-d6b2-4b1c-aaff-f226d5f2c5c7 [2015-06-16 20:51:29,890: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:51:34,881: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:51:34,882: DEBUG/MainProcess] apptask.tasks.every_day sent. id->9062eb9c-ef0c-48f0-abe5-48841ad7631d [2015-06-16 20:51:34,882: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:51:39,877: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:51:39,878: DEBUG/MainProcess] apptask.tasks.every_day sent. id->00d184c9-2c9a-445e-baa9-c443b3145e3e [2015-06-16 20:51:39,878: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. 
[2015-06-16 20:51:44,871: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:51:44,872: DEBUG/MainProcess] apptask.tasks.every_day sent. id->2f66aff9-af61-4f4d-a981-f84620da054c [2015-06-16 20:51:44,872: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:51:49,868: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:51:49,869: DEBUG/MainProcess] apptask.tasks.every_day sent. id->2cd78f61-58f0-4803-a36a-4d6801463128 [2015-06-16 20:51:49,869: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:51:54,864: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:51:54,865: DEBUG/MainProcess] apptask.tasks.every_day sent. id->f42bac9a-fef4-4e7a-a62b-b22fb7377550 [2015-06-16 20:51:54,865: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:51:59,856: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:51:59,857: DEBUG/MainProcess] apptask.tasks.every_day sent. id->11103d2c-92d2-41af-b0fa-69766e49a0d3 [2015-06-16 20:51:59,857: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:52:04,849: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:52:04,849: DEBUG/MainProcess] beat: Synchronizing schedule... [2015-06-16 20:52:04,861: DEBUG/MainProcess] apptask.tasks.every_day sent. id->bf70a0e7-b6c4-4a8b-beb9-9106e54cca4a [2015-06-16 20:52:04,861: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:52:09,857: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:52:09,858: DEBUG/MainProcess] apptask.tasks.every_day sent. id->f047b489-5680-4714-b20e-f64768045ee6 [2015-06-16 20:52:09,858: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:52:14,854: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:52:14,854: DEBUG/MainProcess] apptask.tasks.every_day sent. id->be6e557f-763f-4aca-a3bc-c4c5192c26bc [2015-06-16 20:52:14,855: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:52:19,850: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:52:19,852: DEBUG/MainProcess] apptask.tasks.every_day sent. id->78ffa59b-070f-40db-bb91-fd34748db4b8 [2015-06-16 20:52:19,852: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:52:24,845: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:52:24,846: DEBUG/MainProcess] apptask.tasks.every_day sent. id->c74c558e-860a-4d6c-a46d-4e91d950c3f9 [2015-06-16 20:52:24,846: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. ``` Can you please run your beat instance with debug log level and tell me what the interval is? @ask There's something really wrong here. Can you please take a look? I edited this issue since I can reproduce it. ~~Something is wrong with `celery.schedules.crontab.remaining_delta()`. It returns that the task was executed a day ago even though it wasn't.~~ Sorry not correct. See below. There's one problem with with the time zone setting that isn't being picked up from Django's TIME_ZONE setting which may or may not be by design. 
Changing Celery's time zone to my local time zone causes the bug to be reproduced later: ``` /home/omer/.virtualenvs/celeryissue3/bin/python /home/omer/.virtualenvs/celeryissue3/bin/celery beat -A celeryissue -l debug --max-interval 5 celery beat v3.1.17 (Cipater) is starting. __ - ... __ - _ [2015-06-16 21:34:13,892: DEBUG/MainProcess] Setting default socket timeout to 30 Configuration -> [2015-06-16 21:34:13,892: INFO/MainProcess] beat: Starting... . broker -> amqp://guest:**@localhost:5672// . loader -> celery.loaders.app.AppLoader . scheduler -> celery.beat.PersistentScheduler . db -> celerybeat-schedule . logfile -> [stderr]@%DEBUG . maxinterval -> 5.00 seconds (5.0s) [2015-06-16 21:34:13,899: DEBUG/MainProcess] Current schedule: <Entry: every-day apptask.tasks.every_day() <crontab: 35 21 * * * (m/h/d/dM/MY)> <Entry: celery.backend_cleanup celery.backend_cleanup() <crontab: 0 4 * * * (m/h/d/dM/MY)> [2015-06-16 21:34:13,900: DEBUG/MainProcess] beat: Ticking with max interval->5.00 seconds [2015-06-16 21:34:13,908: DEBUG/MainProcess] Start from server, version: 0.9, properties: {'cluster_name': 'rabbit@37cfe74621ef', 'version': '3.5.3', 'capabilities': {'per_consumer_qos': True, 'basic.nack': True, 'consumer_priorities': True, 'publisher_confirms': True, 'consumer_cancel_notify': True, 'authentication_failure_close': True, 'exchange_exchange_bindings': True, 'connection.blocked': True}, 'copyright': 'Copyright (C) 2007-2014 GoPivotal, Inc.', 'product': 'RabbitMQ', 'platform': 'Erlang/OTP', 'information': 'Licensed under the MPL. See http://www.rabbitmq.com/'}, mechanisms: ['AMQPLAIN', 'PLAIN'], locales: ['en_US'] [2015-06-16 21:34:13,909: DEBUG/MainProcess] Open OK! [2015-06-16 21:34:13,920: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 21:34:18,912: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 21:34:23,904: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 21:34:28,896: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 21:34:33,892: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 21:34:38,884: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 21:34:43,880: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 21:34:48,875: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 21:34:53,872: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 21:34:58,864: DEBUG/MainProcess] beat: Waking up in 1.13 seconds. [2015-06-16 21:35:00,000: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 21:35:00,003: DEBUG/MainProcess] using channel_id: 1 [2015-06-16 21:35:00,005: DEBUG/MainProcess] Channel open [2015-06-16 21:35:00,009: DEBUG/MainProcess] beat: Synchronizing schedule... [2015-06-16 21:35:00,017: DEBUG/MainProcess] apptask.tasks.every_day sent. id->c5a94c8f-1981-4de7-b880-115de8248cb4 [2015-06-16 21:35:00,018: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 21:35:05,014: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 21:35:05,014: DEBUG/MainProcess] apptask.tasks.every_day sent. id->bfaba44b-cc10-49b3-8ba7-57b471f4ef7a [2015-06-16 21:35:05,015: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 21:35:10,010: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 21:35:10,011: DEBUG/MainProcess] apptask.tasks.every_day sent. 
id->6b07e184-f6fb-4fc5-927b-9e1d733bae8a [2015-06-16 21:35:10,012: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 21:35:15,005: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 21:35:15,007: DEBUG/MainProcess] apptask.tasks.every_day sent. id->8865e9d3-ba83-4a4a-bbd3-ce0b7eb3389d [2015-06-16 21:35:15,007: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. ``` There's definitely a bug somewhere in `celery.schedules.crontab.remaining_delta()` or surrounding it and it is related to timezones. I just have no idea what it is exactly. The bug does not seem to reproduce when I specify `CELERY_ENABLE_UTC=True` so there's might be a workaround for this bug. I'll keep the process running overnight to ensure that it doesn't reproduce if CELERY_ENABLE_UTC is set to true. @monax If you'd like to assist please set a breakpoint at that function and see how it's calculated. @thedrow I forgot one litle detail. If I try to run task 00.10 at midnight then task will be fire every 5 minutes until 03.10 after midnight. I'll run it with debug log level and let you know. Ok it's verified. The bug does not reproduce if CELERY_UTC_ENABLE is set to true. I'm going to try with master later tonight.
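Until the underlying timezone handling is fixed, the workaround verified in this thread is to keep UTC enabled (the setting's canonical name is `CELERY_ENABLE_UTC`). With the reproduction project's settings, that means:

```
# settings.py: workaround confirmed in the thread above
USE_TZ = True
TIME_ZONE = 'Europe/Moscow'
CELERY_ENABLE_UTC = True   # leaving this False triggers the every-5-seconds bug
CELERY_TIMEZONE = TIME_ZONE
```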
2015-06-19T00:01:16
celery/celery
2,782
celery__celery-2782
[ "1763", "2750" ]
89d01692c2f2749a5806b87d684f895649babda7
diff --git a/celery/app/task.py b/celery/app/task.py --- a/celery/app/task.py +++ b/celery/app/task.py @@ -437,13 +437,18 @@ def apply_async(self, args=None, kwargs=None, task_id=None, producer=None, if an error occurs while executing the task. :keyword producer: :class:`kombu.Producer` instance to use. + :keyword add_to_parent: If set to True (default) and the task is applied while executing another task, then the result will be appended to the parent tasks ``request.children`` attribute. Trailing can also be disabled by default using the :attr:`trail` attribute + :keyword publisher: Deprecated alias to ``producer``. + :keyword headers: Message headers to be sent in the + task (a :class:`dict`) + :rtype :class:`celery.result.AsyncResult`: if :setting:`CELERY_ALWAYS_EAGER` is not set, otherwise :class:`celery.result.EagerResult`:
Use Cassandra as backend to store task results With the settings

```
CELERY_RESULT_BACKEND = 'cassandra',
CASSANDRA_SERVERS = ['localhost:9160'],
CASSANDRA_KEYSPACE = 'celery',
CASSANDRA_COLUMN_FAMILY = 'task_results',
CASSANDRA_READ_CONSISTENCY = 'ONE',
CASSANDRA_WRITE_CONSISTENCY = 'ONE',
CASSANDRA_OPTIONS = {
    'timeout': 300,
    'max_retries': 10
}
```

and after creating the keyspace with

```
CREATE KEYSPACE celery WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 3};
```

sending a task to celery fails with

```
Cassandra error: NotFoundException(_message=None, why='Column family task_results not found.')
```

Why doesn't it automatically create the column family 'task_results' the way the database backend does, and how do I create the column family and set the column names in it? Document headers argument At the moment there's not much information regarding the headers argument for apply_async (and I think it also applies to retry). I found out about this reading the changelog for version 3.1, but maybe it could be specified as an optional property for apply_async in the documentation. I can provide the PR for this if you want. Let me know if you agree.
I don't know, I don't have experience with cassandra, so maybe you could contact the authors? The celery mailing-list would also be an option to see if there are others with experience. I was able to solve this problem by doing:

```
CREATE TABLE task_results (
    task_id text,
    name text,
    value text,
    primary key(task_id, name)
) WITH compact STORAGE;
```

I guess the cassandra backend could solve this problem by doing a CREATE TABLE IF NOT EXISTS. The new Cassandra backend will create the column family by default. Refer to #2669. Docs improvements are always welcome! @malinoff Awesome. I'm trying to build the changes I applied but I'm getting the following error:

```
$ make html
mkdir -p .build/html .build/doctrees
sphinx-build -b html -d .build/doctrees . .build/html
Running Sphinx v1.3.1

Extension error:
Could not import extension celery.contrib.sphinx (exception: cannot import name get_fdmax)
make: *** [html] Error 1
```

I searched a little but wasn't able to find any information regarding that error. I'm using Mac OS X 10.10.3 with virtualenv (all docs requirements installed) and Python 2.7.6. Should I use Python 3? Thanks. https://github.com/celery/celery/blob/master/CONTRIBUTING.rst#building-the-documentation I followed those steps and got that error. @juanrossi yeah sorry, you need Celery installed to build docs. @malinoff worked great with celery installed, thanks. I added the PR.
2015-08-24T17:05:24
celery/celery
2,783
celery__celery-2783
[ "2750" ]
2dda8b7c23e0dce6f3e03f844af2dead1d1f171f
diff --git a/celery/app/task.py b/celery/app/task.py --- a/celery/app/task.py +++ b/celery/app/task.py @@ -437,13 +437,18 @@ def apply_async(self, args=None, kwargs=None, task_id=None, producer=None, if an error occurs while executing the task. :keyword producer: :class:`kombu.Producer` instance to use. + :keyword add_to_parent: If set to True (default) and the task is applied while executing another task, then the result will be appended to the parent tasks ``request.children`` attribute. Trailing can also be disabled by default using the :attr:`trail` attribute + :keyword publisher: Deprecated alias to ``producer``. + :keyword headers: Message headers to be sent in the + task (a :class:`dict`) + :rtype :class:`celery.result.AsyncResult`: if :setting:`CELERY_ALWAYS_EAGER` is not set, otherwise :class:`celery.result.EagerResult`:
Document headers argument At the moment there's not much information regarding the headers argument for apply_async (and I think it also applies to retry). I found out about this reading the changelog for version 3.1, but maybe it could be specified as an optional property for apply_async in the documentation. I can provide the PR for this if you want. Let me know if you agree.
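A minimal sketch of the argument being documented (the header name and value here are arbitrary examples, not part of the original report):

``` python
from celery import Celery

app = Celery('app')

@app.task
def hello():
    return 'hi'

# `headers` is a dict of message headers attached to the outgoing task
hello.apply_async(headers={'sender': 'docs-example'})
```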
Docs improvements are always welcome! @malinoff Awesome. I'm trying to build the changes I applied but I'm getting the following error:

```
$ make html
mkdir -p .build/html .build/doctrees
sphinx-build -b html -d .build/doctrees . .build/html
Running Sphinx v1.3.1

Extension error:
Could not import extension celery.contrib.sphinx (exception: cannot import name get_fdmax)
make: *** [html] Error 1
```

I searched a little but wasn't able to find any information regarding that error. I'm using Mac OS X 10.10.3 with virtualenv (all docs requirements installed) and Python 2.7.6. Should I use Python 3? Thanks. https://github.com/celery/celery/blob/master/CONTRIBUTING.rst#building-the-documentation I followed those steps and got that error. @juanrossi yeah sorry, you need Celery installed to build docs. @malinoff worked great with celery installed, thanks. I added the PR.
2015-08-24T22:25:22
celery/celery
2,840
celery__celery-2840
[ "1628" ]
045b52f1450d6d5cc500e0057a4b498250dc5692
diff --git a/celery/app/defaults.py b/celery/app/defaults.py --- a/celery/app/defaults.py +++ b/celery/app/defaults.py @@ -132,6 +132,7 @@ def __repr__(self): 'REDIS_DB': Option(type='int', **_REDIS_OLD), 'REDIS_PASSWORD': Option(type='string', **_REDIS_OLD), 'REDIS_MAX_CONNECTIONS': Option(type='int'), + 'REJECT_ON_WORKER_LOST': Option(type='bool'), 'RESULT_BACKEND': Option(type='string'), 'RESULT_DB_SHORT_LIVED_SESSIONS': Option(False, type='bool'), 'RESULT_DB_TABLENAMES': Option(type='dict'), diff --git a/celery/app/task.py b/celery/app/task.py --- a/celery/app/task.py +++ b/celery/app/task.py @@ -220,6 +220,12 @@ class Task(object): #: :setting:`CELERY_ACKS_LATE` setting. acks_late = None + #: When CELERY_ACKS_LATE is set to True, the default behavior to + #: handle worker crash is to acknowledge the message. Setting + #: this to true allows the message to be rejected and requeued so + #: it will be executed again by another worker. + reject_on_worker_lost = None + #: Tuple of expected exceptions. #: #: These are errors that are expected in normal operation @@ -248,6 +254,7 @@ class Task(object): ('rate_limit', 'CELERY_DEFAULT_RATE_LIMIT'), ('track_started', 'CELERY_TRACK_STARTED'), ('acks_late', 'CELERY_ACKS_LATE'), + ('reject_on_worker_lost', 'CELERY_REJECT_ON_WORKER_LOST'), ('ignore_result', 'CELERY_IGNORE_RESULT'), ('store_errors_even_if_ignored', 'CELERY_STORE_ERRORS_EVEN_IF_IGNORED'), diff --git a/celery/worker/request.py b/celery/worker/request.py --- a/celery/worker/request.py +++ b/celery/worker/request.py @@ -326,7 +326,6 @@ def on_retry(self, exc_info): def on_failure(self, exc_info, send_failed_event=True, return_ok=False): """Handler called if the task raised an exception.""" task_ready(self) - if isinstance(exc_info.exception, MemoryError): raise MemoryError('Process got: %s' % (exc_info.exception,)) elif isinstance(exc_info.exception, Reject): @@ -352,7 +351,13 @@ def on_failure(self, exc_info, send_failed_event=True, return_ok=False): ) # (acks_late) acknowledge after result stored. if self.task.acks_late: - self.acknowledge() + reject_and_requeue = (self.task.reject_on_worker_lost and + isinstance(exc, WorkerLostError) and + self.delivery_info.get('redelivered', False) is False) + if reject_and_requeue: + self.reject(requeue=True) + else: + self.acknowledge() if send_failed_event: self.send_event(
diff --git a/celery/tests/worker/test_request.py b/celery/tests/worker/test_request.py --- a/celery/tests/worker/test_request.py +++ b/celery/tests/worker/test_request.py @@ -325,6 +325,20 @@ def test_on_failure_Reject_rejects_with_requeue(self): req_logger, req.connection_errors, True, ) + def test_on_failure_WrokerLostError_rejects_with_requeue(self): + einfo = None + try: + raise WorkerLostError() + except: + einfo = ExceptionInfo(internal=True) + req = self.get_request(self.add.s(2, 2)) + req.task.acks_late = True + req.task.reject_on_worker_lost = True + req.delivery_info['redelivered'] = False + req.on_failure(einfo) + req.on_reject.assert_called_with(req_logger, + req.connection_errors, True) + def test_tzlocal_is_cached(self): req = self.get_request(self.add.s(2, 2)) req._tzlocal = 'foo'
Message being acknowledged on WorkerLostError when CELERY_ACKS_LATE=True When using celery v3.0.24 with `CELERY_ACKS_LATE = True`, if the OOM killer kills the celery worker, the worker acknowledges the message, as per [this](https://github.com/celery/celery/commit/e810420c) commit. The `exc_info.internal` comes in as `false`, which means it is not an internal error, so the message is acknowledged. The desirable behaviour in such a case would be to not acknowledge the message (and to be able to know whether it's an OOM error), so that some other worker can pick it up. As a workaround, I've commented out the [code](https://github.com/siddharth96/celery/commit/427695d1b23034dadda85fd7a48f7367831be4fa) where celery acknowledges the message, because otherwise the message will be lost.
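With the patch above, requeueing becomes opt-in rather than the default. The option names come straight from the diff; presumably it can be enabled globally or per task like this:

``` python
from celery import Celery

app = Celery('app')

# Globally, alongside late acks:
app.conf.CELERY_ACKS_LATE = True
app.conf.CELERY_REJECT_ON_WORKER_LOST = True

# Or per task; per the patch, the message is only requeued on its first
# delivery, so a task that keeps killing its worker cannot loop forever:
@app.task(acks_late=True, reject_on_worker_lost=True)
def crunch(data):
    return sum(data)
```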
This is deliberate: if a task is killed, it may mean that the next invocation will cause the same thing to happen. If the task is redelivered, it may cause a loop where the same conditions occur again and again. Also, sadly you cannot distinguish processes killed by OOM from processes killed by other means, and if an administrator kills -9 a task going amok, you usually don't want that task to be called again. There could be a configuration option for not acking terminated tasks, but I'm not sure how useful that would be. A better solution could be to use `basic_reject(requeue=False)` instead of `basic_ack`; that way you can configure a dead letter queue so that the killed tasks will be sent to a queue for manual inspection. I must say, regardless of the status of this feature request, the documentation is misleading. Specifically, [this FAQ makes it seem that process failures would NOT acknowledge messages](http://celery.readthedocs.org/en/latest/faq.html#faq-acks-late-vs-retry). And [this FAQ boldface states](http://celery.readthedocs.org/en/latest/faq.html#id54) that in the event of a kill signal (9), acks_late will allow the task to re-run (which again, is patently wrong based on this poorly documented behavior). Nowhere in the docs have I found that if the process _dies_, the message will be acknowledged, regardless of acks_late or not. (For instance, I have a set of 10k+ tasks, and some 1% of tasks wind up acknowledged but incomplete when a WorkerLostError is thrown in connection with the worker, although there are no other errors of any kind in any of my logs related to that task.) TL;DR: at the least, appropriately document the current state when describing the functionality and limitations of acks_late. A work-around would be helpful -- I'm not sure I understand the solution of using `basic_reject`, although I'll keep looking into it. The docs are referring to killing the worker process with KILL, not the child processes. The term worker will always refer to the worker instance, not the pool processes. The section within about acks_late is probably not very helpful and should be removed.
2015-10-06T05:34:34
celery/celery
2,901
celery__celery-2901
[ "2900" ]
991982583773555f918a236279a06cf36a32cbcf
diff --git a/celery/backends/cassandra.py b/celery/backends/cassandra.py --- a/celery/backends/cassandra.py +++ b/celery/backends/cassandra.py @@ -125,13 +125,14 @@ def __init__(self, servers=None, keyspace=None, table=None, entry_ttl=None, self._session = None self._write_stmt = None self._read_stmt = None + self._make_stmt = None def process_cleanup(self): if self._connection is not None: - self._connection = None - if self._session is not None: - self._session.shutdown() - self._session = None + self._connection.shutdown() # also shuts down _session + + self._connection = None + self._session = None def _get_connection(self, write=False): """Prepare the connection for action @@ -171,6 +172,7 @@ def _get_connection(self, write=False): Q_CREATE_RESULT_TABLE.format(table=self.table), ) self._make_stmt.consistency_level = self.write_consistency + try: self._session.execute(self._make_stmt) except cassandra.AlreadyExists: @@ -179,10 +181,11 @@ def _get_connection(self, write=False): except cassandra.OperationTimedOut: # a heavily loaded or gone Cassandra cluster failed to respond. # leave this class in a consistent state - self._connection = None - if self._session is not None: - self._session.shutdown() + if self._connection is not None: + self._connection.shutdown() # also shuts down _session + self._connection = None + self._session = None raise # we did fail after all - reraise def _store_result(self, task_id, result, status,
diff --git a/celery/tests/backends/test_cassandra.py b/celery/tests/backends/test_cassandra.py --- a/celery/tests/backends/test_cassandra.py +++ b/celery/tests/backends/test_cassandra.py @@ -41,6 +41,7 @@ def test_init_with_and_without_LOCAL_QUROM(self): with mock_module(*CASSANDRA_MODULES): from celery.backends import cassandra as mod mod.cassandra = Mock() + cons = mod.cassandra.ConsistencyLevel = Object() cons.LOCAL_QUORUM = 'foo' @@ -68,6 +69,7 @@ def test_get_task_meta_for(self): with mock_module(*CASSANDRA_MODULES): from celery.backends import cassandra as mod mod.cassandra = Mock() + x = mod.CassandraBackend(app=self.app) x._connection = True session = x._session = Mock() @@ -120,6 +122,9 @@ def __init__(self, *args, **kwargs): def connect(self, *args, **kwargs): raise OTOExc() + def shutdown(self): + pass + mod.cassandra = Mock() mod.cassandra.OperationTimedOut = OTOExc mod.cassandra.cluster = Mock() @@ -133,3 +138,37 @@ def connect(self, *args, **kwargs): self.assertIsNone(x._session) x.process_cleanup() # should not raise + + + def test_please_free_memory(self): + """ + Ensure that Cluster object IS shut down. + """ + with mock_module(*CASSANDRA_MODULES): + from celery.backends import cassandra as mod + + class RAMHoggingCluster(object): + + objects_alive = 0 + + def __init__(self, *args, **kwargs): + pass + + def connect(self, *args, **kwargs): + RAMHoggingCluster.objects_alive += 1 + return Mock() + + def shutdown(self): + RAMHoggingCluster.objects_alive -= 1 + + mod.cassandra = Mock() + + mod.cassandra.cluster = Mock() + mod.cassandra.cluster.Cluster = RAMHoggingCluster + + for x in range(0, 10): + x = mod.CassandraBackend(app=self.app) + x._store_result('task_id', 'result', states.SUCCESS) + x.process_cleanup() + + self.assertEquals(RAMHoggingCluster.objects_alive, 0)
new_cassandra + Celery leaks memory Continuing my stress tests, I ran into a situation where successive jobs run with Celery + the new_cassandra backend leak memory. Running tasks, I get a bigger and bigger ru_maxrss value:

```
[2015-10-31 21:24:46,598: WARNING/Worker-1] resource.struct_rusage(ru_utime=0.010111, ru_stime=0.00629, ru_maxrss=25748, ru_ixrss=0, ru_idrss=0, ru_isrss=0, ru_minflt=1289, ru_majflt=0, ru_nswap=0, ru_inblock=0, ru_oublock=0, ru_msgsnd=0, ru_msgrcv=0, ru_nsignals=0, ru_nvcsw=2, ru_nivcsw=237)
[2015-10-31 21:24:46,601: INFO/MainProcess] Received task: s4jobs.tasks.decommission_device.decommission_device[f2695142-0680-4565-a51c-790c36f685f2]
[2015-10-31 21:25:07,208: INFO/MainProcess] Task s4jobs.tasks.decommission_device.decommission_device[c9cb66b1-d8ce-45c5-ab6b-05c516ec5464] succeeded in 20.61094686s: None
[2015-10-31 21:25:07,211: WARNING/Worker-1] resource.struct_rusage(ru_utime=1.141192, ru_stime=0.249278, ru_maxrss=31512, ru_ixrss=0, ru_idrss=0, ru_isrss=0, ru_minflt=2940, ru_majflt=0, ru_nswap=0, ru_inblock=0, ru_oublock=0, ru_msgsnd=0, ru_msgrcv=0, ru_nsignals=0, ru_nvcsw=19155, ru_nivcsw=1739)
[2015-10-31 21:25:07,221: INFO/MainProcess] Received task: s4jobs.tasks.decommission_device.decommission_device[cadb2d50-eb12-4e4c-aa74-57bfd4bf41d3]
[2015-10-31 21:25:23,323: INFO/MainProcess] Task s4jobs.tasks.decommission_device.decommission_device[5d2ee4a0-b25d-4107-98b7-d55200f18a72] succeeded in 16.112931506s: None
[2015-10-31 21:25:23,325: WARNING/Worker-1] resource.struct_rusage(ru_utime=2.084558, ru_stime=0.413746, ru_maxrss=32288, ru_ixrss=0, ru_idrss=0, ru_isrss=0, ru_minflt=3154, ru_majflt=0, ru_nswap=0, ru_inblock=0, ru_oublock=0, ru_msgsnd=0, ru_msgrcv=0, ru_nsignals=0, ru_nvcsw=34251, ru_nivcsw=2860)
[2015-10-31 21:25:23,338: INFO/MainProcess] Received task: s4jobs.tasks.decommission_device.decommission_device[a2b3b191-3b0a-4bb6-ab77-dbaf2113fe09]
[2015-10-31 21:25:43,668: INFO/MainProcess] Task s4jobs.tasks.decommission_device.decommission_device[d3e5539f-8f12-4bec-b1a8-19a26a6a665e] succeeded in 20.34311718s: None
[2015-10-31 21:25:43,671: WARNING/Worker-1] resource.struct_rusage(ru_utime=3.290886, ru_stime=0.659077, ru_maxrss=33072, ru_ixrss=0, ru_idrss=0, ru_isrss=0, ru_minflt=3343, ru_majflt=0, ru_nswap=0, ru_inblock=0, ru_oublock=0, ru_msgsnd=0, ru_msgrcv=0, ru_nsignals=0, ru_nvcsw=53488, ru_nivcsw=4137)
[2015-10-31 21:25:43,682: INFO/MainProcess] Received task: s4jobs.tasks.decommission_device.decommission_device[99a6ca5f-5734-4eab-b148-b2a077a754b6]
[2015-10-31 21:26:04,140: INFO/MainProcess] Task s4jobs.tasks.decommission_device.decommission_device[283055a5-b466-4435-812b-0d8dbaefb2bf] succeeded in 20.469805369s: None
[2015-10-31 21:26:04,143: WARNING/Worker-1] resource.struct_rusage(ru_utime=4.581904, ru_stime=0.922214, ru_maxrss=33860, ru_ixrss=0, ru_idrss=0, ru_isrss=0, ru_minflt=3537, ru_majflt=0, ru_nswap=0, ru_inblock=0, ru_oublock=0, ru_msgsnd=0, ru_msgrcv=0, ru_nsignals=0, ru_nvcsw=73128, ru_nivcsw=5329)
```
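The patch above fixes this by shutting down the driver's `Cluster` object in `process_cleanup` rather than only the `Session`: shutting down the cluster also stops the background threads it spawns (task scheduler, event loop, connection heartbeat), which were piling up per task. The corrected cleanup, restated from the diff with comments:

``` python
def process_cleanup(self):
    # Cluster.shutdown() also shuts down the Session and joins the
    # driver's background threads; clearing only _session leaked the
    # Cluster (and its threads) on every task.
    if self._connection is not None:
        self._connection.shutdown()
    self._connection = None
    self._session = None
```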
I added monitoring thread count beneath every resource.getrusage call. It's unsettling at best. ``` [2015-10-31 21:28:57,388: WARNING/Worker-1] resource.struct_rusage(ru_utime=0.008466999999999999, ru_stime=0.00616, ru_maxrss=26192, ru_ixrss=0, ru_idrss=0, ru_isrss=0, ru_minflt=1297, ru_majflt=0, ru_nswap=0, ru_inblock=0, ru_oublock=0, ru_msgsnd=0, ru_msgrcv=0, ru_nsignals=0, ru_nvcsw=2, ru_nivcsw=28) [2015-10-31 21:28:57,388: WARNING/Worker-1] 1 [2015-10-31 21:28:57,392: INFO/MainProcess] Received task: s4jobs.tasks.decommission_device.decommission_device[197e1f45-c2c9-4bcf-9dc1-014e07a97398] [2015-10-31 21:29:17,974: INFO/MainProcess] Task s4jobs.tasks.decommission_device.decommission_device[cadb2d50-eb12-4e4c-aa74-57bfd4bf41d3] succeeded in 20.586918129s: None [2015-10-31 21:29:17,979: INFO/MainProcess] Received task: s4jobs.tasks.decommission_device.decommission_device[560d4dfa-85cc-4901-a097-69f70b3c2463] [2015-10-31 21:29:17,977: WARNING/Worker-1] resource.struct_rusage(ru_utime=1.162278, ru_stime=0.22641699999999998, ru_maxrss=31880, ru_ixrss=0, ru_idrss=0, ru_isrss=0, ru_minflt=2960, ru_majflt=0, ru_nswap=0, ru_inblock=0, ru_oublock=0, ru_msgsnd=0, ru_msgrcv=0, ru_nsignals=0, ru_nvcsw=18810, ru_nivcsw=1683) [2015-10-31 21:29:17,980: WARNING/Worker-1] 8 [2015-10-31 21:29:38,340: INFO/MainProcess] Task s4jobs.tasks.decommission_device.decommission_device[a2b3b191-3b0a-4bb6-ab77-dbaf2113fe09] succeeded in 20.363800806s: None [2015-10-31 21:29:38,343: WARNING/Worker-1] resource.struct_rusage(ru_utime=2.282896, ru_stime=0.463669, ru_maxrss=32540, ru_ixrss=0, ru_idrss=0, ru_isrss=0, ru_minflt=3179, ru_majflt=0, ru_nswap=0, ru_inblock=0, ru_oublock=0, ru_msgsnd=0, ru_msgrcv=0, ru_nsignals=0, ru_nvcsw=37622, ru_nivcsw=2898) [2015-10-31 21:29:38,344: WARNING/Worker-1] 11 [2015-10-31 21:29:38,356: INFO/MainProcess] Received task: s4jobs.tasks.decommission_device.decommission_device[9b018659-e886-4622-a5b5-36b18f24ce77] ``` It seems that DataStax's Cassandra driver is spawning something behind our backs. 
```
[2015-10-31 21:34:35,052: INFO/MainProcess] Task s4jobs.tasks.decommission_device.decommission_device[78aab1fc-f771-4ac5-bff1-8b7600a7760e] succeeded in 20.630556935s: None
[2015-10-31 21:34:35,055: WARNING/Worker-1] Thread 0
[2015-10-31 21:34:35,057: WARNING/Worker-1] <_MainThread(MainThread, started 140697746777856)>
[2015-10-31 21:34:35,058: WARNING/Worker-1] File "/usr/bin/celery", line 11, in <module>
[2015-10-31 21:34:35,059: WARNING/Worker-1] sys.exit(main())
[2015-10-31 21:34:35,059: WARNING/Worker-1] File "/usr/lib/python2.7/site-packages/celery/__main__.py", line 30, in main
[2015-10-31 21:34:35,060: WARNING/Worker-1] main()
[2015-10-31 21:34:35,061: WARNING/Worker-1] File "/usr/lib/python2.7/site-packages/celery/bin/celery.py", line 81, in main
[2015-10-31 21:34:35,061: WARNING/Worker-1] cmd.execute_from_commandline(argv)
[2015-10-31 21:34:35,062: WARNING/Worker-1] File "/usr/lib/python2.7/site-packages/celery/bin/celery.py", line 769, in execute_from_commandline
[2015-10-31 21:34:35,063: WARNING/Worker-1] super(CeleryCommand, self).execute_from_commandline(argv)))
[2015-10-31 21:34:35,064: INFO/MainProcess] Received task: s4jobs.tasks.decommission_device.decommission_device[bda25b52-e221-4797-8d5c-f17e384a82f9]
[2015-10-31 21:34:35,063: WARNING/Worker-1] File "/usr/lib/python2.7/site-packages/celery/bin/base.py", line 311, in execute_from_commandline
[2015-10-31 21:34:35,066: WARNING/Worker-1] return self.handle_argv(self.prog_name, argv[1:])
[2015-10-31 21:34:35,067: WARNING/Worker-1] File "/usr/lib/python2.7/site-packages/celery/bin/celery.py", line 761, in handle_argv
[2015-10-31 21:34:35,068: WARNING/Worker-1] return self.execute(command, argv)
[2015-10-31 21:34:35,069: WARNING/Worker-1] File "/usr/lib/python2.7/site-packages/celery/bin/celery.py", line 693, in execute
[2015-10-31 21:34:35,069: WARNING/Worker-1] ).run_from_argv(self.prog_name, argv[1:], command=argv[0])
[2015-10-31 21:34:35,070: WARNING/Worker-1] File "/usr/lib/python2.7/site-packages/celery/bin/worker.py", line 179, in run_from_argv
[2015-10-31 21:34:35,070: WARNING/Worker-1] return self(*args, **options)
[2015-10-31 21:34:35,071: WARNING/Worker-1] File "/usr/lib/python2.7/site-packages/celery/bin/base.py", line 274, in __call__
[2015-10-31 21:34:35,072: WARNING/Worker-1] ret = self.run(*args, **kwargs)
[2015-10-31 21:34:35,072: WARNING/Worker-1] File "/usr/lib/python2.7/site-packages/celery/bin/worker.py", line 212, in run
[2015-10-31 21:34:35,073: WARNING/Worker-1] state_db=self.node_format(state_db, hostname), **kwargs
[2015-10-31 21:34:35,073: WARNING/Worker-1] File "/usr/lib/python2.7/site-packages/celery/worker/__init__.py", line 206, in start
[2015-10-31 21:34:35,074: WARNING/Worker-1] self.blueprint.start(self)
[2015-10-31 21:34:35,074: WARNING/Worker-1] File "/usr/lib/python2.7/site-packages/celery/bootsteps.py", line 123, in start
[2015-10-31 21:34:35,074: WARNING/Worker-1] step.start(parent)
[2015-10-31 21:34:35,074: WARNING/Worker-1] File "/usr/lib/python2.7/site-packages/celery/bootsteps.py", line 374, in start
[2015-10-31 21:34:35,075: WARNING/Worker-1] return self.obj.start()
[2015-10-31 21:34:35,075: WARNING/Worker-1] File "/usr/lib/python2.7/site-packages/celery/concurrency/base.py", line 131, in start
[2015-10-31 21:34:35,075: WARNING/Worker-1] self.on_start()
[2015-10-31 21:34:35,075: WARNING/Worker-1] File "/usr/lib/python2.7/site-packages/celery/concurrency/prefork.py", line 117, in on_start
[2015-10-31 21:34:35,076: WARNING/Worker-1] **self.options)
[2015-10-31 21:34:35,076: WARNING/Worker-1] File "/usr/lib/python2.7/site-packages/celery/concurrency/asynpool.py", line 400, in __init__
[2015-10-31 21:34:35,077: WARNING/Worker-1] super(AsynPool, self).__init__(processes, *args, **kwargs)
[2015-10-31 21:34:35,077: WARNING/Worker-1] File "/usr/lib64/python2.7/site-packages/billiard/pool.py", line 968, in __init__
[2015-10-31 21:34:35,077: WARNING/Worker-1] self._create_worker_process(i)
[2015-10-31 21:34:35,077: WARNING/Worker-1] File "/usr/lib64/python2.7/site-packages/billiard/pool.py", line 1064, in _create_worker_process
[2015-10-31 21:34:35,078: WARNING/Worker-1] w.start()
[2015-10-31 21:34:35,078: WARNING/Worker-1] File "/usr/lib64/python2.7/site-packages/billiard/process.py", line 137, in start
[2015-10-31 21:34:35,078: WARNING/Worker-1] self._popen = Popen(self)
[2015-10-31 21:34:35,079: WARNING/Worker-1] File "/usr/lib64/python2.7/site-packages/billiard/forking.py", line 105, in __init__
[2015-10-31 21:34:35,079: WARNING/Worker-1] code = process_obj._bootstrap()
[2015-10-31 21:34:35,079: WARNING/Worker-1] File "/usr/lib64/python2.7/site-packages/billiard/process.py", line 292, in _bootstrap
[2015-10-31 21:34:35,080: WARNING/Worker-1] self.run()
[2015-10-31 21:34:35,080: WARNING/Worker-1] File "/usr/lib64/python2.7/site-packages/billiard/pool.py", line 291, in run
[2015-10-31 21:34:35,080: WARNING/Worker-1] sys.exit(self.workloop(pid=pid))
[2015-10-31 21:34:35,080: WARNING/Worker-1] File "/usr/lib64/python2.7/site-packages/billiard/pool.py", line 363, in workloop
[2015-10-31 21:34:35,081: WARNING/Worker-1] result = (True, prepare_result(fun(*args, **kwargs)))
[2015-10-31 21:34:35,081: WARNING/Worker-1] File "/usr/lib/python2.7/site-packages/celery/app/trace.py", line 349, in _fast_trace_task
[2015-10-31 21:34:35,081: WARNING/Worker-1] return _tasks[task].__trace__(uuid, args, kwargs, request)[0]
[2015-10-31 21:34:35,081: WARNING/Worker-1] File "/usr/lib/python2.7/site-packages/celery/app/trace.py", line 240, in trace_task
[2015-10-31 21:34:35,082: WARNING/Worker-1] R = retval = fun(*args, **kwargs)
[2015-10-31 21:34:35,082: WARNING/Worker-1] File "/usr/lib/python2.7/site-packages/celery/app/trace.py", line 438, in __protected_call__
[2015-10-31 21:34:35,082: WARNING/Worker-1] return self.run(*args, **kwargs)
[2015-10-31 21:34:35,082: WARNING/Worker-1] File "/smok4/src/s4jobs/tasks/decommission_device.py", line 33, in decommission_device
[2015-10-31 21:34:35,082: WARNING/Worker-1] traceback.print_stack(sys._current_frames()[th.ident])
[2015-10-31 21:34:35,083: WARNING/Worker-1] ==================================
[2015-10-31 21:34:35,083: WARNING/Worker-1] Thread 1
[2015-10-31 21:34:35,083: WARNING/Worker-1] <Thread(Task Scheduler, started daemon 140697458624256)>
[2015-10-31 21:34:35,085: WARNING/Worker-1] File "/usr/lib64/python2.7/threading.py", line 786, in __bootstrap
[2015-10-31 21:34:35,086: WARNING/Worker-1] self.__bootstrap_inner()
[2015-10-31 21:34:35,086: WARNING/Worker-1] File "/usr/lib64/python2.7/threading.py", line 813, in __bootstrap_inner
[2015-10-31 21:34:35,086: WARNING/Worker-1] self.run()
[2015-10-31 21:34:35,087: WARNING/Worker-1] File "/usr/lib64/python2.7/threading.py", line 766, in run
[2015-10-31 21:34:35,087: WARNING/Worker-1] self.__target(*self.__args, **self.__kwargs)
[2015-10-31 21:34:35,087: WARNING/Worker-1] File "/usr/lib/python2.7/site-packages/cassandra/cluster.py", line 2495, in run
[2015-10-31 21:34:35,087: WARNING/Worker-1] run_at, task = self._queue.get(block=True, timeout=None)
[2015-10-31 21:34:35,088: WARNING/Worker-1] File "/usr/lib64/python2.7/Queue.py", line 168, in get
[2015-10-31 21:34:35,088: WARNING/Worker-1] self.not_empty.wait()
[2015-10-31 21:34:35,089: WARNING/Worker-1] File "/usr/lib64/python2.7/threading.py", line 340, in wait
[2015-10-31 21:34:35,089: WARNING/Worker-1] waiter.acquire()
[2015-10-31 21:34:35,089: WARNING/Worker-1] ==================================
[2015-10-31 21:34:35,090: WARNING/Worker-1] Thread 2
[2015-10-31 21:34:35,090: WARNING/Worker-1] <Thread(cassandra_driver_event_loop, started daemon 140697450231552)>
[2015-10-31 21:34:35,091: WARNING/Worker-1] File "/usr/lib64/python2.7/threading.py", line 786, in __bootstrap
[2015-10-31 21:34:35,091: WARNING/Worker-1] self.__bootstrap_inner()
[2015-10-31 21:34:35,091: WARNING/Worker-1] File "/usr/lib64/python2.7/threading.py", line 813, in __bootstrap_inner
[2015-10-31 21:34:35,091: WARNING/Worker-1] self.run()
[2015-10-31 21:34:35,092: WARNING/Worker-1] File "/usr/lib64/python2.7/threading.py", line 766, in run
[2015-10-31 21:34:35,092: WARNING/Worker-1] self.__target(*self.__args, **self.__kwargs)
[2015-10-31 21:34:35,092: WARNING/Worker-1] File "/usr/lib/python2.7/site-packages/cassandra/io/asyncorereactor.py", line 91, in _run_loop
[2015-10-31 21:34:35,092: WARNING/Worker-1] asyncore.loop(timeout=0.001, use_poll=True, count=1000)
[2015-10-31 21:34:35,093: WARNING/Worker-1] File "/usr/lib64/python2.7/asyncore.py", line 220, in loop
[2015-10-31 21:34:35,093: WARNING/Worker-1] poll_fun(timeout, map)
[2015-10-31 21:34:35,093: WARNING/Worker-1] File "/usr/lib64/python2.7/asyncore.py", line 192, in poll2
[2015-10-31 21:34:35,093: WARNING/Worker-1] r = pollster.poll(timeout)
[2015-10-31 21:34:35,094: WARNING/Worker-1] ==================================
[2015-10-31 21:34:35,094: WARNING/Worker-1] Thread 3
[2015-10-31 21:34:35,094: WARNING/Worker-1] <ConnectionHeartbeat(Connection heartbeat, started daemon 140697441052416)>
[2015-10-31 21:34:35,095: WARNING/Worker-1] File "/usr/lib64/python2.7/threading.py", line 786, in __bootstrap
[2015-10-31 21:34:35,095: WARNING/Worker-1] self.__bootstrap_inner()
[2015-10-31 21:34:35,095: WARNING/Worker-1] File "/usr/lib64/python2.7/threading.py", line 813, in __bootstrap_inner
[2015-10-31 21:34:35,096: WARNING/Worker-1] self.run()
[2015-10-31 21:34:35,096: WARNING/Worker-1] File "/usr/lib/python2.7/site-packages/cassandra/connection.py", line 799, in run
[2015-10-31 21:34:35,097: WARNING/Worker-1] self._shutdown_event.wait(self._interval)
[2015-10-31 21:34:35,097: WARNING/Worker-1] File "/usr/lib64/python2.7/threading.py", line 624, in wait
[2015-10-31 21:34:35,097: WARNING/Worker-1] self.__cond.wait(timeout, balancing)
[2015-10-31 21:34:35,097: WARNING/Worker-1] File "/usr/lib64/python2.7/threading.py", line 362, in wait
[2015-10-31 21:34:35,097: WARNING/Worker-1] _sleep(delay)
[2015-10-31 21:34:35,098: WARNING/Worker-1] ==================================
[2015-10-31 21:34:35,098: WARNING/Worker-1] Thread 4
[2015-10-31 21:34:35,098: WARNING/Worker-1] <Thread(Thread-2, started daemon 140697197856512)>
[2015-10-31 21:34:35,099: WARNING/Worker-1] File "/usr/lib64/python2.7/threading.py", line 786, in __bootstrap
[2015-10-31 21:34:35,099: WARNING/Worker-1] self.__bootstrap_inner()
[2015-10-31 21:34:35,100: WARNING/Worker-1] File "/usr/lib64/python2.7/threading.py", line 813, in __bootstrap_inner
[2015-10-31 21:34:35,100: WARNING/Worker-1] self.run()
[2015-10-31 21:34:35,100: WARNING/Worker-1] File "/usr/lib64/python2.7/threading.py", line 766, in run
[2015-10-31 21:34:35,100: WARNING/Worker-1] self.__target(*self.__args, **self.__kwargs)
[2015-10-31 21:34:35,101: WARNING/Worker-1] File "/usr/lib/python2.7/site-packages/concurrent/futures/thread.py", line 65, in _worker
[2015-10-31 21:34:35,101: WARNING/Worker-1] work_item = work_queue.get(block=True)
[2015-10-31 21:34:35,101: WARNING/Worker-1] File "/usr/lib64/python2.7/Queue.py", line 168, in get
[2015-10-31 21:34:35,101: WARNING/Worker-1] self.not_empty.wait()
[2015-10-31 21:34:35,102: WARNING/Worker-1] File "/usr/lib64/python2.7/threading.py", line 340, in wait
[2015-10-31 21:34:35,102: WARNING/Worker-1] waiter.acquire()
[2015-10-31 21:34:35,102: WARNING/Worker-1] ==================================
[2015-10-31 21:34:35,102: WARNING/Worker-1] Thread 5
[2015-10-31 21:34:35,103: WARNING/Worker-1] <ConnectionHeartbeat(Connection heartbeat, started daemon 140697206249216)>
[2015-10-31 21:34:35,103: WARNING/Worker-1] File "/usr/lib64/python2.7/threading.py", line 786, in __bootstrap
[2015-10-31 21:34:35,103: WARNING/Worker-1] self.__bootstrap_inner()
[2015-10-31 21:34:35,103: WARNING/Worker-1] File "/usr/lib64/python2.7/threading.py", line 813, in __bootstrap_inner
[2015-10-31 21:34:35,104: WARNING/Worker-1] self.run()
[2015-10-31 21:34:35,104: WARNING/Worker-1] File "/usr/lib/python2.7/site-packages/cassandra/connection.py", line 799, in run
[2015-10-31 21:34:35,104: WARNING/Worker-1] self._shutdown_event.wait(self._interval)
[2015-10-31 21:34:35,104: WARNING/Worker-1] File "/usr/lib64/python2.7/threading.py", line 624, in wait
[2015-10-31 21:34:35,105: WARNING/Worker-1] self.__cond.wait(timeout, balancing)
[2015-10-31 21:34:35,105: WARNING/Worker-1] File "/usr/lib64/python2.7/threading.py", line 362, in wait
[2015-10-31 21:34:35,105: WARNING/Worker-1] _sleep(delay)
[2015-10-31 21:34:35,105: WARNING/Worker-1] ==================================
[2015-10-31 21:34:35,105: WARNING/Worker-1] Thread 6
[2015-10-31 21:34:35,106: WARNING/Worker-1] <Thread(Thread-1, started daemon 140697223034624)>
[2015-10-31 21:34:35,106: WARNING/Worker-1] File "/usr/lib64/python2.7/threading.py", line 786, in __bootstrap
[2015-10-31 21:34:35,107: WARNING/Worker-1] self.__bootstrap_inner()
[2015-10-31 21:34:35,107: WARNING/Worker-1] File "/usr/lib64/python2.7/threading.py", line 813, in __bootstrap_inner
[2015-10-31 21:34:35,107: WARNING/Worker-1] self.run()
[2015-10-31 21:34:35,107: WARNING/Worker-1] File "/usr/lib64/python2.7/threading.py", line 766, in run
[2015-10-31 21:34:35,108: WARNING/Worker-1] self.__target(*self.__args, **self.__kwargs)
[2015-10-31 21:34:35,108: WARNING/Worker-1] File "/usr/lib/python2.7/site-packages/concurrent/futures/thread.py", line 65, in _worker
[2015-10-31 21:34:35,108: WARNING/Worker-1] work_item = work_queue.get(block=True)
[2015-10-31 21:34:35,108: WARNING/Worker-1] File "/usr/lib64/python2.7/Queue.py", line 168, in get
[2015-10-31 21:34:35,109: WARNING/Worker-1] self.not_empty.wait()
[2015-10-31 21:34:35,109: WARNING/Worker-1] File "/usr/lib64/python2.7/threading.py", line 340, in wait
[2015-10-31 21:34:35,109: WARNING/Worker-1] waiter.acquire()
[2015-10-31 21:34:35,110: WARNING/Worker-1] ==================================
[2015-10-31 21:34:35,110: WARNING/Worker-1] Thread 7
[2015-10-31 21:34:35,110: WARNING/Worker-1] <Thread(Task Scheduler, started daemon 140697214641920)>
[2015-10-31 21:34:35,110: WARNING/Worker-1] File "/usr/lib64/python2.7/threading.py", line 786, in __bootstrap
[2015-10-31 21:34:35,111: WARNING/Worker-1] self.__bootstrap_inner()
[2015-10-31 21:34:35,111: WARNING/Worker-1] File "/usr/lib64/python2.7/threading.py", line 813, in __bootstrap_inner
[2015-10-31 21:34:35,111: WARNING/Worker-1] self.run()
[2015-10-31 21:34:35,111: WARNING/Worker-1] File "/usr/lib64/python2.7/threading.py", line 766, in run
[2015-10-31 21:34:35,112: WARNING/Worker-1] self.__target(*self.__args, **self.__kwargs)
[2015-10-31 21:34:35,112: WARNING/Worker-1] File "/usr/lib/python2.7/site-packages/cassandra/cluster.py", line 2495, in run
[2015-10-31 21:34:35,112: WARNING/Worker-1] run_at, task = self._queue.get(block=True, timeout=None)
[2015-10-31 21:34:35,112: WARNING/Worker-1] File "/usr/lib64/python2.7/Queue.py", line 168, in get
[2015-10-31 21:34:35,113: WARNING/Worker-1] self.not_empty.wait()
[2015-10-31 21:34:35,113: WARNING/Worker-1] File "/usr/lib64/python2.7/threading.py", line 340, in wait
[2015-10-31 21:34:35,113: WARNING/Worker-1] waiter.acquire()
[2015-10-31 21:34:35,113: WARNING/Worker-1] ==================================
```

OK, this is due to me being smart and not calling shutdown() on Cluster instances, which it looks like might need "shutdowning". A fix and test are pending.
2015-10-31T23:21:57
celery/celery
3218
celery__celery-3218
[ "3211" ]
ac0d9d5ab7177745c3e3b3115f9f2c4121f0432f
diff --git a/celery/bin/multi.py b/celery/bin/multi.py
--- a/celery/bin/multi.py
+++ b/celery/bin/multi.py
@@ -458,9 +458,14 @@ def _args_for_node(p, name, prefix, suffix, cmd, append, options):
     name, nodename, expand = _get_nodename(
         name, prefix, suffix, options)
 
+    if nodename in p.namespaces:
+        ns = nodename
+    else:
+        ns = name
+
     argv = ([expand(cmd)] +
             [format_opt(opt, expand(value))
-             for opt, value in items(p.optmerge(name, options))] +
+             for opt, value in items(p.optmerge(ns, options))] +
             [p.passthrough])
     if append:
         argv.append(expand(append))
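In isolation, the selection rule this patch adds looks like the sketch below (the dict stands in for `NamespacedOptionParser` state; the keys are illustrative):

```python
def pick_namespace(name, nodename, namespaces):
    # Per-node options may be keyed by the full nodename (foo@bar) or by the
    # short name/ordinal; prefer whichever key the user actually wrote.
    return nodename if nodename in namespaces else name

namespaces = {'foo@bar': {'-Q': 'test'}}
assert pick_namespace('foo', 'foo@bar', namespaces) == 'foo@bar'
assert pick_namespace('foo', 'foo@baz', namespaces) == 'foo'
```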
diff --git a/celery/tests/bin/test_multi.py b/celery/tests/bin/test_multi.py
--- a/celery/tests/bin/test_multi.py
+++ b/celery/tests/bin/test_multi.py
@@ -149,6 +149,22 @@ def assert_line_in(name, args):
              ['COMMAND', '-n foo@', '-c 5', '']),
         )
 
+        p4 = NamespacedOptionParser(['foo', '-Q:1', 'test'])
+        names6 = list(multi_args(p4, cmd='COMMAND', suffix='""'))
+        self.assertEqual(
+            names6[0][0:2],
+            ('foo@',
+             ['COMMAND', '-n foo@', '-Q test', '']),
+        )
+
+        p5 = NamespacedOptionParser(['foo@bar', '-Q:1', 'test'])
+        names7 = list(multi_args(p5, cmd='COMMAND', suffix='""'))
+        self.assertEqual(
+            names7[0][0:2],
+            ('foo@bar',
+             ['COMMAND', '-n foo@bar', '-Q test', '']),
+        )
+
 
 class test_MultiTool(AppCase):
celery multi ignores FQNs and subscribes to incorrect queue.

The following does not work:

`celery multi start [email protected] [email protected] -A tserver.app -Q:1 celery,hq1 -Q:2 celery,high,hq1 -c 4 -l info`

Output:

```
celery multi v3.1.23 (Cipater)
> Starting nodes...
	> [email protected]: OK
	> [email protected]: OK
```

In this case both workers subscribe only to the `celery` queue.

However, the following works:

`celery multi start 2 -A tserver.app -Q:1 celery,hq1 -Q:2 celery,high,hq1 -c 4 -l info`

In this case, both workers subscribe to the correct queues. Output:

```
celery multi v3.1.23 (Cipater)
> Starting nodes...
	> [email protected]: OK
	> [email protected]: OK
```
2016-05-20T18:17:15
celery/celery
3392
celery__celery-3392
[ "3391" ]
5031d6f27862001d3e3bc5a2dacf1185c933f2c9
diff --git a/celery/utils/serialization.py b/celery/utils/serialization.py
--- a/celery/utils/serialization.py
+++ b/celery/utils/serialization.py
@@ -24,7 +24,7 @@
 except ImportError:
     import pickle  # noqa
 
-PY3 = sys.version_info[0] >= 3
+PY33 = sys.version_info >= (3, 3)
 
 __all__ = [
     'UnpickleableExceptionWrapper', 'subclass_exception',
@@ -241,7 +241,9 @@ def jsonify(obj,
         return unknown_type_filter(obj)
 
 
-if PY3:
+# Since PyPy 3 targets Python 3.2, 'raise exc from None' will
+# raise a TypeError so we need to look for Python 3.3 or newer
+if PY33:
     from vine.five import exec_
     _raise_with_context = None  # for flake8
     exec_("""def _raise_with_context(exc, ctx): raise exc from ctx""")
test_retry_kwargs_can_be_empty fails on pypy3

From https://travis-ci.org/celery/celery/jobs/151613800:

```
======================================================================
ERROR: test_retry_kwargs_can_be_empty (celery.tests.tasks.test_tasks.test_task_retries)
----------------------------------------------------------------------
Traceback (most recent call last):
  File "/home/travis/build/celery/celery/celery/tests/tasks/test_tasks.py", line 178, in test_retry_kwargs_can_be_empty
    self.retry_task_mockapply.retry(args=[4, 4], kwargs=None)
  File "/home/travis/build/celery/celery/celery/app/task.py", line 611, in retry
    raise_with_context(exc or Retry('Task can be retried', None))
  File "/home/travis/build/celery/celery/celery/utils/serialization.py", line 255, in raise_with_context
    _raise_with_context(exc, exc_info[1])
  File "<string>", line 1, in _raise_with_context
TypeError: exception causes must derive from BaseException, not NoneType
```

https://github.com/celery/celery/blob/5031d6f27862001d3e3bc5a2dacf1185c933f2c9/celery/tests/tasks/test_tasks.py#L169
It looks like the culprit is https://github.com/celery/celery/commit/32b52ca875509b84d786e33ce2d39f62ab7ea050. Since `raise Exception from None` is new in Python 3.3 and PyPy 3 supports Python 3.2, I think the `if PY3` clause needs to be updated to `if PY33`.
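A sketch of the gating this implies (the `exec()` indirection keeps the module importable on Python 2, where `raise ... from` is a syntax error):

```python
import sys

PY33 = sys.version_info >= (3, 3)

if PY33:
    # `raise exc from ctx` parses on Python 3, but `from None` only became
    # valid in 3.3 -- which is why PyPy 3 (Python 3.2) fails with the
    # TypeError shown above.
    exec("def _raise_with_context(exc, ctx):\n    raise exc from ctx\n")
else:
    def _raise_with_context(exc, ctx):
        raise exc  # no exception chaining available before 3.3
```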
2016-08-14T04:03:33
celery/celery
3616
celery__celery-3616
[ "4412" ]
0dbc83a4cf22ff6c38a4da2ef18781905af66c92
diff --git a/celery/app/registry.py b/celery/app/registry.py
--- a/celery/app/registry.py
+++ b/celery/app/registry.py
@@ -4,7 +4,7 @@
 import inspect
 from importlib import import_module
 from celery._state import get_current_app
-from celery.exceptions import NotRegistered
+from celery.exceptions import NotRegistered, InvalidTaskError
 from celery.five import items
 
 __all__ = ['TaskRegistry']
@@ -22,8 +22,10 @@ def register(self, task):
         """Register a task in the task registry.
 
         The task will be automatically instantiated if not already an
-        instance.
+        instance. Name must be configured prior to registration.
         """
+        if task.name is None:
+            raise InvalidTaskError('Task "class {0}" must specify name'.format(task.__class__.__name__))
         self[task.name] = inspect.isclass(task) and task() or task
 
     def unregister(self, name):
diff --git a/t/unit/app/test_registry.py b/t/unit/app/test_registry.py
--- a/t/unit/app/test_registry.py
+++ b/t/unit/app/test_registry.py
@@ -1,6 +1,7 @@
 from __future__ import absolute_import, unicode_literals
 import pytest
 from celery.app.registry import _unpickle_task, _unpickle_task_v2
+from celery.exceptions import InvalidTaskError
 
 
 def returns():
@@ -24,6 +25,8 @@ class test_TaskRegistry:
 
     def setup(self):
         self.mytask = self.app.task(name='A', shared=False)(returns)
+        self.missing_name_task = self.app.task(name=None, shared=False)(returns)
+        self.missing_name_task.name = None  # name is overridden with path
         self.myperiodic = self.app.task(
             name='B', shared=False, type='periodic',
         )(returns)
@@ -45,6 +48,9 @@ def test_task_registry(self):
         self.assert_register_unregister_cls(r, self.mytask)
         self.assert_register_unregister_cls(r, self.myperiodic)
 
+        with pytest.raises(InvalidTaskError):
+            r.register(self.missing_name_task)
+
         r.register(self.myperiodic)
         r.unregister(self.myperiodic.name)
         assert self.myperiodic not in r
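A quick illustration of the new behaviour, using a minimal stand-in object for a task whose name was never configured (assumed setup, mirroring the unit test above):

```python
from celery.app.registry import TaskRegistry
from celery.exceptions import InvalidTaskError

class NamelessTask(object):
    """Stand-in for a task whose name attribute was never set."""
    name = None

registry = TaskRegistry()
try:
    registry.register(NamelessTask())
except InvalidTaskError as exc:
    # Previously this would have been stored under the key `None`.
    print('rejected:', exc)
```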
Request on_timeout should ignore soft time limit exception

When Request.on_timeout receives a soft timeout from billiard, it does the same as if it were receiving a hard time limit exception. This is run by the controller. But the task may catch this exception and e.g. return (this is what soft timeouts are for). This causes:

1. the result to be saved once as an exception by the controller (on_timeout) and another time with the result returned by the task
2. the task status to be set to failure and then to success in the same manner
3. if the task is participating in a chord, the chord result counter (at least with redis) to be incremented twice (instead of once), making the chord return prematurely and eventually lose tasks…

1, 2 and 3 can of course lead to strange race conditions…

## Steps to reproduce (Illustration)

with the program in test_timeout.py:

```python
import time
import celery


app = celery.Celery('test_timeout')
app.conf.update(
    result_backend="redis://localhost/0",
    broker_url="amqp://celery:celery@localhost:5672/host",
)

@app.task(soft_time_limit=1)
def test():
    try:
        time.sleep(2)
    except Exception:
        return 1

@app.task()
def add(args):
    print("### adding", args)
    return sum(args)

@app.task()
def on_error(context, exception, traceback, **kwargs):
    print("### on_error: ", exception)

if __name__ == "__main__":
    result = celery.chord([test.s().set(link_error=on_error.s()),
                           test.s().set(link_error=on_error.s())])(add.s())
    result.get()
```

start a worker and the program:

```
$ celery -A test_timeout worker -l WARNING
$ python3 test_timeout.py
```

## Expected behavior

The add method is called with `[1, 1]` as its argument and test_timeout.py returns normally.

## Actual behavior

test_timeout.py fails with:

```
celery.backends.base.ChordError: Callback error: ChordError("Dependency 15109e05-da43-449f-9081-85d839ac0ef2 raised SoftTimeLimitExceeded('SoftTimeLimitExceeded(True,)',)",
```

On the worker side, **on_error is called, but the add method is as well!**

```
[2017-11-29 23:07:25,538: WARNING/MainProcess] Soft time limit (1s) exceeded for test_timeout.test[15109e05-da43-449f-9081-85d839ac0ef2]
[2017-11-29 23:07:25,546: WARNING/MainProcess] ### on_error:
[2017-11-29 23:07:25,546: WARNING/MainProcess] SoftTimeLimitExceeded(True,)
[2017-11-29 23:07:25,547: WARNING/MainProcess] Soft time limit (1s) exceeded for test_timeout.test[38f3f7f2-4a89-4318-8ee9-36a987f73757]
[2017-11-29 23:07:25,553: ERROR/MainProcess] Chord callback for 'ef6d7a38-d1b4-40ad-b937-ffa84e40bb23' raised: ChordError("Dependency 15109e05-da43-449f-9081-85d839ac0ef2 raised SoftTimeLimitExceeded('SoftTimeLimitExceeded(True,)',)",)
Traceback (most recent call last):
  File "/usr/local/lib/python3.4/dist-packages/celery/backends/redis.py", line 290, in on_chord_part_return
    callback.delay([unpack(tup, decode) for tup in resl])
  File "/usr/local/lib/python3.4/dist-packages/celery/backends/redis.py", line 290, in <listcomp>
    callback.delay([unpack(tup, decode) for tup in resl])
  File "/usr/local/lib/python3.4/dist-packages/celery/backends/redis.py", line 243, in _unpack_chord_result
    raise ChordError('Dependency {0} raised {1!r}'.format(tid, retval))
celery.exceptions.ChordError: Dependency 15109e05-da43-449f-9081-85d839ac0ef2 raised SoftTimeLimitExceeded('SoftTimeLimitExceeded(True,)',)
[2017-11-29 23:07:25,565: WARNING/MainProcess] ### on_error:
[2017-11-29 23:07:25,565: WARNING/MainProcess] SoftTimeLimitExceeded(True,)
[2017-11-29 23:07:27,262: WARNING/PoolWorker-2] ### adding
[2017-11-29 23:07:27,264: WARNING/PoolWorker-2] [1, 1]
```

Of course, I chose on purpose to call test.s() twice, to show that the count in the chord continues. In fact:

- the chord result is incremented twice by the soft time limit error
- the chord result is incremented twice again by the correct return of the `test` task

## Conclusion

Request.on_timeout should not process soft time limit exceptions. Here is a quick monkey patch (the correction in celery is trivial):

```python
def patch_celery_request_on_timeout():
    from celery.worker import request
    orig = request.Request.on_timeout

    def patched_on_timeout(self, soft, timeout):
        if not soft:
            orig(self, soft, timeout)

    request.Request.on_timeout = patched_on_timeout


patch_celery_request_on_timeout()
```

## version info

```
software -> celery:4.1.0 (latentcall) kombu:4.0.2 py:3.4.3 billiard:3.5.0.2 py-amqp:2.1.4
platform -> system:Linux arch:64bit, ELF imp:CPython
loader -> celery.loaders.app.AppLoader
settings -> transport:amqp results:redis://10.0.3.253/0
```
2016-11-22T20:38:13
celery/celery
3669
celery__celery-3669
[ "3651" ]
56ff9caaf39151c7a9a930fc2bf245e2def141a8
diff --git a/celery/result.py b/celery/result.py
--- a/celery/result.py
+++ b/celery/result.py
@@ -867,10 +867,13 @@ def children(self):
         return self.results
 
     @classmethod
-    def restore(cls, id, backend=None):
+    def restore(cls, id, backend=None, app=None):
         """Restore previously saved group result."""
+
+        app = app or cls.app
+
         return (
-            backend or (cls.app.backend if cls.app else current_app.backend)
+            backend or (app.backend if app else current_app.backend)
         ).restore_group(id)
diff --git a/t/unit/tasks/test_result.py b/t/unit/tasks/test_result.py
--- a/t/unit/tasks/test_result.py
+++ b/t/unit/tasks/test_result.py
@@ -19,6 +19,7 @@
     AsyncResult,
     EagerResult,
     ResultSet,
+    GroupResult,
     result_from_tuple,
     assert_will_not_block,
 )
@@ -615,6 +616,13 @@ def test_save_restore(self):
         with pytest.raises(AttributeError):
             self.app.GroupResult.restore(ts.id, backend=object())
 
+    def test_restore_app(self):
+        subs = [MockAsyncResultSuccess(uuid(), app=self.app)]
+        ts = self.app.GroupResult(uuid(), subs)
+        ts.save()
+        restored = GroupResult.restore(ts.id, app=self.app)
+        assert restored.id == ts.id
+
     def test_join_native(self):
         backend = SimpleBackend()
         results = [self.app.AsyncResult(uuid(), backend=backend)
Specifying app/backend in GroupResult.restore is slightly awkward

In Celery 3.x I used `GroupResult.restore('some-id')` and it somehow found my app and figured out the backend (not sure how, but it worked). In Celery 4.x this does not work (it does not find the backend and crashes). That makes sense to me (I like the idea of explicit apps a lot!).

`GroupResult.restore()` does not accept an `app` argument, which would be convenient. It does, however, support a `backend` argument, so I changed my code to use `GroupResult.restore('my-id', backend=app.backend)` and everything is fine.

However, to me this is not that super-intuitive; my suggestion is that we should add an `app` kwarg to `restore()` that can be used like:

```py
GroupResult.restore('someid', app=app)
```

Do you think this makes sense? Then I'd be happy to work on a PR to add the argument!
Yeah, I definitely think that makes sense! The argument was probably named `backend` before the concept of apps. I was not aware of it, but `GroupResult` is also available on the app, so another way to solve this is to use:

```py
app.GroupResult.restore('someid')
```
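With the patch applied, either spelling below works; a short usage sketch (the backend URL and the id are placeholders):

```python
from celery import Celery
from celery.result import GroupResult

app = Celery('proj', backend='redis://localhost:6379/0')
group_id = 'previously-saved-group-id'  # placeholder

restored = GroupResult.restore(group_id, app=app)  # new explicit-app kwarg
restored = app.GroupResult.restore(group_id)       # app-bound alternative
```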
2016-12-09T10:05:34
celery/celery
3671
celery__celery-3671
[ "4412" ]
56ff9caaf39151c7a9a930fc2bf245e2def141a8
diff --git a/examples/next-steps/proj/tasks.py b/examples/next-steps/proj/tasks.py
--- a/examples/next-steps/proj/tasks.py
+++ b/examples/next-steps/proj/tasks.py
@@ -1,5 +1,5 @@
 from __future__ import absolute_import, unicode_literals
-from . import app
+from .celery import app
 
 
 @app.task
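For context, a sketch of the layout this example assumes (per Celery's "next steps" tutorial); `from . import app` only resolves if the package's `__init__.py` re-exports the app, which this example's does not:

```python
# proj/
#   __init__.py
#   celery.py    <- defines:  app = Celery('proj', ...)
#   tasks.py     <- must therefore import it explicitly:

from .celery import app

@app.task
def add(x, y):
    return x + y
```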
2016-12-09T12:05:55
celery/celery
3693
celery__celery-3693
[ "3678" ]
8c7ac5d84d245028f02b5d7f6a8684a1ce84dc9e
diff --git a/celery/utils/functional.py b/celery/utils/functional.py
--- a/celery/utils/functional.py
+++ b/celery/utils/functional.py
@@ -252,10 +252,10 @@ def _argsfromspec(spec, replace_defaults=True):
         ', '.join(positional),
         ', '.join('{0}={1}'.format(k, v) for k, v in optional),
         '*{0}'.format(varargs) if varargs else None,
-        '**{0}'.format(varkw) if varkw else None,
         '*' if (kwonlyargs or kwonlyargs_optional) and not varargs else None,
         ', '.join(kwonlyargs) if kwonlyargs else None,
         ', '.join('{0}="{1}"'.format(k, v) for k, v in kwonlyargs_optional),
+        '**{0}'.format(varkw) if varkw else None,
     ]))
diff --git a/t/unit/utils/test_functional.py b/t/unit/utils/test_functional.py
--- a/t/unit/utils/test_functional.py
+++ b/t/unit/utils/test_functional.py
@@ -165,7 +165,7 @@ def f(x, y, kwarg=1):
     @skip.unless_python3()
     def test_regression_3678(self):
         local = {}
-        fun = ('def f(foo, *args, bar=""):'
+        fun = ('def f(foo, *args, bar="", **kwargs):'
                '    return foo, args, bar')
         exec(fun, {}, local)
Incorrect handling of tasks with keyword arguments

Probably the problem is this: https://github.com/celery/celery/pull/3658

## Checklist

- [x] I have included the output of ``celery -A proj report`` in the issue. (if you are not able to do this, then at least specify the Celery version affected).
- [x] I have verified that the issue exists against the `master` branch of Celery.

## Steps to reproduce

### celery report

```
software -> celery:4.0.1 (latentcall) kombu:4.0.1 py:3.5.2 billiard:3.5.0.2 memory:N/A
platform -> system:Linux arch:64bit, ELF imp:CPython
loader -> celery.loaders.app.AppLoader
settings -> transport:memory results:cache+memory:///

result_backend: 'cache+memory:///'
broker_url: 'memory://localhost//'
```

### Python code

```python
from celery import Celery

app = Celery(broker="memory://", backend="cache+memory://")

@app.task
def test_func(foo, *args, bar=''):
    return foo
```

### Reproduce with

```
res = test_func.delay("")
```

## Expected behavior

AsyncResult should be returned.

## Actual behavior

```
>>> test_func.delay("")
Traceback (most recent call last):
  File "_confidential_.env/lib/python3.5/site-packages/celery/local.py", line 317, in _get_current_object
    return object.__getattribute__(self, '__thing')
AttributeError: 'PromiseProxy' object has no attribute '__thing'

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "_confidential_.env/lib/python3.5/site-packages/celery/local.py", line 146, in __getattr__
    return getattr(self._get_current_object(), name)
  File "_confidential_.env/lib/python3.5/site-packages/celery/local.py", line 319, in _get_current_object
    return self.__evaluate__()
  File "_confidential_.env/lib/python3.5/site-packages/celery/local.py", line 349, in __evaluate__
    thing = Proxy._get_current_object(self)
  File "_confidential_.env/lib/python3.5/site-packages/celery/local.py", line 109, in _get_current_object
    return loc(*self.__args, **self.__kwargs)
  File "_confidential_.env/lib/python3.5/site-packages/celery/app/base.py", line 453, in _task_from_fun
    '__header__': staticmethod(head_from_fun(fun, bound=bind)),
  File "_confidential_.env/lib/python3.5/site-packages/celery/utils/functional.py", line 281, in head_from_fun
    exec(definition, namespace)
  File "<string>", line 2
    def test_func(foo, *args, *, bar="0"):
                              ^
SyntaxError: invalid syntax
```
probably the fix is to change https://github.com/celery/celery/blob/master/celery/utils/functional.py#L249 to the following:

```python
return ', '.join(filter(None, [
    ', '.join(positional),
    ', '.join('{0}={1}'.format(k, v) for k, v in optional),
    '*{0}'.format(spec.varargs) if spec.varargs else None,
    '**{0}'.format(spec.varkw) if spec.varkw else None,
    '*' if (kwonlyargs or kwonlyargs_optional) and not spec.varargs else None,  # this is the changed line
    ', '.join(kwonlyargs) if kwonlyargs else None,
    ', '.join('{0}="{1}"'.format(k, v) for k, v in kwonlyargs_optional),
]))
```
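The ordering rule being violated, shown directly (a sketch of assembling a signature string; Python itself enforces that a bare `*` cannot follow `*args` and that `**kwargs` comes last):

```python
parts = [
    'foo',        # positional
    '*args',      # var-positional
    'bar=""',     # keyword-only: valid after *args without a bare '*'
    '**kwargs',   # var-keyword: must be the final entry
]
print('def f({0}): pass'.format(', '.join(parts)))
# -> def f(foo, *args, bar="", **kwargs): pass   -- compiles fine.
# The buggy assembly instead produced `def f(foo, *args, *, bar="0")`,
# inserting a bare '*' after *args, which is a SyntaxError.
```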
2016-12-16T12:00:16
celery/celery
3695
celery__celery-3695
[ "3687" ]
8c7ac5d84d245028f02b5d7f6a8684a1ce84dc9e
diff --git a/celery/worker/strategy.py b/celery/worker/strategy.py
--- a/celery/worker/strategy.py
+++ b/celery/worker/strategy.py
@@ -30,7 +30,7 @@ def proto1_to_proto2(message, body):
         Tuple: of ``(body, headers, already_decoded_status, utc)``
     """
     try:
-        args, kwargs = body['args'], body['kwargs']
+        args, kwargs = body.get('args', ()), body.get('kwargs', {})
         kwargs.items  # pylint: disable=pointless-statement
     except KeyError:
         raise InvalidTaskError('Message does not have args/kwargs')
diff --git a/t/unit/worker/test_strategy.py b/t/unit/worker/test_strategy.py
--- a/t/unit/worker/test_strategy.py
+++ b/t/unit/worker/test_strategy.py
@@ -27,13 +27,13 @@ def setup(self):
 
     def test_message_without_args(self):
         self.body.pop('args')
-        with pytest.raises(InvalidTaskError):
-            proto1_to_proto2(self.message, self.body)
+        body, _, _, _ = proto1_to_proto2(self.message, self.body)
+        assert body[:2] == ((), {'foo': 'baz'})
 
     def test_message_without_kwargs(self):
         self.body.pop('kwargs')
-        with pytest.raises(InvalidTaskError):
-            proto1_to_proto2(self.message, self.body)
+        body, _, _, _ = proto1_to_proto2(self.message, self.body)
+        assert body[:2] == ((1,), {})
 
     def test_message_kwargs_not_mapping(self):
         self.body['kwargs'] = (2,)
Celery fails to convert tasks w/o kwargs from protocol v1 to v2

InvalidTaskError is being raised when there's no kwargs present in the message. There's actually a test (https://github.com/celery/celery/blob/master/t/unit/worker/test_strategy.py#L33-L36) ensuring that an `InvalidTaskError` is raised in case there's no kwargs in the task. This is not what used to happen in v1. Docs also say the following for kwargs:

```
Dictionary of keyword arguments. Will be an empty dictionary if not provided.
```

as you can see in http://docs.celeryproject.org/en/latest/internals/protocol.html#id8

I can open a PR if you guys think this rule should really be removed. Thanks!
Thanks for reporting this, if it differs from the previous behavior then it's a mistake!
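For illustration, the essence of the fix in isolation (a simplified sketch, not the full `proto1_to_proto2` implementation): missing `args`/`kwargs` fields default to empty, as the protocol docs quoted above require.

```python
def extract_args(body):
    # Tolerate absent fields instead of raising on them...
    args = body.get('args', ())
    kwargs = body.get('kwargs', {})
    # ...but a present, non-mapping kwargs is still an error.
    if not isinstance(kwargs, dict):
        raise ValueError('kwargs must be a mapping')
    return args, kwargs

assert extract_args({'args': (1,)}) == ((1,), {})
assert extract_args({'kwargs': {'foo': 'baz'}}) == ((), {'foo': 'baz'})
```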
2016-12-16T14:30:54
celery/celery
3705
celery__celery-3705
[ "3575" ]
b5b64945daaba87a7d975b1b4fca08b0948b91bb
diff --git a/celery/backends/mongodb.py b/celery/backends/mongodb.py
--- a/celery/backends/mongodb.py
+++ b/celery/backends/mongodb.py
@@ -29,6 +29,8 @@ class InvalidDocument(Exception):  # noqa
 
 __all__ = ['MongoBackend']
 
+BINARY_CODECS = frozenset(['pickle', 'msgpack'])
+
 
 class MongoBackend(BaseBackend):
     """MongoDB result backend.
@@ -150,7 +152,12 @@ def encode(self, data):
         if self.serializer == 'bson':
             # mongodb handles serialization
             return data
-        return super(MongoBackend, self).encode(data)
+        payload = super(MongoBackend, self).encode(data)
+
+        # serializer which are in a unsupported format (pickle/binary)
+        if self.serializer in BINARY_CODECS:
+            payload = Binary(payload)
+        return payload
 
     def decode(self, data):
         if self.serializer == 'bson':
mongodb result backend with pickle serialisation broken in 4.0.0

Just upgraded to celery 4.0.0. The MongoDB result backend with pickle no longer works, with the exception at the end of this message. This is because commit https://github.com/celery/celery/commit/639b40f6308267312a1030bb3d6ac5805069510a removes the **essential** Binary() wrapper from the data written to Mongo. As a result it writes raw binary data to Mongo as a string, which Mongo then rejects.

I'm using settings:

```
CELERY_ACCEPT_CONTENT = ['pickle']
CELERY_TASK_SERIALIZER = 'pickle'
CELERY_RESULT_SERIALIZER = 'pickle'
```

Exception seen:

> [2016-11-10 16:01:02,118: ERROR/MainProcess] Pool callback raised exception: InvalidStringData("strings in documents must be valid UTF-8: '\\x80\\x02N.'",)
> Traceback (most recent call last):
> File "/usr/lib/python2.7/site-packages/billiard/pool.py", line 1748, in safe_apply_callback
> fun(*args, **kwargs)
> File "/usr/lib/python2.7/site-packages/celery/worker/request.py", line 366, in on_failure
> self.id, exc, request=self, store_result=self.store_errors,
> File "/usr/lib/python2.7/site-packages/celery/backends/base.py", line 163, in mark_as_failure
> traceback=traceback, request=request)
> File "/usr/lib/python2.7/site-packages/celery/backends/base.py", line 309, in store_result
> request=request, **kwargs)
> File "/usr/lib/python2.7/site-packages/celery/backends/mongodb.py", line 175, in _store_result
> self.collection.save(meta)
> File "/usr/lib/python2.7/site-packages/pymongo/collection.py", line 2192, in save
> check_keys, False, manipulate, write_concern)
> File "/usr/lib/python2.7/site-packages/pymongo/collection.py", line 715, in _update
> codec_options=self.__write_response_codec_options).copy()
> File "/usr/lib/python2.7/site-packages/pymongo/pool.py", line 244, in command
> self._raise_connection_failure(error)
> File "/usr/lib/python2.7/site-packages/pymongo/pool.py", line 372, in _raise_connection_failure
> raise error
> InvalidStringData: strings in documents must be valid UTF-8: '\x80\x02N.'
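The shape of the restored behaviour, in a standalone sketch (not the backend's actual `encode()`): pickled payloads must pass through `bson.Binary` before being written.

```python
import pickle

from bson.binary import Binary

def encode_result_for_mongo(result):
    # pickle produces raw bytes; MongoDB rejects them as document strings,
    # so wrap them in a BSON Binary field before saving.
    return Binary(pickle.dumps(result, protocol=2))
```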
2016-12-19T15:42:56
celery/celery
3721
celery__celery-3721
[ "4412" ]
98222863674c7be625d9091d0301fd02a1244b07
diff --git a/celery/beat.py b/celery/beat.py
--- a/celery/beat.py
+++ b/celery/beat.py
@@ -232,10 +232,21 @@ def adjust(self, n, drift=-0.010):
     def is_due(self, entry):
         return entry.is_due()
 
+    def _when(self, entry, next_time_to_run, mktime=time.mktime):
+        adjust = self.adjust
+
+        return (mktime(entry.schedule.now().timetuple()) +
+                (adjust(next_time_to_run) or 0))
+
+    def populate_heap(self, event_t=event_t, heapify=heapq.heapify):
+        """Populate the heap with the data contained in the schedule."""
+        self._heap = [event_t(self._when(e, e.is_due()[1]) or 0, 5, e)
+                      for e in values(self.schedule)]
+        heapify(self._heap)
+
     # pylint disable=redefined-outer-name
-    def tick(self, event_t=event_t, min=min,
-             heappop=heapq.heappop, heappush=heapq.heappush,
-             heapify=heapq.heapify, mktime=time.mktime):
+    def tick(self, event_t=event_t, min=min, heappop=heapq.heappop,
+             heappush=heapq.heappush):
         """Run a tick - one iteration of the scheduler.
 
         Executes one due task per call.
@@ -243,17 +254,14 @@ def tick(self, event_t=event_t, min=min,
         Returns:
             float: preferred delay in seconds for next call.
         """
-        def _when(entry, next_time_to_run):
-            return (mktime(entry.schedule.now().timetuple()) +
-                    (adjust(next_time_to_run) or 0))
-
         adjust = self.adjust
         max_interval = self.max_interval
+
+        if self._heap is None:
+            self.populate_heap()
+
         H = self._heap
-        if H is None:
-            H = self._heap = [event_t(_when(e, e.is_due()[1]) or 0, 5, e)
-                              for e in values(self.schedule)]
-            heapify(H)
+
         if not H:
             return max_interval
 
@@ -265,7 +273,7 @@ def _when(entry, next_time_to_run):
             if verify is event:
                 next_entry = self.reserve(entry)
                 self.apply_entry(entry, producer=self.producer)
-                heappush(H, event_t(_when(next_entry, next_time_to_run),
+                heappush(H, event_t(self._when(next_entry, next_time_to_run),
                                     event[1], next_entry))
                 return 0
             else:
diff --git a/t/unit/app/test_beat.py b/t/unit/app/test_beat.py
--- a/t/unit/app/test_beat.py
+++ b/t/unit/app/test_beat.py
@@ -6,6 +6,7 @@
 from case import Mock, call, patch, skip
 from celery import beat
 from celery import uuid
+from celery.beat import event_t
 from celery.five import keys, string_t
 from celery.schedules import schedule
 from celery.utils.objects import Bunch
@@ -339,6 +340,15 @@ def test_merge_inplace(self):
         assert 'baz' in a.schedule
         assert a.schedule['bar'].schedule._next_run_at == 40
 
+    @patch('celery.beat.Scheduler._when', return_value=1)
+    def test_populate_heap(self, _when):
+        scheduler = mScheduler(app=self.app)
+        scheduler.update_from_dict(
+            {'foo': {'schedule': mocked_schedule(True, 10)}}
+        )
+        scheduler.populate_heap()
+        assert scheduler._heap == [event_t(1, 5, scheduler.schedule['foo'])]
+
 
 def create_persistent_scheduler(shelv=None):
     if shelv is None:
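To make the heap mechanics concrete, here is a toy version of the pattern `populate_heap()`/`tick()` implement, using plain `heapq` and made-up interval data:

```python
import heapq

# Toy schedule: entry name -> interval in seconds (illustrative data).
schedule = {'every-10s': 10.0, 'every-60s': 60.0}

# populate: one (next_run_time, name) pair per entry, heapified once.
heap = [(interval, name) for name, interval in schedule.items()]
heapq.heapify(heap)

# tick: pop the soonest entry, fire it, push it back with its next run time.
when, name = heapq.heappop(heap)
heapq.heappush(heap, (when + schedule[name], name))
print('fired', name, 'at', when)
```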
2016-12-23T22:27:36
celery/celery
3730
celery__celery-3730
[ "3726" ]
892623997886dbe184ff102b0d7857eb0f298459
diff --git a/celery/app/task.py b/celery/app/task.py
--- a/celery/app/task.py
+++ b/celery/app/task.py
@@ -850,7 +850,7 @@ def replace(self, sig):
             chord = None
 
         if self.request.chain:
-            for t in self.request.chain:
+            for t in reversed(self.request.chain):
                 sig |= signature(t, app=self.app)
 
         sig.freeze(self.request.id,
diff --git a/t/integration/tasks.py b/t/integration/tasks.py
--- a/t/integration/tasks.py
+++ b/t/integration/tasks.py
@@ -13,6 +13,12 @@ def add(x, y):
     return x + y
 
 
+@shared_task(bind=True)
+def add_replaced(self, x, y):
+    """Add two numbers (via the add task)."""
+    raise self.replace(add.s(x, y))
+
+
 @shared_task
 def print_unicode(log_message='hå它 valmuefrø', print_message='hiöäüß'):
     """Task that both logs and print strings containing funny characters."""
diff --git a/t/integration/test_canvas.py b/t/integration/test_canvas.py
--- a/t/integration/test_canvas.py
+++ b/t/integration/test_canvas.py
@@ -4,7 +4,7 @@
 from celery.exceptions import TimeoutError
 from celery.result import AsyncResult, GroupResult
 from .conftest import flaky
-from .tasks import add, collect_ids, ids
+from .tasks import add, add_replaced, collect_ids, ids
 
 TIMEOUT = 120
 
@@ -20,12 +20,12 @@ def test_simple_chain(self, manager):
     def test_complex_chain(self, manager):
         c = (
             add.s(2, 2) | (
-                add.s(4) | add.s(8) | add.s(16)
+                add.s(4) | add_replaced.s(8) | add.s(16) | add.s(32)
             ) |
             group(add.s(i) for i in range(4))
         )
         res = c()
-        assert res.get(timeout=TIMEOUT) == [32, 33, 34, 35]
+        assert res.get(timeout=TIMEOUT) == [64, 65, 66, 67]
 
     @flaky
     def test_parent_ids(self, manager, num=10):
Replaced task in chain causes chain to skip to last link (4.0.0–4.0.2)

## Checklist

- [x] I have included the output of ``celery -A proj report`` in the issue.

```
software -> celery:4.0.2 (latentcall) kombu:4.0.2 py:2.7.11 billiard:3.5.0.2 py-amqp:2.1.4
platform -> system:Darwin arch:64bit imp:CPython
loader -> celery.loaders.app.AppLoader
settings -> transport:amqp results:redis://localhost:6379/0

broker_url: u'amqp://guest:********@localhost:5672//'
result_backend: u'redis://localhost:6379/0'
```

(The above output is against the current `master`, at commit `8926239`.) Also:

```
(foo) $ pip freeze | grep redis
redis==2.10.5
```

- [x] I have verified that the issue exists against the `master` branch of Celery.

## Steps to reproduce

*foo.py:*

```python
from celery import Celery

app = Celery('foo',
             broker='amqp://guest:guest@localhost:5672//',
             backend='redis://localhost:6379/0')

@app.task
def a(val):
    return val + ' A'

@app.task
def b(val):
    return val + ' B'

@app.task(bind=True)
def b_(self, val):
    raise self.replace(b.s(val))

@app.task
def c(val):
    return val + ' C'

@app.task
def d(val):
    return val + ' D'

@app.task
def e(val):
    return val + ' E'

@app.task
def f(val):
    return val + ' F'
```

*bar.py:*

```python
from foo import a, b, b_, c, d, e, f

if __name__ == '__main__':
    c1 = a.s('hello') | b.s() | c.s() | d.s() | e.s() | f.s()
    c2 = a.s('hello') | b_.s() | c.s() | d.s() | e.s() | f.s()
    c3 = a.s('hello') | b_.s() | c.s()

    print c1().get()  # hello A B C D E F
    print c2().get()  # hello A B F (expected: 'hello A B C D E F')
    print c3().get()  # hello A B C
```

## Expected behavior

All tasks in a chain containing a replaced task should be run.

## Actual behavior

Any tasks between the replaced task and the final task in the chain are not run. Only the tasks up to and including the replaced task, plus the last task in the chain, are run. Tested in 4.0.0–4.0.2 (including `master` at `8926239`).
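A plain-Python illustration of why `reversed()` is the fix, assuming `request.chain` holds the remaining tasks in reverse execution order with the next task last (which is what the patch's use of `reversed()` implies); toy strings stand in for Celery signatures:

```python
stored_chain = ['f', 'e', 'd', 'c']  # chain as stored: next task is last

def fold(replacement, tasks):
    # `sig |= signature(t)` appends each task to run after `sig`.
    sig = [replacement]
    for t in tasks:
        sig.append(t)
    return ' -> '.join(sig)

print(fold('b', stored_chain))            # b -> f -> e -> d -> c  (buggy order)
print(fold('b', reversed(stored_chain)))  # b -> c -> d -> e -> f  (execution order)
```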
2016-12-29T22:49:32
celery/celery
3731
celery__celery-3731
[ "3725" ]
9d2566e9c0764ab7467db47610ccb3ee5f4303ff
diff --git a/celery/canvas.py b/celery/canvas.py
--- a/celery/canvas.py
+++ b/celery/canvas.py
@@ -1253,7 +1253,7 @@ def apply(self, args=(), kwargs={}, propagate=True, body=None, **options):
         )
 
     def _traverse_tasks(self, tasks, value=None):
-        stack = deque(list(tasks))
+        stack = deque(tasks)
         while stack:
             task = stack.popleft()
             if isinstance(task, group):
@@ -1262,7 +1262,9 @@ def _traverse_tasks(self, tasks, value=None):
                 yield task if value is None else value
 
     def __length_hint__(self):
-        return sum(self._traverse_tasks(self.tasks, 1))
+        tasks = (self.tasks.tasks if isinstance(self.tasks, group)
+                 else self.tasks)
+        return sum(self._traverse_tasks(tasks, 1))
 
     def run(self, header, body, partial_args, app=None, interval=None,
             countdown=1, max_retries=None, eager=False,
diff --git a/t/integration/tasks.py b/t/integration/tasks.py
--- a/t/integration/tasks.py
+++ b/t/integration/tasks.py
@@ -1,7 +1,7 @@
 # -*- coding: utf-8 -*-
 from __future__ import absolute_import, unicode_literals
 from time import sleep
-from celery import shared_task
+from celery import shared_task, group
 from celery.utils.log import get_task_logger
 
 logger = get_task_logger(__name__)
@@ -19,6 +19,13 @@ def add_replaced(self, x, y):
     raise self.replace(add.s(x, y))
 
 
+@shared_task(bind=True)
+def add_to_all(self, nums, val):
+    """Add the given value to all supplied numbers."""
+    subtasks = [add.s(num, val) for num in nums]
+    raise self.replace(group(*subtasks))
+
+
 @shared_task
 def print_unicode(log_message='hå它 valmuefrø', print_message='hiöäüß'):
     """Task that both logs and print strings containing funny characters."""
diff --git a/t/integration/test_canvas.py b/t/integration/test_canvas.py
--- a/t/integration/test_canvas.py
+++ b/t/integration/test_canvas.py
@@ -4,7 +4,7 @@
 from celery.exceptions import TimeoutError
 from celery.result import AsyncResult, GroupResult
 from .conftest import flaky
-from .tasks import add, add_replaced, collect_ids, ids
+from .tasks import add, add_replaced, add_to_all, collect_ids, ids
 
 TIMEOUT = 120
 
@@ -88,6 +88,18 @@ def assert_ids(r, expected_value, expected_root_id, expected_parent_id):
 
 class test_chord:
 
+    @flaky
+    def test_group_chain(self, manager):
+        if not manager.app.conf.result_backend.startswith('redis'):
+            raise pytest.skip('Requires redis result backend.')
+        c = (
+            add.s(2, 2) |
+            group(add.s(i) for i in range(4)) |
+            add_to_all.s(8)
+        )
+        res = c()
+        assert res.get(timeout=TIMEOUT) == [12, 13, 14, 15]
+
     @flaky
     def test_parent_ids(self, manager):
         if not manager.app.conf.result_backend.startswith('redis'):
Replaced task does not complete when replacement sig is a group (4.0.2)

## Checklist

- [x] I have included the output of ``celery -A proj report`` in the issue.

```
software -> celery:4.0.2 (latentcall) kombu:4.0.2 py:2.7.11 billiard:3.5.0.2 py-amqp:2.1.4
platform -> system:Darwin arch:64bit imp:CPython
loader -> celery.loaders.app.AppLoader
settings -> transport:amqp results:redis://localhost:6379/0

broker_url: u'amqp://guest:********@localhost:5672//'
result_backend: u'redis://localhost:6379/0'
```

(The above output is against `master`.) Also:

```
(foo) $ pip freeze | grep redis
redis==2.10.5
```

- [x] I have verified that the issue exists against the `master` branch of Celery.

## Steps to reproduce

*foo.py:*

```python
from celery import Celery, group

app = Celery('foo',
             broker='amqp://guest:guest@localhost:5672//',
             backend='redis://localhost:6379/0')

@app.task
def multiply(a, b):
    return a * b

@app.task(bind=True)
def multiply_by_two(self, num):
    raise self.replace(multiply.s(num, 2))

@app.task(bind=True)
def multiply_all_by_two(self, nums):
    subtasks = [multiply_by_two.s(n) for n in nums]
    raise self.replace(group(*subtasks))
```

*bar.py:*

```python
from foo import multiply_by_two, multiply_all_by_two

if __name__ == '__main__':
    print multiply_by_two.delay(3).get()              # completes in 4.0.1 and 4.0.2
    print multiply_all_by_two.delay([1, 2, 3]).get()  # completes in 4.0.1; does not complete in 4.0.2
```

## Expected behavior

A replaced task should complete when the replacement sig is a group (and does in 4.0.1).

## Actual behavior

A replaced task does not complete when the replacement sig is a group (in 4.0.2).
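The counting side of the bug, reduced to plain data structures: when the chord header is itself a `group`, its child tasks must be unrolled before counting, which is what the patched `__length_hint__` does by unwrapping `self.tasks.tasks` first. A sketch with lists standing in for groups:

```python
def count_leaves(tasks):
    # Mirrors chord._traverse_tasks: unroll nested "groups" (lists here)
    # and count the leaf tasks.
    stack = list(tasks)
    total = 0
    while stack:
        t = stack.pop()
        if isinstance(t, list):
            stack.extend(t)
        else:
            total += 1
    return total

header = ['t1', 't2', ['t3', 't4']]  # a group containing a nested group
assert count_leaves(header) == 4     # the chord must wait for 4 results
```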
2016-12-30T03:10:22
celery/celery
3746
celery__celery-3746
[ "3745" ]
ce8ea16b8df10ea323ff2933d050578fe609b61a
diff --git a/celery/worker/consumer/consumer.py b/celery/worker/consumer/consumer.py
--- a/celery/worker/consumer/consumer.py
+++ b/celery/worker/consumer/consumer.py
@@ -42,6 +42,8 @@
 __all__ = ['Consumer', 'Evloop', 'dump_body']
 
 CLOSE = bootsteps.CLOSE
+TERMINATE = bootsteps.TERMINATE
+STOP_CONDITIONS = {CLOSE, TERMINATE}
 
 logger = get_logger(__name__)
 debug, info, warn, error, crit = (logger.debug, logger.info, logger.warning,
                                   logger.error, logger.critical)
@@ -305,7 +307,7 @@ def _limit_task(self, request, bucket, tokens):
 
     def start(self):
         blueprint = self.blueprint
-        while blueprint.state != CLOSE:
+        while blueprint.state not in STOP_CONDITIONS:
             maybe_shutdown()
             if self.restart_count:
                 try:
@@ -324,7 +326,7 @@ def start(self):
                 if isinstance(exc, OSError) and exc.errno == errno.EMFILE:
                     raise  # Too many open files
                 maybe_shutdown()
-                if blueprint.state != CLOSE:
+                if blueprint.state not in STOP_CONDITIONS:
                     if self.connection:
                         self.on_connection_error_after_connected(exc)
                     else:
diff --git a/t/unit/worker/test_consumer.py b/t/unit/worker/test_consumer.py
--- a/t/unit/worker/test_consumer.py
+++ b/t/unit/worker/test_consumer.py
@@ -10,7 +10,8 @@
 from billiard.exceptions import RestartFreqExceeded
 
 from celery.worker.consumer.agent import Agent
-from celery.worker.consumer.consumer import CLOSE, Consumer, dump_body
+from celery.worker.consumer.consumer import (CLOSE, TERMINATE,
+                                             Consumer, dump_body)
 from celery.worker.consumer.gossip import Gossip
 from celery.worker.consumer.heart import Heart
 from celery.worker.consumer.mingle import Mingle
@@ -153,6 +154,35 @@ def se(*args, **kwargs):
             c.start()
         sleep.assert_called_with(1)
 
+    def test_do_not_restart_when_closed(self):
+        c = self.get_consumer()
+
+        c.blueprint.state = None
+
+        def bp_start(*args, **kwargs):
+            c.blueprint.state = CLOSE
+
+        c.blueprint.start.side_effect = bp_start
+        with patch('celery.worker.consumer.consumer.sleep'):
+            c.start()
+
+        c.blueprint.start.assert_called_once_with(c)
+
+    def test_do_not_restart_when_terminated(self):
+        c = self.get_consumer()
+
+        c.blueprint.state = None
+
+        def bp_start(*args, **kwargs):
+            c.blueprint.state = TERMINATE
+
+        c.blueprint.start.side_effect = bp_start
+
+        with patch('celery.worker.consumer.consumer.sleep'):
+            c.start()
+
+        c.blueprint.start.assert_called_once_with(c)
+
     def test_no_retry_raises_error(self):
         self.app.conf.broker_connection_retry = False
         c = self.get_consumer()
Consumer does not shutdown properly when embedded in gevent application

Consider `blueprint.state != CLOSE` in https://github.com/celery/celery/blob/master/celery/worker/consumer/consumer.py#L308 and https://github.com/celery/celery/blob/master/celery/worker/consumer/consumer.py#L327, and the logic in https://github.com/celery/celery/blob/master/celery/bootsteps.py#L169 and https://github.com/celery/celery/blob/master/celery/bootsteps.py#L178.

With gevent, the total order is enforced by virtue of running in a loop, so the problem is consistently reproducible: when the `WorkerController` is stopped, `blueprint.state` transitions from `CLOSE` to `TERMINATE`. Execution then returns to `Consumer.start()`, which keeps iterating in the while loop because it only checks `blueprint.state != CLOSE` (when in fact the state is `TERMINATE`), and restarts the consumer yet again, preventing the `WorkerController` from stopping. This produces the following peculiar output:

```
KeyboardInterrupt
Sun Jan 8 07:34:49 2017
DEBUG 2017-01-08 07:34:55,691 bootsteps 21273 140206893775912 | Worker: Closing Pool...
DEBUG 2017-01-08 07:34:55,693 bootsteps 21273 140206893775912 | Worker: stopping <step: Tasks>
DEBUG 2017-01-08 07:34:55,693 tasks 21273 140206893775912 Canceling task consumer...
DEBUG 2017-01-08 07:34:55,695 bootsteps 21273 140206893775912 | Worker: stopping <step: event loop>
DEBUG 2017-01-08 07:34:55,696 bootsteps 21273 140206893775912 | Worker: Closing Consumer...
DEBUG 2017-01-08 07:35:22,008 bootsteps 21273 140206893775912 | Worker: Stopping Consumer...
DEBUG 2017-01-08 07:35:36,808 bootsteps 21273 140206893775912 | Consumer: Closing Connection...
DEBUG 2017-01-08 07:35:36,808 bootsteps 21273 140206893775912 | Consumer: Closing Events...
DEBUG 2017-01-08 07:35:36,809 bootsteps 21273 140206893775912 | Consumer: Closing Heart...
DEBUG 2017-01-08 07:35:36,810 bootsteps 21273 140206893775912 | Consumer: Closing Tasks...
DEBUG 2017-01-08 07:35:36,811 bootsteps 21273 140206893775912 | Consumer: Closing event loop...
DEBUG 2017-01-08 07:35:39,092 bootsteps 21273 140206893775912 | Consumer: Stopping event loop...
DEBUG 2017-01-08 07:35:39,092 bootsteps 21273 140206893775912 | Consumer: Stopping Tasks...
DEBUG 2017-01-08 07:35:39,093 tasks 21273 140206893775912 Canceling task consumer...
DEBUG 2017-01-08 07:35:39,093 bootsteps 21273 140206893775912 | Consumer: Stopping Heart...
DEBUG 2017-01-08 07:35:39,094 bootsteps 21273 140206893775912 | Consumer: Stopping Events...
DEBUG 2017-01-08 07:35:39,094 bootsteps 21273 140206893775912 | Consumer: Stopping Connection...
DEBUG 2017-01-08 07:36:06,159 bootsteps 21273 140206893775912 | Worker: Stopping Pool...
DEBUG 2017-01-08 07:36:11,245 bootsteps 21273 140206893775912 | Consumer: Shutdown Tasks...
DEBUG 2017-01-08 07:36:11,246 tasks 21273 140206893775912 Canceling task consumer...
DEBUG 2017-01-08 07:36:11,247 tasks 21273 140206893775912 Closing consumer channel...
DEBUG 2017-01-08 07:36:11,247 bootsteps 21273 140206893775912 | Consumer: Shutdown Heart...
DEBUG 2017-01-08 07:36:11,248 bootsteps 21273 140206893775912 | Consumer: Shutdown Events...
DEBUG 2017-01-08 07:36:11,249 bootsteps 21273 140206893775912 | Consumer: Shutdown Connection...
DEBUG 2017-01-08 07:36:21,035 bootsteps 21273 140206893774392 ^-- substep ok
DEBUG 2017-01-08 07:38:53,621 bootsteps 21273 140206893774392 | Consumer: Starting Connection
INFO 2017-01-08 07:38:53,643 connection 21273 140206893774392 Connected to django://localhost//
DEBUG 2017-01-08 07:38:53,644 bootsteps 21273 140206893774392 ^-- substep ok
DEBUG 2017-01-08 07:38:53,645 bootsteps 21273 140206893774392 | Consumer: Starting Events
DEBUG 2017-01-08 07:38:53,657 bootsteps 21273 140206893774392 ^-- substep ok
DEBUG 2017-01-08 07:38:53,657 bootsteps 21273 140206893774392 | Consumer: Starting Heart
DEBUG 2017-01-08 07:38:53,658 bootsteps 21273 140206893774392 ^-- substep ok
DEBUG 2017-01-08 07:38:53,658 bootsteps 21273 140206893774392 | Consumer: Starting Tasks
DEBUG 2017-01-08 07:38:53,671 bootsteps 21273 140206893774392 ^-- substep ok
DEBUG 2017-01-08 07:38:53,672 bootsteps 21273 140206893774392 | Consumer: Starting event loop
```
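A minimal, self-contained sketch of the restart loop described above (illustrative names and states, not Celery's actual code): a `start()` that only tests for `CLOSE` keeps restarting after a stop that left the blueprint in `TERMINATE`, while checking both states stops cleanly. The test patch above exercises exactly these two states against the real `Consumer.start()`.

```python
# Toy model of the restart loop: the buggy guard lets TERMINATE slip through.
CLOSE, TERMINATE = 'close', 'terminate'

class Consumer:
    def __init__(self):
        self.state = None
        self.restarts = 0

    def blueprint_start(self):
        # Simulate a stop arriving while we run: with gevent's ordering the
        # state can already be TERMINATE when control returns here.
        self.state = TERMINATE

    def start_buggy(self):
        while self.state != CLOSE:          # buggy: TERMINATE is not CLOSE
            self.blueprint_start()
            self.restarts += 1
            if self.restarts > 3:           # avoid an infinite loop in the demo
                break

    def start_fixed(self):
        while self.state not in (CLOSE, TERMINATE):  # fixed guard
            self.blueprint_start()
            self.restarts += 1

c = Consumer()
c.start_buggy()
print(c.restarts)   # 4 -- kept restarting until the demo guard tripped
c = Consumer()
c.start_fixed()
print(c.restarts)   # 1 -- stops as soon as the terminate is observed
```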
2017-01-08T08:02:25
celery/celery
3752
celery__celery-3752
[ "3620" ]
c1fa9af097971256f44c3e4b7d77810166a20693
diff --git a/celery/worker/loops.py b/celery/worker/loops.py
--- a/celery/worker/loops.py
+++ b/celery/worker/loops.py
@@ -44,10 +44,10 @@ def asynloop(obj, connection, consumer, blueprint, hub, qos,
         _enable_amqheartbeats(hub.timer, connection, rate=hbrate)
 
     consumer.on_message = on_task_received
-    consumer.consume()
-    obj.on_ready()
     obj.controller.register_with_event_loop(hub)
     obj.register_with_event_loop(hub)
+    consumer.consume()
+    obj.on_ready()
 
     # did_start_ok will verify that pool processes were able to start,
     # but this will only work the first time we start, as
Celery Worker crashing after first task with TypeError: 'NoneType' object is not callable ## Checklist - [X] I have included the output of ``celery -A proj report`` in the issue. (if you are not able to do this, then at least specify the Celery version affected). ``` software -> celery:4.0.0 (latentcall) kombu:4.0.0 py:3.4.3 billiard:3.5.0.2 py-amqp:2.1.1 platform -> system:Linux arch:64bit, ELF imp:CPython loader -> celery.loaders.default.Loader settings -> transport:amqp results:disabled ``` - [X] I have verified that the issue exists against the `master` branch of Celery. Yes I've tested and it behaves the same using master. ## Steps to reproduce Not exactly sure, because other machines with the same specs and requirements are working. ## Expected behavior Should consume tasks. ## Actual behavior A task is accepted, then a traceback is logged, then the worker reconnects to the broker for some reason. This repeats forever: ``` [2016-11-23 23:09:00,468: INFO/MainProcess] Connected to amqp://user:**@10.136.131.6:5672// [2016-11-23 23:09:00,484: INFO/MainProcess] mingle: searching for neighbors [2016-11-23 23:09:01,921: INFO/MainProcess] mingle: sync with 1 nodes [2016-11-23 23:09:01,922: INFO/MainProcess] mingle: sync complete [2016-11-23 23:09:01,970: INFO/MainProcess] Received task: tasks.calculate_user_running_total[ddd103af-d527-4564-83f8-96b747767a0c] [2016-11-23 23:09:01,972: CRITICAL/MainProcess] Unrecoverable error: TypeError("'NoneType' object is not callable",) Traceback (most recent call last): File "./venv/lib/python3.4/site-packages/celery/worker/worker.py", line 203, in start self.blueprint.start(self) File "./venv/lib/python3.4/site-packages/celery/bootsteps.py", line 119, in start step.start(parent) File "./venv/lib/python3.4/site-packages/celery/bootsteps.py", line 370, in start return self.obj.start() File "./venv/lib/python3.4/site-packages/celery/worker/consumer/consumer.py", line 318, in start blueprint.start(self) File "./venv/lib/python3.4/site-packages/celery/bootsteps.py", line 119, in start step.start(parent) File "./venv/lib/python3.4/site-packages/celery/worker/consumer/consumer.py", line 584, in start c.loop(*c.loop_args()) File "./venv/lib/python3.4/site-packages/celery/worker/loops.py", line 47, in asynloop consumer.consume() File "./venv/lib/python3.4/site-packages/kombu/messaging.py", line 470, in consume self._basic_consume(T, no_ack=no_ack, nowait=False) File "./venv/lib/python3.4/site-packages/kombu/messaging.py", line 591, in _basic_consume no_ack=no_ack, nowait=nowait) File "./venv/lib/python3.4/site-packages/kombu/entity.py", line 737, in consume arguments=self.consumer_arguments) File "./venv/lib/python3.4/site-packages/amqp/channel.py", line 1578, in basic_consume wait=None if nowait else spec.Basic.ConsumeOk, File "./venv/lib/python3.4/site-packages/amqp/abstract_channel.py", line 73, in send_method return self.wait(wait, returns_tuple=returns_tuple) File "./venv/lib/python3.4/site-packages/amqp/abstract_channel.py", line 93, in wait self.connection.drain_events(timeout=timeout) File "./venv/lib/python3.4/site-packages/amqp/connection.py", line 464, in drain_events return self.blocking_read(timeout) File "./venv/lib/python3.4/site-packages/amqp/connection.py", line 469, in blocking_read return self.on_inbound_frame(frame) File "./venv/lib/python3.4/site-packages/amqp/method_framing.py", line 88, in on_frame callback(channel, msg.frame_method, msg.frame_args, msg) File "./venv/lib/python3.4/site-packages/amqp/connection.py", line 473, in 
on_inbound_method method_sig, payload, content, File "./venv/lib/python3.4/site-packages/amqp/abstract_channel.py", line 142, in dispatch_method listener(*args) File "./venv/lib/python3.4/site-packages/amqp/channel.py", line 1613, in _on_basic_deliver fun(msg) File "./venv/lib/python3.4/site-packages/kombu/messaging.py", line 617, in _receive_callback return on_m(message) if on_m else self.receive(decoded, message) File "./venv/lib/python3.4/site-packages/celery/worker/consumer/consumer.py", line 558, in on_task_received callbacks, File "./venv/lib/python3.4/site-packages/celery/worker/strategy.py", line 145, in task_message_handler handle(req) File "./venv/lib/python3.4/site-packages/celery/worker/worker.py", line 221, in _process_task_sem return self._quick_acquire(self._process_task, req) File "./venv/lib/python3.4/site-packages/kombu/async/semaphore.py", line 62, in acquire callback(*partial_args, **partial_kwargs) File "./venv/lib/python3.4/site-packages/celery/worker/worker.py", line 226, in _process_task req.execute_using_pool(self.pool) File "./venv/lib/python3.4/site-packages/celery/worker/request.py", line 532, in execute_using_pool correlation_id=task_id, File "./venv/lib/python3.4/site-packages/celery/concurrency/base.py", line 155, in apply_async **options) File "./venv/lib/python3.4/site-packages/billiard/pool.py", line 1487, in apply_async self._quick_put((TASK, (result._job, None, func, args, kwds))) TypeError: 'NoneType' object is not callable ``` The above lines are keep repeating every few seconds and no tasks are consumed from the queue.
The error is repeating in the log because the Celery worker daemon is crashing, so systemd restarts it. @ask, `self._quick_put` is somehow not defined. Should billiard check for a `None` value before calling, catch the exception, or should `self._quick_put` never be `None`? When I change [billiard/pool.py#L1483](https://github.com/celery/billiard/blob/16d6256dab56aa56b23d8b66d3a70b560014f317/billiard/pool.py#L1483) to `if self.threads or self._quick_put is None:` Celery does not crash anymore but for some reason the workers never process any tasks. More verbose output with logging level DEBUG: ``` [2016-11-27 14:48:09,875: DEBUG/MainProcess] | Worker: Preparing bootsteps. [2016-11-27 14:48:09,877: DEBUG/MainProcess] | Worker: Building graph... [2016-11-27 14:48:09,878: DEBUG/MainProcess] | Worker: New boot order: {Timer, Hub, Pool, Autoscaler, StateDB, Beat, Consumer} [2016-11-27 14:48:09,889: DEBUG/MainProcess] | Consumer: Preparing bootsteps. [2016-11-27 14:48:09,889: DEBUG/MainProcess] | Consumer: Building graph... [2016-11-27 14:48:09,898: DEBUG/MainProcess] | Consumer: New boot order: {Connection, Agent, Events, Mingle, Tasks, Control, Gossip, Heart, event loop} [2016-11-27 14:48:09,908: DEBUG/MainProcess] | Worker: Starting Hub [2016-11-27 14:48:09,908: DEBUG/MainProcess] ^-- substep ok [2016-11-27 14:48:09,908: DEBUG/MainProcess] | Worker: Starting Pool [2016-11-27 14:48:09,998: DEBUG/MainProcess] ^-- substep ok [2016-11-27 14:48:09,999: DEBUG/MainProcess] | Worker: Starting Consumer [2016-11-27 14:48:10,000: DEBUG/MainProcess] | Consumer: Starting Connection [2016-11-27 14:48:10,016: DEBUG/MainProcess] Start from server, version: 0.9, properties: {'cluster_name': 'rabbit@rabbitmq', 'product': 'RabbitMQ', 'version': '3.5.6', 'information': 'Licensed under the MPL. See http://www.rabbitmq.com/', 'capabilities': {'authentication_failure_close': True, 'consumer_priorities': True, 'consumer_cancel_notify': True, 'per_consumer_qos': True, 'basic.nack': True, 'publisher_confirms': True, 'connection.blocked': True, 'exchange_exchange_bindings': True}, 'copyright': 'Copyright (C) 2007-2015 Pivotal Software, Inc.', 'platform': 'Erlang/OTP'}, mechanisms: ['AMQPLAIN', 'PLAIN'], locales: ['en_US'] [2016-11-27 14:48:10,018: INFO/MainProcess] Connected to amqp://user:**@10.136.131.6:5672// [2016-11-27 14:48:10,018: DEBUG/MainProcess] ^-- substep ok [2016-11-27 14:48:10,019: DEBUG/MainProcess] | Consumer: Starting Events [2016-11-27 14:48:10,031: DEBUG/MainProcess] Start from server, version: 0.9, properties: {'cluster_name': 'rabbit@rabbitmq', 'product': 'RabbitMQ', 'version': '3.5.6', 'information': 'Licensed under the MPL. 
See http://www.rabbitmq.com/', 'capabilities': {'authentication_failure_close': True, 'consumer_priorities': True, 'consumer_cancel_notify': True, 'per_consumer_qos': True, 'basic.nack': True, 'publisher_confirms': True, 'connection.blocked': True, 'exchange_exchange_bindings': True}, 'copyright': 'Copyright (C) 2007-2015 Pivotal Software, Inc.', 'platform': 'Erlang/OTP'}, mechanisms: ['AMQPLAIN', 'PLAIN'], locales: ['en_US'] [2016-11-27 14:48:10,034: DEBUG/MainProcess] ^-- substep ok [2016-11-27 14:48:10,034: DEBUG/MainProcess] | Consumer: Starting Mingle [2016-11-27 14:48:10,035: INFO/MainProcess] mingle: searching for neighbors [2016-11-27 14:48:10,036: DEBUG/MainProcess] using channel_id: 1 [2016-11-27 14:48:10,041: DEBUG/MainProcess] Channel open [2016-11-27 14:48:10,061: DEBUG/MainProcess] Start from server, version: 0.9, properties: {'cluster_name': 'rabbit@rabbitmq', 'product': 'RabbitMQ', 'version': '3.5.6', 'information': 'Licensed under the MPL. See http://www.rabbitmq.com/', 'capabilities': {'authentication_failure_close': True, 'consumer_priorities': True, 'consumer_cancel_notify': True, 'per_consumer_qos': True, 'basic.nack': True, 'publisher_confirms': True, 'connection.blocked': True, 'exchange_exchange_bindings': True}, 'copyright': 'Copyright (C) 2007-2015 Pivotal Software, Inc.', 'platform': 'Erlang/OTP'}, mechanisms: ['AMQPLAIN', 'PLAIN'], locales: ['en_US'] [2016-11-27 14:48:10,063: DEBUG/MainProcess] using channel_id: 1 [2016-11-27 14:48:10,064: DEBUG/MainProcess] Channel open [2016-11-27 14:48:11,189: INFO/MainProcess] mingle: sync with 3 nodes [2016-11-27 14:48:11,190: DEBUG/MainProcess] mingle: processing reply from celery@worker03 [2016-11-27 14:48:11,190: DEBUG/MainProcess] mingle: processing reply from celery@worker02 [2016-11-27 14:48:11,190: DEBUG/MainProcess] mingle: processing reply from celery@worker01 [2016-11-27 14:48:11,190: INFO/MainProcess] mingle: sync complete [2016-11-27 14:48:11,191: DEBUG/MainProcess] ^-- substep ok [2016-11-27 14:48:11,191: DEBUG/MainProcess] | Consumer: Starting Tasks [2016-11-27 14:48:11,244: DEBUG/MainProcess] ^-- substep ok [2016-11-27 14:48:11,244: DEBUG/MainProcess] | Consumer: Starting Control [2016-11-27 14:48:11,244: DEBUG/MainProcess] using channel_id: 2 [2016-11-27 14:48:11,246: DEBUG/MainProcess] Channel open [2016-11-27 14:48:11,251: DEBUG/MainProcess] ^-- substep ok [2016-11-27 14:48:11,251: DEBUG/MainProcess] | Consumer: Starting Gossip [2016-11-27 14:48:11,252: DEBUG/MainProcess] using channel_id: 3 [2016-11-27 14:48:11,253: DEBUG/MainProcess] Channel open [2016-11-27 14:48:11,257: DEBUG/MainProcess] ^-- substep ok [2016-11-27 14:48:11,258: DEBUG/MainProcess] | Consumer: Starting Heart [2016-11-27 14:48:11,259: DEBUG/MainProcess] using channel_id: 1 [2016-11-27 14:48:11,260: DEBUG/MainProcess] Channel open [2016-11-27 14:48:11,261: DEBUG/MainProcess] ^-- substep ok [2016-11-27 14:48:11,261: DEBUG/MainProcess] | Consumer: Starting event loop [2016-11-27 14:48:11,264: INFO/MainProcess] Received task: wakatime.tasks.cache_coding_activity[0eba267c-72e4-40ea-91dd-a1a7ab17c514] [2016-11-27 14:48:11,265: DEBUG/MainProcess] TaskPool: Apply <function _fast_trace_task at 0x7ff469300950> (args:('wakatime.tasks.cache_coding_activity', '0eba267c-72e4-40ea-91dd-a1a7ab17c514', {'argsrepr': '()', 'task': 'wakatime.tasks.cache_coding_activity', 'lang': 'py', 'parent_id': '81f0c7ce-1396-496f-bf64-ae243736c845', 'timelimit': [None, None], 'root_id': '128647cc-f558-4b7d-bafc-338d186b5cfa', 'reply_to': 
'e3c2b067-a058-3aa0-a3a1-384d4b917bbf', 'retries': 0, 'expires': None, 'delivery_info': {'exchange': '', 'priority': None, 'routing_key': 'cache', 'redelivered': True}, 'id': '0eba267c-72e4-40ea-91dd-a1a7ab17c514', 'correlation_id': '0eba267c-72e4-40ea-91dd-a1a7ab17c514', 'group': None, 'eta': None, 'kwargsrepr': "{'cache_projects': True, 'timeout': 15, 'user_id': UUID('d9c69ce0-f194-45a6-83cf-98f931fca8aa'), 'writes_only': False}", 'origin': 'gen3021@worker02'}, '[[], {"cache_projects": true, "timeout": 15, "user_id": "d9c69ce0-f194-45a6-83cf-98f931fca8aa", "writes_only": false}, {"callbacks": null, "chain": null, "chord": null, "errbacks": null}]', 'application/json', 'utf-8') kwargs:{}) [2016-11-27 14:48:11,266: CRITICAL/MainProcess] Unrecoverable error: TypeError("'NoneType' object is not callable",) Traceback (most recent call last): File "./venv/src/celery/celery/worker/worker.py", line 203, in start self.blueprint.start(self) File "./venv/src/celery/celery/bootsteps.py", line 119, in start step.start(parent) File "./venv/src/celery/celery/bootsteps.py", line 370, in start return self.obj.start() File "./venv/src/celery/celery/worker/consumer/consumer.py", line 318, in start blueprint.start(self) File "./venv/src/celery/celery/bootsteps.py", line 119, in start step.start(parent) File "./venv/src/celery/celery/worker/consumer/consumer.py", line 593, in start c.loop(*c.loop_args()) File "./venv/src/celery/celery/worker/loops.py", line 47, in asynloop consumer.consume() File "./venv/lib/python3.4/site-packages/kombu/messaging.py", line 470, in consume self._basic_consume(T, no_ack=no_ack, nowait=False) File "./venv/lib/python3.4/site-packages/kombu/messaging.py", line 591, in _basic_consume no_ack=no_ack, nowait=nowait) File "./venv/lib/python3.4/site-packages/kombu/entity.py", line 737, in consume arguments=self.consumer_arguments) File "./venv/lib/python3.4/site-packages/amqp/channel.py", line 1578, in basic_consume wait=None if nowait else spec.Basic.ConsumeOk, File "./venv/lib/python3.4/site-packages/amqp/abstract_channel.py", line 73, in send_method return self.wait(wait, returns_tuple=returns_tuple) File "./venv/lib/python3.4/site-packages/amqp/abstract_channel.py", line 93, in wait self.connection.drain_events(timeout=timeout) File "./venv/lib/python3.4/site-packages/amqp/connection.py", line 464, in drain_events return self.blocking_read(timeout) File "./venv/lib/python3.4/site-packages/amqp/connection.py", line 469, in blocking_read return self.on_inbound_frame(frame) File "./venv/lib/python3.4/site-packages/amqp/method_framing.py", line 88, in on_frame callback(channel, msg.frame_method, msg.frame_args, msg) File "./venv/lib/python3.4/site-packages/amqp/connection.py", line 473, in on_inbound_method method_sig, payload, content, File "./venv/lib/python3.4/site-packages/amqp/abstract_channel.py", line 142, in dispatch_method listener(*args) File "./venv/lib/python3.4/site-packages/amqp/channel.py", line 1613, in _on_basic_deliver fun(msg) File "./venv/lib/python3.4/site-packages/kombu/messaging.py", line 617, in _receive_callback return on_m(message) if on_m else self.receive(decoded, message) File "./venv/src/celery/celery/worker/consumer/consumer.py", line 567, in on_task_received callbacks, File "./venv/src/celery/celery/worker/strategy.py", line 145, in task_message_handler handle(req) File "./venv/src/celery/celery/worker/worker.py", line 221, in _process_task_sem return self._quick_acquire(self._process_task, req) File 
"./venv/lib/python3.4/site-packages/kombu/async/semaphore.py", line 62, in acquire callback(*partial_args, **partial_kwargs) File "./venv/src/celery/celery/worker/worker.py", line 226, in _process_task req.execute_using_pool(self.pool) File "./venv/src/celery/celery/worker/request.py", line 532, in execute_using_pool correlation_id=task_id, File "./venv/src/celery/celery/concurrency/base.py", line 155, in apply_async **options) File "./venv/lib/python3.4/site-packages/billiard/pool.py", line 1487, in apply_async self._quick_put((TASK, (result._job, None, func, args, kwds))) TypeError: 'NoneType' object is not callable [2016-11-27 14:48:11,273: DEBUG/MainProcess] | Worker: Closing Hub... [2016-11-27 14:48:11,274: DEBUG/MainProcess] | Worker: Closing Pool... [2016-11-27 14:48:11,274: DEBUG/MainProcess] | Worker: Closing Consumer... [2016-11-27 14:48:11,274: DEBUG/MainProcess] | Worker: Stopping Consumer... [2016-11-27 14:48:11,274: DEBUG/MainProcess] | Consumer: Closing Connection... [2016-11-27 14:48:11,275: DEBUG/MainProcess] | Consumer: Closing Events... [2016-11-27 14:48:11,275: DEBUG/MainProcess] | Consumer: Closing Mingle... [2016-11-27 14:48:11,275: DEBUG/MainProcess] | Consumer: Closing Tasks... [2016-11-27 14:48:11,275: DEBUG/MainProcess] | Consumer: Closing Control... [2016-11-27 14:48:11,275: DEBUG/MainProcess] | Consumer: Closing Gossip... [2016-11-27 14:48:11,276: DEBUG/MainProcess] | Consumer: Closing Heart... [2016-11-27 14:48:11,276: DEBUG/MainProcess] | Consumer: Closing event loop... [2016-11-27 14:48:11,276: DEBUG/MainProcess] | Consumer: Stopping event loop... [2016-11-27 14:48:11,276: DEBUG/MainProcess] | Consumer: Stopping Heart... [2016-11-27 14:48:11,277: DEBUG/MainProcess] | Consumer: Stopping Gossip... [2016-11-27 14:48:11,278: INFO/MainProcess] Received task: wakatime.tasks.cache_coding_activity[f786fc75-0518-4893-8988-ff7f063edd12] [2016-11-27 14:48:11,278: DEBUG/MainProcess] TaskPool: Apply <function _fast_trace_task at 0x7ff469300950> (args:('wakatime.tasks.cache_coding_activity', 'f786fc75-0518-4893-8988-ff7f063edd12', {'argsrepr': '()', 'task': 'wakatime.tasks.cache_coding_activity', 'lang': 'py', 'parent_id': '81f0c7ce-1396-496f-bf64-ae243736c845', 'timelimit': [None, None], 'root_id': '128647cc-f558-4b7d-bafc-338d186b5cfa', 'reply_to': 'e3c2b067-a058-3aa0-a3a1-384d4b917bbf', 'retries': 0, 'expires': None, 'delivery_info': {'exchange': '', 'priority': None, 'routing_key': 'cache', 'redelivered': True}, 'id': 'f786fc75-0518-4893-8988-ff7f063edd12', 'correlation_id': 'f786fc75-0518-4893-8988-ff7f063edd12', 'group': None, 'eta': None, 'kwargsrepr': "{'cache_projects': True, 'timeout': 15, 'user_id': UUID('7056644f-2564-4074-b89e-631973879f44'), 'writes_only': False}", 'origin': 'gen3021@worker02'}, '[[], {"cache_projects": true, "timeout": 15, "user_id": "7056644f-2564-4074-b89e-631973879f44", "writes_only": false}, {"callbacks": null, "chain": null, "chord": null, "errbacks": null}]', 'application/json', 'utf-8') kwargs:{}) [2016-11-27 14:48:11,279: INFO/MainProcess] Received task: wakatime.tasks.cache_coding_activity[d5c8dc57-116c-467d-9924-e2999280c2f8] [2016-11-27 14:48:11,280: INFO/MainProcess] Received task: wakatime.tasks.cache_coding_activity[460ef864-e482-4b0f-8580-d0095750bae6] [2016-11-27 14:48:11,281: DEBUG/MainProcess] Closed channel #3 [2016-11-27 14:48:11,281: DEBUG/MainProcess] | Consumer: Stopping Control... [2016-11-27 14:48:11,283: DEBUG/MainProcess] Closed channel #2 [2016-11-27 14:48:11,283: DEBUG/MainProcess] | Consumer: Stopping Tasks... 
[2016-11-27 14:48:11,284: DEBUG/MainProcess] Canceling task consumer... [2016-11-27 14:48:11,286: DEBUG/MainProcess] | Consumer: Stopping Mingle... [2016-11-27 14:48:11,286: DEBUG/MainProcess] | Consumer: Stopping Events... [2016-11-27 14:48:11,286: DEBUG/MainProcess] | Consumer: Stopping Connection... [2016-11-27 14:48:11,286: DEBUG/MainProcess] | Worker: Stopping Pool... [2016-11-27 14:48:12,800: DEBUG/MainProcess] result handler: all workers terminated [2016-11-27 14:48:12,801: DEBUG/MainProcess] | Worker: Stopping Hub... [2016-11-27 14:48:12,801: DEBUG/MainProcess] | Consumer: Shutdown Heart... [2016-11-27 14:48:12,802: DEBUG/MainProcess] | Consumer: Shutdown Gossip... [2016-11-27 14:48:12,802: DEBUG/MainProcess] | Consumer: Shutdown Control... [2016-11-27 14:48:12,802: DEBUG/MainProcess] | Consumer: Shutdown Tasks... [2016-11-27 14:48:12,803: DEBUG/MainProcess] Canceling task consumer... [2016-11-27 14:48:12,803: DEBUG/MainProcess] Closing consumer channel... [2016-11-27 14:48:12,803: DEBUG/MainProcess] | Consumer: Shutdown Events... [2016-11-27 14:48:12,804: DEBUG/MainProcess] Closed channel #1 [2016-11-27 14:48:12,805: DEBUG/MainProcess] | Consumer: Shutdown Connection... [2016-11-27 14:48:12,806: DEBUG/MainProcess] Closed channel #1 [2016-11-27 14:48:12,807: DEBUG/MainProcess] removing tasks from inqueue until task handler finished ``` This was introduced with Celery 4.x because downgrading to `3.1.24` prevents the traceback. Doesn't happen to me here on Linux Python 3.4. What arguments to you use to start the worker? _quick_put should never be None btw. Does this happen at startup or always after a connection failure? I've been trying to reproduce by stopping the broker while executing tasks, and still no luck at reproducing. Always at startup. The worker arguments are: ``` /opt/app/venv/bin/python /opt/app/venv/bin/celery worker --app=wakatime.celery --workdir=/opt/app --logfile=/var/log/celery/worker.log --loglevel=INFO --concurrency=50 --exclude-queues=medium,low,cache ``` 👍 for this. Getting the very same issue, even on 4.0.1 @ask I get to reproduce it everytime when you have messages on the broker waiting to be processed when the worker comes up. This is often the case when using beat, which is my case. If the beat services comes online before the worker, you won't be able to start the worker due to the issue mentioned above. I'm using python 2.7 for all that matter and am able to reproduce it consistently. This is the same error as the one mentioned on #3539 @jmesquita that's consistent with my scenario, since my queues always have pending messages on the broker when starting the workers. @alanhamlett I'm trying to get this fixed and reading the code but I'm new to celery so it might take me sometime. What is strange to me is that with so many people using celery and celery messages being queued by default to workers, this has not exploded within the community. Makes me wonder if I'm misusing it somehow. I dug into the code a bit, `_quick_put` gets assigned by `AsyncPool._create_write_handlers`, which gets called by `AsyncPool.register_with_event_loop`, which gets called by `celery.worker.loops.asynloop`. Superficially, the problem seems to be that `asynloop` first calls `consumer.consume()` and only then calls `obj.register_with_event_loop`, which causes `_quick_put` to be `None` when it gets called from within `consume()`. 
This would explain why the problem does not occur when there are no messages in the queue when the event loop starts up: in that case `consume()` will do nothing, and by the next time it gets called, `register_with_event_loop` will have been called already. I could fix this by moving `obj.controller.register_with_event_loop(hub)` and `obj.register_with_event_loop(hub)` before `consumer.consume()`, though this is of course only a very naive (and possibly wrong) fix.

So I worked around my problem by making celery beat messages transient, which is actually my intended behaviour anyway. I'll revisit this as soon as I have a bit more experience with Celery and its codebase.

@jmesquita:

> So I worked around my problem by making celery beat messages transient

That prevents the error from occurring. Thank you for the suggestion.

I can only reproduce this bug if I am attempting to consume from multiple queues. If everything is consumed from a single queue then start up works as expected (messages on the queue are properly consumed).

@adewes I tested your proposed solution and at least on the surface it seems to solve the problem.

@adewes Can you issue a pull request so we can discuss your proposed change?

Are there any updates on this issue? This is causing us major problems now, including in production. I can't even test locally because I get the `TypeError` issue. We may have to downgrade back to Celery 3.

I was not able to resolve it so far; I also downgraded to version 3 for now and hope the problem will be fixed soon.

@thedrow my "quick fix" did not yield a complete resolution of the problem so I'm not opening a pull request. I'm not fully versed in the dataflow of the components used (there are several libraries in play here), so I'm not able to debug this further right now unfortunately. I'm actually not even sure we can downgrade, because it's possible we might be relying on the new usage of message headers in the v2 task design.

@ask -- I'm happy to screenshare or whatever with you so you can see my exact environment to help debug, maybe even try opening up a remote debug if we need to. We're in a bit of a bind because we went all in on Celery 4 and now can't start our workers in production.
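A self-contained sketch of the ordering hazard described above (illustrative names, not Celery's real objects): a callback slot that is only filled during `register_with_event_loop()` is still `None` for any message consumed before registration, which is exactly the reordering the patch above performs in `asynloop`.

```python
# Toy model: registration fills the _quick_put slot; consuming a backlog of
# messages before registering dereferences None.
class Pool:
    def __init__(self):
        self._quick_put = None

    def register_with_event_loop(self):
        self._quick_put = lambda job: print('queued', job)

    def apply_async(self, job):
        self._quick_put(job)   # TypeError if registration hasn't run yet

def asynloop_buggy(pool, pending):
    for job in pending:        # stands in for consumer.consume() draining
        pool.apply_async(job)  # ...messages already waiting on the queue
    pool.register_with_event_loop()

def asynloop_fixed(pool, pending):
    pool.register_with_event_loop()  # register first, as in the patch above
    for job in pending:
        pool.apply_async(job)

asynloop_fixed(Pool(), ['t1', 't2'])   # queued t1 / queued t2
try:
    asynloop_buggy(Pool(), ['t1'])
except TypeError as exc:
    print(exc)                         # 'NoneType' object is not callable
```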
2017-01-09T20:16:29
celery/celery
3779
celery__celery-3779
[ "3771" ]
7d1588e004875b35a8aa3e994f148413db52c845
diff --git a/celery/canvas.py b/celery/canvas.py
--- a/celery/canvas.py
+++ b/celery/canvas.py
@@ -1282,6 +1282,9 @@ def run(self, header, body, partial_args, app=None, interval=None,
                            group_id=group_id, chord=body, root_id=root_id).results
         bodyres = body.freeze(task_id, root_id=root_id)
 
+        # Chains should not be passed to the header tasks. See #3771
+        options.pop('chain', None)
+
         parent = app.backend.apply_chord(
             header, partial_args, group_id, body,
             interval=interval, countdown=countdown,
Bad behavior when a chord chains with a group

## Checklist

✓ I have included the output of ``celery -A proj report`` in the issue.
✓ I have verified that the issue exists against the `master` branch of Celery.

Output of `celery -A tasks report`:

```
software -> celery:4.0.2 (latentcall) kombu:4.0.2 py:3.6.0
            billiard:3.5.0.2 py-amqp:2.1.4
platform -> system:Darwin arch:64bit imp:CPython
loader   -> celery.loaders.app.AppLoader
settings -> transport:amqp results:redis:///

result_backend: 'redis:///'
```

## Steps to reproduce

Create a `tasks.py` file with the following:

```python
from celery import Celery, group

app = Celery('tasks', backend='redis://')

@app.task
def echo(string):
    print(string)

def get_sig():
    before = group(echo.si('before {}'.format(i)) for i in range(3))
    connect = echo.si('connect')
    after = group(echo.si('after {}'.format(i)) for i in range(2))
    return before | connect | after
```

Then at the python prompt run:

```pycon
Python 3.6.0 (default, Jan 5 2017, 23:49:18)
[GCC 4.2.1 Compatible Apple LLVM 8.0.0 (clang-800.0.42.1)] on darwin
Type "help", "copyright", "credits" or "license" for more information.
>>> import tasks; tasks.get_sig().delay()
```

Then run a celery worker with `celery -A tasks worker`, and see what output it gives.

## Expected behavior

I expected to see output that indicated the three `before` tasks ran, then the `connect` task, then the two `after` tasks.

## Actual behavior

This is the output that I saw from the celery worker:

```
[email protected] v4.0.2 (latentcall)

Darwin-16.3.0-x86_64-i386-64bit 2017-01-16 23:03:45

[config]
.> app:         tasks:0x1061ec5f8
.> transport:   amqp://guest:**@localhost:5672//
.> results:     redis://
.> concurrency: 4 (prefork)
.> task events: OFF (enable -E to monitor tasks in this worker)

[queues]
.> celery           exchange=celery(direct) key=celery

[2017-01-16 23:03:47,275: WARNING/PoolWorker-4] before 1
[2017-01-16 23:03:47,276: WARNING/PoolWorker-1] before 2
[2017-01-16 23:03:47,276: WARNING/PoolWorker-2] before 0
[2017-01-16 23:03:47,370: WARNING/PoolWorker-3] after 0
[2017-01-16 23:03:47,378: WARNING/PoolWorker-2] after 0
[2017-01-16 23:03:47,380: WARNING/PoolWorker-1] after 1
[2017-01-16 23:03:47,388: WARNING/PoolWorker-1] after 0
[2017-01-16 23:03:47,388: WARNING/PoolWorker-2] after 1
[2017-01-16 23:03:47,393: WARNING/PoolWorker-1] after 1
[2017-01-16 23:03:47,397: WARNING/PoolWorker-2] connect
[2017-01-16 23:03:47,403: WARNING/PoolWorker-3] after 1
[2017-01-16 23:03:47,403: WARNING/PoolWorker-1] after 0
```

## Impact

This is working with Celery 3.1.25, and is blocking my production project from upgrading currently. If there's a workaround, I'd be happy to use it. It seems like it's running the whole `after` group for every task in the `before` group, and then once again after connect runs.
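A hedged illustration of what the one-line fix in the patch above does (plain dicts standing in for Celery's internal option handling): when a chord sits in the middle of a chain, the remainder of the chain travels in `options['chain']`, and forwarding that to every header task makes each of them re-run the tail.

```python
# Plain-dict illustration (not Celery's real option plumbing): the header
# tasks must not inherit the pending chain; only the chord body carries it.
options = {'chain': ['after_group'], 'task_id': 'abc123'}

header_options = dict(options)
header_options.pop('chain', None)   # mirrors options.pop('chain', None) above

print(header_options)   # {'task_id': 'abc123'} -- the tail now runs exactly once
```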
2017-01-20T20:49:33
celery/celery
3790
celery__celery-3790
[ "3734" ]
c5793740685fa1376b2d06f3678c872ca175ed6f
diff --git a/celery/app/amqp.py b/celery/app/amqp.py
--- a/celery/app/amqp.py
+++ b/celery/app/amqp.py
@@ -331,7 +331,9 @@ def as_task_v2(self, task_id, name, args=None, kwargs=None,
                 now + timedelta(seconds=expires), tz=timezone,
             )
         eta = eta and eta.isoformat()
-        expires = expires and expires.isoformat()
+        # If we retry a task `expires` will already be ISO8601-formatted.
+        if not isinstance(expires, string_t):
+            expires = expires and expires.isoformat()
 
         if argsrepr is None:
             argsrepr = saferepr(args, self.argsrepr_maxsize)
diff --git a/t/integration/tasks.py b/t/integration/tasks.py
--- a/t/integration/tasks.py
+++ b/t/integration/tasks.py
@@ -55,3 +55,11 @@ def collect_ids(self, res, i):
     """
     return res, (self.request.root_id, self.request.parent_id, i)
+
+
+@shared_task(bind=True, expires=60.0, max_retries=1)
+def retry_once(self):
+    """Task that fails and is retried. Returns the number of retries."""
+    if self.request.retries:
+        return self.request.retries
+    raise self.retry(countdown=0.1)
diff --git a/t/integration/test_tasks.py b/t/integration/test_tasks.py
--- a/t/integration/test_tasks.py
+++ b/t/integration/test_tasks.py
@@ -1,7 +1,7 @@
 from __future__ import absolute_import, unicode_literals
 from celery import group
 from .conftest import flaky
-from .tasks import print_unicode, sleeping
+from .tasks import print_unicode, retry_once, sleeping
 
 
 class test_tasks:
@@ -12,6 +12,11 @@ def test_task_accepted(self, manager, sleep=1):
         sleeping.delay(sleep)
         manager.assert_accepted([r1.id])
 
+    @flaky
+    def test_task_retried(self):
+        res = retry_once.delay()
+        assert res.get(timeout=10) == 1  # retried once
+
     @flaky
     def test_unicode_task(self, manager):
         manager.join(
Retrying tasks with an expiry fails with "'str' object has no attribute 'isoformat'"

## Summary

Celery fails to retry tasks with `expires` set. The problem is that celery passes an ISO8601-formatted string to `celery.app.amqp.as_task_v2()` instead of a datetime.

## Steps to reproduce

1. Create a fresh virtualenv and install celery from master.
2. Create tasks.py with the following content:

```
from celery import Celery

app = Celery('tasks', broker='pyamqp://')

@app.task(bind=True, expires=60.0, max_retries=1)
def randint(self):
    if self.request.retries == 0:
        raise self.retry(countdown=0.1)
    return 42
```

3. Trigger the job:

```
$ python -c 'import tasks; tasks.randint.delay()'
$ celery -A tasks worker --loglevel=info
```

## Expected behavior

The task is retried once, then returns 42. E.g. it should log something like the following:

```
... startup message omitted ...
[2017-01-03 12:04:40,705: INFO/MainProcess] Connected to amqp://guest:**@127.0.0.1:5672//
[2017-01-03 12:04:40,715: INFO/MainProcess] mingle: searching for neighbors
[2017-01-03 12:04:41,734: INFO/MainProcess] mingle: all alone
[2017-01-03 12:04:41,746: INFO/MainProcess] celery@mrslim ready.
[2017-01-03 12:04:41,747: INFO/MainProcess] Received task: tasks.randint[2eaa60ce-28d8-435d-8aec-2f0fed8afa28]
[2017-01-03 12:04:41,872: INFO/PoolWorker-4] Task tasks.randint[2eaa60ce-28d8-435d-8aec-2f0fed8afa28] retry: Retry in 0.1s
[2017-01-03 12:04:41,872: INFO/MainProcess] Received task: tasks.randint[2eaa60ce-28d8-435d-8aec-2f0fed8afa28] ETA:[2017-01-03 20:04:41.953403+00:00]
[2017-01-03 12:04:42,717: INFO/PoolWorker-2] Task tasks.randint[2eaa60ce-28d8-435d-8aec-2f0fed8afa28] succeeded in 0.000306855999952s: 42
```

## Actual behavior

The retry call fails and the job is never completed. It logs the following instead:

```
... startup message omitted ...
[2017-01-03 12:05:40,585: INFO/MainProcess] Connected to amqp://guest:**@127.0.0.1:5672//
[2017-01-03 12:05:40,594: INFO/MainProcess] mingle: searching for neighbors
[2017-01-03 12:05:41,615: INFO/MainProcess] mingle: all alone
[2017-01-03 12:05:41,631: INFO/MainProcess] celery@mrslim ready.
[2017-01-03 12:05:41,632: INFO/MainProcess] Received task: tasks.randint[95e4d770-19f7-44d3-a052-738f293036c5] expires:[2017-01-03 20:06:40.130848+00:00]
[2017-01-03 12:05:41,735: WARNING/PoolWorker-4] Task tasks.randint[95e4d770-19f7-44d3-a052-738f293036c5] reject requeue=False: 'str' object has no attribute 'isoformat'
Traceback (most recent call last):
  File "/home/bremac/src/celery_test/venv/lib/python2.7/site-packages/celery/app/trace.py", line 367, in trace_task
    R = retval = fun(*args, **kwargs)
  File "/home/bremac/src/celery_test/venv/lib/python2.7/site-packages/celery/app/trace.py", line 622, in __protected_call__
    return self.run(*args, **kwargs)
  File "/home/bremac/src/celery_test/tasks.py", line 9, in randint
    raise self.retry(countdown=0.1)
  File "/home/bremac/src/celery_test/venv/lib/python2.7/site-packages/celery/app/task.py", line 687, in retry
    raise Reject(exc, requeue=False)
Reject: (AttributeError("'str' object has no attribute 'isoformat'",), False)
```

Pdb gives me the following backtrace for the original exception. I've omitted everything above the call to `self.retry` for brevity:

```
/home/bremac/src/celery_test/venv/lib/python2.7/site-packages/celery/app/task.py(685)retry()
-> S.apply_async()
/home/bremac/src/celery_test/venv/lib/python2.7/site-packages/celery/canvas.py(221)apply_async()
-> return _apply(args, kwargs, **options)
/home/bremac/src/celery_test/venv/lib/python2.7/site-packages/celery/app/task.py(535)apply_async()
-> **options
/home/bremac/src/celery_test/venv/lib/python2.7/site-packages/celery/app/base.py(729)send_task()
-> root_id, parent_id, shadow, chain,
/home/bremac/src/celery_test/venv/lib/python2.7/site-packages/celery/app/amqp.py(334)as_task_v2()
-> expires = expires and expires.isoformat()
```

## Other information

```
$ celery -A tasks report  # installed from a checkout on master

software -> celery:4.0.2 (latentcall) kombu:4.0.2 py:2.7.13
            billiard:3.5.0.2 py-amqp:2.1.4
platform -> system:Linux arch:64bit imp:CPython
loader   -> celery.loaders.app.AppLoader
settings -> transport:pyamqp results:disabled

broker_url: u'amqp://guest:********@localhost:5672//'
```
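A minimal sketch of the guard the patch above adds, using plain `str` where celery uses its py2/py3 `string_t` alias: on a retry, the message's `expires` value is already an ISO8601 string, so only datetime values should be formatted.

```python
from datetime import datetime

def serialize_expires(expires):
    # Already ISO8601-formatted (the retried-task case) -- pass through.
    if isinstance(expires, str):
        return expires
    # Otherwise format the datetime; None stays None.
    return expires and expires.isoformat()

first = serialize_expires(datetime(2017, 1, 3, 20, 6))
print(first)                     # '2017-01-03T20:06:00'
print(serialize_expires(first))  # unchanged on retry, no AttributeError
print(serialize_expires(None))   # None
```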
2017-01-25T02:43:49
celery/celery
3827
celery__celery-3827
[ "3826" ]
9f7adf4329b3ec668d0c1babc9a80ef9897d76f8
diff --git a/celery/schedules.py b/celery/schedules.py
--- a/celery/schedules.py
+++ b/celery/schedules.py
@@ -101,6 +101,11 @@ def to_local(self, dt):
             return timezone.to_local_fallback(dt)
         return dt
 
+    def __eq__(self, other):
+        if isinstance(other, BaseSchedule):
+            return other.nowfun == self.nowfun
+        return NotImplemented
+
 
 @python_2_unicode_compatible
 class schedule(BaseSchedule):
@@ -398,6 +403,7 @@ def __init__(self, minute='*', hour='*', day_of_week='*',
         self._orig_day_of_week = cronfield(day_of_week)
         self._orig_day_of_month = cronfield(day_of_month)
         self._orig_month_of_year = cronfield(month_of_year)
+        self._orig_kwargs = kwargs
         self.hour = self._expand_cronspec(hour, 24)
         self.minute = self._expand_cronspec(minute, 60)
         self.day_of_week = self._expand_cronspec(day_of_week, 7)
@@ -529,7 +535,12 @@ def __reduce__(self):
                              self._orig_hour,
                              self._orig_day_of_week,
                              self._orig_day_of_month,
-                             self._orig_month_of_year), None)
+                             self._orig_month_of_year), self._orig_kwargs)
+
+    def __setstate__(self, state):
+        # Calling super's init because the kwargs aren't necessarily passed in
+        # the same form as they are stored by the superclass
+        super(crontab, self).__init__(**state)
 
     def remaining_delta(self, last_run_at, tz=None, ffwd=ffwd):
         # pylint: disable=redefined-outer-name
@@ -624,7 +635,8 @@ def __eq__(self, other):
                 other.day_of_month == self.day_of_month and
                 other.day_of_week == self.day_of_week and
                 other.hour == self.hour and
-                other.minute == self.minute
+                other.minute == self.minute and
+                super(crontab, self).__eq__(other)
             )
         return NotImplemented
diff --git a/t/unit/app/test_schedules.py b/t/unit/app/test_schedules.py
--- a/t/unit/app/test_schedules.py
+++ b/t/unit/app/test_schedules.py
@@ -91,13 +91,28 @@ def test_pickle(self):
         assert s1 == s2
 
 
+# This is needed for test_crontab_parser because datetime.utcnow doesn't pickle
+# in python 2
+def utcnow():
+    return datetime.utcnow()
+
+
 class test_crontab_parser:
 
     def crontab(self, *args, **kwargs):
        return crontab(*args, **dict(kwargs, app=self.app))
 
     def test_crontab_reduce(self):
-        assert loads(dumps(self.crontab('*')))
+        c = self.crontab('*')
+        assert c == loads(dumps(c))
+        c = self.crontab(
+            minute='1',
+            hour='2',
+            day_of_week='3',
+            day_of_month='4',
+            month_of_year='5',
+            nowfun=utcnow)
+        assert c == loads(dumps(c))
 
     def test_range_steps_not_enough(self):
         with pytest.raises(crontab_parser.ParseException):
celery.schedules.crontab.__reduce__ does not return kwargs.

## Checklist

- [x] I have included the output of ``celery -A proj report`` in the issue. (if you are not able to do this, then at least specify the Celery version affected).
- [X] I have verified that the issue exists against the `master` branch of Celery.

```
software -> celery:4.0.2 (latentcall) kombu:4.0.2 py:3.5.2
            billiard:3.5.0.2 py-amqp:2.1.4
platform -> system:Linux arch:64bit, ELF imp:CPython
loader   -> celery.loaders.app.AppLoader
settings -> transport:amqp results:disabled

timezone: 'UTC'
beat_schedule: {
    'test task': {   'args': ('hello',),
                     'schedule': <crontab: 35 18 * * * (m/h/d/dM/MY)>,
                     'task': 'test.test'},
    'test task2': {   'args': ('world',),
                      'schedule': <crontab: 35 15 * * * (m/h/d/dM/MY)>,
                      'task': 'test.test'}}
accept_content: ['json']
task_serializer: 'json'
result_serializer: 'json'
```

## Steps to reproduce

Set a nowfun. The nowfun will not be restored. When starting up celery beat, I never get a working nowfun back.

## Expected behavior

Relevant kwargs should be returned instead of `None` here: https://github.com/celery/celery/blob/master/celery/schedules.py#L532. An appropriate `__setstate__` may also be necessary to cope with this (as `app` doesn't get stored in `__dict__` as `app`, but rather `_app`).

## Actual behavior

Only the constructor elements are returned from `__reduce__`, dropping all kwargs passed to the original `crontab.__init__` when pickling.
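A generic sketch of the `__reduce__`/`__setstate__` pattern the report asks for (a toy class, not Celery's `crontab`): returning the stored kwargs as the state element of `__reduce__` lets pickle restore attributes that the positional constructor arguments miss.

```python
import pickle

class Schedule(object):
    def __init__(self, minute='*', nowfun=None):
        self.minute = minute
        self.nowfun = nowfun

    def __reduce__(self):
        # (callable, args, state) -- the state dict is fed to __setstate__.
        return (self.__class__, (self.minute,), {'nowfun': self.nowfun})

    def __setstate__(self, state):
        # Restore kwargs that the positional args above did not carry.
        self.nowfun = state['nowfun']

def utcnow_stub():
    # Module-level so it pickles by reference.
    return 'now'

s = pickle.loads(pickle.dumps(Schedule('5', nowfun=utcnow_stub)))
print(s.minute)                  # '5'
print(s.nowfun is utcnow_stub)   # True -- the kwarg survives a round trip
```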
2017-02-11T00:28:52
celery/celery
3831
celery__celery-3831
[ "3830" ]
d90caee6d91a0fcc91756329503a35bf8fef720a
diff --git a/celery/app/defaults.py b/celery/app/defaults.py
--- a/celery/app/defaults.py
+++ b/celery/app/defaults.py
@@ -152,6 +152,7 @@ def __repr__(self):
     redis=Namespace(
         __old__=old_ns('celery_redis'),
+        backend_use_ssl=Option(type='dict'),
         db=Option(type='int'),
         host=Option(type='string'),
         max_connections=Option(type='int'),
diff --git a/celery/backends/redis.py b/celery/backends/redis.py
--- a/celery/backends/redis.py
+++ b/celery/backends/redis.py
@@ -132,6 +132,15 @@ def __init__(self, host=None, port=None, db=None, password=None,
             'socket_connect_timeout': socket_connect_timeout and
                                       float(socket_connect_timeout),
         }
+
+        # "redis_backend_use_ssl" must be a dict with the keys:
+        # 'ssl_cert_reqs', 'ssl_ca_certs', 'ssl_certfile', 'ssl_keyfile'
+        # (the same as "broker_use_ssl")
+        ssl = _get('redis_backend_use_ssl')
+        if ssl:
+            self.connparams.update(ssl)
+            self.connparams['connection_class'] = redis.SSLConnection
+
         if url:
             self.connparams = self._params_from_url(url, self.connparams)
         self.url = url
diff --git a/t/unit/backends/test_redis.py b/t/unit/backends/test_redis.py
--- a/t/unit/backends/test_redis.py
+++ b/t/unit/backends/test_redis.py
@@ -1,5 +1,6 @@
 from __future__ import absolute_import, unicode_literals
 import pytest
+import ssl
 from datetime import timedelta
 from contextlib import contextmanager
 from pickle import loads, dumps
@@ -179,6 +180,34 @@ def test_socket_url(self):
         assert 'socket_connect_timeout' not in x.connparams
         assert x.connparams['db'] == 3
 
+    @skip.unless_module('redis')
+    def test_backend_ssl(self):
+        self.app.conf.redis_backend_use_ssl = {
+            'ssl_cert_reqs': ssl.CERT_REQUIRED,
+            'ssl_ca_certs': '/path/to/ca.crt',
+            'ssl_certfile': '/path/to/client.crt',
+            'ssl_keyfile': '/path/to/client.key',
+        }
+        self.app.conf.redis_socket_timeout = 30.0
+        self.app.conf.redis_socket_connect_timeout = 100.0
+        x = self.Backend(
+            'redis://:[email protected]:123//1', app=self.app,
+        )
+        assert x.connparams
+        assert x.connparams['host'] == 'vandelay.com'
+        assert x.connparams['db'] == 1
+        assert x.connparams['port'] == 123
+        assert x.connparams['password'] == 'bosco'
+        assert x.connparams['socket_timeout'] == 30.0
+        assert x.connparams['socket_connect_timeout'] == 100.0
+        assert x.connparams['ssl_cert_reqs'] == ssl.CERT_REQUIRED
+        assert x.connparams['ssl_ca_certs'] == '/path/to/ca.crt'
+        assert x.connparams['ssl_certfile'] == '/path/to/client.crt'
+        assert x.connparams['ssl_keyfile'] == '/path/to/client.key'
+
+        from redis.connection import SSLConnection
+        assert x.connparams['connection_class'] is SSLConnection
+
     def test_compat_propertie(self):
         x = self.Backend(
             'redis://:[email protected]:123//1', app=self.app,
Redis backend does not support SSL?

From looking at the code and documentation, it appears that SSL is only supported for redis as a broker, but not as a result backend. Is that right?

Currently we do some yucky monkey-patching of the code in our tasks file:

```python
import celery.backends.redis
import redis
import ssl

original = celery.backends.redis.RedisBackend._params_from_url

def patched(*args, **kwargs):
    result = original(*args, **kwargs)
    result.update({
        'connection_class': redis.SSLConnection,
        'ssl_cert_reqs': ssl.CERT_NONE,
    })
    return result

celery.backends.redis.RedisBackend._params_from_url = patched
```

The code change in celery to support SSL for redis backends would be small. Would you accept a patch for that? It should probably use a different config name than `broker_use_ssl`. Maybe `backend_use_ssl`?
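For reference, the patch above wires this up as a new ``redis_backend_use_ssl`` setting that takes the same keys as ``broker_use_ssl``. A minimal configuration sketch (the certificate paths are placeholders):

```python
import ssl
from celery import Celery

app = Celery('proj',
             broker='redis://localhost:6379/0',
             backend='redis://localhost:6379/1')

# Same key names as broker_use_ssl; values are passed straight through to
# the redis client's SSLConnection.
app.conf.redis_backend_use_ssl = {
    'ssl_cert_reqs': ssl.CERT_REQUIRED,
    'ssl_ca_certs': '/path/to/ca.crt',
    'ssl_certfile': '/path/to/client.crt',
    'ssl_keyfile': '/path/to/client.key',
}
```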
2017-02-11T20:08:05
celery/celery
3850
celery__celery-3850
[ "3849" ]
4d63867c8281e94c74dcdf84fe2a441eaed6671b
diff --git a/celery/utils/time.py b/celery/utils/time.py
--- a/celery/utils/time.py
+++ b/celery/utils/time.py
@@ -322,9 +322,10 @@ def maybe_make_aware(dt, tz=None):
     """Convert dt to aware datetime, do nothing if dt is already aware."""
     if is_naive(dt):
         dt = to_utc(dt)
-    return localize(
-        dt, timezone.utc if tz is None else timezone.tz_or_local(tz),
-    )
+        return localize(
+            dt, timezone.utc if tz is None else timezone.tz_or_local(tz),
+        )
+    return dt
 
 
 @python_2_unicode_compatible
diff --git a/t/unit/utils/test_time.py b/t/unit/utils/test_time.py
--- a/t/unit/utils/test_time.py
+++ b/t/unit/utils/test_time.py
@@ -171,6 +171,13 @@ def test_maybe_make_aware(self):
         assert maybe_make_aware(aware)
         naive = datetime.utcnow()
         assert maybe_make_aware(naive)
+        assert maybe_make_aware(naive).tzinfo is pytz.utc
+
+        tz = pytz.timezone('US/Eastern')
+        eastern = datetime.utcnow().replace(tzinfo=tz)
+        assert maybe_make_aware(eastern).tzinfo is tz
+        utcnow = datetime.utcnow()
+        assert maybe_make_aware(utcnow, 'UTC').tzinfo is pytz.utc
 
 
 class test_localize:
celery.utils.time.maybe_make_aware docstring incorrect

The docstring for [celery.utils.time.maybe_make_aware](https://github.com/celery/celery/blob/master/celery/utils/time.py#L321) claims that it doesn't do anything if dt is already timezone-aware, but this is incorrect, as it will try to convert to the localtime or utc instead. This doesn't look like correct behavior, and doesn't allow timezones to work correctly in the crontab scheduler.
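A small demonstration of the contract the docstring promises and the patch above restores, mirroring the assertions in the test patch:

```python
from datetime import datetime

import pytz
from celery.utils.time import maybe_make_aware

naive = datetime.utcnow()
print(maybe_make_aware(naive).tzinfo)        # UTC -- naive input is localized

tz = pytz.timezone('US/Eastern')
aware = datetime.utcnow().replace(tzinfo=tz)
# True after the fix (tzinfo untouched); before the fix the aware datetime
# was re-localized and came back with a different tzinfo.
print(maybe_make_aware(aware).tzinfo is tz)
```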
2017-02-17T22:13:26
celery/celery
3892
celery__celery-3892
[ "3879" ]
144f88b4e1be21780e737d4be5a734b19f1cf511
diff --git a/celery/utils/collections.py b/celery/utils/collections.py
--- a/celery/utils/collections.py
+++ b/celery/utils/collections.py
@@ -3,7 +3,7 @@
 from __future__ import absolute_import, unicode_literals
 
 import sys
-import time
+from celery.five import monotonic
 
 from collections import (
     Callable, Mapping, MutableMapping, MutableSet, Sequence,
@@ -526,7 +526,7 @@ class LimitedSet(object):
         False
         >>> len(s)  # maxlen is reached
         50000
-        >>> s.purge(now=time.time() + 7200)  # clock + 2 hours
+        >>> s.purge(now=monotonic() + 7200)  # clock + 2 hours
         >>> len(s)  # now only minlen items are cached
         4000
         >>>> 57000 in s  # even this item is gone now
@@ -573,7 +573,7 @@ def clear(self):
     def add(self, item, now=None):
         # type: (Any, float) -> None
         """Add a new item, or reset the expiry time of an existing item."""
-        now = now or time.time()
+        now = now or monotonic()
         if item in self._data:
             self.discard(item)
         entry = (now, item)
@@ -624,7 +624,7 @@ def purge(self, now=None):
             now (float): Time of purging -- by default right now.
                 This can be useful for unit testing.
         """
-        now = now or time.time()
+        now = now or monotonic()
         now = now() if isinstance(now, Callable) else now
         if self.maxlen:
             while len(self._data) > self.maxlen:
diff --git a/t/unit/utils/test_collections.py b/t/unit/utils/test_collections.py
--- a/t/unit/utils/test_collections.py
+++ b/t/unit/utils/test_collections.py
@@ -5,7 +5,7 @@
 from collections import Mapping
 from itertools import count
-from time import time
+from celery.five import monotonic
 
 from case import skip
 from billiard.einfo import ExceptionInfo
@@ -198,21 +198,21 @@ def test_purge(self):
         s = LimitedSet(maxlen=10, expires=1)
         [s.add(i) for i in range(10)]
         s.maxlen = 2
-        s.purge(now=time() + 100)
+        s.purge(now=monotonic() + 100)
         assert len(s) == 0
 
         # not expired
         s = LimitedSet(maxlen=None, expires=1)
         [s.add(i) for i in range(10)]
         s.maxlen = 2
-        s.purge(now=lambda: time() - 100)
+        s.purge(now=lambda: monotonic() - 100)
         assert len(s) == 2
 
         # expired -> minsize
         s = LimitedSet(maxlen=10, minlen=10, expires=1)
         [s.add(i) for i in range(20)]
         s.minlen = 3
-        s.purge(now=time() + 3)
+        s.purge(now=monotonic() + 3)
         assert s.minlen == len(s)
         assert len(s._heap) <= s.maxlen * (
             100. + s.max_heap_percent_overload) / 100
@@ -293,8 +293,6 @@ def test_update(self):
 
     def test_iterable_and_ordering(self):
         s = LimitedSet(maxlen=35, expires=None)
-        # we use a custom clock here, as time.time() does not have enough
-        # precision when called quickly (can return the same value twice).
         clock = count(1)
         for i in reversed(range(15)):
             s.add(i, now=next(clock))
LimitedSet does not perform as expected - tests fail

## Checklist

- Celery Version v3.1.25
- [x] I have verified that the issue exists against the `master` branch of Celery.

## Steps to reproduce

Run the unit tests.

## Expected behavior

That they'd pass :wink:

## Actual behavior

They all pass except the ones related to LimitedSet. The results appear to be the absolute inverse of what is expected; have a look, here is the output:

```
__________________________________________________ test_LimitedSet.test_add ___________________________________________________

self = <celery.tests.utils.test_datastructures.test_LimitedSet testMethod=test_add>

    def test_add(self):
        if sys.platform == 'win32':
            raise SkipTest('Not working properly on Windows')
        s = LimitedSet(maxlen=2)
        s.add('foo')
        s.add('bar')
        for n in 'foo', 'bar':
            self.assertIn(n, s)
        s.add('baz')
        for n in 'bar', 'baz':
>           self.assertIn(n, s)
E           AssertionError: 'bar' not found in LimitedSet(2)

celery/tests/utils/test_datastructures.py:185: AssertionError
__________________________________________________ test_LimitedSet.test_iter __________________________________________________

self = <celery.tests.utils.test_datastructures.test_LimitedSet testMethod=test_iter>

    def test_iter(self):
        if sys.platform == 'win32':
            raise SkipTest('Not working on Windows')
        s = LimitedSet(maxlen=3)
        items = ['foo', 'bar', 'baz', 'xaz']
        for item in items:
            s.add(item)
        l = list(iter(s))
        for item in items[1:]:
>           self.assertIn(item, l)
E           AssertionError: 'bar' not found in ['baz', 'foo', 'xaz']

celery/tests/utils/test_datastructures.py:242: AssertionError
_________________________________________________ test_LimitedSet.test_update _________________________________________________

self = <celery.tests.utils.test_datastructures.test_LimitedSet testMethod=test_update>

    def test_update(self):
        s1 = LimitedSet(maxlen=2)
        s1.add('foo')
        s1.add('bar')
        s2 = LimitedSet(maxlen=2)
        s2.update(s1)
        self.assertItemsEqual(list(s2), ['foo', 'bar'])
        s2.update(['bla'])
>       self.assertItemsEqual(list(s2), ['bla', 'bar'])

celery/tests/utils/test_datastructures.py:280:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
celery/tests/case.py:367: in assertItemsEqual
    return self.assertSequenceEqual(expected, actual, msg=msg)
E   AssertionError: Sequences differ: ['bla', 'foo'] != ['bar', 'bla']
E
E   First differing element 0:
E   bla
E   bar
E
E   - ['bla', 'foo']
E   + ['bar', 'bla']
```

Just to give you more details on my environment (in case it's related):

* I'm using CentOS 7.x, putting me on Python 2.7.5
* billiard v3.3.0.20
* kombu v3.0.37
* pyparsing v1.5.6
* pytest v3.0.6
* dateutil v2.2-1
* anyjson v0.3.3
* amqp v2.1.4
* pytz v2016.10
* uuid v1.30
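A sketch of why these tests are clock-sensitive (using the celery 4.x `LimitedSet` API shown in the patch above, which accepts an explicit `now=`): entries are ordered by the timestamp passed to `add()`, so a clock that can return the same value twice in quick succession (as `time.time()` may on some platforms) makes eviction order unpredictable. A strictly increasing clock sidesteps that, which is also why the patch moves to a monotonic clock.

```python
from itertools import count

from celery.utils.collections import LimitedSet  # celery 4.x import path

clock = count(1)            # deterministic, strictly increasing "time"
s = LimitedSet(maxlen=2)
for item in ('foo', 'bar', 'baz'):
    s.add(item, now=next(clock))

# With unambiguous timestamps the oldest entry is the one evicted.
print('foo' in s, 'bar' in s, 'baz' in s)   # expected: False True True
```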
2017-03-04T05:24:15
celery/celery
3903
celery__celery-3903
[ "3858" ]
144f88b4e1be21780e737d4be5a734b19f1cf511
diff --git a/celery/backends/base.py b/celery/backends/base.py
--- a/celery/backends/base.py
+++ b/celery/backends/base.py
@@ -29,7 +29,7 @@ from celery.exceptions import (
     ChordError, TimeoutError, TaskRevokedError, ImproperlyConfigured,
 )
-from celery.five import items
+from celery.five import items, string
 from celery.result import (
     GroupResult, ResultBase, allow_join_result, result_from_tuple,
 )
@@ -237,7 +237,7 @@ def prepare_exception(self, exc, serializer=None):
         serializer = self.serializer if serializer is None else serializer
         if serializer in EXCEPTION_ABLE_CODECS:
             return get_pickleable_exception(exc)
-        return {'exc_type': type(exc).__name__, 'exc_message': str(exc)}
+        return {'exc_type': type(exc).__name__, 'exc_message': string(exc)}
 
     def exception_to_python(self, exc):
         """Convert serialized exception to Python exception."""
diff --git a/t/unit/backends/test_base.py b/t/unit/backends/test_base.py
--- a/t/unit/backends/test_base.py
+++ b/t/unit/backends/test_base.py
@@ -139,6 +139,11 @@ def test_regular(self):
         y = self.b.exception_to_python(x)
         assert isinstance(y, KeyError)
 
+    def test_unicode_message(self):
+        message = u'\u03ac'
+        x = self.b.prepare_exception(Exception(message))
+        assert x == {'exc_message': message, 'exc_type': 'Exception'}
+
 
 class KVBackend(KeyValueStoreBackend):
     mget_returns_dict = False
UnicodeDecodeError when storing exception result in backend

## Output of ``celery -A proj report``

```
 -------------- celery@kubuntu-work v4.0.2 (latentcall)
---- **** -----
--- * ***  * -- Linux-3.19.0-15-generic-x86_64-with-Ubuntu-15.04-vivid 2017-02-22 09:58:51
-- * - **** ---
- ** ---------- [config]
- ** ---------- .> app:         __main__:0x7fd477a8cd10
- ** ---------- .> transport:   redis://localhost:6379/0
- ** ---------- .> results:     redis://localhost/1
- *** --- * --- .> concurrency: 2 (prefork)
-- ******* ---- .> task events: OFF (enable -E to monitor tasks in this worker)
--- ***** -----
 -------------- [queues]
                .> report           exchange=report(direct) key=report

[2017-02-22 09:59:14,653: ERROR/MainProcess] Task proj.test[ca8aa518-dd4f-4697-a7f4-b90a11036fd2] raised unexpected: UnicodeEncodeError('ascii', u'\U0001f41f \U0001f421 \U0001f42c', 0, 1, 'ordinal not in range(128)')
Traceback (most recent call last):
  File "/home/yangfei/work/code/pyvenv/venv/local/lib/python2.7/site-packages/celery/app/trace.py", line 381, in trace_task
    I, R, state, retval = on_error(task_request, exc, uuid)
  File "/home/yangfei/work/code/pyvenv/venv/local/lib/python2.7/site-packages/celery/app/trace.py", line 323, in on_error
    task, request, eager=eager, call_errbacks=call_errbacks,
  File "/home/yangfei/work/code/pyvenv/venv/local/lib/python2.7/site-packages/celery/app/trace.py", line 157, in handle_error_state
    call_errbacks=call_errbacks)
  File "/home/yangfei/work/code/pyvenv/venv/local/lib/python2.7/site-packages/celery/app/trace.py", line 202, in handle_failure
    call_errbacks=call_errbacks,
  File "/home/yangfei/work/code/pyvenv/venv/local/lib/python2.7/site-packages/celery/backends/base.py", line 163, in mark_as_failure
    traceback=traceback, request=request)
  File "/home/yangfei/work/code/pyvenv/venv/local/lib/python2.7/site-packages/celery/backends/base.py", line 308, in store_result
    result = self.encode_result(result, state)
  File "/home/yangfei/work/code/pyvenv/venv/local/lib/python2.7/site-packages/celery/backends/base.py", line 298, in encode_result
    return self.prepare_exception(result)
  File "/home/yangfei/work/code/pyvenv/venv/local/lib/python2.7/site-packages/celery/backends/base.py", line 241, in prepare_exception
    return {'exc_type': type(exc).__name__, 'exc_message': str(exc)}
UnicodeEncodeError: 'ascii' codec can't encode character u'\U0001f41f' in position 0: ordinal not in range(128)
```

## Steps to reproduce

1. Create proj.py. The celery app starts with a result backend configured, and the ``test`` task raises an Exception whose message contains unicode:

```
# coding: utf-8
from celery import Celery

app = Celery(broker='redis://localhost/0', backend='redis://localhost/1')
app.conf.task_queue_default = 'report'

@app.task
def test(msg):
    raise Exception(u'🐟 🐡 🐬')
```

2. Run a celery worker with ``celery -A proj worker -Q report``.
3. Dispatch a test task:

```
from proj import test
test.apply_async(args=['hello'], queue='report')
```

## Expected behavior

The task result (with the exception detail from the task) is saved in the backend successfully.

## Actual behavior

The backend stores the exception raised by celery itself.
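A minimal sketch of the fix in the patch above: converting the exception message with the *text* type instead of the bytes-oriented `str()` avoids the implicit ASCII encode on Python 2 (`celery.five.string` is exactly that alias, `unicode` on py2 and `str` on py3).

```python
text_type = type(u'')   # unicode on py2, str on py3

exc = Exception(u'\U0001f41f \U0001f421 \U0001f42c')

# str(exc) raises UnicodeEncodeError on Python 2 for a non-ASCII message;
# the text type conversion is safe on both interpreters.
message = text_type(exc)

payload = {'exc_type': type(exc).__name__, 'exc_message': message}
print(payload['exc_type'])   # Exception
```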
2017-03-11T15:44:12
celery/celery
3934
celery__celery-3934
[ "3763" ]
7cf709d63e517c4223ae6df508db8789184a57eb
diff --git a/celery/app/amqp.py b/celery/app/amqp.py
--- a/celery/app/amqp.py
+++ b/celery/app/amqp.py
@@ -521,7 +521,7 @@ def send_task_message(producer, name, message,
             exchange_type = 'direct'
 
         # convert to anon-exchange, when exchange not set and direct ex.
-        if not exchange or not routing_key and exchange_type == 'direct':
+        if (not exchange or not routing_key) and exchange_type == 'direct':
             exchange, routing_key = '', qname
         elif exchange is None:
             # not topic exchange, and exchange not undefined
diff --git a/t/unit/app/test_amqp.py b/t/unit/app/test_amqp.py
--- a/t/unit/app/test_amqp.py
+++ b/t/unit/app/test_amqp.py
@@ -264,6 +264,41 @@ def test_send_task_message__queue_string(self):
         assert kwargs['routing_key'] == 'foo'
         assert kwargs['exchange'] == ''
 
+    def test_send_task_message__broadcast_without_exchange(self):
+        from kombu.common import Broadcast
+        evd = Mock(name='evd')
+        self.app.amqp.send_task_message(
+            Mock(), 'foo', self.simple_message, retry=False,
+            routing_key='xyz', queue=Broadcast('abc'),
+            event_dispatcher=evd,
+        )
+        evd.publish.assert_called()
+        event = evd.publish.call_args[0][1]
+        assert event['routing_key'] == 'xyz'
+        assert event['exchange'] == 'abc'
+
+    def test_send_event_exchange_direct_with_exchange(self):
+        prod = Mock(name='prod')
+        self.app.amqp.send_task_message(
+            prod, 'foo', self.simple_message_no_sent_event, queue='bar',
+            retry=False, exchange_type='direct', exchange='xyz',
+        )
+        prod.publish.assert_called()
+        pub = prod.publish.call_args[1]
+        assert pub['routing_key'] == 'bar'
+        assert pub['exchange'] == ''
+
+    def test_send_event_exchange_direct_with_routing_key(self):
+        prod = Mock(name='prod')
+        self.app.amqp.send_task_message(
+            prod, 'foo', self.simple_message_no_sent_event, queue='bar',
+            retry=False, exchange_type='direct', routing_key='xyb',
+        )
+        prod.publish.assert_called()
+        pub = prod.publish.call_args[1]
+        assert pub['routing_key'] == 'bar'
+        assert pub['exchange'] == ''
+
     def test_send_event_exchange_string(self):
         evd = Mock(name='evd')
         self.app.amqp.send_task_message(
Broadcast messages in Celery 4.0.1 + Django do not work for me

## Checklist

- [x] I have included the output of ``celery -A proj report`` in the issue. (if you are not able to do this, then at least specify the Celery version affected).
- [x] I have verified that the issue exists against the `master` branch of Celery.

## Steps to reproduce

I'm using Django 1.8 + Celery 4.0.1 + Kombu 4.0.1 + Redis as the broker.

    from kombu.common import Broadcast

    CELERY_QUEUE_BROADCAST = 'broadcast'
    CELERY_QUEUES = (Broadcast(queue=CELERY_QUEUE_BROADCAST), )

    @task(ignore_result=True, queue=CELERY_QUEUE_BROADCAST)
    def broadcast_task():
        print "task runned"

I started two workers with `celery -A proj worker -Q broadcast` and then called `broadcast_task.delay()`.

## Expected behavior

Tasks are performed by each worker at the same time.

## Actual behavior

Tasks are performed by only one worker at a time.

I also placed my question on SO: http://stackoverflow.com/questions/41639334/broadcast-messages-in-celery-4-x
Duplicate of celery/celery#3740?

Maybe. But it's bad. I hope the fault is in my incorrect settings.
2017-03-23T18:00:44
celery/celery
3,952
celery__celery-3952
[ "3863", "3609" ]
4f6c3b5d184455b0fdacd7968bbba53583dddb03
diff --git a/celery/utils/functional.py b/celery/utils/functional.py
--- a/celery/utils/functional.py
+++ b/celery/utils/functional.py
@@ -266,7 +266,11 @@ def head_from_fun(fun, bound=False, debug=False):
     # in pure-Python.  Instead we use exec to create a new function
     # with an empty body, meaning it has the same performance as
     # as just calling a function.
-    if not inspect.isfunction(fun) and hasattr(fun, '__call__'):
+    is_function = inspect.isfunction(fun)
+    is_callable = hasattr(fun, '__call__')
+    is_method = inspect.ismethod(fun)
+
+    if not is_function and is_callable and not is_method:
         name, fun = fun.__class__.__name__, fun.__call__
     else:
         name = fun.__name__
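The new `is_method` guard matters because of how `inspect` classifies the different callables involved. A sketch of the distinctions the patched condition relies on (runs on both Python 2 and 3):

```
import inspect


class Test(object):
    @classmethod
    def cmtest(cls):
        return 'cmtest'

    @staticmethod
    def smtest():
        return 'smtest'


# Staticmethods look like plain functions, so they already took the
# sane `else` branch and used fun.__name__ directly.
assert inspect.isfunction(Test.smtest)
assert not inspect.ismethod(Test.smtest)

# Classmethods accessed through the class are bound methods: not
# functions, but callable, so the old condition rerouted them through
# fun.__call__ (a method-wrapper that getfullargspec() cannot
# introspect). The added `not is_method` guard sends them down the
# `else` branch instead.
assert not inspect.isfunction(Test.cmtest)
assert inspect.ismethod(Test.cmtest)
assert hasattr(Test.cmtest, '__call__')
```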
diff --git a/t/integration/test_canvas.py b/t/integration/test_canvas.py
--- a/t/integration/test_canvas.py
+++ b/t/integration/test_canvas.py
@@ -30,6 +30,8 @@ def test_complex_chain(self, manager):
 
     @flaky
     def test_group_chord_group_chain(self, manager):
+        from celery.five import bytes_if_py2
+
         if not manager.app.conf.result_backend.startswith('redis'):
             raise pytest.skip('Requires redis result backend.')
         redis_connection = StrictRedis()
@@ -40,11 +42,17 @@
 
         result = (before | connect | after).delay()
         result.get(timeout=TIMEOUT)
-        redis_messages = redis_connection.lrange('redis-echo', 0, -1)
-        assert set(['before 0', 'before 1', 'before 2']) == \
-            set(redis_messages[:3])
-        assert redis_messages[3] == 'connect'
-        assert set(redis_messages[4:]) == set(['after 0', 'after 1'])
+        redis_messages = list(map(
+            bytes_if_py2,
+            redis_connection.lrange('redis-echo', 0, -1)
+        ))
+        before_items = \
+            set(map(bytes_if_py2, (b'before 0', b'before 1', b'before 2')))
+        after_items = set(map(bytes_if_py2, (b'after 0', b'after 1')))
+
+        assert set(redis_messages[:3]) == before_items
+        assert redis_messages[3] == b'connect'
+        assert set(redis_messages[4:]) == after_items
         redis_connection.delete('redis-echo')
 
     @flaky
diff --git a/t/unit/utils/test_functional.py b/t/unit/utils/test_functional.py
--- a/t/unit/utils/test_functional.py
+++ b/t/unit/utils/test_functional.py
@@ -205,6 +205,18 @@ def test_from_fun_forced_kwargs(self):
         g(a=1, b=2)
         g(a=1, b=2, c=3)
 
+    def test_classmethod(self):
+        class A(object):
+            @classmethod
+            def f(cls, x):
+                return x
+
+        fun = head_from_fun(A.f, bound=False)
+        assert fun(A, 1) == 1
+
+        fun = head_from_fun(A.f, bound=True)
+        assert fun(1) == 1
+
 
 class test_fun_takes_argument:
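The integration-test churn above is a side note to the fix: under Python 3, redis-py returns `bytes` from `lrange()`, so the assertions are normalized with `bytes_if_py2` on both sides. A quick sketch of the pitfall (assumes a Redis server running on localhost):

```
from redis import StrictRedis

# redis-py hands back bytes under Python 3, so comparisons against
# native str literals silently fail; compare against bytes instead.
redis_connection = StrictRedis()
redis_connection.delete('redis-echo')
redis_connection.rpush('redis-echo', 'connect')

messages = redis_connection.lrange('redis-echo', 0, -1)
assert messages == [b'connect']   # bytes, not 'connect'
redis_connection.delete('redis-echo')
```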
Tasks created from classmethods no longer work
## Checklist
**Version info:**

* celery 4.0.2 (latentcall)
* kombu 4.0.2
* py 2.7.13
* billiard 3.5.0.2

**Verified against celery/master:** Yes

## Steps to reproduce
Save as reproducer.py and then run with ```celery -A reproducer worker```

```
from celery import Celery, task, Task

app = Celery('hello', broker='qpid://localhost/')


class ReproducerClass:
    @classmethod
    def task_classmethod(cls):
        return 'hello world'

    @staticmethod
    def task_staticmethod():
        return 'hello world'


# this works
statictask = task(ReproducerClass.task_staticmethod, base=Task)

# this blows up
classtask = task(ReproducerClass.task_classmethod, base=Task)
```

## Expected behavior
The classmethod task will function as a normal task, as it did in Celery 3.1.x.

## Actual behavior
```
Traceback (most recent call last):
  File "/usr/bin/celery", line 11, in <module>
    sys.exit(main())
  File "/usr/lib/python2.7/site-packages/celery/__main__.py", line 14, in main
    _main()
  File "/usr/lib/python2.7/site-packages/celery/bin/celery.py", line 326, in main
    cmd.execute_from_commandline(argv)
  File "/usr/lib/python2.7/site-packages/celery/bin/celery.py", line 488, in execute_from_commandline
    super(CeleryCommand, self).execute_from_commandline(argv)))
  File "/usr/lib/python2.7/site-packages/celery/bin/base.py", line 281, in execute_from_commandline
    return self.handle_argv(self.prog_name, argv[1:])
  File "/usr/lib/python2.7/site-packages/celery/bin/celery.py", line 480, in handle_argv
    return self.execute(command, argv)
  File "/usr/lib/python2.7/site-packages/celery/bin/celery.py", line 412, in execute
    ).run_from_argv(self.prog_name, argv[1:], command=argv[0])
  File "/usr/lib/python2.7/site-packages/celery/bin/worker.py", line 221, in run_from_argv
    return self(*args, **options)
  File "/usr/lib/python2.7/site-packages/celery/bin/base.py", line 244, in __call__
    ret = self.run(*args, **kwargs)
  File "/usr/lib/python2.7/site-packages/celery/bin/worker.py", line 255, in run
    **kwargs)
  File "/usr/lib/python2.7/site-packages/celery/worker/worker.py", line 95, in __init__
    self.on_before_init(**kwargs)
  File "/usr/lib/python2.7/site-packages/celery/apps/worker.py", line 97, in on_before_init
    trace.setup_worker_optimizations(self.app, self.hostname)
  File "/usr/lib/python2.7/site-packages/celery/app/trace.py", line 568, in setup_worker_optimizations
    app.finalize()
  File "/usr/lib/python2.7/site-packages/celery/app/base.py", line 512, in finalize
    _announce_app_finalized(self)
  File "/usr/lib/python2.7/site-packages/celery/_state.py", line 52, in _announce_app_finalized
    callback(app)
  File "/usr/lib/python2.7/site-packages/celery/app/base.py", line 412, in cons
    return app._task_from_fun(fun, **opts)
  File "/usr/lib/python2.7/site-packages/celery/app/base.py", line 453, in _task_from_fun
    '__header__': staticmethod(head_from_fun(fun, bound=bind)),
  File "/usr/lib/python2.7/site-packages/celery/utils/functional.py", line 275, in head_from_fun
    fun_args=_argsfromspec(getfullargspec(fun)),
  File "/usr/lib/python2.7/site-packages/vine/five.py", line 350, in getfullargspec
    s = _getargspec(fun)
  File "/usr/lib64/python2.7/inspect.py", line 815, in getargspec
    raise TypeError('{!r} is not a Python function'.format(func))
TypeError: <method-wrapper '__call__' of instancemethod object at 0x7f6ba1d7b410> is not a Python function
```

As you can see, the entire traceback is within Celery, leaving no indication of where the problem might be.

Task fails with a classmethod in version 4.0.0
## Checklist
- [x] I have included the output of ``celery -A proj report`` in the issue. (if you are not able to do this, then at least specify the Celery version affected).
- [x] I have verified that the issue exists against the `master` branch of Celery.

## Steps to reproduce
I use this command: `celery -A base worker -Q mail_remind,sms_remind -l INFO`

```
class PushBullet(object):
    headers = config.PushBullet.headers
    phone_id = config.PushBullet.phone_id
    pc_id = config.PushBullet.pc_id
    push_url = config.PushBullet.push_url

    @classmethod
    @app.task
    def send_sms_to_phone(cls, title='', body='', device_iden=phone_id):
        if title or body:
            data = {
                "type": "note",
                "title": title,
                "body": body,
                "device_iden": device_iden
            }
            # This is requests.post
            post(cls.push_url, cls.headers, data)
```

And then I call it like this: `PushBullet.send_sms_to_phone.apply_async(args=(title, body))`, and it raises an error.

## Expected behavior
When I call it directly, like `PushBullet.send_sms_to_phone(title, body)`, it works fine.

## Actual behavior
```
[2016-11-19 11:24:27,591: ERROR/PoolWorker-6] Task base.sms_util.send_sms_to_phone[87afdb95-06b8-4eb4-bf52-91afa9913b2c] raised unexpected: AttributeError("'str' object has no attribute 'push_url'",)
Traceback (most recent call last):
  File "/Users/wyx/project/message/.env/lib/python3.5/site-packages/celery/app/trace.py", line 368, in trace_task
    R = retval = fun(*args, **kwargs)
  File "/Users/wyx/project/message/.env/lib/python3.5/site-packages/celery/app/trace.py", line 623, in __protected_call__
    return self.run(*args, **kwargs)
  File "/Users/wyx/project/message/base/sms_util.py", line 48, in send_sms_to_phone
    post(cls.push_url, cls.headers, data)
AttributeError: 'str' object has no attribute 'push_url'
```
After debugging, we found that the deficiency is [here](https://github.com/celery/celery/blob/1992cb07f1d4de44ab80ff7ee8ab93614517d7ae/celery/utils/functional.py#L269). Here is the excerpt:

```
if not inspect.isfunction(fun) and hasattr(fun, '__call__'):
    name, fun = fun.__class__.__name__, fun.__call__
else:
    name = fun.__name__
```

If you mess around with the following code in a REPL or IPython, we can figure out what's going on.

```
In [1]: class Test:
   ...:     @classmethod
   ...:     def cmtest(cls):
   ...:         print "cmtest"
   ...:     @staticmethod
   ...:     def smtest():
   ...:         print "smtest"
   ...:

In [2]: import inspect

In [3]: inspect.isfunction(Test.cmtest)
Out[3]: False

In [4]: inspect.isfunction(Test.smtest)
Out[4]: True

In [5]: Test.cmtest.__call__
Out[5]: <method-wrapper '__call__' of instancemethod object at 0x7fce55138730>

In [6]: Test.smtest.__call__
Out[6]: <method-wrapper '__call__' of function object at 0x7fce5393f758>
```

The staticmethod takes the "else" branch, while the classmethod takes the "if" branch. That's about as far down the rabbit hole as I wanted to go. Nevertheless, it's a regression from the Celery 3.1.x behavior.
Just realized that an issue for this already exists: #3609

Did this work in 3.1? I have a hard time seeing how this can work, considering that @app.task doesn't return a function, it returns a class instance that happens to be callable.

I came across this independently and didn't notice this issue. I actually get an entirely different traceback, but the same results: classmethod tasks no longer work. More info in #3863.
2017-04-01T06:10:32