problem_id (stringlengths 18-22) | source (stringclasses 1 value) | task_type (stringclasses 1 value) | in_source_id (stringlengths 13-58) | prompt (stringlengths 1.71k-18.9k) | golden_diff (stringlengths 145-5.13k) | verification_info (stringlengths 465-23.6k) | num_tokens_prompt (int64 556-4.1k) | num_tokens_diff (int64 47-1.02k) |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_10956 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-413 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
E2532 State Machine Definition key (OutputPath) for State of Type (Task) is not valid
cfn-lint version: 0.7.3
I am getting the above error when trying to lint a CF template containing a step function. The step function code is working fine in AWS console though.
"CreatePublishedRequest": {
"Type": "Task",
"Resource": "{$createPublishedRequest}",
"ResultPath":"$.publishedRequest",
"OutputPath":"$.publishedRequest",
"Next": "PutRequest"
},
"PutRequest": {
"Type": "Task",
"Resource": "{$updateKey}",
"ResultPath":"$.response",
"Next": "Take Down Mock"
},
When changing "PutRequest" to use InputPath instead, I get the same error, but for InputPath.
</issue>
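For orientation (an editorial sketch, not part of the original report): the rejection comes from an allow-list membership check in the rule shown below, whose list of common state fields does not use the Amazon States Language names `InputPath`/`OutputPath`. A minimal Python illustration of that check, with the key lists mirroring the rule and the example state taken from the report above:

```python
# Sketch of the E2532 membership check (key lists mirror the rule below).
common_state_keys = ['Next', 'End', 'Type', 'Comment', 'Input', 'Ouptut']
task_keys = ['Resource', 'ResultPath', 'Retry', 'Catch', 'TimeoutSeconds', 'HeartbeatSeconds']

state = {
    "Type": "Task",
    "Resource": "{$createPublishedRequest}",
    "ResultPath": "$.publishedRequest",
    "OutputPath": "$.publishedRequest",
    "Next": "PutRequest",
}

# Any key outside the allow-list is reported as "not valid".
invalid = [k for k in state if k not in common_state_keys + task_keys]
print(invalid)  # ['OutputPath'] -> matches the reported E2532 message
```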
<code>
[start of src/cfnlint/rules/resources/stepfunctions/StateMachine.py]
1 """
2 Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3
4 Permission is hereby granted, free of charge, to any person obtaining a copy of this
5 software and associated documentation files (the "Software"), to deal in the Software
6 without restriction, including without limitation the rights to use, copy, modify,
7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
8 permit persons to whom the Software is furnished to do so.
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
16 """
17 import json
18 import six
19 from cfnlint import CloudFormationLintRule
20 from cfnlint import RuleMatch
21
22
23 class StateMachine(CloudFormationLintRule):
24 """Check State Machine Definition"""
25 id = 'E2532'
26 shortdesc = 'Check State Machine Definition for proper syntax'
27 description = 'Check the State Machine String Definition to make sure its JSON. ' \
28 'Validate basic syntax of the file to determine validity.'
29 source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-stepfunctions-statemachine.html'
30 tags = ['resources', 'stepfunctions']
31
32 def __init__(self):
33 """Init"""
34 self.resource_property_types.append('AWS::StepFunctions::StateMachine')
35
36 def _check_state_json(self, def_json, state_name, path):
37 """Check State JSON Definition"""
38 matches = []
39
40 common_state_keys = [
41 'Next',
42 'End',
43 'Type',
44 'Comment',
45 'Input',
46 'Ouptut',
47 ]
48 common_state_required_keys = [
49 'Type',
50 ]
51 state_key_types = {
52 'Pass': ['Result', 'ResultPath'],
53 'Task': ['Resource', 'ResultPath', 'Retry', 'Catch', 'TimeoutSeconds', 'HeartbeatSeconds'],
54 'Choice': ['Choices', 'Default'],
55 'Wait': ['Seconds', 'Timestamp', 'SecondsPath', 'TimestampPath'],
56 'Succeed': [],
57 'Fail': ['Cause', 'Error'],
58 'Parallel': ['Branches', 'ResultPath', 'Retry', 'Catch']
59 }
60 state_required_types = {
61 'Pass': [],
62 'Task': ['Resource'],
63 'Choice': ['Choices'],
64 'Wait': [],
65 'Succeed': [],
66 'Fail': [],
67 'Parallel': ['Branches']
68 }
69
70 for req_key in common_state_required_keys:
71 if req_key not in def_json:
72 message = 'State Machine Definition required key (%s) for State (%s) is missing' % (req_key, state_name)
73 matches.append(RuleMatch(path, message))
74 return matches
75
76 state_type = def_json.get('Type')
77
78 if state_type in state_key_types:
79 for state_key, _ in def_json.items():
80 if state_key not in common_state_keys + state_key_types.get(state_type, []):
81 message = 'State Machine Definition key (%s) for State (%s) of Type (%s) is not valid' % (state_key, state_name, state_type)
82 matches.append(RuleMatch(path, message))
83 for req_key in common_state_required_keys + state_required_types.get(state_type, []):
84 if req_key not in def_json:
85 message = 'State Machine Definition required key (%s) for State (%s) of Type (%s) is missing' % (req_key, state_name, state_type)
86 matches.append(RuleMatch(path, message))
87 return matches
88 else:
89 message = 'State Machine Definition Type (%s) is not valid' % (state_type)
90 matches.append(RuleMatch(path, message))
91
92 return matches
93
94 def _check_definition_json(self, def_json, path):
95 """Check JSON Definition"""
96 matches = []
97
98 top_level_keys = [
99 'Comment',
100 'StartAt',
101 'TimeoutSeconds',
102 'Version',
103 'States'
104 ]
105 top_level_required_keys = [
106 'StartAt',
107 'States'
108 ]
109 for top_key, _ in def_json.items():
110 if top_key not in top_level_keys:
111 message = 'State Machine Definition key (%s) is not valid' % top_key
112 matches.append(RuleMatch(path, message))
113
114 for req_key in top_level_required_keys:
115 if req_key not in def_json:
116 message = 'State Machine Definition required key (%s) is missing' % req_key
117 matches.append(RuleMatch(path, message))
118
119 for state_name, state_value in def_json.get('States', {}).items():
120 matches.extend(self._check_state_json(state_value, state_name, path))
121 return matches
122
123 def check_value(self, value, path):
124 """Check Definition Value"""
125 matches = []
126 try:
127 def_json = json.loads(value)
128 # pylint: disable=W0703
129 except Exception as err:
130 message = 'State Machine Definition needs to be formatted as JSON. Error %s' % err
131 matches.append(RuleMatch(path, message))
132 return matches
133
134 matches.extend(self._check_definition_json(def_json, path))
135 return matches
136
137 def check_sub(self, value, path):
138 """Check Sub Object"""
139 matches = []
140 if isinstance(value, list):
141 matches.extend(self.check_value(value[0], path))
142 elif isinstance(value, six.string_types):
143 matches.extend(self.check_value(value, path))
144
145 return matches
146
147 def match_resource_properties(self, properties, _, path, cfn):
148 """Check CloudFormation Properties"""
149 matches = []
150
151 matches.extend(
152 cfn.check_value(
153 obj=properties, key='DefinitionString',
154 path=path[:],
155 check_value=self.check_value,
156 check_sub=self.check_sub
157 ))
158
159 return matches
160
[end of src/cfnlint/rules/resources/stepfunctions/StateMachine.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cfnlint/rules/resources/stepfunctions/StateMachine.py b/src/cfnlint/rules/resources/stepfunctions/StateMachine.py
--- a/src/cfnlint/rules/resources/stepfunctions/StateMachine.py
+++ b/src/cfnlint/rules/resources/stepfunctions/StateMachine.py
@@ -37,13 +37,14 @@
"""Check State JSON Definition"""
matches = []
+ # https://docs.aws.amazon.com/step-functions/latest/dg/amazon-states-language-common-fields.html
common_state_keys = [
'Next',
'End',
'Type',
'Comment',
- 'Input',
- 'Ouptut',
+ 'InputPath',
+ 'OutputPath',
]
common_state_required_keys = [
'Type',
| {"golden_diff": "diff --git a/src/cfnlint/rules/resources/stepfunctions/StateMachine.py b/src/cfnlint/rules/resources/stepfunctions/StateMachine.py\n--- a/src/cfnlint/rules/resources/stepfunctions/StateMachine.py\n+++ b/src/cfnlint/rules/resources/stepfunctions/StateMachine.py\n@@ -37,13 +37,14 @@\n \"\"\"Check State JSON Definition\"\"\"\n matches = []\n \n+ # https://docs.aws.amazon.com/step-functions/latest/dg/amazon-states-language-common-fields.html\n common_state_keys = [\n 'Next',\n 'End',\n 'Type',\n 'Comment',\n- 'Input',\n- 'Ouptut',\n+ 'InputPath',\n+ 'OutputPath',\n ]\n common_state_required_keys = [\n 'Type',\n", "issue": "E2532 State Machine Definition key (OutputPath) for State of Type (Task) is not valid\ncfn-lint version: 0.7.3\r\n\r\nI am getting the above error when trying to lint a CF template containing a step function. The step function code is working fine in AWS console though. \r\n\r\n\"CreatePublishedRequest\": {\r\n \"Type\": \"Task\",\r\n \"Resource\": \"{$createPublishedRequest}\",\r\n \"ResultPath\":\"$.publishedRequest\",\r\n \"OutputPath\":\"$.publishedRequest\",\r\n \"Next\": \"PutRequest\"\r\n },\r\n\"PutRequest\": {\r\n \"Type\": \"Task\",\r\n \"Resource\": \"{$updateKey}\",\r\n \"ResultPath\":\"$.response\",\r\n \"Next\": \"Take Down Mock\"\r\n },\r\n\r\nWhen trying to change to using InputPath in \"PutRequest\" instead I am getting the same error, but for InputPath instead. \r\n\n", "before_files": [{"content": "\"\"\"\n Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport json\nimport six\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\n\n\nclass StateMachine(CloudFormationLintRule):\n \"\"\"Check State Machine Definition\"\"\"\n id = 'E2532'\n shortdesc = 'Check State Machine Definition for proper syntax'\n description = 'Check the State Machine String Definition to make sure its JSON. 
' \\\n 'Validate basic syntax of the file to determine validity.'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-stepfunctions-statemachine.html'\n tags = ['resources', 'stepfunctions']\n\n def __init__(self):\n \"\"\"Init\"\"\"\n self.resource_property_types.append('AWS::StepFunctions::StateMachine')\n\n def _check_state_json(self, def_json, state_name, path):\n \"\"\"Check State JSON Definition\"\"\"\n matches = []\n\n common_state_keys = [\n 'Next',\n 'End',\n 'Type',\n 'Comment',\n 'Input',\n 'Ouptut',\n ]\n common_state_required_keys = [\n 'Type',\n ]\n state_key_types = {\n 'Pass': ['Result', 'ResultPath'],\n 'Task': ['Resource', 'ResultPath', 'Retry', 'Catch', 'TimeoutSeconds', 'HeartbeatSeconds'],\n 'Choice': ['Choices', 'Default'],\n 'Wait': ['Seconds', 'Timestamp', 'SecondsPath', 'TimestampPath'],\n 'Succeed': [],\n 'Fail': ['Cause', 'Error'],\n 'Parallel': ['Branches', 'ResultPath', 'Retry', 'Catch']\n }\n state_required_types = {\n 'Pass': [],\n 'Task': ['Resource'],\n 'Choice': ['Choices'],\n 'Wait': [],\n 'Succeed': [],\n 'Fail': [],\n 'Parallel': ['Branches']\n }\n\n for req_key in common_state_required_keys:\n if req_key not in def_json:\n message = 'State Machine Definition required key (%s) for State (%s) is missing' % (req_key, state_name)\n matches.append(RuleMatch(path, message))\n return matches\n\n state_type = def_json.get('Type')\n\n if state_type in state_key_types:\n for state_key, _ in def_json.items():\n if state_key not in common_state_keys + state_key_types.get(state_type, []):\n message = 'State Machine Definition key (%s) for State (%s) of Type (%s) is not valid' % (state_key, state_name, state_type)\n matches.append(RuleMatch(path, message))\n for req_key in common_state_required_keys + state_required_types.get(state_type, []):\n if req_key not in def_json:\n message = 'State Machine Definition required key (%s) for State (%s) of Type (%s) is missing' % (req_key, state_name, state_type)\n matches.append(RuleMatch(path, message))\n return matches\n else:\n message = 'State Machine Definition Type (%s) is not valid' % (state_type)\n matches.append(RuleMatch(path, message))\n\n return matches\n\n def _check_definition_json(self, def_json, path):\n \"\"\"Check JSON Definition\"\"\"\n matches = []\n\n top_level_keys = [\n 'Comment',\n 'StartAt',\n 'TimeoutSeconds',\n 'Version',\n 'States'\n ]\n top_level_required_keys = [\n 'StartAt',\n 'States'\n ]\n for top_key, _ in def_json.items():\n if top_key not in top_level_keys:\n message = 'State Machine Definition key (%s) is not valid' % top_key\n matches.append(RuleMatch(path, message))\n\n for req_key in top_level_required_keys:\n if req_key not in def_json:\n message = 'State Machine Definition required key (%s) is missing' % req_key\n matches.append(RuleMatch(path, message))\n\n for state_name, state_value in def_json.get('States', {}).items():\n matches.extend(self._check_state_json(state_value, state_name, path))\n return matches\n\n def check_value(self, value, path):\n \"\"\"Check Definition Value\"\"\"\n matches = []\n try:\n def_json = json.loads(value)\n # pylint: disable=W0703\n except Exception as err:\n message = 'State Machine Definition needs to be formatted as JSON. 
Error %s' % err\n matches.append(RuleMatch(path, message))\n return matches\n\n matches.extend(self._check_definition_json(def_json, path))\n return matches\n\n def check_sub(self, value, path):\n \"\"\"Check Sub Object\"\"\"\n matches = []\n if isinstance(value, list):\n matches.extend(self.check_value(value[0], path))\n elif isinstance(value, six.string_types):\n matches.extend(self.check_value(value, path))\n\n return matches\n\n def match_resource_properties(self, properties, _, path, cfn):\n \"\"\"Check CloudFormation Properties\"\"\"\n matches = []\n\n matches.extend(\n cfn.check_value(\n obj=properties, key='DefinitionString',\n path=path[:],\n check_value=self.check_value,\n check_sub=self.check_sub\n ))\n\n return matches\n", "path": "src/cfnlint/rules/resources/stepfunctions/StateMachine.py"}]} | 2,434 | 168 |
gh_patches_debug_48428 | rasdani/github-patches | git_diff | pytorch__ignite-930 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Metrics objects are not pickleable
Pickling Metrics objects fails due to an infinite loop.
The reason for that is the following:
To make metrics composable, the base class Metric has [methods](https://github.com/pytorch/ignite/blob/master/ignite/metrics/metric.py#L86) to return a `MetricsLambda` object. However, this means that pickling `Metrics` requires pickling `MetricsLambda`, but since `MetricsLambda` depends on `Metrics` we enter an infinite loop.
</issue>
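A minimal, standalone sketch of the failure mode (an editorial illustration using a stand-in class, not ignite's actual `Metric`): a `__getattr__` that never raises `AttributeError` means pickle's lookup of optional hooks such as `__setstate__` (and, on older Pythons, `__getstate__`) gets a wrapper back instead of an error, and calling that wrapper re-enters `__getattr__`:

```python
import pickle

class Composable:
    """Stand-in for a metric class whose __getattr__ builds composed metrics."""
    def __getattr__(self, name):
        # Never raises AttributeError, so pickle's optional-hook lookups
        # succeed with a wrapper that just re-enters __getattr__ when called.
        def wrapper(*args, **kwargs):
            return getattr(self, name)(*args, **kwargs)
        return wrapper

c = Composable()
c.value = 1
pickle.loads(pickle.dumps(c))  # RecursionError: the "infinite loop"
```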
<code>
[start of ignite/metrics/metric.py]
1 import numbers
2 from abc import ABCMeta, abstractmethod
3 from functools import wraps
4 from collections.abc import Mapping
5 import warnings
6
7 from typing import Callable, Union, Optional, Any
8
9 import torch
10 import torch.distributed as dist
11
12 from ignite.engine import Events, Engine
13
14 __all__ = ["Metric"]
15
16
17 class Metric(metaclass=ABCMeta):
18 """
19 Base class for all Metrics.
20
21 Args:
22 output_transform (callable, optional): a callable that is used to transform the
23 :class:`~ignite.engine.Engine`'s `process_function`'s output into the
24 form expected by the metric. This can be useful if, for example, you have a multi-output model and
25 you want to compute the metric with respect to one of the outputs.
26 By default, metrics require the output as `(y_pred, y)` or `{'y_pred': y_pred, 'y': y}`.
27 device (str of torch.device, optional): device specification in case of distributed computation usage.
28 In most of the cases, it can be defined as "cuda:local_rank" or "cuda"
29 if already set `torch.cuda.set_device(local_rank)`. By default, if a distributed process group is
30 initialized and available, device is set to `cuda`.
31
32 """
33
34 _required_output_keys = ("y_pred", "y")
35
36 def __init__(self, output_transform: Callable = lambda x: x, device: Optional[Union[str, torch.device]] = None):
37 self._output_transform = output_transform
38
39 # Check device if distributed is initialized:
40 if dist.is_available() and dist.is_initialized():
41
42 # check if reset and update methods are decorated. Compute may not be decorated
43 if not (hasattr(self.reset, "_decorated") and hasattr(self.update, "_decorated")):
44 warnings.warn(
45 "{} class does not support distributed setting. Computed result is not collected "
46 "across all computing devices".format(self.__class__.__name__),
47 RuntimeWarning,
48 )
49 if device is None:
50 device = "cuda"
51 device = torch.device(device)
52 self._device = device
53 self._is_reduced = False
54 self.reset()
55
56 @abstractmethod
57 def reset(self) -> None:
58 """
59 Resets the metric to it's initial state.
60
61 This is called at the start of each epoch.
62 """
63 pass
64
65 @abstractmethod
66 def update(self, output) -> None:
67 """
68 Updates the metric's state using the passed batch output.
69
70 This is called once for each batch.
71
72 Args:
73 output: the is the output from the engine's process function.
74 """
75 pass
76
77 @abstractmethod
78 def compute(self) -> Any:
79 """
80 Computes the metric based on it's accumulated state.
81
82 This is called at the end of each epoch.
83
84 Returns:
85 Any: the actual quantity of interest.
86
87 Raises:
88 NotComputableError: raised when the metric cannot be computed.
89 """
90 pass
91
92 def _sync_all_reduce(self, tensor: Union[torch.Tensor, numbers.Number]) -> Union[torch.Tensor, numbers.Number]:
93 if not (dist.is_available() and dist.is_initialized()):
94 # Nothing to reduce
95 return tensor
96
97 tensor_to_number = False
98 if isinstance(tensor, numbers.Number):
99 tensor = torch.tensor(tensor, device=self._device)
100 tensor_to_number = True
101
102 if isinstance(tensor, torch.Tensor):
103 # check if the tensor is at specified device
104 if tensor.device != self._device:
105 tensor = tensor.to(self._device)
106 else:
107 raise TypeError("Unhandled input type {}".format(type(tensor)))
108
109 # synchronize and reduce
110 dist.barrier()
111 dist.all_reduce(tensor)
112
113 if tensor_to_number:
114 return tensor.item()
115 return tensor
116
117 def started(self, engine: Engine) -> None:
118 self.reset()
119
120 @torch.no_grad()
121 def iteration_completed(self, engine: Engine) -> None:
122 output = self._output_transform(engine.state.output)
123 if isinstance(output, Mapping):
124 if self._required_output_keys is None:
125 raise TypeError(
126 "Transformed engine output for {} metric should be a tuple/list, but given {}".format(
127 self.__class__.__name__, type(output)
128 )
129 )
130 if not all([k in output for k in self._required_output_keys]):
131 raise ValueError(
132 "When transformed engine's output is a mapping, "
133 "it should contain {} keys, but given {}".format(self._required_output_keys, list(output.keys()))
134 )
135 output = tuple(output[k] for k in self._required_output_keys)
136 self.update(output)
137
138 def completed(self, engine: Engine, name: str) -> None:
139 result = self.compute()
140 if torch.is_tensor(result) and len(result.shape) == 0:
141 result = result.item()
142 engine.state.metrics[name] = result
143
144 def attach(self, engine: Engine, name: str) -> None:
145 """
146 Attaches current metric to provided engine. On the end of engine's run,
147 `engine.state.metrics` dictionary will contain computed metric's value under provided name.
148
149 Args:
150 engine (Engine): the engine to which the metric must be attached
151 name (str): the name of the metric to attach
152
153 Example:
154
155 .. code-block:: python
156
157 metric = ...
158 metric.attach(engine, "mymetric")
159
160 assert "mymetric" in engine.run(data).metrics
161
162 assert metric.is_attached(engine)
163 """
164 engine.add_event_handler(Events.EPOCH_COMPLETED, self.completed, name)
165 if not engine.has_event_handler(self.started, Events.EPOCH_STARTED):
166 engine.add_event_handler(Events.EPOCH_STARTED, self.started)
167 if not engine.has_event_handler(self.iteration_completed, Events.ITERATION_COMPLETED):
168 engine.add_event_handler(Events.ITERATION_COMPLETED, self.iteration_completed)
169
170 def detach(self, engine: Engine) -> None:
171 """
172 Detaches current metric from the engine and no metric's computation is done during the run.
173 This method in conjunction with :meth:`~ignite.metrics.Metric.attach` can be useful if several
174 metrics need to be computed with different periods. For example, one metric is computed every training epoch
175 and another metric (e.g. more expensive one) is done every n-th training epoch.
176
177 Args:
178 engine (Engine): the engine from which the metric must be detached
179
180 Example:
181
182 .. code-block:: python
183
184 metric = ...
185 engine = ...
186 metric.detach(engine)
187
188 assert "mymetric" not in engine.run(data).metrics
189
190 assert not metric.is_attached(engine)
191 """
192 if engine.has_event_handler(self.completed, Events.EPOCH_COMPLETED):
193 engine.remove_event_handler(self.completed, Events.EPOCH_COMPLETED)
194 if engine.has_event_handler(self.started, Events.EPOCH_STARTED):
195 engine.remove_event_handler(self.started, Events.EPOCH_STARTED)
196 if engine.has_event_handler(self.iteration_completed, Events.ITERATION_COMPLETED):
197 engine.remove_event_handler(self.iteration_completed, Events.ITERATION_COMPLETED)
198
199 def is_attached(self, engine: Engine) -> bool:
200 """
201 Checks if current metric is attached to provided engine. If attached, metric's computed
202 value is written to `engine.state.metrics` dictionary.
203
204 Args:
205 engine (Engine): the engine checked from which the metric should be attached
206 """
207 return engine.has_event_handler(self.completed, Events.EPOCH_COMPLETED)
208
209 def __add__(self, other):
210 from ignite.metrics import MetricsLambda
211
212 return MetricsLambda(lambda x, y: x + y, self, other)
213
214 def __radd__(self, other):
215 from ignite.metrics import MetricsLambda
216
217 return MetricsLambda(lambda x, y: x + y, other, self)
218
219 def __sub__(self, other):
220 from ignite.metrics import MetricsLambda
221
222 return MetricsLambda(lambda x, y: x - y, self, other)
223
224 def __rsub__(self, other):
225 from ignite.metrics import MetricsLambda
226
227 return MetricsLambda(lambda x, y: x - y, other, self)
228
229 def __mul__(self, other):
230 from ignite.metrics import MetricsLambda
231
232 return MetricsLambda(lambda x, y: x * y, self, other)
233
234 def __rmul__(self, other):
235 from ignite.metrics import MetricsLambda
236
237 return MetricsLambda(lambda x, y: x * y, other, self)
238
239 def __pow__(self, other):
240 from ignite.metrics import MetricsLambda
241
242 return MetricsLambda(lambda x, y: x ** y, self, other)
243
244 def __rpow__(self, other):
245 from ignite.metrics import MetricsLambda
246
247 return MetricsLambda(lambda x, y: x ** y, other, self)
248
249 def __mod__(self, other):
250 from ignite.metrics import MetricsLambda
251
252 return MetricsLambda(lambda x, y: x % y, self, other)
253
254 def __div__(self, other):
255 from ignite.metrics import MetricsLambda
256
257 return MetricsLambda(lambda x, y: x.__div__(y), self, other)
258
259 def __rdiv__(self, other):
260 from ignite.metrics import MetricsLambda
261
262 return MetricsLambda(lambda x, y: x.__div__(y), other, self)
263
264 def __truediv__(self, other):
265 from ignite.metrics import MetricsLambda
266
267 return MetricsLambda(lambda x, y: x.__truediv__(y), self, other)
268
269 def __rtruediv__(self, other):
270 from ignite.metrics import MetricsLambda
271
272 return MetricsLambda(lambda x, y: x.__truediv__(y), other, self)
273
274 def __floordiv__(self, other):
275 from ignite.metrics import MetricsLambda
276
277 return MetricsLambda(lambda x, y: x // y, self, other)
278
279 def __getattr__(self, attr: str) -> Callable:
280 from ignite.metrics import MetricsLambda
281
282 def fn(x, *args, **kwargs):
283 return getattr(x, attr)(*args, **kwargs)
284
285 def wrapper(*args, **kwargs):
286 return MetricsLambda(fn, self, *args, **kwargs)
287
288 return wrapper
289
290 def __getitem__(self, index: Any):
291 from ignite.metrics import MetricsLambda
292
293 return MetricsLambda(lambda x: x[index], self)
294
295
296 def sync_all_reduce(*attrs) -> Callable:
297 def wrapper(func: Callable) -> Callable:
298 @wraps(func)
299 def another_wrapper(self: Metric, *args, **kwargs) -> Callable:
300 if not isinstance(self, Metric):
301 raise RuntimeError(
302 "Decorator sync_all_reduce should be used on " "ignite.metric.Metric class methods only"
303 )
304
305 if len(attrs) > 0 and not self._is_reduced:
306 for attr in attrs:
307 t = getattr(self, attr, None)
308 if t is not None:
309 t = self._sync_all_reduce(t)
310 self._is_reduced = True
311 setattr(self, attr, t)
312
313 return func(self, *args, **kwargs)
314
315 return another_wrapper
316
317 wrapper._decorated = True
318 return wrapper
319
320
321 def reinit__is_reduced(func: Callable) -> Callable:
322 @wraps(func)
323 def wrapper(self, *args, **kwargs):
324 func(self, *args, **kwargs)
325 self._is_reduced = False
326
327 wrapper._decorated = True
328 return wrapper
329
[end of ignite/metrics/metric.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ignite/metrics/metric.py b/ignite/metrics/metric.py
--- a/ignite/metrics/metric.py
+++ b/ignite/metrics/metric.py
@@ -292,6 +292,12 @@
return MetricsLambda(lambda x: x[index], self)
+ def __getstate__(self):
+ return self.__dict__
+
+ def __setstate__(self, d):
+ self.__dict__.update(d)
+
def sync_all_reduce(*attrs) -> Callable:
def wrapper(func: Callable) -> Callable:
| {"golden_diff": "diff --git a/ignite/metrics/metric.py b/ignite/metrics/metric.py\n--- a/ignite/metrics/metric.py\n+++ b/ignite/metrics/metric.py\n@@ -292,6 +292,12 @@\n \n return MetricsLambda(lambda x: x[index], self)\n \n+ def __getstate__(self):\n+ return self.__dict__\n+\n+ def __setstate__(self, d):\n+ self.__dict__.update(d)\n+\n \n def sync_all_reduce(*attrs) -> Callable:\n def wrapper(func: Callable) -> Callable:\n", "issue": "Metrics objects are not pickleable\nPickling Metrics objects fails due to infinite loop.\r\n\r\nThe reason for that is the following:\r\n\r\nTo make metrics composable, the base class Metric has [methods](https://github.com/pytorch/ignite/blob/master/ignite/metrics/metric.py#L86) to return a `MetricsLambda` object. However, this means that pickling `Metrics` requires pickling `MetricsLambda`, but since `MetricsLambda` depends on `Metrics` we enter an infinite loop.\r\n\r\n\n", "before_files": [{"content": "import numbers\nfrom abc import ABCMeta, abstractmethod\nfrom functools import wraps\nfrom collections.abc import Mapping\nimport warnings\n\nfrom typing import Callable, Union, Optional, Any\n\nimport torch\nimport torch.distributed as dist\n\nfrom ignite.engine import Events, Engine\n\n__all__ = [\"Metric\"]\n\n\nclass Metric(metaclass=ABCMeta):\n \"\"\"\n Base class for all Metrics.\n\n Args:\n output_transform (callable, optional): a callable that is used to transform the\n :class:`~ignite.engine.Engine`'s `process_function`'s output into the\n form expected by the metric. This can be useful if, for example, you have a multi-output model and\n you want to compute the metric with respect to one of the outputs.\n By default, metrics require the output as `(y_pred, y)` or `{'y_pred': y_pred, 'y': y}`.\n device (str of torch.device, optional): device specification in case of distributed computation usage.\n In most of the cases, it can be defined as \"cuda:local_rank\" or \"cuda\"\n if already set `torch.cuda.set_device(local_rank)`. By default, if a distributed process group is\n initialized and available, device is set to `cuda`.\n\n \"\"\"\n\n _required_output_keys = (\"y_pred\", \"y\")\n\n def __init__(self, output_transform: Callable = lambda x: x, device: Optional[Union[str, torch.device]] = None):\n self._output_transform = output_transform\n\n # Check device if distributed is initialized:\n if dist.is_available() and dist.is_initialized():\n\n # check if reset and update methods are decorated. Compute may not be decorated\n if not (hasattr(self.reset, \"_decorated\") and hasattr(self.update, \"_decorated\")):\n warnings.warn(\n \"{} class does not support distributed setting. 
Computed result is not collected \"\n \"across all computing devices\".format(self.__class__.__name__),\n RuntimeWarning,\n )\n if device is None:\n device = \"cuda\"\n device = torch.device(device)\n self._device = device\n self._is_reduced = False\n self.reset()\n\n @abstractmethod\n def reset(self) -> None:\n \"\"\"\n Resets the metric to it's initial state.\n\n This is called at the start of each epoch.\n \"\"\"\n pass\n\n @abstractmethod\n def update(self, output) -> None:\n \"\"\"\n Updates the metric's state using the passed batch output.\n\n This is called once for each batch.\n\n Args:\n output: the is the output from the engine's process function.\n \"\"\"\n pass\n\n @abstractmethod\n def compute(self) -> Any:\n \"\"\"\n Computes the metric based on it's accumulated state.\n\n This is called at the end of each epoch.\n\n Returns:\n Any: the actual quantity of interest.\n\n Raises:\n NotComputableError: raised when the metric cannot be computed.\n \"\"\"\n pass\n\n def _sync_all_reduce(self, tensor: Union[torch.Tensor, numbers.Number]) -> Union[torch.Tensor, numbers.Number]:\n if not (dist.is_available() and dist.is_initialized()):\n # Nothing to reduce\n return tensor\n\n tensor_to_number = False\n if isinstance(tensor, numbers.Number):\n tensor = torch.tensor(tensor, device=self._device)\n tensor_to_number = True\n\n if isinstance(tensor, torch.Tensor):\n # check if the tensor is at specified device\n if tensor.device != self._device:\n tensor = tensor.to(self._device)\n else:\n raise TypeError(\"Unhandled input type {}\".format(type(tensor)))\n\n # synchronize and reduce\n dist.barrier()\n dist.all_reduce(tensor)\n\n if tensor_to_number:\n return tensor.item()\n return tensor\n\n def started(self, engine: Engine) -> None:\n self.reset()\n\n @torch.no_grad()\n def iteration_completed(self, engine: Engine) -> None:\n output = self._output_transform(engine.state.output)\n if isinstance(output, Mapping):\n if self._required_output_keys is None:\n raise TypeError(\n \"Transformed engine output for {} metric should be a tuple/list, but given {}\".format(\n self.__class__.__name__, type(output)\n )\n )\n if not all([k in output for k in self._required_output_keys]):\n raise ValueError(\n \"When transformed engine's output is a mapping, \"\n \"it should contain {} keys, but given {}\".format(self._required_output_keys, list(output.keys()))\n )\n output = tuple(output[k] for k in self._required_output_keys)\n self.update(output)\n\n def completed(self, engine: Engine, name: str) -> None:\n result = self.compute()\n if torch.is_tensor(result) and len(result.shape) == 0:\n result = result.item()\n engine.state.metrics[name] = result\n\n def attach(self, engine: Engine, name: str) -> None:\n \"\"\"\n Attaches current metric to provided engine. On the end of engine's run,\n `engine.state.metrics` dictionary will contain computed metric's value under provided name.\n\n Args:\n engine (Engine): the engine to which the metric must be attached\n name (str): the name of the metric to attach\n\n Example:\n\n .. 
code-block:: python\n\n metric = ...\n metric.attach(engine, \"mymetric\")\n\n assert \"mymetric\" in engine.run(data).metrics\n\n assert metric.is_attached(engine)\n \"\"\"\n engine.add_event_handler(Events.EPOCH_COMPLETED, self.completed, name)\n if not engine.has_event_handler(self.started, Events.EPOCH_STARTED):\n engine.add_event_handler(Events.EPOCH_STARTED, self.started)\n if not engine.has_event_handler(self.iteration_completed, Events.ITERATION_COMPLETED):\n engine.add_event_handler(Events.ITERATION_COMPLETED, self.iteration_completed)\n\n def detach(self, engine: Engine) -> None:\n \"\"\"\n Detaches current metric from the engine and no metric's computation is done during the run.\n This method in conjunction with :meth:`~ignite.metrics.Metric.attach` can be useful if several\n metrics need to be computed with different periods. For example, one metric is computed every training epoch\n and another metric (e.g. more expensive one) is done every n-th training epoch.\n\n Args:\n engine (Engine): the engine from which the metric must be detached\n\n Example:\n\n .. code-block:: python\n\n metric = ...\n engine = ...\n metric.detach(engine)\n\n assert \"mymetric\" not in engine.run(data).metrics\n\n assert not metric.is_attached(engine)\n \"\"\"\n if engine.has_event_handler(self.completed, Events.EPOCH_COMPLETED):\n engine.remove_event_handler(self.completed, Events.EPOCH_COMPLETED)\n if engine.has_event_handler(self.started, Events.EPOCH_STARTED):\n engine.remove_event_handler(self.started, Events.EPOCH_STARTED)\n if engine.has_event_handler(self.iteration_completed, Events.ITERATION_COMPLETED):\n engine.remove_event_handler(self.iteration_completed, Events.ITERATION_COMPLETED)\n\n def is_attached(self, engine: Engine) -> bool:\n \"\"\"\n Checks if current metric is attached to provided engine. 
If attached, metric's computed\n value is written to `engine.state.metrics` dictionary.\n\n Args:\n engine (Engine): the engine checked from which the metric should be attached\n \"\"\"\n return engine.has_event_handler(self.completed, Events.EPOCH_COMPLETED)\n\n def __add__(self, other):\n from ignite.metrics import MetricsLambda\n\n return MetricsLambda(lambda x, y: x + y, self, other)\n\n def __radd__(self, other):\n from ignite.metrics import MetricsLambda\n\n return MetricsLambda(lambda x, y: x + y, other, self)\n\n def __sub__(self, other):\n from ignite.metrics import MetricsLambda\n\n return MetricsLambda(lambda x, y: x - y, self, other)\n\n def __rsub__(self, other):\n from ignite.metrics import MetricsLambda\n\n return MetricsLambda(lambda x, y: x - y, other, self)\n\n def __mul__(self, other):\n from ignite.metrics import MetricsLambda\n\n return MetricsLambda(lambda x, y: x * y, self, other)\n\n def __rmul__(self, other):\n from ignite.metrics import MetricsLambda\n\n return MetricsLambda(lambda x, y: x * y, other, self)\n\n def __pow__(self, other):\n from ignite.metrics import MetricsLambda\n\n return MetricsLambda(lambda x, y: x ** y, self, other)\n\n def __rpow__(self, other):\n from ignite.metrics import MetricsLambda\n\n return MetricsLambda(lambda x, y: x ** y, other, self)\n\n def __mod__(self, other):\n from ignite.metrics import MetricsLambda\n\n return MetricsLambda(lambda x, y: x % y, self, other)\n\n def __div__(self, other):\n from ignite.metrics import MetricsLambda\n\n return MetricsLambda(lambda x, y: x.__div__(y), self, other)\n\n def __rdiv__(self, other):\n from ignite.metrics import MetricsLambda\n\n return MetricsLambda(lambda x, y: x.__div__(y), other, self)\n\n def __truediv__(self, other):\n from ignite.metrics import MetricsLambda\n\n return MetricsLambda(lambda x, y: x.__truediv__(y), self, other)\n\n def __rtruediv__(self, other):\n from ignite.metrics import MetricsLambda\n\n return MetricsLambda(lambda x, y: x.__truediv__(y), other, self)\n\n def __floordiv__(self, other):\n from ignite.metrics import MetricsLambda\n\n return MetricsLambda(lambda x, y: x // y, self, other)\n\n def __getattr__(self, attr: str) -> Callable:\n from ignite.metrics import MetricsLambda\n\n def fn(x, *args, **kwargs):\n return getattr(x, attr)(*args, **kwargs)\n\n def wrapper(*args, **kwargs):\n return MetricsLambda(fn, self, *args, **kwargs)\n\n return wrapper\n\n def __getitem__(self, index: Any):\n from ignite.metrics import MetricsLambda\n\n return MetricsLambda(lambda x: x[index], self)\n\n\ndef sync_all_reduce(*attrs) -> Callable:\n def wrapper(func: Callable) -> Callable:\n @wraps(func)\n def another_wrapper(self: Metric, *args, **kwargs) -> Callable:\n if not isinstance(self, Metric):\n raise RuntimeError(\n \"Decorator sync_all_reduce should be used on \" \"ignite.metric.Metric class methods only\"\n )\n\n if len(attrs) > 0 and not self._is_reduced:\n for attr in attrs:\n t = getattr(self, attr, None)\n if t is not None:\n t = self._sync_all_reduce(t)\n self._is_reduced = True\n setattr(self, attr, t)\n\n return func(self, *args, **kwargs)\n\n return another_wrapper\n\n wrapper._decorated = True\n return wrapper\n\n\ndef reinit__is_reduced(func: Callable) -> Callable:\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n func(self, *args, **kwargs)\n self._is_reduced = False\n\n wrapper._decorated = True\n return wrapper\n", "path": "ignite/metrics/metric.py"}]} | 4,015 | 128 |
gh_patches_debug_37541 | rasdani/github-patches | git_diff | aws__aws-cli-1039 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
aws cloudwatch put-metric-data no longer working with statistic sets
One of our automated scripts stopped reporting data a few weeks ago - we've traced this to a newer version of the AWS CLI.
In fact, the documented example for how to publish statistic sets (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/publishingMetrics.html#publishingDataPoints1) fails with the same error that we are getting.
```
$ aws cloudwatch put-metric-data --metric-name PageViewCount --namespace "MyService" --statistic-value Sum=11,Minimum=2,Maximum=5,SampleCount=3 --timestamp 2014-02-14T12:00:00.000Z
Parameter validation failed:
Invalid type for parameter MetricData[0].StatisticValues.SampleCount, value: 3, type: <type 'unicode'>, valid types: <type 'float'>, <class 'decimal.Decimal'>, <type 'int'>, <type 'long'>
Invalid type for parameter MetricData[0].StatisticValues.Sum, value: 11, type: <type 'unicode'>, valid types: <type 'float'>, <class 'decimal.Decimal'>, <type 'int'>, <type 'long'>
Invalid type for parameter MetricData[0].StatisticValues.Minimum, value: 2, type: <type 'unicode'>, valid types: <type 'float'>, <class 'decimal.Decimal'>, <type 'int'>, <type 'long'>
Invalid type for parameter MetricData[0].StatisticValues.Maximum, value: 5, type: <type 'unicode'>, valid types: <type 'float'>, <class 'decimal.Decimal'>, <type 'int'>, <type 'long'>
```
</issue>
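An editorial note on the failure (hedged): the error above comes from parameter type validation, which wants `StatisticValues` members as numbers (float, Decimal, int), while the CLI's `key=value` parser hands them over as strings. A standalone sketch of the conversion that satisfies the validator, mirroring what the `--value` option already does with `decimal.Decimal` in the code below:

```python
import decimal

raw = "Sum=11,Minimum=2,Maximum=5,SampleCount=3"
statistics = {}
for pair in raw.split(","):
    key, value = pair.split("=")
    statistics[key] = decimal.Decimal(value)  # numeric, so it passes the type check

print(statistics)
# {'Sum': Decimal('11'), 'Minimum': Decimal('2'), 'Maximum': Decimal('5'), 'SampleCount': Decimal('3')}
```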
<code>
[start of awscli/customizations/putmetricdata.py]
1 # Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13 """
14 This customization adds the following scalar parameters to the
15 cloudwatch put-metric-data operation:
16
17 * --metric-name
18 * --dimensions
19 * --timestamp
20 * --value
21 * --statistic-values
22 * --unit
23
24 """
25 import decimal
26
27 from awscli.arguments import CustomArgument
28 from awscli.utils import split_on_commas
29 from awscli.customizations.utils import validate_mutually_exclusive_handler
30
31
32 def register_put_metric_data(event_handler):
33 event_handler.register('building-argument-table.cloudwatch.put-metric-data',
34 _promote_args)
35 event_handler.register(
36 'operation-args-parsed.cloudwatch.put-metric-data',
37 validate_mutually_exclusive_handler(
38 ['metric_data'], ['metric_name', 'timestamp', 'unit', 'value',
39 'dimensions', 'statistic_values']))
40
41
42 def _promote_args(argument_table, operation, **kwargs):
43 # We're providing top level params for metric-data. This means
44 # that metric-data is now longer a required arg. We do need
45 # to check that either metric-data or the complex args we've added
46 # have been provided.
47 argument_table['metric-data'].required = False
48
49 argument_table['metric-name'] = PutMetricArgument(
50 'metric-name', help_text='The name of the metric.')
51 argument_table['timestamp'] = PutMetricArgument(
52 'timestamp', help_text='The time stamp used for the metric. '
53 'If not specified, the default value is '
54 'set to the time the metric data was '
55 'received.')
56 argument_table['unit'] = PutMetricArgument(
57 'unit', help_text='The unit of metric.')
58 argument_table['value'] = PutMetricArgument(
59 'value', help_text='The value for the metric. Although the --value '
60 'parameter accepts numbers of type Double, '
61 'Amazon CloudWatch truncates values with very '
62 'large exponents. Values with base-10 exponents '
63 'greater than 126 (1 x 10^126) are truncated. '
64 'Likewise, values with base-10 exponents less '
65 'than -130 (1 x 10^-130) are also truncated.')
66
67 argument_table['dimensions'] = PutMetricArgument(
68 'dimensions', help_text=(
69 'The --dimension argument further expands '
70 'on the identity of a metric using a Name=Value'
71 'pair, separated by commas, for example: '
72 '<code>--dimensions User=SomeUser,Stack=Test</code>'))
73 argument_table['statistic-values'] = PutMetricArgument(
74 'statistic-values', help_text='A set of statistical values describing '
75 'the metric.')
76
77
78 def insert_first_element(name):
79 def _wrap_add_to_params(func):
80 def _add_to_params(self, parameters, value):
81 if value is None:
82 return
83 if name not in parameters:
84 # We're taking a shortcut here and assuming that the first
85 # element is a struct type, hence the default value of
86 # a dict. If this was going to be more general we'd need
87 # to have this paramterized, i.e. you pass in some sort of
88 # factory function that creates the initial starting value.
89 parameters[name] = [{}]
90 first_element = parameters[name][0]
91 return func(self, first_element, value)
92 return _add_to_params
93 return _wrap_add_to_params
94
95
96 class PutMetricArgument(CustomArgument):
97 def add_to_params(self, parameters, value):
98 method_name = '_add_param_%s' % self.name.replace('-', '_')
99 return getattr(self, method_name)(parameters, value)
100
101 @insert_first_element('metric_data')
102 def _add_param_metric_name(self, first_element, value):
103 first_element['MetricName'] = value
104
105 @insert_first_element('metric_data')
106 def _add_param_unit(self, first_element, value):
107 first_element['Unit'] = value
108
109 @insert_first_element('metric_data')
110 def _add_param_timestamp(self, first_element, value):
111 first_element['Timestamp'] = value
112
113 @insert_first_element('metric_data')
114 def _add_param_value(self, first_element, value):
115 # Use a Decimal to avoid loss in precision.
116 first_element['Value'] = decimal.Decimal(value)
117
118 @insert_first_element('metric_data')
119 def _add_param_dimensions(self, first_element, value):
120 # Dimensions needs a little more processing. We support
121 # the key=value,key2=value syntax so we need to parse
122 # that.
123 dimensions = []
124 for pair in split_on_commas(value):
125 key, value = pair.split('=')
126 dimensions.append({'Name': key, 'Value': value})
127 first_element['Dimensions'] = dimensions
128
129 @insert_first_element('metric_data')
130 def _add_param_statistic_values(self, first_element, value):
131 # StatisticValues is a struct type so we are parsing
132 # a csv keyval list into a dict.
133 statistics = {}
134 for pair in split_on_commas(value):
135 key, value = pair.split('=')
136 statistics[key] = value
137 first_element['StatisticValues'] = statistics
138
[end of awscli/customizations/putmetricdata.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/awscli/customizations/putmetricdata.py b/awscli/customizations/putmetricdata.py
--- a/awscli/customizations/putmetricdata.py
+++ b/awscli/customizations/putmetricdata.py
@@ -98,24 +98,24 @@
method_name = '_add_param_%s' % self.name.replace('-', '_')
return getattr(self, method_name)(parameters, value)
- @insert_first_element('metric_data')
+ @insert_first_element('MetricData')
def _add_param_metric_name(self, first_element, value):
first_element['MetricName'] = value
- @insert_first_element('metric_data')
+ @insert_first_element('MetricData')
def _add_param_unit(self, first_element, value):
first_element['Unit'] = value
- @insert_first_element('metric_data')
+ @insert_first_element('MetricData')
def _add_param_timestamp(self, first_element, value):
first_element['Timestamp'] = value
- @insert_first_element('metric_data')
+ @insert_first_element('MetricData')
def _add_param_value(self, first_element, value):
# Use a Decimal to avoid loss in precision.
first_element['Value'] = decimal.Decimal(value)
- @insert_first_element('metric_data')
+ @insert_first_element('MetricData')
def _add_param_dimensions(self, first_element, value):
# Dimensions needs a little more processing. We support
# the key=value,key2=value syntax so we need to parse
@@ -126,12 +126,15 @@
dimensions.append({'Name': key, 'Value': value})
first_element['Dimensions'] = dimensions
- @insert_first_element('metric_data')
+ @insert_first_element('MetricData')
def _add_param_statistic_values(self, first_element, value):
# StatisticValues is a struct type so we are parsing
# a csv keyval list into a dict.
statistics = {}
for pair in split_on_commas(value):
key, value = pair.split('=')
- statistics[key] = value
+ # There are four supported values: Maximum, Minimum, SampleCount,
+ # and Sum. All of them are documented as a type double so we can
+ # convert these to a decimal value to preserve precision.
+ statistics[key] = decimal.Decimal(value)
first_element['StatisticValues'] = statistics
| {"golden_diff": "diff --git a/awscli/customizations/putmetricdata.py b/awscli/customizations/putmetricdata.py\n--- a/awscli/customizations/putmetricdata.py\n+++ b/awscli/customizations/putmetricdata.py\n@@ -98,24 +98,24 @@\n method_name = '_add_param_%s' % self.name.replace('-', '_')\n return getattr(self, method_name)(parameters, value)\n \n- @insert_first_element('metric_data')\n+ @insert_first_element('MetricData')\n def _add_param_metric_name(self, first_element, value):\n first_element['MetricName'] = value\n \n- @insert_first_element('metric_data')\n+ @insert_first_element('MetricData')\n def _add_param_unit(self, first_element, value):\n first_element['Unit'] = value\n \n- @insert_first_element('metric_data')\n+ @insert_first_element('MetricData')\n def _add_param_timestamp(self, first_element, value):\n first_element['Timestamp'] = value\n \n- @insert_first_element('metric_data')\n+ @insert_first_element('MetricData')\n def _add_param_value(self, first_element, value):\n # Use a Decimal to avoid loss in precision.\n first_element['Value'] = decimal.Decimal(value)\n \n- @insert_first_element('metric_data')\n+ @insert_first_element('MetricData')\n def _add_param_dimensions(self, first_element, value):\n # Dimensions needs a little more processing. We support\n # the key=value,key2=value syntax so we need to parse\n@@ -126,12 +126,15 @@\n dimensions.append({'Name': key, 'Value': value})\n first_element['Dimensions'] = dimensions\n \n- @insert_first_element('metric_data')\n+ @insert_first_element('MetricData')\n def _add_param_statistic_values(self, first_element, value):\n # StatisticValues is a struct type so we are parsing\n # a csv keyval list into a dict.\n statistics = {}\n for pair in split_on_commas(value):\n key, value = pair.split('=')\n- statistics[key] = value\n+ # There are four supported values: Maximum, Minimum, SampleCount,\n+ # and Sum. 
All of them are documented as a type double so we can\n+ # convert these to a decimal value to preserve precision.\n+ statistics[key] = decimal.Decimal(value)\n first_element['StatisticValues'] = statistics\n", "issue": "aws cloudwatch put-metric-data no longer working with statistic sets\nOne of our automated scripts stopped reporting data a few weeks ago - we've traced this to a newer version of the AWS CLI.\n\nIn fact, the documented example for how to publish statistic sets (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/publishingMetrics.html#publishingDataPoints1) fails with the same error that we are getting.\n\n```\n$ aws cloudwatch put-metric-data --metric-name PageViewCount --namespace \"MyService\" --statistic-value Sum=11,Minimum=2,Maximum=5,SampleCount=3 --timestamp 2014-02-14T12:00:00.000Z\n\nParameter validation failed:\nInvalid type for parameter MetricData[0].StatisticValues.SampleCount, value: 3, type: <type 'unicode'>, valid types: <type 'float'>, <class 'decimal.Decimal'>, <type 'int'>, <type 'long'>\nInvalid type for parameter MetricData[0].StatisticValues.Sum, value: 11, type: <type 'unicode'>, valid types: <type 'float'>, <class 'decimal.Decimal'>, <type 'int'>, <type 'long'>\nInvalid type for parameter MetricData[0].StatisticValues.Minimum, value: 2, type: <type 'unicode'>, valid types: <type 'float'>, <class 'decimal.Decimal'>, <type 'int'>, <type 'long'>\nInvalid type for parameter MetricData[0].StatisticValues.Maximum, value: 5, type: <type 'unicode'>, valid types: <type 'float'>, <class 'decimal.Decimal'>, <type 'int'>, <type 'long'>\n```\n\n", "before_files": [{"content": "# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\n\"\"\"\nThis customization adds the following scalar parameters to the\ncloudwatch put-metric-data operation:\n\n* --metric-name\n* --dimensions\n* --timestamp\n* --value\n* --statistic-values\n* --unit\n\n\"\"\"\nimport decimal\n\nfrom awscli.arguments import CustomArgument\nfrom awscli.utils import split_on_commas\nfrom awscli.customizations.utils import validate_mutually_exclusive_handler\n\n\ndef register_put_metric_data(event_handler):\n event_handler.register('building-argument-table.cloudwatch.put-metric-data',\n _promote_args)\n event_handler.register(\n 'operation-args-parsed.cloudwatch.put-metric-data',\n validate_mutually_exclusive_handler(\n ['metric_data'], ['metric_name', 'timestamp', 'unit', 'value',\n 'dimensions', 'statistic_values']))\n\n\ndef _promote_args(argument_table, operation, **kwargs):\n # We're providing top level params for metric-data. This means\n # that metric-data is now longer a required arg. We do need\n # to check that either metric-data or the complex args we've added\n # have been provided.\n argument_table['metric-data'].required = False\n\n argument_table['metric-name'] = PutMetricArgument(\n 'metric-name', help_text='The name of the metric.')\n argument_table['timestamp'] = PutMetricArgument(\n 'timestamp', help_text='The time stamp used for the metric. 
'\n 'If not specified, the default value is '\n 'set to the time the metric data was '\n 'received.')\n argument_table['unit'] = PutMetricArgument(\n 'unit', help_text='The unit of metric.')\n argument_table['value'] = PutMetricArgument(\n 'value', help_text='The value for the metric. Although the --value '\n 'parameter accepts numbers of type Double, '\n 'Amazon CloudWatch truncates values with very '\n 'large exponents. Values with base-10 exponents '\n 'greater than 126 (1 x 10^126) are truncated. '\n 'Likewise, values with base-10 exponents less '\n 'than -130 (1 x 10^-130) are also truncated.')\n\n argument_table['dimensions'] = PutMetricArgument(\n 'dimensions', help_text=(\n 'The --dimension argument further expands '\n 'on the identity of a metric using a Name=Value'\n 'pair, separated by commas, for example: '\n '<code>--dimensions User=SomeUser,Stack=Test</code>'))\n argument_table['statistic-values'] = PutMetricArgument(\n 'statistic-values', help_text='A set of statistical values describing '\n 'the metric.')\n\n\ndef insert_first_element(name):\n def _wrap_add_to_params(func):\n def _add_to_params(self, parameters, value):\n if value is None:\n return\n if name not in parameters:\n # We're taking a shortcut here and assuming that the first\n # element is a struct type, hence the default value of\n # a dict. If this was going to be more general we'd need\n # to have this paramterized, i.e. you pass in some sort of\n # factory function that creates the initial starting value.\n parameters[name] = [{}]\n first_element = parameters[name][0]\n return func(self, first_element, value)\n return _add_to_params\n return _wrap_add_to_params\n\n\nclass PutMetricArgument(CustomArgument):\n def add_to_params(self, parameters, value):\n method_name = '_add_param_%s' % self.name.replace('-', '_')\n return getattr(self, method_name)(parameters, value)\n\n @insert_first_element('metric_data')\n def _add_param_metric_name(self, first_element, value):\n first_element['MetricName'] = value\n\n @insert_first_element('metric_data')\n def _add_param_unit(self, first_element, value):\n first_element['Unit'] = value\n\n @insert_first_element('metric_data')\n def _add_param_timestamp(self, first_element, value):\n first_element['Timestamp'] = value\n\n @insert_first_element('metric_data')\n def _add_param_value(self, first_element, value):\n # Use a Decimal to avoid loss in precision.\n first_element['Value'] = decimal.Decimal(value)\n\n @insert_first_element('metric_data')\n def _add_param_dimensions(self, first_element, value):\n # Dimensions needs a little more processing. We support\n # the key=value,key2=value syntax so we need to parse\n # that.\n dimensions = []\n for pair in split_on_commas(value):\n key, value = pair.split('=')\n dimensions.append({'Name': key, 'Value': value})\n first_element['Dimensions'] = dimensions\n\n @insert_first_element('metric_data')\n def _add_param_statistic_values(self, first_element, value):\n # StatisticValues is a struct type so we are parsing\n # a csv keyval list into a dict.\n statistics = {}\n for pair in split_on_commas(value):\n key, value = pair.split('=')\n statistics[key] = value\n first_element['StatisticValues'] = statistics\n", "path": "awscli/customizations/putmetricdata.py"}]} | 2,499 | 535 |
gh_patches_debug_34183 | rasdani/github-patches | git_diff | sonic-net__sonic-mgmt-4352 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Investigate RDMA nightly run failures on 202012
**Description**
RDMA test runs on TD2 with 202012 are quite flaky. A different set of test failures is seen daily, and sometimes the run fails at pretest.
The 09/09 run skipped all tgen tests with the following reason:
SKIPPED [1] /azp/agent/_work/27/s/tests/common/helpers/assertions.py:13: Port is not mapped to the expected DUT
</issue>
<code>
[start of ansible/library/testbed_vm_info.py]
1 #!/usr/bin/env python
2
3 import re
4 import yaml
5 import os
6 import traceback
7 import subprocess
8 import ipaddr as ipaddress
9 from operator import itemgetter
10 from itertools import groupby
11 from collections import defaultdict
12 import re
13
14 from ansible.parsing.dataloader import DataLoader
15 from ansible.inventory.manager import InventoryManager
16
17 DOCUMENTATION = '''
18 module: testbed_vm_info.py
19 Ansible_version_added: 2.0.0.2
20 short_description: Gather all related VMs info
21 Description:
22 When deploy testbed topology with VM connected to SONiC, gather neighbor VMs info for generating SONiC minigraph file
23 options:
24 base_vm: base vm name defined in testbed.csv for the deployed topology; required: True
25 topo: topology name defined in testbed.csv for the deployed topology; required: True
26 vm_file: the virtual machine file path ; default: 'veos'
27
28 Ansible_facts:
29 'neighbor_eosvm_mgmt': all VM hosts management IPs
30 'topoall': topology information
31
32 '''
33
34 EXAMPLES = '''
35 - name: gather vm information
36 testbed_vm_info: base_vm='VM0100' topo='t1' vm_file='veos'
37 '''
38
39 ### Here are the assumption/expectation of files to gather VM informations, if the file location or name changes, please modify it here
40 TOPO_PATH = 'vars/'
41 VM_INV_FILE = 'veos'
42
43
44 class TestbedVMFacts():
45 """
46 Retrieve testbed VMs management information that for a specified toplogy defined in testbed.csv
47
48 """
49
50 def __init__(self, toponame, vmbase, vmfile):
51 CLET_SUFFIX = "-clet"
52 toponame = re.sub(CLET_SUFFIX + "$", "", toponame)
53 self.topofile = TOPO_PATH+'topo_'+toponame +'.yml'
54 self.start_index = int(re.findall('VM(\d+)', vmbase)[0])
55 self.vmhosts = {}
56 self.vmfile = vmfile
57 self.inv_mgr = InventoryManager(loader=DataLoader(), sources=self.vmfile)
58 return
59
60
61 def get_neighbor_eos(self):
62 eos = {}
63 with open(self.topofile) as f:
64 vm_topology = yaml.load(f)
65 self.topoall = vm_topology
66 for vm in vm_topology['topology']['VMs']:
67 vm_index = int(vm_topology['topology']['VMs'][vm]['vm_offset'])+self.start_index
68 eos[vm] = vm_index
69 return eos
70
71
72 def main():
73 module = AnsibleModule(
74 argument_spec=dict(
75 base_vm=dict(required=True, type='str'),
76 topo=dict(required=True, type='str'),
77 vm_file=dict(default=VM_INV_FILE, type='str')
78 ),
79 supports_check_mode=True
80 )
81 m_args = module.params
82 topo_type = m_args['topo']
83 if 'ptf' in topo_type:
84 module.exit_json(ansible_facts={'neighbor_eosvm_mgmt': {}})
85 try:
86 vmsall = TestbedVMFacts(m_args['topo'], m_args['base_vm'], m_args['vm_file'])
87 neighbor_eos = vmsall.get_neighbor_eos()
88 for eos in neighbor_eos:
89 vmname = 'VM'+format(neighbor_eos[eos], '04d')
90 if vmname in vmsall.inv_mgr.hosts:
91 vmsall.vmhosts[eos] = vmsall.inv_mgr.get_host(vmname).get_vars()['ansible_host']
92 else:
93 err_msg = "cannot find the vm " + vmname + " in VM inventory file, please make sure you have enough VMs for the topology you are using"
94 module.fail_json(msg=err_msg)
95 module.exit_json(ansible_facts={'neighbor_eosvm_mgmt':vmsall.vmhosts, 'topoall': vmsall.topoall})
96 except (IOError, OSError):
97 module.fail_json(msg="Can not find file "+vmsall.topofile+" or "+m_args['vm_file']+" or "+VM_INV_FILE)
98 except Exception as e:
99 module.fail_json(msg=traceback.format_exc())
100
101 from ansible.module_utils.basic import *
102 if __name__ == "__main__":
103 main()
104
105
[end of ansible/library/testbed_vm_info.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ansible/library/testbed_vm_info.py b/ansible/library/testbed_vm_info.py
--- a/ansible/library/testbed_vm_info.py
+++ b/ansible/library/testbed_vm_info.py
@@ -39,6 +39,7 @@
### Here are the assumption/expectation of files to gather VM informations, if the file location or name changes, please modify it here
TOPO_PATH = 'vars/'
VM_INV_FILE = 'veos'
+TGEN_MGMT_NETWORK = '10.65.32.0/24'
class TestbedVMFacts():
@@ -51,7 +52,10 @@
CLET_SUFFIX = "-clet"
toponame = re.sub(CLET_SUFFIX + "$", "", toponame)
self.topofile = TOPO_PATH+'topo_'+toponame +'.yml'
- self.start_index = int(re.findall('VM(\d+)', vmbase)[0])
+ if vmbase != '':
+ self.start_index = int(re.findall('VM(\d+)', vmbase)[0])
+ else:
+ self.start_index = 0
self.vmhosts = {}
self.vmfile = vmfile
self.inv_mgr = InventoryManager(loader=DataLoader(), sources=self.vmfile)
@@ -85,9 +89,12 @@
try:
vmsall = TestbedVMFacts(m_args['topo'], m_args['base_vm'], m_args['vm_file'])
neighbor_eos = vmsall.get_neighbor_eos()
- for eos in neighbor_eos:
+ tgen_mgmt_ips = list(ipaddress.IPNetwork(unicode(TGEN_MGMT_NETWORK)))
+ for index, eos in enumerate(neighbor_eos):
vmname = 'VM'+format(neighbor_eos[eos], '04d')
- if vmname in vmsall.inv_mgr.hosts:
+ if 'tgen' in topo_type:
+ vmsall.vmhosts[eos] = str(tgen_mgmt_ips[index])
+ elif vmname in vmsall.inv_mgr.hosts:
vmsall.vmhosts[eos] = vmsall.inv_mgr.get_host(vmname).get_vars()['ansible_host']
else:
err_msg = "cannot find the vm " + vmname + " in VM inventory file, please make sure you have enough VMs for the topology you are using"
| {"golden_diff": "diff --git a/ansible/library/testbed_vm_info.py b/ansible/library/testbed_vm_info.py\n--- a/ansible/library/testbed_vm_info.py\n+++ b/ansible/library/testbed_vm_info.py\n@@ -39,6 +39,7 @@\n ### Here are the assumption/expectation of files to gather VM informations, if the file location or name changes, please modify it here\n TOPO_PATH = 'vars/'\n VM_INV_FILE = 'veos'\n+TGEN_MGMT_NETWORK = '10.65.32.0/24'\n \n \n class TestbedVMFacts():\n@@ -51,7 +52,10 @@\n CLET_SUFFIX = \"-clet\"\n toponame = re.sub(CLET_SUFFIX + \"$\", \"\", toponame)\n self.topofile = TOPO_PATH+'topo_'+toponame +'.yml'\n- self.start_index = int(re.findall('VM(\\d+)', vmbase)[0])\n+ if vmbase != '':\n+ self.start_index = int(re.findall('VM(\\d+)', vmbase)[0])\n+ else:\n+ self.start_index = 0\n self.vmhosts = {}\n self.vmfile = vmfile\n self.inv_mgr = InventoryManager(loader=DataLoader(), sources=self.vmfile)\n@@ -85,9 +89,12 @@\n try:\n vmsall = TestbedVMFacts(m_args['topo'], m_args['base_vm'], m_args['vm_file'])\n neighbor_eos = vmsall.get_neighbor_eos()\n- for eos in neighbor_eos:\n+ tgen_mgmt_ips = list(ipaddress.IPNetwork(unicode(TGEN_MGMT_NETWORK)))\n+ for index, eos in enumerate(neighbor_eos):\n vmname = 'VM'+format(neighbor_eos[eos], '04d')\n- if vmname in vmsall.inv_mgr.hosts:\n+ if 'tgen' in topo_type:\n+ vmsall.vmhosts[eos] = str(tgen_mgmt_ips[index])\n+ elif vmname in vmsall.inv_mgr.hosts:\n vmsall.vmhosts[eos] = vmsall.inv_mgr.get_host(vmname).get_vars()['ansible_host']\n else:\n err_msg = \"cannot find the vm \" + vmname + \" in VM inventory file, please make sure you have enough VMs for the topology you are using\"\n", "issue": "Investigate RDMA nightly run failures on 202012\n<!--\r\nIf you are reporting a new issue, make sure that we do not have any duplicates\r\nalready open. You can ensure this by searching the issue list for this\r\nrepository. If there is a duplicate, please close your issue and add a comment\r\nto the existing issue instead.\r\n\r\nIf you suspect your issue is a bug, please edit your issue description to\r\ninclude the BUG REPORT INFORMATION shown below. If you fail to provide this\r\ninformation within 7 days, we cannot debug your issue and will close it. We\r\nwill, however, reopen it if you later provide the information.\r\n\r\nFor more information about reporting issues, see\r\nhttps://github.com/Azure/SONiC/wiki#report-issues\r\n\r\n---------------------------------------------------\r\nGENERAL SUPPORT INFORMATION\r\n---------------------------------------------------\r\n\r\nThe GitHub issue tracker is for bug reports and feature requests.\r\nGeneral support can be found at the following locations:\r\n\r\n- SONiC Support Forums - https://groups.google.com/forum/#!forum/sonicproject\r\n\r\n---------------------------------------------------\r\nBUG REPORT INFORMATION\r\n---------------------------------------------------\r\nUse the commands below to provide key information from your environment:\r\nYou do NOT have to include this information if this is a FEATURE REQUEST\r\n-->\r\n\r\n**Description**\r\nRDMA test runs on TD2 with 202012 are quite flaky. 
Different set of test failures are seen daily and sometimes test fails at pretest\r\n09/09 run skipped all tgen tests with the following reason\r\nSKIPPED [1] /azp/agent/_work/27/s/tests/common/helpers/assertions.py:13: Port is not mapped to the expected DUT\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport re\nimport yaml\nimport os\nimport traceback\nimport subprocess\nimport ipaddr as ipaddress\nfrom operator import itemgetter\nfrom itertools import groupby\nfrom collections import defaultdict\nimport re\n\nfrom ansible.parsing.dataloader import DataLoader\nfrom ansible.inventory.manager import InventoryManager\n\nDOCUMENTATION = '''\nmodule: testbed_vm_info.py\nAnsible_version_added: 2.0.0.2\nshort_description: Gather all related VMs info\nDescription:\n When deploy testbed topology with VM connected to SONiC, gather neighbor VMs info for generating SONiC minigraph file\n options:\n base_vm: base vm name defined in testbed.csv for the deployed topology; required: True\n topo: topology name defined in testbed.csv for the deployed topology; required: True\n vm_file: the virtual machine file path ; default: 'veos'\n\nAnsible_facts:\n 'neighbor_eosvm_mgmt': all VM hosts management IPs\n 'topoall': topology information\n\n'''\n\nEXAMPLES = '''\n - name: gather vm information\n testbed_vm_info: base_vm='VM0100' topo='t1' vm_file='veos'\n'''\n\n### Here are the assumption/expectation of files to gather VM informations, if the file location or name changes, please modify it here\nTOPO_PATH = 'vars/'\nVM_INV_FILE = 'veos'\n\n\nclass TestbedVMFacts():\n \"\"\"\n Retrieve testbed VMs management information that for a specified toplogy defined in testbed.csv\n\n \"\"\"\n\n def __init__(self, toponame, vmbase, vmfile):\n CLET_SUFFIX = \"-clet\"\n toponame = re.sub(CLET_SUFFIX + \"$\", \"\", toponame)\n self.topofile = TOPO_PATH+'topo_'+toponame +'.yml'\n self.start_index = int(re.findall('VM(\\d+)', vmbase)[0])\n self.vmhosts = {}\n self.vmfile = vmfile\n self.inv_mgr = InventoryManager(loader=DataLoader(), sources=self.vmfile)\n return\n\n\n def get_neighbor_eos(self):\n eos = {}\n with open(self.topofile) as f:\n vm_topology = yaml.load(f)\n self.topoall = vm_topology\n for vm in vm_topology['topology']['VMs']:\n vm_index = int(vm_topology['topology']['VMs'][vm]['vm_offset'])+self.start_index\n eos[vm] = vm_index\n return eos\n\n\ndef main():\n module = AnsibleModule(\n argument_spec=dict(\n base_vm=dict(required=True, type='str'),\n topo=dict(required=True, type='str'),\n vm_file=dict(default=VM_INV_FILE, type='str')\n ),\n supports_check_mode=True\n )\n m_args = module.params\n topo_type = m_args['topo']\n if 'ptf' in topo_type:\n module.exit_json(ansible_facts={'neighbor_eosvm_mgmt': {}})\n try:\n vmsall = TestbedVMFacts(m_args['topo'], m_args['base_vm'], m_args['vm_file'])\n neighbor_eos = vmsall.get_neighbor_eos()\n for eos in neighbor_eos:\n vmname = 'VM'+format(neighbor_eos[eos], '04d')\n if vmname in vmsall.inv_mgr.hosts:\n vmsall.vmhosts[eos] = vmsall.inv_mgr.get_host(vmname).get_vars()['ansible_host']\n else:\n err_msg = \"cannot find the vm \" + vmname + \" in VM inventory file, please make sure you have enough VMs for the topology you are using\"\n module.fail_json(msg=err_msg)\n module.exit_json(ansible_facts={'neighbor_eosvm_mgmt':vmsall.vmhosts, 'topoall': vmsall.topoall})\n except (IOError, OSError):\n module.fail_json(msg=\"Can not find file \"+vmsall.topofile+\" or \"+m_args['vm_file']+\" or \"+VM_INV_FILE)\n except Exception as e:\n 
module.fail_json(msg=traceback.format_exc())\n\nfrom ansible.module_utils.basic import *\nif __name__ == \"__main__\":\n main()\n\n", "path": "ansible/library/testbed_vm_info.py"}]} | 2,026 | 523 |
gh_patches_debug_24704 | rasdani/github-patches | git_diff | AlexsLemonade__refinebio-1839 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Quantpendia failed to upload to S3
### Context
We kicked off quantpendia jobs for all organisms but they weren't succeeding because they couldn't upload to S3.
### Problem or idea
This is probably just because the worker instances don't have access to the compendia S3 bucket. The smasher probably has those permissions, but it looks like the workers don't.
### Solution or next step
Give worker instances permissions to push to the compendia S3 bucket.
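As a quick way to test that diagnosis, a throwaway probe like the sketch below could be run from a worker instance to see whether its instance profile can write to the bucket. This is only an illustration, not refine.bio code; the bucket name and object key are placeholders.

```python
# Hypothetical permission probe, run from a worker instance; not part of refine.bio.
import boto3
from botocore.exceptions import ClientError

COMPENDIA_BUCKET = "compendia-bucket-placeholder"  # substitute the real bucket name


def worker_can_push(bucket=COMPENDIA_BUCKET):
    s3 = boto3.client("s3")
    try:
        # Write and immediately delete a tiny marker object.
        s3.put_object(Bucket=bucket, Key="permission-check.txt", Body=b"ok")
        s3.delete_object(Bucket=bucket, Key="permission-check.txt")
        return True
    except ClientError as err:
        print("Upload check failed:", err.response["Error"]["Code"])
        return False


if __name__ == "__main__":
    print(worker_can_push())
```

If the probe fails with `AccessDenied`, the instance role likely just needs an IAM statement allowing `s3:PutObject` on the compendia bucket.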
</issue>
<code>
[start of workers/data_refinery_workers/processors/create_quantpendia.py]
1 import os
2 import logging
3 import shutil
4 import time
5 from django.utils import timezone
6 from typing import Dict, List, Tuple
7 import psutil
8
9 from data_refinery_common.job_lookup import PipelineEnum
10 from data_refinery_common.logging import get_and_configure_logger
11 from data_refinery_common.models import (ComputationalResult,
12 ComputedFile,
13 Organism,
14 Pipeline,
15 Sample)
16 from data_refinery_common.utils import get_env_variable
17 from data_refinery_workers.processors import smashing_utils, utils
18
19 S3_BUCKET_NAME = get_env_variable("S3_BUCKET_NAME", "data-refinery")
20 SMASHING_DIR = "/home/user/data_store/smashed/"
21
22 logger = get_and_configure_logger(__name__)
23 logger.setLevel(logging.getLevelName('DEBUG'))
24
25 def create_quantpendia(job_id: int) -> None:
26 pipeline = Pipeline(name=PipelineEnum.CREATE_QUANTPENDIA.value)
27 job_context = utils.run_pipeline({"job_id": job_id, "pipeline": pipeline},
28 [utils.start_job,
29 make_dirs,
30 download_files,
31 create_result_objects,
32 remove_job_dir,
33 utils.end_job])
34 return job_context
35
36
37 def download_files(job_context: Dict) -> Dict:
38 job_context['time_start'] = timezone.now()
39
40 num_samples = 0
41 for key, samples in job_context['samples'].items():
42 outfile_dir = job_context['output_dir'] + key + '/'
43 os.makedirs(outfile_dir, exist_ok=True)
44
45 logger.debug("Downloading quant.sf files for quantpendia.",
46 accession_code=key,
47 job_id=job_context['job_id'],
48 **get_process_stats())
49
50 # download quant.sf files directly into the dataset folder
51 num_samples += smashing_utils.sync_quant_files(outfile_dir, samples)
52
53 job_context['num_samples'] = num_samples
54 job_context['time_end'] = timezone.now()
55 job_context['formatted_command'] = "create_quantpendia.py"
56
57 logger.debug("Finished downloading quant.sf files for quantpendia.",
58 job_id=job_context['job_id'],
59 total_downloaded_files=num_samples,
60 **get_process_stats())
61
62 return job_context
63
64
65 def create_result_objects(job_context: Dict) -> Dict:
66 """
67 Store and host the result as a ComputationalResult object.
68 """
69 result = ComputationalResult()
70 result.commands.append(" ".join(job_context['formatted_command']))
71 result.is_ccdl = True
72 result.is_public = True
73 result.time_start = job_context['time_start']
74 result.time_end = job_context['time_end']
75 try:
76 processor_key = "CREATE_QUANTPENDIA"
77 result.processor = utils.find_processor(processor_key)
78 except Exception as e:
79 return utils.handle_processor_exception(job_context, processor_key, e)
80 result.save()
81
82 compendia_organism = _get_organisms(job_context['samples']).first()
83
84 # Create the resulting archive
85 smashing_utils.write_non_data_files(job_context)
86 final_zip_base = job_context['job_dir'] + compendia_organism.name + "_rnaseq_compendia"
87 shutil.copy("/home/user/README_QUANT.md", job_context["output_dir"] + "/README.md")
88
89 archive_path = shutil.make_archive(final_zip_base, 'zip', job_context["output_dir"])
90 compendia_version = _get_next_compendia_version(compendia_organism)
91
92 archive_computed_file = ComputedFile()
93
94 archive_computed_file.absolute_file_path = archive_path
95 archive_computed_file.filename = archive_path.split('/')[-1]
96 archive_computed_file.calculate_sha1()
97 archive_computed_file.calculate_size()
98 archive_computed_file.is_smashable = False
99 archive_computed_file.is_qn_target = False
100 archive_computed_file.result = result
101 archive_computed_file.is_compendia = True
102 archive_computed_file.quant_sf_only = True
103 archive_computed_file.compendia_organism = compendia_organism
104 archive_computed_file.compendia_version = compendia_version
105 archive_computed_file.save()
106
107 logger.info("Quantpendia created!",
108 archive_path=archive_path,
109 organism_name=compendia_organism.name)
110
111 # Upload the result to S3
112 timestamp = str(int(time.time()))
113 s3_key = compendia_organism.name + "_" + str(compendia_version) + "_" + timestamp + ".zip"
114 archive_computed_file.sync_to_s3(S3_BUCKET_NAME, s3_key)
115
116 job_context['result'] = result
117 job_context['computed_files'] = [archive_computed_file]
118 job_context['success'] = True
119
120 return job_context
121
122
123 def remove_job_dir(job_context: Dict):
124 """ remove the directory when the job is successful. At this point
125 the quantpendia was already zipped and uploaded. """
126 shutil.rmtree(job_context["job_dir"], ignore_errors=True)
127 return job_context
128
129 def make_dirs(job_context: Dict):
130 dataset_id = str(job_context["dataset"].pk)
131 job_context["job_dir"] = "/home/user/data_store/smashed/" + dataset_id + "/"
132 os.makedirs(job_context["job_dir"], exist_ok=True)
133 job_context["output_dir"] = job_context["job_dir"] + "output/"
134 os.makedirs(job_context["output_dir"], exist_ok=True)
135 return job_context
136
137 def get_process_stats():
138 BYTES_IN_GB = 1024 * 1024 * 1024
139 process = psutil.Process(os.getpid())
140 ram_in_GB = process.memory_info().rss / BYTES_IN_GB
141 return { 'total_cpu': psutil.cpu_percent(), 'process_ram': ram_in_GB }
142
143
144 def _get_organisms(aggregated_samples: Dict[str, Sample]) -> List[Organism]:
145 organisms = set()
146 for key, samples in aggregated_samples.items():
147 organism_ids = samples.values_list('organism__id', flat=True).distinct()
148 organisms.update(organism_ids)
149
150 return Organism.objects.filter(id__in=list(organisms))
151
152
153 def _get_next_compendia_version(organism: Organism) -> int:
154 last_compendia = ComputedFile.objects\
155 .filter(is_compendia=True, quant_sf_only=True, compendia_organism=organism)\
156 .order_by('-compendia_version').first()
157
158 if last_compendia:
159 return last_compendia.compendia_version + 1
160
161 # otherwise this is the first compendia that we are generating
162 return 1
163
[end of workers/data_refinery_workers/processors/create_quantpendia.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/workers/data_refinery_workers/processors/create_quantpendia.py b/workers/data_refinery_workers/processors/create_quantpendia.py
--- a/workers/data_refinery_workers/processors/create_quantpendia.py
+++ b/workers/data_refinery_workers/processors/create_quantpendia.py
@@ -3,6 +3,7 @@
import shutil
import time
from django.utils import timezone
+from django.conf import settings
from typing import Dict, List, Tuple
import psutil
@@ -114,7 +115,6 @@
archive_computed_file.sync_to_s3(S3_BUCKET_NAME, s3_key)
job_context['result'] = result
- job_context['computed_files'] = [archive_computed_file]
job_context['success'] = True
return job_context
@@ -123,7 +123,9 @@
def remove_job_dir(job_context: Dict):
""" remove the directory when the job is successful. At this point
the quantpendia was already zipped and uploaded. """
- shutil.rmtree(job_context["job_dir"], ignore_errors=True)
+ # don't remove the files when running locally or for tests
+ if settings.RUNNING_IN_CLOUD:
+ shutil.rmtree(job_context["job_dir"], ignore_errors=True)
return job_context
def make_dirs(job_context: Dict):
| {"golden_diff": "diff --git a/workers/data_refinery_workers/processors/create_quantpendia.py b/workers/data_refinery_workers/processors/create_quantpendia.py\n--- a/workers/data_refinery_workers/processors/create_quantpendia.py\n+++ b/workers/data_refinery_workers/processors/create_quantpendia.py\n@@ -3,6 +3,7 @@\n import shutil\n import time\n from django.utils import timezone\n+from django.conf import settings\n from typing import Dict, List, Tuple\n import psutil\n \n@@ -114,7 +115,6 @@\n archive_computed_file.sync_to_s3(S3_BUCKET_NAME, s3_key)\n \n job_context['result'] = result\n- job_context['computed_files'] = [archive_computed_file]\n job_context['success'] = True\n \n return job_context\n@@ -123,7 +123,9 @@\n def remove_job_dir(job_context: Dict):\n \"\"\" remove the directory when the job is successful. At this point\n the quantpendia was already zipped and uploaded. \"\"\"\n- shutil.rmtree(job_context[\"job_dir\"], ignore_errors=True)\n+ # don't remove the files when running locally or for tests\n+ if settings.RUNNING_IN_CLOUD:\n+ shutil.rmtree(job_context[\"job_dir\"], ignore_errors=True)\n return job_context\n \n def make_dirs(job_context: Dict):\n", "issue": "Quantpendia failed to upload to S3\n### Context\r\n\r\nWe kicked off quantpendia jobs for all organisms but they weren't succeeding because they couldn't upload to S3.\r\n\r\n### Problem or idea\r\n\r\nThis is probably just because the worker instances don't have access to the compendia S3 bucket. The smasher probably has those permissions, but it looks like the workers don't.\r\n\r\n### Solution or next step\r\n\r\nGive worker instances permissions to push to the compendia S3 bucket.\n", "before_files": [{"content": "import os\nimport logging\nimport shutil\nimport time\nfrom django.utils import timezone\nfrom typing import Dict, List, Tuple\nimport psutil\n\nfrom data_refinery_common.job_lookup import PipelineEnum\nfrom data_refinery_common.logging import get_and_configure_logger\nfrom data_refinery_common.models import (ComputationalResult,\n ComputedFile,\n Organism,\n Pipeline,\n Sample)\nfrom data_refinery_common.utils import get_env_variable\nfrom data_refinery_workers.processors import smashing_utils, utils\n\nS3_BUCKET_NAME = get_env_variable(\"S3_BUCKET_NAME\", \"data-refinery\")\nSMASHING_DIR = \"/home/user/data_store/smashed/\"\n\nlogger = get_and_configure_logger(__name__)\nlogger.setLevel(logging.getLevelName('DEBUG'))\n\ndef create_quantpendia(job_id: int) -> None:\n pipeline = Pipeline(name=PipelineEnum.CREATE_QUANTPENDIA.value)\n job_context = utils.run_pipeline({\"job_id\": job_id, \"pipeline\": pipeline},\n [utils.start_job,\n make_dirs,\n download_files,\n create_result_objects,\n remove_job_dir,\n utils.end_job])\n return job_context\n\n\ndef download_files(job_context: Dict) -> Dict:\n job_context['time_start'] = timezone.now()\n\n num_samples = 0\n for key, samples in job_context['samples'].items():\n outfile_dir = job_context['output_dir'] + key + '/'\n os.makedirs(outfile_dir, exist_ok=True)\n\n logger.debug(\"Downloading quant.sf files for quantpendia.\",\n accession_code=key,\n job_id=job_context['job_id'],\n **get_process_stats())\n\n # download quant.sf files directly into the dataset folder\n num_samples += smashing_utils.sync_quant_files(outfile_dir, samples)\n\n job_context['num_samples'] = num_samples\n job_context['time_end'] = timezone.now()\n job_context['formatted_command'] = \"create_quantpendia.py\"\n\n logger.debug(\"Finished downloading quant.sf files for quantpendia.\",\n 
job_id=job_context['job_id'],\n total_downloaded_files=num_samples,\n **get_process_stats())\n\n return job_context\n\n\ndef create_result_objects(job_context: Dict) -> Dict:\n \"\"\"\n Store and host the result as a ComputationalResult object.\n \"\"\"\n result = ComputationalResult()\n result.commands.append(\" \".join(job_context['formatted_command']))\n result.is_ccdl = True\n result.is_public = True\n result.time_start = job_context['time_start']\n result.time_end = job_context['time_end']\n try:\n processor_key = \"CREATE_QUANTPENDIA\"\n result.processor = utils.find_processor(processor_key)\n except Exception as e:\n return utils.handle_processor_exception(job_context, processor_key, e)\n result.save()\n\n compendia_organism = _get_organisms(job_context['samples']).first()\n\n # Create the resulting archive\n smashing_utils.write_non_data_files(job_context)\n final_zip_base = job_context['job_dir'] + compendia_organism.name + \"_rnaseq_compendia\"\n shutil.copy(\"/home/user/README_QUANT.md\", job_context[\"output_dir\"] + \"/README.md\")\n\n archive_path = shutil.make_archive(final_zip_base, 'zip', job_context[\"output_dir\"])\n compendia_version = _get_next_compendia_version(compendia_organism)\n\n archive_computed_file = ComputedFile()\n\n archive_computed_file.absolute_file_path = archive_path\n archive_computed_file.filename = archive_path.split('/')[-1]\n archive_computed_file.calculate_sha1()\n archive_computed_file.calculate_size()\n archive_computed_file.is_smashable = False\n archive_computed_file.is_qn_target = False\n archive_computed_file.result = result\n archive_computed_file.is_compendia = True\n archive_computed_file.quant_sf_only = True\n archive_computed_file.compendia_organism = compendia_organism\n archive_computed_file.compendia_version = compendia_version\n archive_computed_file.save()\n\n logger.info(\"Quantpendia created!\",\n archive_path=archive_path,\n organism_name=compendia_organism.name)\n\n # Upload the result to S3\n timestamp = str(int(time.time()))\n s3_key = compendia_organism.name + \"_\" + str(compendia_version) + \"_\" + timestamp + \".zip\"\n archive_computed_file.sync_to_s3(S3_BUCKET_NAME, s3_key)\n\n job_context['result'] = result\n job_context['computed_files'] = [archive_computed_file]\n job_context['success'] = True\n\n return job_context\n\n\ndef remove_job_dir(job_context: Dict):\n \"\"\" remove the directory when the job is successful. At this point\n the quantpendia was already zipped and uploaded. 
\"\"\"\n shutil.rmtree(job_context[\"job_dir\"], ignore_errors=True)\n return job_context\n\ndef make_dirs(job_context: Dict):\n dataset_id = str(job_context[\"dataset\"].pk)\n job_context[\"job_dir\"] = \"/home/user/data_store/smashed/\" + dataset_id + \"/\"\n os.makedirs(job_context[\"job_dir\"], exist_ok=True)\n job_context[\"output_dir\"] = job_context[\"job_dir\"] + \"output/\"\n os.makedirs(job_context[\"output_dir\"], exist_ok=True)\n return job_context\n\ndef get_process_stats():\n BYTES_IN_GB = 1024 * 1024 * 1024\n process = psutil.Process(os.getpid())\n ram_in_GB = process.memory_info().rss / BYTES_IN_GB\n return { 'total_cpu': psutil.cpu_percent(), 'process_ram': ram_in_GB }\n\n\ndef _get_organisms(aggregated_samples: Dict[str, Sample]) -> List[Organism]:\n organisms = set()\n for key, samples in aggregated_samples.items():\n organism_ids = samples.values_list('organism__id', flat=True).distinct()\n organisms.update(organism_ids)\n\n return Organism.objects.filter(id__in=list(organisms))\n\n\ndef _get_next_compendia_version(organism: Organism) -> int:\n last_compendia = ComputedFile.objects\\\n .filter(is_compendia=True, quant_sf_only=True, compendia_organism=organism)\\\n .order_by('-compendia_version').first()\n\n if last_compendia:\n return last_compendia.compendia_version + 1\n\n # otherwise this is the first compendia that we are generating\n return 1\n", "path": "workers/data_refinery_workers/processors/create_quantpendia.py"}]} | 2,444 | 294 |
gh_patches_debug_190 | rasdani/github-patches | git_diff | facebookresearch__fairseq-62 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
installation from source requires installing cffi
This is a very minor documentation issue
note: using python3/pip3, as there is a comment about requiring Python 3 for fairseq-py
not using anaconda; I have had issues with package consistency, so I avoid it
fairseq-py installed with
git clone https://github.com/facebookresearch/fairseq-py.git
sudo pip3 install -r requirements.txt
levinth@zt-gpu-lin-1:~/fairseq-py$ sudo python3 setup.py build
Traceback (most recent call last):
File "/usr/local/lib/python3.5/dist-packages/torch/utils/ffi/__init__.py", line 12, in <module>
import cffi
ImportError: No module named 'cffi'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "setup.py", line 13, in <module>
from torch.utils.ffi import create_extension
File "/usr/local/lib/python3.5/dist-packages/torch/utils/ffi/__init__.py", line 14, in <module>
raise ImportError("torch.utils.ffi requires the cffi package")
ImportError: torch.utils.ffi requires the cffi package
levinth@zt-gpu-lin-1:~/fairseq-py$ pip3 install cffi
and then the build worked
likely can be fixed by adding cffi to requirements.txt
</issue>
<code>
[start of fairseq/progress_bar.py]
1 # Copyright (c) 2017-present, Facebook, Inc.
2 # All rights reserved.
3 #
4 # This source code is licensed under the license found in the LICENSE file in
5 # the root directory of this source tree. An additional grant of patent rights
6 # can be found in the PATENTS file in the same directory.
7 #
8
9 """
10 Wrapper around various loggers and progress bars (e.g., tqdm).
11 """
12
13 from collections import OrderedDict
14 import json
15 from numbers import Number
16 import sys
17
18 from tqdm import tqdm
19
20 from fairseq.meters import AverageMeter
21
22
23 class progress_bar(object):
24 """Abstract class for progress bars."""
25 def __init__(self, iterable, epoch=None, prefix=None):
26 self.iterable = iterable
27 self.epoch = epoch
28 self.prefix = ''
29 if epoch is not None:
30 self.prefix += '| epoch {:03d}'.format(epoch)
31 if prefix is not None:
32 self.prefix += ' | {}'.format(prefix)
33
34 def __enter__(self):
35 return self
36
37 def __exit__(self, *exc):
38 return False
39
40 def __iter__(self):
41 raise NotImplementedError
42
43 def log(self, stats):
44 """Log intermediate stats according to log_interval."""
45 raise NotImplementedError
46
47 def print(self, stats):
48 """Print end-of-epoch stats."""
49 raise NotImplementedError
50
51 def _str_commas(self, stats):
52 return ', '.join(key + '=' + stats[key].strip()
53 for key in stats.keys())
54
55 def _str_pipes(self, stats):
56 return ' | '.join(key + ' ' + stats[key].strip()
57 for key in stats.keys())
58
59 def _format_stats(self, stats):
60 postfix = OrderedDict(stats)
61 # Preprocess stats according to datatype
62 for key in postfix.keys():
63 # Number: limit the length of the string
64 if isinstance(postfix[key], Number):
65 postfix[key] = '{:g}'.format(postfix[key])
66 # Meter: display both current and average value
67 elif isinstance(postfix[key], AverageMeter):
68 postfix[key] = '{:.2f} ({:.2f})'.format(
69 postfix[key].val, postfix[key].avg)
70 # Else for any other type, try to get the string conversion
71 elif not isinstance(postfix[key], str):
72 postfix[key] = str(postfix[key])
73 # Else if it's a string, don't need to preprocess anything
74 return postfix
75
76
77 class json_progress_bar(progress_bar):
78 """Log output in JSON format."""
79
80 def __init__(self, iterable, epoch=None, prefix=None, log_interval=1000):
81 super().__init__(iterable, epoch, prefix)
82 self.log_interval = log_interval
83 self.stats = None
84
85 def __iter__(self):
86 size = float(len(self.iterable))
87 for i, obj in enumerate(self.iterable):
88 yield obj
89 if self.stats is not None and i > 0 and \
90 self.log_interval is not None and i % self.log_interval == 0:
91 update = self.epoch + float(i / size) if self.epoch is not None else None
92 stats = self._format_stats(self.stats, epoch=self.epoch, update=update)
93 print('sweep_log: ' + json.dumps(stats), flush=True)
94
95 def log(self, stats):
96 """Log intermediate stats according to log_interval."""
97 self.stats = stats
98
99 def print(self, stats):
100 """Print end-of-epoch stats."""
101 stats = self._format_stats(self.stats, epoch=self.epoch)
102 print("sweep_log: " + json.dumps(stats), flush=True)
103
104 def _format_stats(self, stats, epoch=None, update=None):
105 postfix = OrderedDict()
106 if epoch is not None:
107 postfix['epoch'] = epoch
108 if update is not None:
109 postfix['update'] = update
110 # Preprocess stats according to datatype
111 for key in stats.keys():
112 # Meter: display both current and average value
113 if isinstance(stats[key], AverageMeter):
114 postfix[key] = stats[key].val
115 postfix[key + '_avg'] = stats[key].avg
116 else:
117 postfix[key] = stats[key]
118 return postfix
119
120
121 class noop_progress_bar(progress_bar):
122 """No logging."""
123
124 def __init__(self, iterable, epoch=None, prefix=None):
125 super().__init__(iterable, epoch, prefix)
126
127 def __iter__(self):
128 for obj in self.iterable:
129 yield obj
130
131 def log(self, stats):
132 """Log intermediate stats according to log_interval."""
133 pass
134
135 def print(self, stats):
136 """Print end-of-epoch stats."""
137 pass
138
139
140 class simple_progress_bar(progress_bar):
141 """A minimal logger for non-TTY environments."""
142
143 def __init__(self, iterable, epoch=None, prefix=None, log_interval=1000):
144 super().__init__(iterable, epoch, prefix)
145 self.log_interval = log_interval
146 self.stats = None
147
148 def __iter__(self):
149 size = len(self.iterable)
150 for i, obj in enumerate(self.iterable):
151 yield obj
152 if self.stats is not None and i > 0 and \
153 self.log_interval is not None and i % self.log_interval == 0:
154 postfix = self._str_commas(self.stats)
155 print('{}: {:5d} / {:d} {}'.format(self.prefix, i, size, postfix),
156 flush=True)
157
158 def log(self, stats):
159 """Log intermediate stats according to log_interval."""
160 self.stats = self._format_stats(stats)
161
162 def print(self, stats):
163 """Print end-of-epoch stats."""
164 postfix = self._str_pipes(self._format_stats(stats))
165 print('{} | {}'.format(self.prefix, postfix), flush=True)
166
167
168 class tqdm_progress_bar(progress_bar):
169 """Log to tqdm."""
170
171 def __init__(self, iterable, epoch=None, prefix=None):
172 super().__init__(iterable, epoch, prefix)
173 self.tqdm = tqdm(iterable, self.prefix, leave=False)
174
175 def __iter__(self):
176 return iter(self.tqdm)
177
178 def log(self, stats):
179 """Log intermediate stats according to log_interval."""
180 self.tqdm.set_postfix(self._format_stats(stats), refresh=False)
181
182 def print(self, stats):
183 """Print end-of-epoch stats."""
184 postfix = self._str_pipes(self._format_stats(stats))
185 self.tqdm.write('{} | {}'.format(self.tqdm.desc, postfix))
186
[end of fairseq/progress_bar.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/fairseq/progress_bar.py b/fairseq/progress_bar.py
--- a/fairseq/progress_bar.py
+++ b/fairseq/progress_bar.py
@@ -13,7 +13,6 @@
from collections import OrderedDict
import json
from numbers import Number
-import sys
from tqdm import tqdm
| {"golden_diff": "diff --git a/fairseq/progress_bar.py b/fairseq/progress_bar.py\n--- a/fairseq/progress_bar.py\n+++ b/fairseq/progress_bar.py\n@@ -13,7 +13,6 @@\n from collections import OrderedDict\n import json\n from numbers import Number\n-import sys\n \n from tqdm import tqdm\n", "issue": "installation from source requires installing cffi\nThis is a very minor documentation issue\r\nnote: using python3/pip3 as there is a comment about requiring python 3 for fairseq-py\r\nnot using anaconda..I have had issues with package consistency..so I avoid it\r\nfairseq-py installed with \r\ngit clone https://github.com/facebookresearch/fairseq-py.git\r\nsudo pip3 install -r requirements.txt \r\n\r\nlevinth@zt-gpu-lin-1:~/fairseq-py$ sudo python3 setup.py build\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.5/dist-packages/torch/utils/ffi/__init__.py\", line 12, in <module>\r\n import cffi\r\nImportError: No module named 'cffi'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"setup.py\", line 13, in <module>\r\n from torch.utils.ffi import create_extension\r\n File \"/usr/local/lib/python3.5/dist-packages/torch/utils/ffi/__init__.py\", line 14, in <module>\r\n raise ImportError(\"torch.utils.ffi requires the cffi package\")\r\nImportError: torch.utils.ffi requires the cffi package\r\nlevinth@zt-gpu-lin-1:~/fairseq-py$ pip3 install cffi\r\n\r\nand then the build worked\r\nlikely can be fixed by adding cffii to requirements.txt\n", "before_files": [{"content": "# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the LICENSE file in\n# the root directory of this source tree. An additional grant of patent rights\n# can be found in the PATENTS file in the same directory.\n#\n\n\"\"\"\nWrapper around various loggers and progress bars (e.g., tqdm).\n\"\"\"\n\nfrom collections import OrderedDict\nimport json\nfrom numbers import Number\nimport sys\n\nfrom tqdm import tqdm\n\nfrom fairseq.meters import AverageMeter\n\n\nclass progress_bar(object):\n \"\"\"Abstract class for progress bars.\"\"\"\n def __init__(self, iterable, epoch=None, prefix=None):\n self.iterable = iterable\n self.epoch = epoch\n self.prefix = ''\n if epoch is not None:\n self.prefix += '| epoch {:03d}'.format(epoch)\n if prefix is not None:\n self.prefix += ' | {}'.format(prefix)\n\n def __enter__(self):\n return self\n\n def __exit__(self, *exc):\n return False\n\n def __iter__(self):\n raise NotImplementedError\n\n def log(self, stats):\n \"\"\"Log intermediate stats according to log_interval.\"\"\"\n raise NotImplementedError\n\n def print(self, stats):\n \"\"\"Print end-of-epoch stats.\"\"\"\n raise NotImplementedError\n\n def _str_commas(self, stats):\n return ', '.join(key + '=' + stats[key].strip()\n for key in stats.keys())\n\n def _str_pipes(self, stats):\n return ' | '.join(key + ' ' + stats[key].strip()\n for key in stats.keys())\n\n def _format_stats(self, stats):\n postfix = OrderedDict(stats)\n # Preprocess stats according to datatype\n for key in postfix.keys():\n # Number: limit the length of the string\n if isinstance(postfix[key], Number):\n postfix[key] = '{:g}'.format(postfix[key])\n # Meter: display both current and average value\n elif isinstance(postfix[key], AverageMeter):\n postfix[key] = '{:.2f} ({:.2f})'.format(\n postfix[key].val, postfix[key].avg)\n # Else for any other type, try to get the string conversion\n elif not 
isinstance(postfix[key], str):\n postfix[key] = str(postfix[key])\n # Else if it's a string, don't need to preprocess anything\n return postfix\n\n\nclass json_progress_bar(progress_bar):\n \"\"\"Log output in JSON format.\"\"\"\n\n def __init__(self, iterable, epoch=None, prefix=None, log_interval=1000):\n super().__init__(iterable, epoch, prefix)\n self.log_interval = log_interval\n self.stats = None\n\n def __iter__(self):\n size = float(len(self.iterable))\n for i, obj in enumerate(self.iterable):\n yield obj\n if self.stats is not None and i > 0 and \\\n self.log_interval is not None and i % self.log_interval == 0:\n update = self.epoch + float(i / size) if self.epoch is not None else None\n stats = self._format_stats(self.stats, epoch=self.epoch, update=update)\n print('sweep_log: ' + json.dumps(stats), flush=True)\n\n def log(self, stats):\n \"\"\"Log intermediate stats according to log_interval.\"\"\"\n self.stats = stats\n\n def print(self, stats):\n \"\"\"Print end-of-epoch stats.\"\"\"\n stats = self._format_stats(self.stats, epoch=self.epoch)\n print(\"sweep_log: \" + json.dumps(stats), flush=True)\n\n def _format_stats(self, stats, epoch=None, update=None):\n postfix = OrderedDict()\n if epoch is not None:\n postfix['epoch'] = epoch\n if update is not None:\n postfix['update'] = update\n # Preprocess stats according to datatype\n for key in stats.keys():\n # Meter: display both current and average value\n if isinstance(stats[key], AverageMeter):\n postfix[key] = stats[key].val\n postfix[key + '_avg'] = stats[key].avg\n else:\n postfix[key] = stats[key]\n return postfix\n\n\nclass noop_progress_bar(progress_bar):\n \"\"\"No logging.\"\"\"\n\n def __init__(self, iterable, epoch=None, prefix=None):\n super().__init__(iterable, epoch, prefix)\n\n def __iter__(self):\n for obj in self.iterable:\n yield obj\n\n def log(self, stats):\n \"\"\"Log intermediate stats according to log_interval.\"\"\"\n pass\n\n def print(self, stats):\n \"\"\"Print end-of-epoch stats.\"\"\"\n pass\n\n\nclass simple_progress_bar(progress_bar):\n \"\"\"A minimal logger for non-TTY environments.\"\"\"\n\n def __init__(self, iterable, epoch=None, prefix=None, log_interval=1000):\n super().__init__(iterable, epoch, prefix)\n self.log_interval = log_interval\n self.stats = None\n\n def __iter__(self):\n size = len(self.iterable)\n for i, obj in enumerate(self.iterable):\n yield obj\n if self.stats is not None and i > 0 and \\\n self.log_interval is not None and i % self.log_interval == 0:\n postfix = self._str_commas(self.stats)\n print('{}: {:5d} / {:d} {}'.format(self.prefix, i, size, postfix),\n flush=True)\n\n def log(self, stats):\n \"\"\"Log intermediate stats according to log_interval.\"\"\"\n self.stats = self._format_stats(stats)\n\n def print(self, stats):\n \"\"\"Print end-of-epoch stats.\"\"\"\n postfix = self._str_pipes(self._format_stats(stats))\n print('{} | {}'.format(self.prefix, postfix), flush=True)\n\n\nclass tqdm_progress_bar(progress_bar):\n \"\"\"Log to tqdm.\"\"\"\n\n def __init__(self, iterable, epoch=None, prefix=None):\n super().__init__(iterable, epoch, prefix)\n self.tqdm = tqdm(iterable, self.prefix, leave=False)\n\n def __iter__(self):\n return iter(self.tqdm)\n\n def log(self, stats):\n \"\"\"Log intermediate stats according to log_interval.\"\"\"\n self.tqdm.set_postfix(self._format_stats(stats), refresh=False)\n\n def print(self, stats):\n \"\"\"Print end-of-epoch stats.\"\"\"\n postfix = self._str_pipes(self._format_stats(stats))\n self.tqdm.write('{} | 
{}'.format(self.tqdm.desc, postfix))\n", "path": "fairseq/progress_bar.py"}]} | 2,708 | 73 |
gh_patches_debug_15268 | rasdani/github-patches | git_diff | scikit-hep__pyhf-1273 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add commas around constrained maximal likelihood in docstring for clarity
# Description
In PR #905 the docstring for [`pyhf.infer.mle.fixed_poi_fit`](https://scikit-hep.org/pyhf/_generated/pyhf.infer.mle.fixed_poi_fit.html) was amended, but the lines
https://github.com/scikit-hep/pyhf/blob/fd7930cce36cbc3a2d0ee1828f060d7382129579/src/pyhf/infer/mle.py#L134-L135
are missing commas around the likelihood, making it difficult to read

It should read
```
,:math:`L\left(\mu, \hat{\hat{\boldsymbol{\theta}}}\right)`, in the profile
```
</issue>
<code>
[start of src/pyhf/infer/mle.py]
1 """Module for Maximum Likelihood Estimation."""
2 from .. import get_backend
3 from ..exceptions import UnspecifiedPOI
4
5
6 def twice_nll(pars, data, pdf):
7 r"""
8 Two times the negative log-likelihood of the model parameters, :math:`\left(\mu, \boldsymbol{\theta}\right)`, given the observed data.
9 It is used in the calculation of the test statistic, :math:`t_{\mu}`, as defiend in Equation (8) in :xref:`arXiv:1007.1727`
10
11 .. math::
12
13 t_{\mu} = -2\ln\lambda\left(\mu\right)
14
15 where :math:`\lambda\left(\mu\right)` is the profile likelihood ratio as defined in Equation (7)
16
17 .. math::
18
19 \lambda\left(\mu\right) = \frac{L\left(\mu, \hat{\hat{\boldsymbol{\theta}}}\right)}{L\left(\hat{\mu}, \hat{\boldsymbol{\theta}}\right)}\,.
20
21 It serves as the objective function to minimize in :func:`~pyhf.infer.mle.fit`
22 and :func:`~pyhf.infer.mle.fixed_poi_fit`.
23
24 Example:
25 >>> import pyhf
26 >>> pyhf.set_backend("numpy")
27 >>> model = pyhf.simplemodels.hepdata_like(
28 ... signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0]
29 ... )
30 >>> observations = [51, 48]
31 >>> data = pyhf.tensorlib.astensor(observations + model.config.auxdata)
32 >>> parameters = model.config.suggested_init() # nominal parameters
33 >>> twice_nll = pyhf.infer.mle.twice_nll(parameters, data, model)
34 >>> twice_nll
35 array([30.77525435])
36 >>> -2 * model.logpdf(parameters, data) == twice_nll
37 array([ True])
38
39 Args:
40 pars (:obj:`tensor`): The parameters of the HistFactory model
41 data (:obj:`tensor`): The data to be considered
42 pdf (~pyhf.pdf.Model): The statistical model adhering to the schema model.json
43
44 Returns:
45 Tensor: Two times the negative log-likelihood, :math:`-2\ln L\left(\mu, \boldsymbol{\theta}\right)`
46 """
47 return -2 * pdf.logpdf(pars, data)
48
49
50 def _validate_fit_inputs(init_pars, par_bounds, fixed_params):
51 for par_idx, (value, bound) in enumerate(zip(init_pars, par_bounds)):
52 if not (bound[0] <= value <= bound[1]):
53 raise ValueError(
54 f"fit initialization parameter (index: {par_idx}, value: {value}) lies outside of its bounds: {bound}"
55 + "\nTo correct this adjust the initialization parameter values in the model spec or those given"
56 + "\nas arguments to pyhf.infer.fit. If this value is intended, adjust the range of the parameter"
57 + "\nbounds."
58 )
59
60
61 def fit(data, pdf, init_pars=None, par_bounds=None, fixed_params=None, **kwargs):
62 r"""
63 Run a maximum likelihood fit.
64 This is done by minimizing the objective function :func:`~pyhf.infer.mle.twice_nll`
65 of the model parameters given the observed data.
66 This is used to produce the maximal likelihood :math:`L\left(\hat{\mu}, \hat{\boldsymbol{\theta}}\right)`
67 in the profile likelihood ratio in Equation (7) in :xref:`arXiv:1007.1727`
68
69 .. math::
70
71 \lambda\left(\mu\right) = \frac{L\left(\mu, \hat{\hat{\boldsymbol{\theta}}}\right)}{L\left(\hat{\mu}, \hat{\boldsymbol{\theta}}\right)}
72
73
74 .. note::
75
76 :func:`twice_nll` is the objective function given to the optimizer and
77 is returned evaluated at the best fit model parameters when the optional
78 kwarg ``return_fitted_val`` is ``True``.
79
80 Example:
81 >>> import pyhf
82 >>> pyhf.set_backend("numpy")
83 >>> model = pyhf.simplemodels.hepdata_like(
84 ... signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0]
85 ... )
86 >>> observations = [51, 48]
87 >>> data = pyhf.tensorlib.astensor(observations + model.config.auxdata)
88 >>> bestfit_pars, twice_nll = pyhf.infer.mle.fit(data, model, return_fitted_val=True)
89 >>> bestfit_pars
90 array([0. , 1.0030512 , 0.96266961])
91 >>> twice_nll
92 array(24.98393521)
93 >>> -2 * model.logpdf(bestfit_pars, data) == twice_nll
94 array([ True])
95
96 Args:
97 data (:obj:`tensor`): The data
98 pdf (~pyhf.pdf.Model): The statistical model adhering to the schema model.json
99 init_pars (:obj:`list`): Values to initialize the model parameters at for the fit
100 par_bounds (:obj:`list` of :obj:`list`\s or :obj:`tuple`\s): The extrema of values the model parameters are allowed to reach in the fit
101 fixed_params (:obj:`list`): Parameters to be held constant in the fit.
102 kwargs: Keyword arguments passed through to the optimizer API
103
104 Returns:
105 See optimizer API
106
107 """
108 _, opt = get_backend()
109 init_pars = init_pars or pdf.config.suggested_init()
110 par_bounds = par_bounds or pdf.config.suggested_bounds()
111 fixed_params = fixed_params or pdf.config.suggested_fixed()
112
113 _validate_fit_inputs(init_pars, par_bounds, fixed_params)
114
115 # get fixed vals from the model
116 fixed_vals = [
117 (index, init)
118 for index, (init, is_fixed) in enumerate(zip(init_pars, fixed_params))
119 if is_fixed
120 ]
121
122 return opt.minimize(
123 twice_nll, data, pdf, init_pars, par_bounds, fixed_vals, **kwargs
124 )
125
126
127 def fixed_poi_fit(
128 poi_val, data, pdf, init_pars=None, par_bounds=None, fixed_params=None, **kwargs
129 ):
130 r"""
131 Run a maximum likelihood fit with the POI value fixed.
132 This is done by minimizing the objective function of :func:`~pyhf.infer.mle.twice_nll`
133 of the model parameters given the observed data, for a given fixed value of :math:`\mu`.
134 This is used to produce the constrained maximal likelihood for the given :math:`\mu`
135 :math:`L\left(\mu, \hat{\hat{\boldsymbol{\theta}}}\right)` in the profile
136 likelihood ratio in Equation (7) in :xref:`arXiv:1007.1727`
137
138 .. math::
139
140 \lambda\left(\mu\right) = \frac{L\left(\mu, \hat{\hat{\boldsymbol{\theta}}}\right)}{L\left(\hat{\mu}, \hat{\boldsymbol{\theta}}\right)}
141
142 .. note::
143
144 :func:`twice_nll` is the objective function given to the optimizer and
145 is returned evaluated at the best fit model parameters when the optional
146 kwarg ``return_fitted_val`` is ``True``.
147
148 Example:
149 >>> import pyhf
150 >>> pyhf.set_backend("numpy")
151 >>> model = pyhf.simplemodels.hepdata_like(
152 ... signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0]
153 ... )
154 >>> observations = [51, 48]
155 >>> data = pyhf.tensorlib.astensor(observations + model.config.auxdata)
156 >>> test_poi = 1.0
157 >>> bestfit_pars, twice_nll = pyhf.infer.mle.fixed_poi_fit(
158 ... test_poi, data, model, return_fitted_val=True
159 ... )
160 >>> bestfit_pars
161 array([1. , 0.97224597, 0.87553894])
162 >>> twice_nll
163 array(28.92218013)
164 >>> -2 * model.logpdf(bestfit_pars, data) == twice_nll
165 array([ True])
166
167 Args:
168 data: The data
169 pdf (~pyhf.pdf.Model): The statistical model adhering to the schema model.json
170 init_pars (:obj:`list`): Values to initialize the model parameters at for the fit
171 par_bounds (:obj:`list` of :obj:`list`\s or :obj:`tuple`\s): The extrema of values the model parameters are allowed to reach in the fit
172 fixed_params (:obj:`list`): Parameters to be held constant in the fit.
173 kwargs: Keyword arguments passed through to the optimizer API
174
175 Returns:
176 See optimizer API
177
178 """
179 if pdf.config.poi_index is None:
180 raise UnspecifiedPOI(
181 'No POI is defined. A POI is required to fit with a fixed POI.'
182 )
183
184 init_pars = [*(init_pars or pdf.config.suggested_init())]
185 fixed_params = [*(fixed_params or pdf.config.suggested_fixed())]
186
187 init_pars[pdf.config.poi_index] = poi_val
188 fixed_params[pdf.config.poi_index] = True
189
190 return fit(data, pdf, init_pars, par_bounds, fixed_params, **kwargs)
191
[end of src/pyhf/infer/mle.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/pyhf/infer/mle.py b/src/pyhf/infer/mle.py
--- a/src/pyhf/infer/mle.py
+++ b/src/pyhf/infer/mle.py
@@ -131,8 +131,8 @@
Run a maximum likelihood fit with the POI value fixed.
This is done by minimizing the objective function of :func:`~pyhf.infer.mle.twice_nll`
of the model parameters given the observed data, for a given fixed value of :math:`\mu`.
- This is used to produce the constrained maximal likelihood for the given :math:`\mu`
- :math:`L\left(\mu, \hat{\hat{\boldsymbol{\theta}}}\right)` in the profile
+ This is used to produce the constrained maximal likelihood for the given :math:`\mu`,
+ :math:`L\left(\mu, \hat{\hat{\boldsymbol{\theta}}}\right)`, in the profile
likelihood ratio in Equation (7) in :xref:`arXiv:1007.1727`
.. math::
| {"golden_diff": "diff --git a/src/pyhf/infer/mle.py b/src/pyhf/infer/mle.py\n--- a/src/pyhf/infer/mle.py\n+++ b/src/pyhf/infer/mle.py\n@@ -131,8 +131,8 @@\n Run a maximum likelihood fit with the POI value fixed.\n This is done by minimizing the objective function of :func:`~pyhf.infer.mle.twice_nll`\n of the model parameters given the observed data, for a given fixed value of :math:`\\mu`.\n- This is used to produce the constrained maximal likelihood for the given :math:`\\mu`\n- :math:`L\\left(\\mu, \\hat{\\hat{\\boldsymbol{\\theta}}}\\right)` in the profile\n+ This is used to produce the constrained maximal likelihood for the given :math:`\\mu`,\n+ :math:`L\\left(\\mu, \\hat{\\hat{\\boldsymbol{\\theta}}}\\right)`, in the profile\n likelihood ratio in Equation (7) in :xref:`arXiv:1007.1727`\n \n .. math::\n", "issue": "Add commas around constrained maximal likelihood in docstring for clarity\n# Description\r\n\r\nIn PR #905 the docstring for [`pyhf.infer.mle.fixed_poi_fit`](https://scikit-hep.org/pyhf/_generated/pyhf.infer.mle.fixed_poi_fit.html) was amended, but the lines\r\n\r\nhttps://github.com/scikit-hep/pyhf/blob/fd7930cce36cbc3a2d0ee1828f060d7382129579/src/pyhf/infer/mle.py#L134-L135\r\n\r\nare missing commas around the likelihood, making it difficult to read\r\n\r\n\r\n\r\nIt should read\r\n\r\n```\r\n,:math:`L\\left(\\mu, \\hat{\\hat{\\boldsymbol{\\theta}}}\\right)`, in the profile \r\n```\r\n\nAdd commas around constrained maximal likelihood in docstring for clarity\n# Description\r\n\r\nIn PR #905 the docstring for [`pyhf.infer.mle.fixed_poi_fit`](https://scikit-hep.org/pyhf/_generated/pyhf.infer.mle.fixed_poi_fit.html) was amended, but the lines\r\n\r\nhttps://github.com/scikit-hep/pyhf/blob/fd7930cce36cbc3a2d0ee1828f060d7382129579/src/pyhf/infer/mle.py#L134-L135\r\n\r\nare missing commas around the likelihood, making it difficult to read\r\n\r\n\r\n\r\nIt should read\r\n\r\n```\r\n,:math:`L\\left(\\mu, \\hat{\\hat{\\boldsymbol{\\theta}}}\\right)`, in the profile \r\n```\r\n\n", "before_files": [{"content": "\"\"\"Module for Maximum Likelihood Estimation.\"\"\"\nfrom .. import get_backend\nfrom ..exceptions import UnspecifiedPOI\n\n\ndef twice_nll(pars, data, pdf):\n r\"\"\"\n Two times the negative log-likelihood of the model parameters, :math:`\\left(\\mu, \\boldsymbol{\\theta}\\right)`, given the observed data.\n It is used in the calculation of the test statistic, :math:`t_{\\mu}`, as defiend in Equation (8) in :xref:`arXiv:1007.1727`\n\n .. math::\n\n t_{\\mu} = -2\\ln\\lambda\\left(\\mu\\right)\n\n where :math:`\\lambda\\left(\\mu\\right)` is the profile likelihood ratio as defined in Equation (7)\n\n .. math::\n\n \\lambda\\left(\\mu\\right) = \\frac{L\\left(\\mu, \\hat{\\hat{\\boldsymbol{\\theta}}}\\right)}{L\\left(\\hat{\\mu}, \\hat{\\boldsymbol{\\theta}}\\right)}\\,.\n\n It serves as the objective function to minimize in :func:`~pyhf.infer.mle.fit`\n and :func:`~pyhf.infer.mle.fixed_poi_fit`.\n\n Example:\n >>> import pyhf\n >>> pyhf.set_backend(\"numpy\")\n >>> model = pyhf.simplemodels.hepdata_like(\n ... signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0]\n ... 
)\n >>> observations = [51, 48]\n >>> data = pyhf.tensorlib.astensor(observations + model.config.auxdata)\n >>> parameters = model.config.suggested_init() # nominal parameters\n >>> twice_nll = pyhf.infer.mle.twice_nll(parameters, data, model)\n >>> twice_nll\n array([30.77525435])\n >>> -2 * model.logpdf(parameters, data) == twice_nll\n array([ True])\n\n Args:\n pars (:obj:`tensor`): The parameters of the HistFactory model\n data (:obj:`tensor`): The data to be considered\n pdf (~pyhf.pdf.Model): The statistical model adhering to the schema model.json\n\n Returns:\n Tensor: Two times the negative log-likelihood, :math:`-2\\ln L\\left(\\mu, \\boldsymbol{\\theta}\\right)`\n \"\"\"\n return -2 * pdf.logpdf(pars, data)\n\n\ndef _validate_fit_inputs(init_pars, par_bounds, fixed_params):\n for par_idx, (value, bound) in enumerate(zip(init_pars, par_bounds)):\n if not (bound[0] <= value <= bound[1]):\n raise ValueError(\n f\"fit initialization parameter (index: {par_idx}, value: {value}) lies outside of its bounds: {bound}\"\n + \"\\nTo correct this adjust the initialization parameter values in the model spec or those given\"\n + \"\\nas arguments to pyhf.infer.fit. If this value is intended, adjust the range of the parameter\"\n + \"\\nbounds.\"\n )\n\n\ndef fit(data, pdf, init_pars=None, par_bounds=None, fixed_params=None, **kwargs):\n r\"\"\"\n Run a maximum likelihood fit.\n This is done by minimizing the objective function :func:`~pyhf.infer.mle.twice_nll`\n of the model parameters given the observed data.\n This is used to produce the maximal likelihood :math:`L\\left(\\hat{\\mu}, \\hat{\\boldsymbol{\\theta}}\\right)`\n in the profile likelihood ratio in Equation (7) in :xref:`arXiv:1007.1727`\n\n .. math::\n\n \\lambda\\left(\\mu\\right) = \\frac{L\\left(\\mu, \\hat{\\hat{\\boldsymbol{\\theta}}}\\right)}{L\\left(\\hat{\\mu}, \\hat{\\boldsymbol{\\theta}}\\right)}\n\n\n .. note::\n\n :func:`twice_nll` is the objective function given to the optimizer and\n is returned evaluated at the best fit model parameters when the optional\n kwarg ``return_fitted_val`` is ``True``.\n\n Example:\n >>> import pyhf\n >>> pyhf.set_backend(\"numpy\")\n >>> model = pyhf.simplemodels.hepdata_like(\n ... signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0]\n ... )\n >>> observations = [51, 48]\n >>> data = pyhf.tensorlib.astensor(observations + model.config.auxdata)\n >>> bestfit_pars, twice_nll = pyhf.infer.mle.fit(data, model, return_fitted_val=True)\n >>> bestfit_pars\n array([0. 
, 1.0030512 , 0.96266961])\n >>> twice_nll\n array(24.98393521)\n >>> -2 * model.logpdf(bestfit_pars, data) == twice_nll\n array([ True])\n\n Args:\n data (:obj:`tensor`): The data\n pdf (~pyhf.pdf.Model): The statistical model adhering to the schema model.json\n init_pars (:obj:`list`): Values to initialize the model parameters at for the fit\n par_bounds (:obj:`list` of :obj:`list`\\s or :obj:`tuple`\\s): The extrema of values the model parameters are allowed to reach in the fit\n fixed_params (:obj:`list`): Parameters to be held constant in the fit.\n kwargs: Keyword arguments passed through to the optimizer API\n\n Returns:\n See optimizer API\n\n \"\"\"\n _, opt = get_backend()\n init_pars = init_pars or pdf.config.suggested_init()\n par_bounds = par_bounds or pdf.config.suggested_bounds()\n fixed_params = fixed_params or pdf.config.suggested_fixed()\n\n _validate_fit_inputs(init_pars, par_bounds, fixed_params)\n\n # get fixed vals from the model\n fixed_vals = [\n (index, init)\n for index, (init, is_fixed) in enumerate(zip(init_pars, fixed_params))\n if is_fixed\n ]\n\n return opt.minimize(\n twice_nll, data, pdf, init_pars, par_bounds, fixed_vals, **kwargs\n )\n\n\ndef fixed_poi_fit(\n poi_val, data, pdf, init_pars=None, par_bounds=None, fixed_params=None, **kwargs\n):\n r\"\"\"\n Run a maximum likelihood fit with the POI value fixed.\n This is done by minimizing the objective function of :func:`~pyhf.infer.mle.twice_nll`\n of the model parameters given the observed data, for a given fixed value of :math:`\\mu`.\n This is used to produce the constrained maximal likelihood for the given :math:`\\mu`\n :math:`L\\left(\\mu, \\hat{\\hat{\\boldsymbol{\\theta}}}\\right)` in the profile\n likelihood ratio in Equation (7) in :xref:`arXiv:1007.1727`\n\n .. math::\n\n \\lambda\\left(\\mu\\right) = \\frac{L\\left(\\mu, \\hat{\\hat{\\boldsymbol{\\theta}}}\\right)}{L\\left(\\hat{\\mu}, \\hat{\\boldsymbol{\\theta}}\\right)}\n\n .. note::\n\n :func:`twice_nll` is the objective function given to the optimizer and\n is returned evaluated at the best fit model parameters when the optional\n kwarg ``return_fitted_val`` is ``True``.\n\n Example:\n >>> import pyhf\n >>> pyhf.set_backend(\"numpy\")\n >>> model = pyhf.simplemodels.hepdata_like(\n ... signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0]\n ... )\n >>> observations = [51, 48]\n >>> data = pyhf.tensorlib.astensor(observations + model.config.auxdata)\n >>> test_poi = 1.0\n >>> bestfit_pars, twice_nll = pyhf.infer.mle.fixed_poi_fit(\n ... test_poi, data, model, return_fitted_val=True\n ... )\n >>> bestfit_pars\n array([1. , 0.97224597, 0.87553894])\n >>> twice_nll\n array(28.92218013)\n >>> -2 * model.logpdf(bestfit_pars, data) == twice_nll\n array([ True])\n\n Args:\n data: The data\n pdf (~pyhf.pdf.Model): The statistical model adhering to the schema model.json\n init_pars (:obj:`list`): Values to initialize the model parameters at for the fit\n par_bounds (:obj:`list` of :obj:`list`\\s or :obj:`tuple`\\s): The extrema of values the model parameters are allowed to reach in the fit\n fixed_params (:obj:`list`): Parameters to be held constant in the fit.\n kwargs: Keyword arguments passed through to the optimizer API\n\n Returns:\n See optimizer API\n\n \"\"\"\n if pdf.config.poi_index is None:\n raise UnspecifiedPOI(\n 'No POI is defined. 
A POI is required to fit with a fixed POI.'\n )\n\n init_pars = [*(init_pars or pdf.config.suggested_init())]\n fixed_params = [*(fixed_params or pdf.config.suggested_fixed())]\n\n init_pars[pdf.config.poi_index] = poi_val\n fixed_params[pdf.config.poi_index] = True\n\n return fit(data, pdf, init_pars, par_bounds, fixed_params, **kwargs)\n", "path": "src/pyhf/infer/mle.py"}]} | 3,698 | 246 |
gh_patches_debug_21180 | rasdani/github-patches | git_diff | Princeton-CDH__geniza-477 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
As a front-end user I want to see the PGP logo on the site
Here are links to the temporary logo approved until we finish revising the permanent one: 
- [for desktop light mode](https://www.figma.com/file/HpGBOZi9lO8B3nCAx3d4Fj/Princeton-Geniza-(Project-team)?node-id=2300%3A16460)
- [for mobile light mode](https://www.figma.com/file/HpGBOZi9lO8B3nCAx3d4Fj/Princeton-Geniza-(Project-team)?node-id=2300%3A16461)
- [for desktop dark mode](https://www.figma.com/file/HpGBOZi9lO8B3nCAx3d4Fj/Princeton-Geniza-(Project-team)?node-id=2300%3A16466)
- [for mobile dark mode](https://www.figma.com/file/HpGBOZi9lO8B3nCAx3d4Fj/Princeton-Geniza-(Project-team)?node-id=2301%3A16480)
</issue>
<code>
[start of geniza/pages/models.py]
1 from django.db import models
2 from django.http.response import HttpResponseRedirect
3 from wagtail.admin.edit_handlers import FieldPanel, RichTextFieldPanel
4 from wagtail.core.fields import RichTextField
5 from wagtail.core.models import Page
6
7
8 class HomePage(Page):
9 """:class:`wagtail.core.models.Page` model for Geniza home page."""
10
11 # fields
12 description = models.TextField(blank=True)
13 body = RichTextField(
14 features=[
15 "h2",
16 "h3",
17 "bold",
18 "italic",
19 "link",
20 "ol",
21 "ul",
22 "image",
23 "embed",
24 "blockquote",
25 "superscript",
26 "subscript",
27 "strikethrough",
28 ],
29 blank=True,
30 )
31 # can only be child of Root
32 parent_page_types = [Page]
33 subpage_types = ["pages.ContentPage", "pages.ContainerPage"]
34 content_panels = Page.content_panels + [
35 FieldPanel("description"),
36 RichTextFieldPanel("body"),
37 ]
38
39 class Meta:
40 verbose_name = "homepage"
41
42
43 class ContainerPage(Page):
44 """An empty :class:`Page` type that has :class:`ContentPage` instances
45 as its subpages."""
46
47 # can only be child of HomePage
48 parent_page_types = [HomePage]
49 subpage_types = ["pages.ContentPage"]
50
51 # show in menu by default
52 show_in_menus_default = True
53
54 # should not ever actually render
55 def serve(self, request):
56 # redirect to parent page instead
57 if self.get_parent():
58 return HttpResponseRedirect(self.get_parent().get_url(request))
59
60
61 class ContentPage(Page):
62 """A simple :class:`Page` type for content pages."""
63
64 # fields
65 description = models.TextField(blank=True)
66 body = RichTextField(
67 features=[
68 "h2",
69 "h3",
70 "bold",
71 "italic",
72 "link",
73 "ol",
74 "ul",
75 "image",
76 "embed",
77 "blockquote",
78 "superscript",
79 "subscript",
80 "strikethrough",
81 ],
82 blank=True,
83 )
84 # can be child of Home or Container page
85 parent_page_types = [HomePage, ContainerPage]
86 content_panels = Page.content_panels + [
87 FieldPanel("description"),
88 RichTextFieldPanel("body"),
89 ]
90
91 def get_context(self, request):
92 context = super(ContentPage, self).get_context(request)
93 context["page_type"] = "content-page"
94 return context
95
[end of geniza/pages/models.py]
[start of geniza/pages/management/commands/bootstrap_content.py]
1 from django.core.exceptions import ObjectDoesNotExist
2 from django.core.management.base import BaseCommand
3 from django.templatetags.static import static
4 from wagtail.core.models import Page
5 from wagtail.core.models.i18n import Locale
6 from wagtail.core.models.sites import Site
7
8 from geniza.pages.models import ContainerPage, ContentPage, HomePage
9
10
11 class Command(BaseCommand):
12 def add_arguments(self, parser):
13 parser.add_argument(
14 "-H",
15 "--hostname",
16 default="localhost",
17 help="hostname from which the app is served (default: localhost)",
18 )
19 parser.add_argument(
20 "-p",
21 "--port",
22 default="8000",
23 help="port from which the app is served (default: 8000)",
24 )
25 parser.add_argument(
26 "-f",
27 "--fixtures",
28 action="store_true",
29 help="include test fixture content page",
30 )
31
32 def handle(self, *args, **options):
33 """Bootstrap content for Geniza public project site.
34 NOTE: Not idempotent. Will recreate pages if they already exist."""
35
36 include_fixtures = options.get("fixtures")
37 hostname = options.get("hostname")
38 port = options.get("port")
39 (locale, _) = Locale.objects.get_or_create(language_code="en")
40
41 # Bootstrap empty home page, about page
42 home_page = HomePage(
43 title="The Princeton Geniza Project",
44 description="Home page",
45 locale=locale,
46 )
47
48 root = Page.get_first_root_node()
49 root.add_child(instance=home_page)
50
51 container_page = ContainerPage(title="About", slug="about", locale=locale)
52 home_page.add_child(instance=container_page)
53
54 # Bootstrap other empty content pages
55
56 # Pages for main navigation menu
57 root_pages = [
58 ContentPage(
59 title="Contact Us",
60 slug="contact",
61 description="Contact information",
62 locale=locale,
63 ),
64 ]
65 for page in root_pages:
66 page.show_in_menus = True
67 home_page.add_child(instance=page)
68
69 # Pages for About sub-navigation menu
70 container_pages = [
71 ContentPage(
72 title="Credits",
73 slug="credits",
74 description="List of Geniza Project contributors and their roles",
75 locale=locale,
76 ),
77 ContentPage(
78 title="How to Cite",
79 slug="how-to-cite",
80 description="Instructions for citing the Princeton Geniza Project",
81 locale=locale,
82 ),
83 ContentPage(
84 title="Data Exports",
85 slug="data-exports",
86 description="Information about exporting data",
87 locale=locale,
88 ),
89 ContentPage(
90 title="Technical",
91 slug="technical",
92 description="Technical information",
93 locale=locale,
94 ),
95 ContentPage(
96 title="FAQ",
97 slug="faq",
98 description="Frequently asked questions",
99 locale=locale,
100 ),
101 ]
102 for page in container_pages:
103 page.show_in_menus = True
104 container_page.add_child(instance=page)
105
106 if include_fixtures:
107 # Create test page
108 test_content_page = self.generate_test_content_page()
109 home_page.add_child(instance=test_content_page)
110
111 # Create or update site with page tree and other options
112 try:
113 default_site = Site.objects.get(is_default_site=True)
114 default_site.root_page = home_page
115 default_site.port = port
116 default_site.hostname = hostname
117 default_site.site_name = "Geniza"
118 default_site.save()
119 except ObjectDoesNotExist:
120 default_site = Site.objects.create(
121 hostname=hostname,
122 port=port,
123 root_page=home_page,
124 is_default_site=True,
125 site_name="Geniza",
126 )
127
128 def generate_test_content_page(self):
129 # Create test content page from fixture
130 with open(
131 "geniza/pages/fixtures/example_content_page.html", "r"
132 ) as content_fixture:
133 content = content_fixture.read()
134 return ContentPage(
135 title="Page Title",
136 description="Example page",
137 slug="content",
138 body=content.replace( # get static URLs for test images
139 "test-image-fragment.jpg", static("test-image-fragment.jpg")
140 ).replace("test-image-tagnetwork.png", static("test-image-tagnetwork.png")),
141 live=True,
142 )
143
[end of geniza/pages/management/commands/bootstrap_content.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/geniza/pages/management/commands/bootstrap_content.py b/geniza/pages/management/commands/bootstrap_content.py
--- a/geniza/pages/management/commands/bootstrap_content.py
+++ b/geniza/pages/management/commands/bootstrap_content.py
@@ -136,7 +136,11 @@
description="Example page",
slug="content",
body=content.replace( # get static URLs for test images
- "test-image-fragment.jpg", static("test-image-fragment.jpg")
- ).replace("test-image-tagnetwork.png", static("test-image-tagnetwork.png")),
+ "test-image-fragment.jpg",
+ static("img/fixtures/test-image-fragment.jpg"),
+ ).replace(
+ "test-image-tagnetwork.png",
+ static("img/fixtures/test-image-tagnetwork.png"),
+ ),
live=True,
)
diff --git a/geniza/pages/models.py b/geniza/pages/models.py
--- a/geniza/pages/models.py
+++ b/geniza/pages/models.py
@@ -39,6 +39,11 @@
class Meta:
verbose_name = "homepage"
+ def get_context(self, request):
+ context = super(HomePage, self).get_context(request)
+ context["page_type"] = "homepage"
+ return context
+
class ContainerPage(Page):
"""An empty :class:`Page` type that has :class:`ContentPage` instances
| {"golden_diff": "diff --git a/geniza/pages/management/commands/bootstrap_content.py b/geniza/pages/management/commands/bootstrap_content.py\n--- a/geniza/pages/management/commands/bootstrap_content.py\n+++ b/geniza/pages/management/commands/bootstrap_content.py\n@@ -136,7 +136,11 @@\n description=\"Example page\",\n slug=\"content\",\n body=content.replace( # get static URLs for test images\n- \"test-image-fragment.jpg\", static(\"test-image-fragment.jpg\")\n- ).replace(\"test-image-tagnetwork.png\", static(\"test-image-tagnetwork.png\")),\n+ \"test-image-fragment.jpg\",\n+ static(\"img/fixtures/test-image-fragment.jpg\"),\n+ ).replace(\n+ \"test-image-tagnetwork.png\",\n+ static(\"img/fixtures/test-image-tagnetwork.png\"),\n+ ),\n live=True,\n )\ndiff --git a/geniza/pages/models.py b/geniza/pages/models.py\n--- a/geniza/pages/models.py\n+++ b/geniza/pages/models.py\n@@ -39,6 +39,11 @@\n class Meta:\n verbose_name = \"homepage\"\n \n+ def get_context(self, request):\n+ context = super(HomePage, self).get_context(request)\n+ context[\"page_type\"] = \"homepage\"\n+ return context\n+\n \n class ContainerPage(Page):\n \"\"\"An empty :class:`Page` type that has :class:`ContentPage` instances\n", "issue": "As a front-end user I want to see the PGP logo on the site\nhere are links to the temporary logo approved till we finish revising the permanent one: \n\n- [for desktop light mode](https://www.figma.com/file/HpGBOZi9lO8B3nCAx3d4Fj/Princeton-Geniza-(Project-team)?node-id=2300%3A16460)\n- [for mobile light mode](https://www.figma.com/file/HpGBOZi9lO8B3nCAx3d4Fj/Princeton-Geniza-(Project-team)?node-id=2300%3A16461)\n- [for desktop dark mode](https://www.figma.com/file/HpGBOZi9lO8B3nCAx3d4Fj/Princeton-Geniza-(Project-team)?node-id=2300%3A16466)\n- [for mobile dark mode](https://www.figma.com/file/HpGBOZi9lO8B3nCAx3d4Fj/Princeton-Geniza-(Project-team)?node-id=2301%3A16480)\n\n", "before_files": [{"content": "from django.db import models\nfrom django.http.response import HttpResponseRedirect\nfrom wagtail.admin.edit_handlers import FieldPanel, RichTextFieldPanel\nfrom wagtail.core.fields import RichTextField\nfrom wagtail.core.models import Page\n\n\nclass HomePage(Page):\n \"\"\":class:`wagtail.core.models.Page` model for Geniza home page.\"\"\"\n\n # fields\n description = models.TextField(blank=True)\n body = RichTextField(\n features=[\n \"h2\",\n \"h3\",\n \"bold\",\n \"italic\",\n \"link\",\n \"ol\",\n \"ul\",\n \"image\",\n \"embed\",\n \"blockquote\",\n \"superscript\",\n \"subscript\",\n \"strikethrough\",\n ],\n blank=True,\n )\n # can only be child of Root\n parent_page_types = [Page]\n subpage_types = [\"pages.ContentPage\", \"pages.ContainerPage\"]\n content_panels = Page.content_panels + [\n FieldPanel(\"description\"),\n RichTextFieldPanel(\"body\"),\n ]\n\n class Meta:\n verbose_name = \"homepage\"\n\n\nclass ContainerPage(Page):\n \"\"\"An empty :class:`Page` type that has :class:`ContentPage` instances\n as its subpages.\"\"\"\n\n # can only be child of HomePage\n parent_page_types = [HomePage]\n subpage_types = [\"pages.ContentPage\"]\n\n # show in menu by default\n show_in_menus_default = True\n\n # should not ever actually render\n def serve(self, request):\n # redirect to parent page instead\n if self.get_parent():\n return HttpResponseRedirect(self.get_parent().get_url(request))\n\n\nclass ContentPage(Page):\n \"\"\"A simple :class:`Page` type for content pages.\"\"\"\n\n # fields\n description = models.TextField(blank=True)\n body = RichTextField(\n features=[\n \"h2\",\n 
\"h3\",\n \"bold\",\n \"italic\",\n \"link\",\n \"ol\",\n \"ul\",\n \"image\",\n \"embed\",\n \"blockquote\",\n \"superscript\",\n \"subscript\",\n \"strikethrough\",\n ],\n blank=True,\n )\n # can be child of Home or Container page\n parent_page_types = [HomePage, ContainerPage]\n content_panels = Page.content_panels + [\n FieldPanel(\"description\"),\n RichTextFieldPanel(\"body\"),\n ]\n\n def get_context(self, request):\n context = super(ContentPage, self).get_context(request)\n context[\"page_type\"] = \"content-page\"\n return context\n", "path": "geniza/pages/models.py"}, {"content": "from django.core.exceptions import ObjectDoesNotExist\nfrom django.core.management.base import BaseCommand\nfrom django.templatetags.static import static\nfrom wagtail.core.models import Page\nfrom wagtail.core.models.i18n import Locale\nfrom wagtail.core.models.sites import Site\n\nfrom geniza.pages.models import ContainerPage, ContentPage, HomePage\n\n\nclass Command(BaseCommand):\n def add_arguments(self, parser):\n parser.add_argument(\n \"-H\",\n \"--hostname\",\n default=\"localhost\",\n help=\"hostname from which the app is served (default: localhost)\",\n )\n parser.add_argument(\n \"-p\",\n \"--port\",\n default=\"8000\",\n help=\"port from which the app is served (default: 8000)\",\n )\n parser.add_argument(\n \"-f\",\n \"--fixtures\",\n action=\"store_true\",\n help=\"include test fixture content page\",\n )\n\n def handle(self, *args, **options):\n \"\"\"Bootstrap content for Geniza public project site.\n NOTE: Not idempotent. Will recreate pages if they already exist.\"\"\"\n\n include_fixtures = options.get(\"fixtures\")\n hostname = options.get(\"hostname\")\n port = options.get(\"port\")\n (locale, _) = Locale.objects.get_or_create(language_code=\"en\")\n\n # Bootstrap empty home page, about page\n home_page = HomePage(\n title=\"The Princeton Geniza Project\",\n description=\"Home page\",\n locale=locale,\n )\n\n root = Page.get_first_root_node()\n root.add_child(instance=home_page)\n\n container_page = ContainerPage(title=\"About\", slug=\"about\", locale=locale)\n home_page.add_child(instance=container_page)\n\n # Bootstrap other empty content pages\n\n # Pages for main navigation menu\n root_pages = [\n ContentPage(\n title=\"Contact Us\",\n slug=\"contact\",\n description=\"Contact information\",\n locale=locale,\n ),\n ]\n for page in root_pages:\n page.show_in_menus = True\n home_page.add_child(instance=page)\n\n # Pages for About sub-navigation menu\n container_pages = [\n ContentPage(\n title=\"Credits\",\n slug=\"credits\",\n description=\"List of Geniza Project contributors and their roles\",\n locale=locale,\n ),\n ContentPage(\n title=\"How to Cite\",\n slug=\"how-to-cite\",\n description=\"Instructions for citing the Princeton Geniza Project\",\n locale=locale,\n ),\n ContentPage(\n title=\"Data Exports\",\n slug=\"data-exports\",\n description=\"Information about exporting data\",\n locale=locale,\n ),\n ContentPage(\n title=\"Technical\",\n slug=\"technical\",\n description=\"Technical information\",\n locale=locale,\n ),\n ContentPage(\n title=\"FAQ\",\n slug=\"faq\",\n description=\"Frequently asked questions\",\n locale=locale,\n ),\n ]\n for page in container_pages:\n page.show_in_menus = True\n container_page.add_child(instance=page)\n\n if include_fixtures:\n # Create test page\n test_content_page = self.generate_test_content_page()\n home_page.add_child(instance=test_content_page)\n\n # Create or update site with page tree and other options\n try:\n default_site = 
Site.objects.get(is_default_site=True)\n default_site.root_page = home_page\n default_site.port = port\n default_site.hostname = hostname\n default_site.site_name = \"Geniza\"\n default_site.save()\n except ObjectDoesNotExist:\n default_site = Site.objects.create(\n hostname=hostname,\n port=port,\n root_page=home_page,\n is_default_site=True,\n site_name=\"Geniza\",\n )\n\n def generate_test_content_page(self):\n # Create test content page from fixture\n with open(\n \"geniza/pages/fixtures/example_content_page.html\", \"r\"\n ) as content_fixture:\n content = content_fixture.read()\n return ContentPage(\n title=\"Page Title\",\n description=\"Example page\",\n slug=\"content\",\n body=content.replace( # get static URLs for test images\n \"test-image-fragment.jpg\", static(\"test-image-fragment.jpg\")\n ).replace(\"test-image-tagnetwork.png\", static(\"test-image-tagnetwork.png\")),\n live=True,\n )\n", "path": "geniza/pages/management/commands/bootstrap_content.py"}]} | 2,803 | 312 |
gh_patches_debug_39856 | rasdani/github-patches | git_diff | ckan__ckan-4102 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
recaptcha v1 will stop working 2018-3-31
### CKAN Version if known (or site URL)
All since 2.0ish
### The problem
Users will not be able to register, due to the re-captcha being switched off by Google on 2018-3-31.
Google's deprecation info: https://developers.google.com/recaptcha/docs/versions#v1
This affects those sites that:
* have set up recaptcha (i.e. registered with Google and [added their keys to the CKAN config](http://docs.ckan.org/en/latest/maintaining/configuration.html?highlight=captcha#ckan-recaptcha-publickey)). This is not part of the default CKAN install. Most installs only use the 'user' functionality for admins, so the impact should be limited.
* AND they use v1 of recaptcha. This is the default in the CKAN config, but Google deprecated v1 in May 2016 and prevented new sites from using it (I imagine it was the same time), so the issue only affects sites set up before then.
### How to check if you have this problem
Show the relevant bits of your CKAN config:
```
grep recaptcha /etc/ckan/default/production.ini
```
IF `ckan.recaptcha.publickey` has a value (i.e. not blank or unset)
AND (`ckan.recaptcha.version = 1` OR `ckan.recaptcha.version` is blank or unset)
THEN you need to upgrade to recaptcha 2 before 2018-03-31.
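
A minimal sketch of that check as a script (not part of the original issue or of CKAN; it assumes the options live in the standard `[app:main]` section of the ini file):

```python
# Hypothetical helper: warn if this config still relies on reCAPTCHA v1,
# i.e. a public key is set while the version is "1" or unset.
try:
    import configparser                      # Python 3
except ImportError:
    import ConfigParser as configparser      # Python 2

parser = configparser.RawConfigParser()
parser.read('/etc/ckan/default/production.ini')

def opt(name, default=''):
    try:
        return parser.get('app:main', name).strip()
    except (configparser.NoSectionError, configparser.NoOptionError):
        return default

if opt('ckan.recaptcha.publickey') and opt('ckan.recaptcha.version') in ('', '1'):
    print('reCAPTCHA v1 in use: upgrade to v2 before 2018-03-31')
```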
### Action
I think we should change the default to v2 and warn the community.
</issue>
<code>
[start of ckan/lib/captcha.py]
1 # encoding: utf-8
2
3 from ckan.common import config
4
5 import urllib
6 import urllib2
7 import json
8
9 def check_recaptcha(request):
10 '''Check a user\'s recaptcha submission is valid, and raise CaptchaError
11 on failure.'''
12 recaptcha_private_key = config.get('ckan.recaptcha.privatekey', '')
13 if not recaptcha_private_key:
14 # Recaptcha not enabled
15 return
16
17 client_ip_address = request.environ.get('REMOTE_ADDR', 'Unknown IP Address')
18
19 recaptcha_version = config.get('ckan.recaptcha.version', '1')
20 if recaptcha_version is '1':
21 recaptcha_response_field = request.params.get('recaptcha_response_field', '')
22 recaptcha_server_name = 'http://api-verify.recaptcha.net/verify'
23 recaptcha_challenge_field = request.params.get('recaptcha_challenge_field')
24
25 # recaptcha_response_field will be unicode if there are foreign chars in
26 # the user input. So we need to encode it as utf8 before urlencoding or
27 # we get an exception (#1431).
28 params = urllib.urlencode(dict(privatekey=recaptcha_private_key,
29 remoteip=client_ip_address,
30 challenge=recaptcha_challenge_field,
31 response=recaptcha_response_field.encode('utf8')))
32 f = urllib2.urlopen(recaptcha_server_name, params)
33 data = f.read()
34 f.close()
35
36 if not data.lower().startswith('true'):
37 raise CaptchaError()
38 elif recaptcha_version is '2':
39 recaptcha_response_field = request.params.get('g-recaptcha-response', '')
40 recaptcha_server_name = 'https://www.google.com/recaptcha/api/siteverify'
41
42 # recaptcha_response_field will be unicode if there are foreign chars in
43 # the user input. So we need to encode it as utf8 before urlencoding or
44 # we get an exception (#1431).
45 params = urllib.urlencode(dict(secret=recaptcha_private_key,
46 remoteip=client_ip_address,
47 response=recaptcha_response_field.encode('utf8')))
48 f = urllib2.urlopen(recaptcha_server_name, params)
49 data = json.load(f)
50 f.close()
51
52 try:
53 if not data['success']:
54 raise CaptchaError()
55 except IndexError:
56 # Something weird with recaptcha response
57 raise CaptchaError()
58
59 class CaptchaError(ValueError):
60 pass
[end of ckan/lib/captcha.py]
[start of ckan/lib/app_globals.py]
1 # encoding: utf-8
2
3 ''' The application's Globals object '''
4
5 import logging
6 import time
7 from threading import Lock
8 import re
9
10 from paste.deploy.converters import asbool
11 from ckan.common import config
12
13 import ckan
14 import ckan.model as model
15 import ckan.logic as logic
16 from logic.schema import update_configuration_schema
17
18
19 log = logging.getLogger(__name__)
20
21
22 # mappings translate between config settings and globals because our naming
23 # conventions are not well defined and/or implemented
24 mappings = {
25 # 'config_key': 'globals_key',
26 }
27
28
29 # This mapping is only used to define the configuration options (from the
30 # `config` object) that should be copied to the `app_globals` (`g`) object.
31 app_globals_from_config_details = {
32 'ckan.site_title': {},
33 'ckan.site_logo': {},
34 'ckan.site_url': {},
35 'ckan.site_description': {},
36 'ckan.site_about': {},
37 'ckan.site_intro_text': {},
38 'ckan.site_custom_css': {},
39 'ckan.favicon': {}, # default gets set in config.environment.py
40 'ckan.template_head_end': {},
41 'ckan.template_footer_end': {},
42 # has been setup in load_environment():
43 'ckan.site_id': {},
44 'ckan.recaptcha.publickey': {'name': 'recaptcha_publickey'},
45 'ckan.recaptcha.version': {'name': 'recaptcha_version', 'default': '1'},
46 'ckan.template_title_deliminater': {'default': '-'},
47 'ckan.template_head_end': {},
48 'ckan.template_footer_end': {},
49 'ckan.dumps_url': {},
50 'ckan.dumps_format': {},
51 'ofs.impl': {'name': 'ofs_impl'},
52 'ckan.homepage_style': {'default': '1'},
53
54 # split string
55 'search.facets': {'default': 'organization groups tags res_format license_id',
56 'type': 'split',
57 'name': 'facets'},
58 'package_hide_extras': {'type': 'split'},
59 'ckan.plugins': {'type': 'split'},
60
61 # bool
62 'debug': {'default': 'false', 'type' : 'bool'},
63 'ckan.debug_supress_header' : {'default': 'false', 'type' : 'bool'},
64 'ckan.legacy_templates' : {'default': 'false', 'type' : 'bool'},
65 'ckan.tracking_enabled' : {'default': 'false', 'type' : 'bool'},
66
67 # int
68 'ckan.datasets_per_page': {'default': '20', 'type': 'int'},
69 'ckan.activity_list_limit': {'default': '30', 'type': 'int'},
70 'ckan.user_list_limit': {'default': '20', 'type': 'int'},
71 'search.facets.default': {'default': '10', 'type': 'int',
72 'name': 'facets_default_number'},
73 }
74
75
76 # A place to store the origional config options of we override them
77 _CONFIG_CACHE = {}
78
79 def set_main_css(css_file):
80 ''' Sets the main_css. The css_file must be of the form file.css '''
81 assert css_file.endswith('.css')
82 new_css = css_file
83 # FIXME we should check the css file exists
84 app_globals.main_css = str(new_css)
85
86
87 def set_app_global(key, value):
88 '''
89 Set a new key on the app_globals (g) object
90
91 It will process the value according to the options on
92 app_globals_from_config_details (if any)
93 '''
94 key, value = process_app_global(key, value)
95 setattr(app_globals, key, value)
96
97
98 def process_app_global(key, value):
99 '''
100 Tweak a key, value pair meant to be set on the app_globals (g) object
101
102 According to the options on app_globals_from_config_details (if any)
103 '''
104 options = app_globals_from_config_details.get(key)
105 key = get_globals_key(key)
106 if options:
107 if 'name' in options:
108 key = options['name']
109 value = value or options.get('default', '')
110
111 data_type = options.get('type')
112 if data_type == 'bool':
113 value = asbool(value)
114 elif data_type == 'int':
115 value = int(value)
116 elif data_type == 'split':
117 value = value.split()
118
119 return key, value
120
121
122 def get_globals_key(key):
123 # create our globals key
124 # these can be specified in mappings or else we remove
125 # the `ckan.` part this is to keep the existing namings
126 # set the value
127 if key in mappings:
128 return mappings[key]
129 elif key.startswith('ckan.'):
130 return key[5:]
131 else:
132 return key
133
134
135 def reset():
136 ''' set updatable values from config '''
137 def get_config_value(key, default=''):
138 if model.meta.engine.has_table('system_info'):
139 value = model.get_system_info(key)
140 else:
141 value = None
142 config_value = config.get(key)
143 # sort encodeings if needed
144 if isinstance(config_value, str):
145 try:
146 config_value = config_value.decode('utf-8')
147 except UnicodeDecodeError:
148 config_value = config_value.decode('latin-1')
149 # we want to store the config the first time we get here so we can
150 # reset them if needed
151 if key not in _CONFIG_CACHE:
152 _CONFIG_CACHE[key] = config_value
153 if value is not None:
154 log.debug('config `%s` set to `%s` from db' % (key, value))
155 else:
156 value = _CONFIG_CACHE[key]
157 if value:
158 log.debug('config `%s` set to `%s` from config' % (key, value))
159 else:
160 value = default
161
162 set_app_global(key, value)
163
164 # update the config
165 config[key] = value
166
167 return value
168
169 # update the config settings in auto update
170 schema = update_configuration_schema()
171 for key in schema.keys():
172 get_config_value(key)
173
174 # custom styling
175 main_css = get_config_value('ckan.main_css', '/base/css/main.css')
176 set_main_css(main_css)
177
178 if app_globals.site_logo:
179 app_globals.header_class = 'header-image'
180 elif not app_globals.site_description:
181 app_globals.header_class = 'header-text-logo'
182 else:
183 app_globals.header_class = 'header-text-logo-tagline'
184
185
186 class _Globals(object):
187
188 ''' Globals acts as a container for objects available throughout the
189 life of the application. '''
190
191 def __init__(self):
192 '''One instance of Globals is created during application
193 initialization and is available during requests via the
194 'app_globals' variable
195 '''
196 self._init()
197 self._config_update = None
198 self._mutex = Lock()
199
200 def _check_uptodate(self):
201 ''' check the config is uptodate needed when several instances are
202 running '''
203 value = model.get_system_info('ckan.config_update')
204 if self._config_update != value:
205 if self._mutex.acquire(False):
206 reset()
207 self._config_update = value
208 self._mutex.release()
209
210 def _init(self):
211
212 self.ckan_version = ckan.__version__
213 self.ckan_base_version = re.sub('[^0-9\.]', '', self.ckan_version)
214 if self.ckan_base_version == self.ckan_version:
215 self.ckan_doc_version = 'ckan-{0}'.format(self.ckan_version)
216 else:
217 self.ckan_doc_version = 'latest'
218
219 # process the config details to set globals
220 for key in app_globals_from_config_details.keys():
221 new_key, value = process_app_global(key, config.get(key) or '')
222 setattr(self, new_key, value)
223
224
225 app_globals = _Globals()
226 del _Globals
227
[end of ckan/lib/app_globals.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ckan/lib/app_globals.py b/ckan/lib/app_globals.py
--- a/ckan/lib/app_globals.py
+++ b/ckan/lib/app_globals.py
@@ -42,7 +42,6 @@
# has been setup in load_environment():
'ckan.site_id': {},
'ckan.recaptcha.publickey': {'name': 'recaptcha_publickey'},
- 'ckan.recaptcha.version': {'name': 'recaptcha_version', 'default': '1'},
'ckan.template_title_deliminater': {'default': '-'},
'ckan.template_head_end': {},
'ckan.template_footer_end': {},
diff --git a/ckan/lib/captcha.py b/ckan/lib/captcha.py
--- a/ckan/lib/captcha.py
+++ b/ckan/lib/captcha.py
@@ -13,48 +13,29 @@
if not recaptcha_private_key:
# Recaptcha not enabled
return
-
+
client_ip_address = request.environ.get('REMOTE_ADDR', 'Unknown IP Address')
-
- recaptcha_version = config.get('ckan.recaptcha.version', '1')
- if recaptcha_version is '1':
- recaptcha_response_field = request.params.get('recaptcha_response_field', '')
- recaptcha_server_name = 'http://api-verify.recaptcha.net/verify'
- recaptcha_challenge_field = request.params.get('recaptcha_challenge_field')
-
- # recaptcha_response_field will be unicode if there are foreign chars in
- # the user input. So we need to encode it as utf8 before urlencoding or
- # we get an exception (#1431).
- params = urllib.urlencode(dict(privatekey=recaptcha_private_key,
- remoteip=client_ip_address,
- challenge=recaptcha_challenge_field,
- response=recaptcha_response_field.encode('utf8')))
- f = urllib2.urlopen(recaptcha_server_name, params)
- data = f.read()
- f.close()
-
- if not data.lower().startswith('true'):
- raise CaptchaError()
- elif recaptcha_version is '2':
- recaptcha_response_field = request.params.get('g-recaptcha-response', '')
- recaptcha_server_name = 'https://www.google.com/recaptcha/api/siteverify'
-
- # recaptcha_response_field will be unicode if there are foreign chars in
- # the user input. So we need to encode it as utf8 before urlencoding or
- # we get an exception (#1431).
- params = urllib.urlencode(dict(secret=recaptcha_private_key,
- remoteip=client_ip_address,
- response=recaptcha_response_field.encode('utf8')))
- f = urllib2.urlopen(recaptcha_server_name, params)
- data = json.load(f)
- f.close()
-
- try:
- if not data['success']:
- raise CaptchaError()
- except IndexError:
- # Something weird with recaptcha response
+
+ # reCAPTCHA v2
+ recaptcha_response_field = request.params.get('g-recaptcha-response', '')
+ recaptcha_server_name = 'https://www.google.com/recaptcha/api/siteverify'
+
+ # recaptcha_response_field will be unicode if there are foreign chars in
+ # the user input. So we need to encode it as utf8 before urlencoding or
+ # we get an exception (#1431).
+ params = urllib.urlencode(dict(secret=recaptcha_private_key,
+ remoteip=client_ip_address,
+ response=recaptcha_response_field.encode('utf8')))
+ f = urllib2.urlopen(recaptcha_server_name, params)
+ data = json.load(f)
+ f.close()
+
+ try:
+ if not data['success']:
raise CaptchaError()
+ except IndexError:
+ # Something weird with recaptcha response
+ raise CaptchaError()
class CaptchaError(ValueError):
- pass
\ No newline at end of file
+ pass
| {"golden_diff": "diff --git a/ckan/lib/app_globals.py b/ckan/lib/app_globals.py\n--- a/ckan/lib/app_globals.py\n+++ b/ckan/lib/app_globals.py\n@@ -42,7 +42,6 @@\n # has been setup in load_environment():\n 'ckan.site_id': {},\n 'ckan.recaptcha.publickey': {'name': 'recaptcha_publickey'},\n- 'ckan.recaptcha.version': {'name': 'recaptcha_version', 'default': '1'},\n 'ckan.template_title_deliminater': {'default': '-'},\n 'ckan.template_head_end': {},\n 'ckan.template_footer_end': {},\ndiff --git a/ckan/lib/captcha.py b/ckan/lib/captcha.py\n--- a/ckan/lib/captcha.py\n+++ b/ckan/lib/captcha.py\n@@ -13,48 +13,29 @@\n if not recaptcha_private_key:\n # Recaptcha not enabled\n return\n- \n+\n client_ip_address = request.environ.get('REMOTE_ADDR', 'Unknown IP Address')\n- \n- recaptcha_version = config.get('ckan.recaptcha.version', '1')\n- if recaptcha_version is '1':\n- recaptcha_response_field = request.params.get('recaptcha_response_field', '')\n- recaptcha_server_name = 'http://api-verify.recaptcha.net/verify'\n- recaptcha_challenge_field = request.params.get('recaptcha_challenge_field')\n-\n- # recaptcha_response_field will be unicode if there are foreign chars in\n- # the user input. So we need to encode it as utf8 before urlencoding or\n- # we get an exception (#1431).\n- params = urllib.urlencode(dict(privatekey=recaptcha_private_key,\n- remoteip=client_ip_address,\n- challenge=recaptcha_challenge_field,\n- response=recaptcha_response_field.encode('utf8')))\n- f = urllib2.urlopen(recaptcha_server_name, params)\n- data = f.read()\n- f.close()\n- \n- if not data.lower().startswith('true'):\n- raise CaptchaError()\n- elif recaptcha_version is '2':\n- recaptcha_response_field = request.params.get('g-recaptcha-response', '')\n- recaptcha_server_name = 'https://www.google.com/recaptcha/api/siteverify'\n-\n- # recaptcha_response_field will be unicode if there are foreign chars in\n- # the user input. So we need to encode it as utf8 before urlencoding or\n- # we get an exception (#1431).\n- params = urllib.urlencode(dict(secret=recaptcha_private_key,\n- remoteip=client_ip_address,\n- response=recaptcha_response_field.encode('utf8')))\n- f = urllib2.urlopen(recaptcha_server_name, params)\n- data = json.load(f) \n- f.close()\n- \n- try:\n- if not data['success']:\n- raise CaptchaError()\n- except IndexError:\n- # Something weird with recaptcha response\n+\n+ # reCAPTCHA v2\n+ recaptcha_response_field = request.params.get('g-recaptcha-response', '')\n+ recaptcha_server_name = 'https://www.google.com/recaptcha/api/siteverify'\n+\n+ # recaptcha_response_field will be unicode if there are foreign chars in\n+ # the user input. 
So we need to encode it as utf8 before urlencoding or\n+ # we get an exception (#1431).\n+ params = urllib.urlencode(dict(secret=recaptcha_private_key,\n+ remoteip=client_ip_address,\n+ response=recaptcha_response_field.encode('utf8')))\n+ f = urllib2.urlopen(recaptcha_server_name, params)\n+ data = json.load(f)\n+ f.close()\n+\n+ try:\n+ if not data['success']:\n raise CaptchaError()\n+ except IndexError:\n+ # Something weird with recaptcha response\n+ raise CaptchaError()\n \n class CaptchaError(ValueError):\n- pass\n\\ No newline at end of file\n+ pass\n", "issue": "recaptcha v1 will stop working 2018-3-31\n### CKAN Version if known (or site URL)\r\nAll since 2.0ish\r\n\r\n### The problem\r\nUsers will not be able to register, due to the re-captcha being switched off by Google on 2018-3-31.\r\n\r\nGoogle's deprecation info: https://developers.google.com/recaptcha/docs/versions#v1\r\n\r\nThis affects those sites that:\r\n* have setup recaptcha (i.e. registered with Google and [added their keys to the CKAN config](http://docs.ckan.org/en/latest/maintaining/configuration.html?highlight=captcha#ckan-recaptcha-publickey)). This is not part of the default CKAN install. Most installs only use the 'user' functionality for admins, so the impact should be limited.\r\n* AND they use v1 of recaptcha. This is the default in the CKAN config, but Google deprecated v1 May 2016 and prevented new sites using it (I imagine it was the same time), so the issue only affects sites set-up before then.\r\n\r\n### How to check if you have this problem\r\nShow the relevant bits of your CKAN config:\r\n```\r\ngrep recaptcha /etc/ckan/default/production.ini\r\n```\r\nIF `ckan.recaptcha.publickey` has a value (i.e. not blank or unset)\r\nAND (`ckan.recaptcha.version = 1` OR `ckan.recaptcha.version` is blank or unset)\r\nTHEN you need to upgrade to recaptcha 2 before 2018-03-31.\r\n\r\n### Action\r\nI think we should change the default to v2 and warn the community.\n", "before_files": [{"content": "# encoding: utf-8\n\nfrom ckan.common import config\n\nimport urllib\nimport urllib2\nimport json\n\ndef check_recaptcha(request):\n '''Check a user\\'s recaptcha submission is valid, and raise CaptchaError\n on failure.'''\n recaptcha_private_key = config.get('ckan.recaptcha.privatekey', '')\n if not recaptcha_private_key:\n # Recaptcha not enabled\n return\n \n client_ip_address = request.environ.get('REMOTE_ADDR', 'Unknown IP Address')\n \n recaptcha_version = config.get('ckan.recaptcha.version', '1')\n if recaptcha_version is '1':\n recaptcha_response_field = request.params.get('recaptcha_response_field', '')\n recaptcha_server_name = 'http://api-verify.recaptcha.net/verify'\n recaptcha_challenge_field = request.params.get('recaptcha_challenge_field')\n\n # recaptcha_response_field will be unicode if there are foreign chars in\n # the user input. 
So we need to encode it as utf8 before urlencoding or\n # we get an exception (#1431).\n params = urllib.urlencode(dict(privatekey=recaptcha_private_key,\n remoteip=client_ip_address,\n challenge=recaptcha_challenge_field,\n response=recaptcha_response_field.encode('utf8')))\n f = urllib2.urlopen(recaptcha_server_name, params)\n data = f.read()\n f.close()\n \n if not data.lower().startswith('true'):\n raise CaptchaError()\n elif recaptcha_version is '2':\n recaptcha_response_field = request.params.get('g-recaptcha-response', '')\n recaptcha_server_name = 'https://www.google.com/recaptcha/api/siteverify'\n\n # recaptcha_response_field will be unicode if there are foreign chars in\n # the user input. So we need to encode it as utf8 before urlencoding or\n # we get an exception (#1431).\n params = urllib.urlencode(dict(secret=recaptcha_private_key,\n remoteip=client_ip_address,\n response=recaptcha_response_field.encode('utf8')))\n f = urllib2.urlopen(recaptcha_server_name, params)\n data = json.load(f) \n f.close()\n \n try:\n if not data['success']:\n raise CaptchaError()\n except IndexError:\n # Something weird with recaptcha response\n raise CaptchaError()\n\nclass CaptchaError(ValueError):\n pass", "path": "ckan/lib/captcha.py"}, {"content": "# encoding: utf-8\n\n''' The application's Globals object '''\n\nimport logging\nimport time\nfrom threading import Lock\nimport re\n\nfrom paste.deploy.converters import asbool\nfrom ckan.common import config\n\nimport ckan\nimport ckan.model as model\nimport ckan.logic as logic\nfrom logic.schema import update_configuration_schema\n\n\nlog = logging.getLogger(__name__)\n\n\n# mappings translate between config settings and globals because our naming\n# conventions are not well defined and/or implemented\nmappings = {\n# 'config_key': 'globals_key',\n}\n\n\n# This mapping is only used to define the configuration options (from the\n# `config` object) that should be copied to the `app_globals` (`g`) object.\napp_globals_from_config_details = {\n 'ckan.site_title': {},\n 'ckan.site_logo': {},\n 'ckan.site_url': {},\n 'ckan.site_description': {},\n 'ckan.site_about': {},\n 'ckan.site_intro_text': {},\n 'ckan.site_custom_css': {},\n 'ckan.favicon': {}, # default gets set in config.environment.py\n 'ckan.template_head_end': {},\n 'ckan.template_footer_end': {},\n # has been setup in load_environment():\n 'ckan.site_id': {},\n 'ckan.recaptcha.publickey': {'name': 'recaptcha_publickey'},\n 'ckan.recaptcha.version': {'name': 'recaptcha_version', 'default': '1'},\n 'ckan.template_title_deliminater': {'default': '-'},\n 'ckan.template_head_end': {},\n 'ckan.template_footer_end': {},\n 'ckan.dumps_url': {},\n 'ckan.dumps_format': {},\n 'ofs.impl': {'name': 'ofs_impl'},\n 'ckan.homepage_style': {'default': '1'},\n\n # split string\n 'search.facets': {'default': 'organization groups tags res_format license_id',\n 'type': 'split',\n 'name': 'facets'},\n 'package_hide_extras': {'type': 'split'},\n 'ckan.plugins': {'type': 'split'},\n\n # bool\n 'debug': {'default': 'false', 'type' : 'bool'},\n 'ckan.debug_supress_header' : {'default': 'false', 'type' : 'bool'},\n 'ckan.legacy_templates' : {'default': 'false', 'type' : 'bool'},\n 'ckan.tracking_enabled' : {'default': 'false', 'type' : 'bool'},\n\n # int\n 'ckan.datasets_per_page': {'default': '20', 'type': 'int'},\n 'ckan.activity_list_limit': {'default': '30', 'type': 'int'},\n 'ckan.user_list_limit': {'default': '20', 'type': 'int'},\n 'search.facets.default': {'default': '10', 'type': 'int',\n 'name': 
'facets_default_number'},\n}\n\n\n# A place to store the origional config options of we override them\n_CONFIG_CACHE = {}\n\ndef set_main_css(css_file):\n ''' Sets the main_css. The css_file must be of the form file.css '''\n assert css_file.endswith('.css')\n new_css = css_file\n # FIXME we should check the css file exists\n app_globals.main_css = str(new_css)\n\n\ndef set_app_global(key, value):\n '''\n Set a new key on the app_globals (g) object\n\n It will process the value according to the options on\n app_globals_from_config_details (if any)\n '''\n key, value = process_app_global(key, value)\n setattr(app_globals, key, value)\n\n\ndef process_app_global(key, value):\n '''\n Tweak a key, value pair meant to be set on the app_globals (g) object\n\n According to the options on app_globals_from_config_details (if any)\n '''\n options = app_globals_from_config_details.get(key)\n key = get_globals_key(key)\n if options:\n if 'name' in options:\n key = options['name']\n value = value or options.get('default', '')\n\n data_type = options.get('type')\n if data_type == 'bool':\n value = asbool(value)\n elif data_type == 'int':\n value = int(value)\n elif data_type == 'split':\n value = value.split()\n\n return key, value\n\n\ndef get_globals_key(key):\n # create our globals key\n # these can be specified in mappings or else we remove\n # the `ckan.` part this is to keep the existing namings\n # set the value\n if key in mappings:\n return mappings[key]\n elif key.startswith('ckan.'):\n return key[5:]\n else:\n return key\n\n\ndef reset():\n ''' set updatable values from config '''\n def get_config_value(key, default=''):\n if model.meta.engine.has_table('system_info'):\n value = model.get_system_info(key)\n else:\n value = None\n config_value = config.get(key)\n # sort encodeings if needed\n if isinstance(config_value, str):\n try:\n config_value = config_value.decode('utf-8')\n except UnicodeDecodeError:\n config_value = config_value.decode('latin-1')\n # we want to store the config the first time we get here so we can\n # reset them if needed\n if key not in _CONFIG_CACHE:\n _CONFIG_CACHE[key] = config_value\n if value is not None:\n log.debug('config `%s` set to `%s` from db' % (key, value))\n else:\n value = _CONFIG_CACHE[key]\n if value:\n log.debug('config `%s` set to `%s` from config' % (key, value))\n else:\n value = default\n\n set_app_global(key, value)\n\n # update the config\n config[key] = value\n\n return value\n\n # update the config settings in auto update\n schema = update_configuration_schema()\n for key in schema.keys():\n get_config_value(key)\n\n # custom styling\n main_css = get_config_value('ckan.main_css', '/base/css/main.css')\n set_main_css(main_css)\n\n if app_globals.site_logo:\n app_globals.header_class = 'header-image'\n elif not app_globals.site_description:\n app_globals.header_class = 'header-text-logo'\n else:\n app_globals.header_class = 'header-text-logo-tagline'\n\n\nclass _Globals(object):\n\n ''' Globals acts as a container for objects available throughout the\n life of the application. 
'''\n\n def __init__(self):\n '''One instance of Globals is created during application\n initialization and is available during requests via the\n 'app_globals' variable\n '''\n self._init()\n self._config_update = None\n self._mutex = Lock()\n\n def _check_uptodate(self):\n ''' check the config is uptodate needed when several instances are\n running '''\n value = model.get_system_info('ckan.config_update')\n if self._config_update != value:\n if self._mutex.acquire(False):\n reset()\n self._config_update = value\n self._mutex.release()\n\n def _init(self):\n\n self.ckan_version = ckan.__version__\n self.ckan_base_version = re.sub('[^0-9\\.]', '', self.ckan_version)\n if self.ckan_base_version == self.ckan_version:\n self.ckan_doc_version = 'ckan-{0}'.format(self.ckan_version)\n else:\n self.ckan_doc_version = 'latest'\n\n # process the config details to set globals\n for key in app_globals_from_config_details.keys():\n new_key, value = process_app_global(key, config.get(key) or '')\n setattr(self, new_key, value)\n\n\napp_globals = _Globals()\ndel _Globals\n", "path": "ckan/lib/app_globals.py"}]} | 3,838 | 887 |
gh_patches_debug_48420 | rasdani/github-patches | git_diff | gammapy__gammapy-1622 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cryptic error from MapMaker / make_counts_image
I accidentally typed this:
```python
import astropy.units as u
from gammapy.maps import WcsGeom
from gammapy.cube import MapMaker
from gammapy.data import DataStore
data_store = DataStore.from_dir('$GAMMAPY_EXTRA/datasets/cta-1dc/index/gps/')
obs_id = [110380, 111140, 111159]
obs_list = data_store.obs_list(obs_id)
geom = WcsGeom.create(
skydir=(0, 0),
npix=(800, 600),
binsz=0.02,
coordsys='GAL',
)
maker = MapMaker(geom, offset_max=u.Quantity('2 deg'))
images = maker.run(obs_list)
```
and it blows up with a cryptic error message:
```
$ python temp.py
|===========================================>--------------------------------------------------------------------------------------| 1 / 3 (33.33%) ETA 0sTraceback (most recent call last):
File "temp.py", line 15, in <module>
images = maker.run(obs_list)
File "/Users/deil/work/code/gammapy/gammapy/cube/new.py", line 324, in run
self.process_obs(obs)
File "/Users/deil/work/code/gammapy/gammapy/cube/new.py", line 280, in process_obs
obs.events, cutout_geom, obs.pointing_radec, self.offset_max,
File "/Users/deil/work/code/gammapy/gammapy/cube/new.py", line 79, in make_map_counts
counts_map.data[:, offset_mask] = 0
IndexError: too many indices for array
```
The problem is in `make_map_counts` here:
https://github.com/gammapy/gammapy/blob/a013ff8ac532ab8b15cee95c5da2abb8937bde9c/gammapy/cube/new.py#L79
It doesn't work for 2D images.
There are other obvious issues one encounters when making maps, e.g. replacing `offset_max=u.Quantity('2 deg')` with `offset_max='2 deg'` above gives another cryptic error, because the mapmaker just does `self.offset_max = offset_max` but should do `self.offset_max = Angle(offset_max)` to be kind to users.
The solution is to rewrite the functions in `new.py` to take a mask instead of a max offset, and to improve their test coverage, e.g. also trying to run them on a 2D geom (and either succeed, or error out with a good error message).
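A minimal sketch of what a dimension-agnostic offset cut could look like, built only from pieces already visible in the code below (`geom.separation`, `Angle`); the helper name and its placement are illustrative, not the actual gammapy API:
```python
from astropy.coordinates import Angle


def apply_offset_cut(skymap, pointing, offset_max):
    """Zero out pixels beyond offset_max; works for 2D and 3D maps alike."""
    # geom.separation gives the 2D spatial offset image, as in the existing code
    offset = skymap.geom.separation(pointing)
    mask = offset >= Angle(offset_max)  # Angle() also accepts strings like '2 deg'
    # '...' broadcasts the spatial mask over any leading (e.g. energy) axes,
    # so this works whether skymap.data is (ny, nx) or (n_energy, ny, nx)
    skymap.data[..., mask] = 0
    return skymap
```
The other acceptable outcome mentioned above, erroring out early with a clear message for image-only geometries, is what the patch shown further down does.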
I consider this high priority, we should do that tomorrow.
@registerrier - you or me?
</issue>
<code>
[start of gammapy/cube/make.py]
1 # Licensed under a 3-clause BSD style license - see LICENSE.rst
2 from __future__ import absolute_import, division, print_function, unicode_literals
3 import logging
4 from astropy.utils.console import ProgressBar
5 from astropy.nddata.utils import PartialOverlapError
6 from astropy.coordinates import Angle
7 from ..maps import WcsNDMap
8 from .counts import make_map_counts
9 from .exposure import make_map_exposure_true_energy
10 from .background import make_map_background_irf, make_map_background_fov
11
12 __all__ = [
13 'MapMaker',
14 ]
15
16 log = logging.getLogger(__name__)
17
18
19 class MapMaker(object):
20 """Make all basic maps from observations.
21
22 Parameters
23 ----------
24 geom : `~gammapy.maps.WcsGeom`
25 Reference image geometry
26 offset_max : `~astropy.coordinates.Angle`
27 Maximum offset angle
28 cutout_mode : {'trim', 'strict'}, optional
29 Options for making cutouts, see :func: `~gammapy.maps.WcsNDMap.make_cutout`
30 Should be left to the default value 'trim'
31 unless you want only fully contained observations to be added to the map
32 """
33
34 def __init__(self, geom, offset_max, cutout_mode="trim"):
35 self.geom = geom
36 self.offset_max = Angle(offset_max)
37
38 # We instantiate the end products of the MakeMaps class
39 self.counts_map = WcsNDMap(self.geom)
40
41 self.exposure_map = WcsNDMap(self.geom, unit="m2 s")
42
43 self.background_map = WcsNDMap(self.geom)
44
45 # We will need this general exclusion mask for the analysis
46 self.exclusion_map = WcsNDMap(self.geom)
47 self.exclusion_map.data += 1
48
49 self.cutout_mode = cutout_mode
50 self.maps = {}
51
52 def process_obs(self, obs):
53 """Process one observation and add it to the cutout image
54
55 Parameters
56 ----------
57 obs : `~gammapy.data.DataStoreObservation`
58 Observation
59 """
60 # First make cutout of the global image
61 try:
62 exclusion_mask_cutout, cutout_slices = self.exclusion_map.make_cutout(
63 obs.pointing_radec, 2 * self.offset_max, mode=self.cutout_mode
64 )
65 except PartialOverlapError:
66 # TODO: can we silently do the right thing here? Discuss
67 log.info("Observation {} not fully contained in target image. Skipping it.".format(obs.obs_id))
68 return
69
70 cutout_geom = exclusion_mask_cutout.geom
71
72 offset = exclusion_mask_cutout.geom.separation(obs.pointing_radec)
73 offset_mask = offset >= self.offset_max
74
75 counts_obs_map = make_map_counts(obs.events, cutout_geom)
76 counts_obs_map.data[:, offset_mask] = 0
77
78 expo_obs_map = make_map_exposure_true_energy(
79 obs.pointing_radec, obs.observation_live_time_duration,
80 obs.aeff, cutout_geom
81 )
82 expo_obs_map.data[:, offset_mask] = 0
83
84 acceptance_obs_map = make_map_background_irf(
85 obs.pointing_radec, obs.observation_live_time_duration,
86 obs.bkg, cutout_geom
87 )
88 acceptance_obs_map.data[:, offset_mask] = 0
89
90 background_obs_map = make_map_background_fov(
91 acceptance_obs_map, counts_obs_map, exclusion_mask_cutout,
92 )
93 background_obs_map.data[:, offset_mask] = 0
94
95 self._add_cutouts(cutout_slices, counts_obs_map, expo_obs_map, background_obs_map)
96
97 def _add_cutouts(self, cutout_slices, counts_obs_map, expo_obs_map, acceptance_obs_map):
98 """Add current cutout to global maps."""
99 self.counts_map.data[cutout_slices] += counts_obs_map.data
100 self.exposure_map.data[cutout_slices] += expo_obs_map.quantity.to(self.exposure_map.unit).value
101 self.background_map.data[cutout_slices] += acceptance_obs_map.data
102
103 def run(self, obs_list):
104 """
105 Run MapMaker for a list of observations to create
106 stacked counts, exposure and background maps
107
108 Parameters
109 --------------
110 obs_list: `~gammapy.data.ObservationList`
111 List of observations
112
113 Returns
114 -----------
115 maps: dict of stacked counts, background and exposure maps.
116 """
117 for obs in ProgressBar(obs_list):
118 self.process_obs(obs)
119
120 self.maps = {
121 'counts_map': self.counts_map,
122 'background_map': self.background_map,
123 'exposure_map': self.exposure_map
124 }
125 return self.maps
126
[end of gammapy/cube/make.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gammapy/cube/make.py b/gammapy/cube/make.py
--- a/gammapy/cube/make.py
+++ b/gammapy/cube/make.py
@@ -32,6 +32,9 @@
"""
def __init__(self, geom, offset_max, cutout_mode="trim"):
+ if geom.is_image:
+ raise ValueError('MapMaker only works with geom with an energy axis')
+
self.geom = geom
self.offset_max = Angle(offset_max)
| {"golden_diff": "diff --git a/gammapy/cube/make.py b/gammapy/cube/make.py\n--- a/gammapy/cube/make.py\n+++ b/gammapy/cube/make.py\n@@ -32,6 +32,9 @@\n \"\"\"\n \n def __init__(self, geom, offset_max, cutout_mode=\"trim\"):\n+ if geom.is_image:\n+ raise ValueError('MapMaker only works with geom with an energy axis')\n+\n self.geom = geom\n self.offset_max = Angle(offset_max)\n", "issue": "Cryptic error from MapMaker / make_counts_image\nI accidentally typed this:\r\n```python\r\nimport astropy.units as u\r\nfrom gammapy.maps import WcsGeom\r\nfrom gammapy.cube import MapMaker\r\nfrom gammapy.data import DataStore\r\ndata_store = DataStore.from_dir('$GAMMAPY_EXTRA/datasets/cta-1dc/index/gps/')\r\nobs_id = [110380, 111140, 111159]\r\nobs_list = data_store.obs_list(obs_id)\r\ngeom = WcsGeom.create(\r\n skydir=(0, 0),\r\n npix=(800, 600),\r\n binsz=0.02,\r\n coordsys='GAL',\r\n)\r\nmaker = MapMaker(geom, offset_max=u.Quantity('2 deg'))\r\nimages = maker.run(obs_list)\r\n```\r\nand it blows up with a cryptic error message:\r\n```\r\n$ python temp.py \r\n|===========================================>--------------------------------------------------------------------------------------| 1 / 3 (33.33%) ETA 0sTraceback (most recent call last):\r\n File \"temp.py\", line 15, in <module>\r\n images = maker.run(obs_list)\r\n File \"/Users/deil/work/code/gammapy/gammapy/cube/new.py\", line 324, in run\r\n self.process_obs(obs)\r\n File \"/Users/deil/work/code/gammapy/gammapy/cube/new.py\", line 280, in process_obs\r\n obs.events, cutout_geom, obs.pointing_radec, self.offset_max,\r\n File \"/Users/deil/work/code/gammapy/gammapy/cube/new.py\", line 79, in make_map_counts\r\n counts_map.data[:, offset_mask] = 0\r\nIndexError: too many indices for array\r\n```\r\n\r\nThe problem is in `make_map_counts` here:\r\nhttps://github.com/gammapy/gammapy/blob/a013ff8ac532ab8b15cee95c5da2abb8937bde9c/gammapy/cube/new.py#L79\r\n\r\nIt doesn't work for 2D images.\r\n\r\nThere's other obvious issues one encounters when making maps, e.g. replacing `offset_max=u.Quantity('2 deg')` with `offset_max='2 deg'` above gives another cryptic error, because the mapmaker just does `self.offset_max = offset_max` but should do `self.offset_max = Angle(offset_max)` to be kind to users.\r\n\r\nThe solution is to rewrite the functions in `new.py` to take a mask instead of a max offset, and to improve their test coverage, e.g. 
also trying to run them on a 2D geom (and either succeed, or error out with a good error message).\r\n\r\nI consider this high priority, we should do that tomorrow.\r\n\r\n@registerrier - you or me?\n", "before_files": [{"content": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport logging\nfrom astropy.utils.console import ProgressBar\nfrom astropy.nddata.utils import PartialOverlapError\nfrom astropy.coordinates import Angle\nfrom ..maps import WcsNDMap\nfrom .counts import make_map_counts\nfrom .exposure import make_map_exposure_true_energy\nfrom .background import make_map_background_irf, make_map_background_fov\n\n__all__ = [\n 'MapMaker',\n]\n\nlog = logging.getLogger(__name__)\n\n\nclass MapMaker(object):\n \"\"\"Make all basic maps from observations.\n\n Parameters\n ----------\n geom : `~gammapy.maps.WcsGeom`\n Reference image geometry\n offset_max : `~astropy.coordinates.Angle`\n Maximum offset angle\n cutout_mode : {'trim', 'strict'}, optional\n Options for making cutouts, see :func: `~gammapy.maps.WcsNDMap.make_cutout`\n Should be left to the default value 'trim'\n unless you want only fully contained observations to be added to the map\n \"\"\"\n\n def __init__(self, geom, offset_max, cutout_mode=\"trim\"):\n self.geom = geom\n self.offset_max = Angle(offset_max)\n\n # We instantiate the end products of the MakeMaps class\n self.counts_map = WcsNDMap(self.geom)\n\n self.exposure_map = WcsNDMap(self.geom, unit=\"m2 s\")\n\n self.background_map = WcsNDMap(self.geom)\n\n # We will need this general exclusion mask for the analysis\n self.exclusion_map = WcsNDMap(self.geom)\n self.exclusion_map.data += 1\n\n self.cutout_mode = cutout_mode\n self.maps = {}\n\n def process_obs(self, obs):\n \"\"\"Process one observation and add it to the cutout image\n\n Parameters\n ----------\n obs : `~gammapy.data.DataStoreObservation`\n Observation\n \"\"\"\n # First make cutout of the global image\n try:\n exclusion_mask_cutout, cutout_slices = self.exclusion_map.make_cutout(\n obs.pointing_radec, 2 * self.offset_max, mode=self.cutout_mode\n )\n except PartialOverlapError:\n # TODO: can we silently do the right thing here? Discuss\n log.info(\"Observation {} not fully contained in target image. 
Skipping it.\".format(obs.obs_id))\n return\n\n cutout_geom = exclusion_mask_cutout.geom\n\n offset = exclusion_mask_cutout.geom.separation(obs.pointing_radec)\n offset_mask = offset >= self.offset_max\n\n counts_obs_map = make_map_counts(obs.events, cutout_geom)\n counts_obs_map.data[:, offset_mask] = 0\n\n expo_obs_map = make_map_exposure_true_energy(\n obs.pointing_radec, obs.observation_live_time_duration,\n obs.aeff, cutout_geom\n )\n expo_obs_map.data[:, offset_mask] = 0\n\n acceptance_obs_map = make_map_background_irf(\n obs.pointing_radec, obs.observation_live_time_duration,\n obs.bkg, cutout_geom\n )\n acceptance_obs_map.data[:, offset_mask] = 0\n\n background_obs_map = make_map_background_fov(\n acceptance_obs_map, counts_obs_map, exclusion_mask_cutout,\n )\n background_obs_map.data[:, offset_mask] = 0\n\n self._add_cutouts(cutout_slices, counts_obs_map, expo_obs_map, background_obs_map)\n\n def _add_cutouts(self, cutout_slices, counts_obs_map, expo_obs_map, acceptance_obs_map):\n \"\"\"Add current cutout to global maps.\"\"\"\n self.counts_map.data[cutout_slices] += counts_obs_map.data\n self.exposure_map.data[cutout_slices] += expo_obs_map.quantity.to(self.exposure_map.unit).value\n self.background_map.data[cutout_slices] += acceptance_obs_map.data\n\n def run(self, obs_list):\n \"\"\"\n Run MapMaker for a list of observations to create\n stacked counts, exposure and background maps\n\n Parameters\n --------------\n obs_list: `~gammapy.data.ObservationList`\n List of observations\n\n Returns\n -----------\n maps: dict of stacked counts, background and exposure maps.\n \"\"\"\n for obs in ProgressBar(obs_list):\n self.process_obs(obs)\n\n self.maps = {\n 'counts_map': self.counts_map,\n 'background_map': self.background_map,\n 'exposure_map': self.exposure_map\n }\n return self.maps\n", "path": "gammapy/cube/make.py"}]} | 2,450 | 120 |
gh_patches_debug_32068 | rasdani/github-patches | git_diff | conan-io__conan-center-index-16242 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[package] libudev/system: Fails build for conan 2.0
### Description
libudev/system fails to download or build with conan 2.0 installed. It needs an update to use the conan 2.0 tools API, as it currently depends on conan 1.x code.
### Package and Environment Details
* Package Name/Version: **libudev/system**
* Operating System+version: **Linux Ubuntu 20.04**
### Conan profile
[settings]
arch=x86_64
build_type=Release
compiler=gcc
compiler.cppstd=gnu17
compiler.libcxx=libstdc++11
compiler.version=9
os=Linux
### Steps to reproduce
conan download -r conancenter libudev/system@
### Logs
ERROR: Error loading conanfile at '/home/tbitz/.conan2/p/libudadcb0d08572c6/e/conanfile.py': Unable to load conanfile in /home/tbitz/.conan2/p/libudadcb0d08572c6/e/conanfile.py
File "<frozen importlib._bootstrap_external>", line 848, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "/home/tbitz/.conan2/p/libudadcb0d08572c6/e/conanfile.py", line 4, in <module>
from conans import tools
ImportError: cannot import name 'tools' from 'conans' (/home/tbitz/.local/lib/python3.8/site-packages/conans/__init__.py)
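For reference, a hedged sketch of the conan 2.x-style replacement for the 1.x `tools.PkgConfig` import that triggers the error above, using `conan.tools.gnu.PkgConfig` and its `fill_cpp_info` helper (details beyond what this report and the current recipe show are assumptions):

```python
from conan import ConanFile
from conan.tools.gnu import PkgConfig  # conan 2.x home of the pkg-config helper


class LibUDEVConan(ConanFile):
    name = "libudev"
    version = "system"
    settings = "os", "arch", "compiler", "build_type"

    def package_info(self):
        self.cpp_info.includedirs = []
        self.cpp_info.libdirs = []
        # Read libs, link flags, defines and include dirs from the system's
        # libudev.pc instead of the removed conans.tools PkgConfig wrapper
        pkg_config = PkgConfig(self, "libudev")
        pkg_config.fill_cpp_info(self.cpp_info)
```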
</issue>
<code>
[start of recipes/libudev/all/conanfile.py]
1 from conan import ConanFile
2 from conan.errors import ConanException, ConanInvalidConfiguration
3 from conan.tools.system import package_manager
4 from conans import tools
5
6 required_conan_version = ">=1.47"
7
8
9 class LibUDEVConan(ConanFile):
10 name = "libudev"
11 version = "system"
12 description = "API for enumerating and introspecting local devices"
13 topics = ("udev", "devices", "enumerating")
14 url = "https://github.com/conan-io/conan-center-index"
15 homepage = "https://www.freedesktop.org/software/systemd/man/udev.html"
16 license = "GPL-2.0-or-later", "LGPL-2.1-or-later"
17 settings = "os", "arch", "compiler", "build_type"
18
19 def validate(self):
20 if self.settings.os != "Linux":
21 raise ConanInvalidConfiguration("libudev is only supported on Linux.")
22
23 def package_id(self):
24 self.info.header_only()
25
26 def _fill_cppinfo_from_pkgconfig(self, name):
27 pkg_config = tools.PkgConfig(name)
28 if not pkg_config.provides:
29 raise ConanException("libudev development files aren't available, give up")
30 libs = [lib[2:] for lib in pkg_config.libs_only_l]
31 lib_dirs = [lib[2:] for lib in pkg_config.libs_only_L]
32 ldflags = [flag for flag in pkg_config.libs_only_other]
33 include_dirs = [include[2:] for include in pkg_config.cflags_only_I]
34 cflags = [flag for flag in pkg_config.cflags_only_other if not flag.startswith("-D")]
35 defines = [flag[2:] for flag in pkg_config.cflags_only_other if flag.startswith("-D")]
36
37 self.cpp_info.system_libs = libs
38 self.cpp_info.libdirs = lib_dirs
39 self.cpp_info.sharedlinkflags = ldflags
40 self.cpp_info.exelinkflags = ldflags
41 self.cpp_info.defines = defines
42 self.cpp_info.includedirs = include_dirs
43 self.cpp_info.cflags = cflags
44 self.cpp_info.cxxflags = cflags
45
46 def system_requirements(self):
47 dnf = package_manager.Dnf(self)
48 dnf.install(["systemd-devel"], update=True, check=True)
49
50 yum = package_manager.Yum(self)
51 yum.install(["systemd-devel"], update=True, check=True)
52
53 apt = package_manager.Apt(self)
54 apt.install(["libudev-dev"], update=True, check=True)
55
56 pacman = package_manager.PacMan(self)
57 pacman.install(["systemd-libs"], update=True, check=True)
58
59 zypper = package_manager.Zypper(self)
60 zypper.install(["libudev-devel"], update=True, check=True)
61
62 def package_info(self):
63 self.cpp_info.includedirs = []
64 self.cpp_info.libdirs = []
65 self._fill_cppinfo_from_pkgconfig("libudev")
66
[end of recipes/libudev/all/conanfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/recipes/libudev/all/conanfile.py b/recipes/libudev/all/conanfile.py
--- a/recipes/libudev/all/conanfile.py
+++ b/recipes/libudev/all/conanfile.py
@@ -1,7 +1,7 @@
from conan import ConanFile
-from conan.errors import ConanException, ConanInvalidConfiguration
+from conan.errors import ConanInvalidConfiguration
from conan.tools.system import package_manager
-from conans import tools
+from conan.tools.gnu import PkgConfig
required_conan_version = ">=1.47"
@@ -21,27 +21,7 @@
raise ConanInvalidConfiguration("libudev is only supported on Linux.")
def package_id(self):
- self.info.header_only()
-
- def _fill_cppinfo_from_pkgconfig(self, name):
- pkg_config = tools.PkgConfig(name)
- if not pkg_config.provides:
- raise ConanException("libudev development files aren't available, give up")
- libs = [lib[2:] for lib in pkg_config.libs_only_l]
- lib_dirs = [lib[2:] for lib in pkg_config.libs_only_L]
- ldflags = [flag for flag in pkg_config.libs_only_other]
- include_dirs = [include[2:] for include in pkg_config.cflags_only_I]
- cflags = [flag for flag in pkg_config.cflags_only_other if not flag.startswith("-D")]
- defines = [flag[2:] for flag in pkg_config.cflags_only_other if flag.startswith("-D")]
-
- self.cpp_info.system_libs = libs
- self.cpp_info.libdirs = lib_dirs
- self.cpp_info.sharedlinkflags = ldflags
- self.cpp_info.exelinkflags = ldflags
- self.cpp_info.defines = defines
- self.cpp_info.includedirs = include_dirs
- self.cpp_info.cflags = cflags
- self.cpp_info.cxxflags = cflags
+ self.info.clear()
def system_requirements(self):
dnf = package_manager.Dnf(self)
@@ -62,4 +42,5 @@
def package_info(self):
self.cpp_info.includedirs = []
self.cpp_info.libdirs = []
- self._fill_cppinfo_from_pkgconfig("libudev")
+ pkg_config = PkgConfig(self, "libudev")
+ pkg_config.fill_cpp_info(self.cpp_info)
| {"golden_diff": "diff --git a/recipes/libudev/all/conanfile.py b/recipes/libudev/all/conanfile.py\n--- a/recipes/libudev/all/conanfile.py\n+++ b/recipes/libudev/all/conanfile.py\n@@ -1,7 +1,7 @@\n from conan import ConanFile\n-from conan.errors import ConanException, ConanInvalidConfiguration\n+from conan.errors import ConanInvalidConfiguration\n from conan.tools.system import package_manager\n-from conans import tools\n+from conan.tools.gnu import PkgConfig\n \n required_conan_version = \">=1.47\"\n \n@@ -21,27 +21,7 @@\n raise ConanInvalidConfiguration(\"libudev is only supported on Linux.\")\n \n def package_id(self):\n- self.info.header_only()\n-\n- def _fill_cppinfo_from_pkgconfig(self, name):\n- pkg_config = tools.PkgConfig(name)\n- if not pkg_config.provides:\n- raise ConanException(\"libudev development files aren't available, give up\")\n- libs = [lib[2:] for lib in pkg_config.libs_only_l]\n- lib_dirs = [lib[2:] for lib in pkg_config.libs_only_L]\n- ldflags = [flag for flag in pkg_config.libs_only_other]\n- include_dirs = [include[2:] for include in pkg_config.cflags_only_I]\n- cflags = [flag for flag in pkg_config.cflags_only_other if not flag.startswith(\"-D\")]\n- defines = [flag[2:] for flag in pkg_config.cflags_only_other if flag.startswith(\"-D\")]\n-\n- self.cpp_info.system_libs = libs\n- self.cpp_info.libdirs = lib_dirs\n- self.cpp_info.sharedlinkflags = ldflags\n- self.cpp_info.exelinkflags = ldflags\n- self.cpp_info.defines = defines\n- self.cpp_info.includedirs = include_dirs\n- self.cpp_info.cflags = cflags\n- self.cpp_info.cxxflags = cflags\n+ self.info.clear()\n \n def system_requirements(self):\n dnf = package_manager.Dnf(self)\n@@ -62,4 +42,5 @@\n def package_info(self):\n self.cpp_info.includedirs = []\n self.cpp_info.libdirs = []\n- self._fill_cppinfo_from_pkgconfig(\"libudev\")\n+ pkg_config = PkgConfig(self, \"libudev\")\n+ pkg_config.fill_cpp_info(self.cpp_info)\n", "issue": "[package] libudev/system: Fails build for conan 2.0\n### Description\n\nlibudev/system fails to download or build with conan 2.0 installed. it needs an update to use conan 2.0 code for conan tools as it currently is dependent on conan 1.x code. 
\n\n### Package and Environment Details\n\n* Package Name/Version: **libudev/system**\r\n* Operating System+version: **Linux Ubuntu 20.04**\r\n\n\n### Conan profile\n\n[settings]\r\narch=x86_64\r\nbuild_type=Release\r\ncompiler=gcc\r\ncompiler.cppstd=gnu17\r\ncompiler.libcxx=libstdc++11\r\ncompiler.version=9\r\nos=Linux\r\n\n\n### Steps to reproduce\n\nconan download -r conancenter libudev/system@\n\n### Logs\n\nERROR: Error loading conanfile at '/home/tbitz/.conan2/p/libudadcb0d08572c6/e/conanfile.py': Unable to load conanfile in /home/tbitz/.conan2/p/libudadcb0d08572c6/e/conanfile.py\r\n File \"<frozen importlib._bootstrap_external>\", line 848, in exec_module\r\n File \"<frozen importlib._bootstrap>\", line 219, in _call_with_frames_removed\r\n File \"/home/tbitz/.conan2/p/libudadcb0d08572c6/e/conanfile.py\", line 4, in <module>\r\n from conans import tools\r\nImportError: cannot import name 'tools' from 'conans' (/home/tbitz/.local/lib/python3.8/site-packages/conans/__init__.py)\r\n\r\n\n", "before_files": [{"content": "from conan import ConanFile\nfrom conan.errors import ConanException, ConanInvalidConfiguration\nfrom conan.tools.system import package_manager\nfrom conans import tools\n\nrequired_conan_version = \">=1.47\"\n\n\nclass LibUDEVConan(ConanFile):\n name = \"libudev\"\n version = \"system\"\n description = \"API for enumerating and introspecting local devices\"\n topics = (\"udev\", \"devices\", \"enumerating\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://www.freedesktop.org/software/systemd/man/udev.html\"\n license = \"GPL-2.0-or-later\", \"LGPL-2.1-or-later\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n\n def validate(self):\n if self.settings.os != \"Linux\":\n raise ConanInvalidConfiguration(\"libudev is only supported on Linux.\")\n\n def package_id(self):\n self.info.header_only()\n\n def _fill_cppinfo_from_pkgconfig(self, name):\n pkg_config = tools.PkgConfig(name)\n if not pkg_config.provides:\n raise ConanException(\"libudev development files aren't available, give up\")\n libs = [lib[2:] for lib in pkg_config.libs_only_l]\n lib_dirs = [lib[2:] for lib in pkg_config.libs_only_L]\n ldflags = [flag for flag in pkg_config.libs_only_other]\n include_dirs = [include[2:] for include in pkg_config.cflags_only_I]\n cflags = [flag for flag in pkg_config.cflags_only_other if not flag.startswith(\"-D\")]\n defines = [flag[2:] for flag in pkg_config.cflags_only_other if flag.startswith(\"-D\")]\n\n self.cpp_info.system_libs = libs\n self.cpp_info.libdirs = lib_dirs\n self.cpp_info.sharedlinkflags = ldflags\n self.cpp_info.exelinkflags = ldflags\n self.cpp_info.defines = defines\n self.cpp_info.includedirs = include_dirs\n self.cpp_info.cflags = cflags\n self.cpp_info.cxxflags = cflags\n\n def system_requirements(self):\n dnf = package_manager.Dnf(self)\n dnf.install([\"systemd-devel\"], update=True, check=True)\n\n yum = package_manager.Yum(self)\n yum.install([\"systemd-devel\"], update=True, check=True)\n\n apt = package_manager.Apt(self)\n apt.install([\"libudev-dev\"], update=True, check=True)\n\n pacman = package_manager.PacMan(self)\n pacman.install([\"systemd-libs\"], update=True, check=True)\n\n zypper = package_manager.Zypper(self)\n zypper.install([\"libudev-devel\"], update=True, check=True)\n\n def package_info(self):\n self.cpp_info.includedirs = []\n self.cpp_info.libdirs = []\n self._fill_cppinfo_from_pkgconfig(\"libudev\")\n", "path": "recipes/libudev/all/conanfile.py"}]} | 1,675 | 529 |
gh_patches_debug_29919 | rasdani/github-patches | git_diff | uccser__cs-unplugged-737 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support RTL/BiDi website layout
Currently, the html attribute `dir` is set to RTL when required, which gets us part of the way there. However, more changes are needed to truly mirror the layout. This essentially boils down to swapping 'left' and 'right' in css rules and html classes in all but a few exceptional cases.
Some good suggestions for how to use `sass`/`scss` features to achieve this are included in this blog: http://matanich.com/2013/09/06/rtl-css-with-sass
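On the Django side, one small prerequisite is that the right-to-left pseudo-language is actually registered as bidirectional, so `LANGUAGE_BIDI` / `get_language_bidi()` report RTL to templates; a minimal hedged sketch against the settings file shown below (the left/right mirroring of the styles themselves still happens in the Sass, as the linked post describes):

```python
from django.conf import global_settings

INCONTEXT_L10N_PSEUDOLANGUAGE_BIDI = "yy-rl"  # as defined in base.py below

# Register the RTL pseudo-language prefix so Django treats it as bidirectional
LANGUAGES_BIDI = global_settings.LANGUAGES_BIDI + [
    INCONTEXT_L10N_PSEUDOLANGUAGE_BIDI.split("-")[0]  # -> "yy"
]
```

Templates then receive `LANGUAGE_BIDI` through the `django.template.context_processors.i18n` entry already listed in that settings file and can emit `dir="rtl"` or direction-aware classes accordingly.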
</issue>
<code>
[start of csunplugged/config/settings/base.py]
1 # -*- coding: utf-8 -*-
2 """
3 Base Django settings for CS Unplugged project.
4
5 For more information on this file, see
6 https://docs.djangoproject.com/en/dev/topics/settings/
7
8 For the full list of settings and their values, see
9 https://docs.djangoproject.com/en/dev/ref/settings/
10 """
11
12 import environ
13 import os.path
14
15 # Add custom languages not provided by Django
16 import django.conf.locale
17 from django.conf import global_settings
18 from django.utils.translation import ugettext_lazy as _
19
20 # cs-unplugged/csunplugged/config/settings/base.py - 3 = csunplugged/
21 ROOT_DIR = environ.Path(__file__) - 3
22
23 # Load operating system environment variables and then prepare to use them
24 env = environ.Env()
25
26 # APP CONFIGURATION
27 # ----------------------------------------------------------------------------
28 DJANGO_APPS = [
29 # Default Django apps:
30 "django.contrib.auth",
31 "django.contrib.contenttypes",
32 "django.contrib.sessions",
33 "django.contrib.messages",
34 "django.contrib.staticfiles",
35 "django.contrib.postgres",
36
37 # Useful template tags
38 "django.contrib.humanize",
39
40 # Admin
41 "django.contrib.admin",
42 ]
43 THIRD_PARTY_APPS = [
44 "django_bootstrap_breadcrumbs",
45 "modeltranslation",
46 ]
47
48 # Apps specific for this project go here.
49 LOCAL_APPS = [
50 "general.apps.GeneralConfig",
51 "topics.apps.TopicsConfig",
52 "resources.apps.ResourcesConfig",
53 ]
54
55 # See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
56 INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
57
58 # MIDDLEWARE CONFIGURATION
59 # ----------------------------------------------------------------------------
60 MIDDLEWARE = [
61 "django.middleware.security.SecurityMiddleware",
62 "django.contrib.sessions.middleware.SessionMiddleware",
63 "django.middleware.locale.LocaleMiddleware",
64 "django.middleware.common.CommonMiddleware",
65 "django.middleware.csrf.CsrfViewMiddleware",
66 "django.contrib.auth.middleware.AuthenticationMiddleware",
67 "django.contrib.messages.middleware.MessageMiddleware",
68 "django.middleware.clickjacking.XFrameOptionsMiddleware",
69 ]
70
71 # DEBUG
72 # ----------------------------------------------------------------------------
73 # See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
74 DEBUG = env.bool("DJANGO_DEBUG", False)
75
76 # FIXTURE CONFIGURATION
77 # ----------------------------------------------------------------------------
78 # See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
79 FIXTURE_DIRS = (
80 str(ROOT_DIR.path("fixtures")),
81 )
82
83 # EMAIL CONFIGURATION
84 # -----------------------------------------------------------------------------
85 # EMAIL_BACKEND = env("DJANGO_EMAIL_BACKEND",
86 # default="django.core.mail.backends.smtp.EmailBackend")
87
88 # MANAGER CONFIGURATION
89 # ----------------------------------------------------------------------------
90 # See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
91 # ADMINS = [
92 # ("University of Canterbury Computer Science Research Group",
93 # "[email protected]"),
94 # ]
95
96 # See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
97 # MANAGERS = ADMINS
98
99 # GENERAL CONFIGURATION
100 # ----------------------------------------------------------------------------
101 # Local time zone for this installation. Choices can be found here:
102 # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
103 # although not all choices may be available on all operating systems.
104 # In a Windows environment this must be set to your system time zone.
105 TIME_ZONE = "UTC"
106
107 # See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
108 LANGUAGE_CODE = "en"
109
110 INCONTEXT_L10N_PSEUDOLANGUAGE = "xx-lr"
111 INCONTEXT_L10N_PSEUDOLANGUAGE_BIDI = "yy-rl"
112 INCONTEXT_L10N_PSEUDOLANGUAGES = (
113 INCONTEXT_L10N_PSEUDOLANGUAGE,
114 INCONTEXT_L10N_PSEUDOLANGUAGE_BIDI
115 )
116
117 LANGUAGES = (
118 ("en", "English"),
119 )
120
121 if env.bool("INCLUDE_INCONTEXT_L10N", False):
122 EXTRA_LANGUAGES = [
123 (INCONTEXT_L10N_PSEUDOLANGUAGE, "Translation mode"),
124 (INCONTEXT_L10N_PSEUDOLANGUAGE_BIDI, "Translation mode (Bi-directional)"),
125 ]
126
127 EXTRA_LANG_INFO = {
128 INCONTEXT_L10N_PSEUDOLANGUAGE: {
129 'bidi': False,
130 'code': INCONTEXT_L10N_PSEUDOLANGUAGE,
131 'name': "Translation mode",
132 'name_local': _("Translation mode"),
133 },
134 INCONTEXT_L10N_PSEUDOLANGUAGE_BIDI: {
135 'bidi': True,
136 'code': INCONTEXT_L10N_PSEUDOLANGUAGE_BIDI,
137 'name': "Translation mode (Bi-directional)",
138 'name_local': _("Translation mode (Bi-directional)"),
139 }
140 }
141
142 django.conf.locale.LANG_INFO.update(EXTRA_LANG_INFO)
143 # Add new languages to the list of all django languages
144 global_settings.LANGUAGES = global_settings.LANGUAGES + EXTRA_LANGUAGES
145 # Add new languages to the list of languages used for this project
146 LANGUAGES += tuple(EXTRA_LANGUAGES)
147
148
149 # See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
150 SITE_ID = 1
151
152 # See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
153 USE_I18N = True
154
155 # See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
156 USE_L10N = True
157
158 # See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
159 USE_TZ = True
160
161 # See: https://docs.djangoproject.com/en/dev/ref/settings/#locale-paths
162 LOCALE_PATHS = ["locale"]
163
164 # TEMPLATE CONFIGURATION
165 # ----------------------------------------------------------------------------
166 # See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
167 TEMPLATES = [
168 {
169 # See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
170 "BACKEND": "django.template.backends.django.DjangoTemplates",
171 # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
172 "DIRS": [
173 str(ROOT_DIR.path("templates")),
174 ],
175 "OPTIONS": {
176 # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
177 "debug": DEBUG,
178 # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
179 # https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
180 "loaders": [
181 "django.template.loaders.filesystem.Loader",
182 "django.template.loaders.app_directories.Loader",
183 ],
184 # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
185 "context_processors": [
186 "django.template.context_processors.debug",
187 "django.template.context_processors.request",
188 "django.contrib.auth.context_processors.auth",
189 "django.template.context_processors.i18n",
190 "django.template.context_processors.media",
191 "django.template.context_processors.static",
192 "django.template.context_processors.tz",
193 "django.contrib.messages.context_processors.messages",
194 "config.context_processors.version_number.version_number",
195 "config.context_processors.deployed.deployed",
196 ],
197 "libraries": {
198 "render_html_field": "config.templatetags.render_html_field",
199 "translate_url": "config.templatetags.translate_url",
200 },
201 },
202 },
203 ]
204
205 # STATIC FILE CONFIGURATION
206 # ------------------------------------------------------------------------------
207 # See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
208 STATIC_ROOT = os.path.join(str(ROOT_DIR.path("staticfiles")), "")
209
210 # See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
211 BUILD_ROOT = os.path.join(str(ROOT_DIR.path("build")), "")
212 STATIC_URL = "/staticfiles/"
213
214 # See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
215 STATICFILES_DIRS = [
216 BUILD_ROOT,
217 ]
218
219 # See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
220 STATICFILES_FINDERS = [
221 "django.contrib.staticfiles.finders.FileSystemFinder",
222 "django.contrib.staticfiles.finders.AppDirectoriesFinder",
223 ]
224
225 # MEDIA CONFIGURATION
226 # ------------------------------------------------------------------------------
227 # See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
228 MEDIA_ROOT = str(ROOT_DIR("media"))
229
230 # See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
231 MEDIA_URL = "/media/"
232
233 # URL Configuration
234 # ------------------------------------------------------------------------------
235 ROOT_URLCONF = "config.urls"
236
237 # See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
238 WSGI_APPLICATION = "config.wsgi.application"
239
240 # PASSWORD VALIDATION
241 # https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
242 # ------------------------------------------------------------------------------
243
244 AUTH_PASSWORD_VALIDATORS = [
245 {
246 "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
247 },
248 {
249 "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
250 },
251 {
252 "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
253 },
254 {
255 "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
256 },
257 ]
258 # OTHER SETTINGS
259 # ------------------------------------------------------------------------------
260 DJANGO_PRODUCTION = env.bool("DJANGO_PRODUCTION")
261 TOPICS_CONTENT_BASE_PATH = os.path.join(str(ROOT_DIR.path("topics")), "content")
262 RESOURCES_CONTENT_BASE_PATH = os.path.join(str(ROOT_DIR.path("resources")), "content")
263 RESOURCE_GENERATION_LOCATION = os.path.join(str(ROOT_DIR.path("staticfiles")), "resources")
264 RESOURCE_GENERATORS_PACKAGE = "resources.generators"
265 RESOURCE_COPY_AMOUNT = 20
266 SCRATCH_GENERATION_LOCATION = str(ROOT_DIR.path("temp"))
267 CUSTOM_VERTO_TEMPLATES = os.path.join(str(ROOT_DIR.path("utils")), "custom_converter_templates", "")
268 MODELTRANSLATION_CUSTOM_FIELDS = ("JSONField",)
269
[end of csunplugged/config/settings/base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/csunplugged/config/settings/base.py b/csunplugged/config/settings/base.py
--- a/csunplugged/config/settings/base.py
+++ b/csunplugged/config/settings/base.py
@@ -43,6 +43,7 @@
THIRD_PARTY_APPS = [
"django_bootstrap_breadcrumbs",
"modeltranslation",
+ "bidiutils",
]
# Apps specific for this project go here.
@@ -142,8 +143,11 @@
django.conf.locale.LANG_INFO.update(EXTRA_LANG_INFO)
# Add new languages to the list of all django languages
global_settings.LANGUAGES = global_settings.LANGUAGES + EXTRA_LANGUAGES
+ global_settings.LANGUAGES_BIDI = (global_settings.LANGUAGES_BIDI +
+ [INCONTEXT_L10N_PSEUDOLANGUAGE_BIDI.split('-')[0]])
# Add new languages to the list of languages used for this project
LANGUAGES += tuple(EXTRA_LANGUAGES)
+ LANGUAGES_BIDI = global_settings.LANGUAGES_BIDI
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
@@ -193,6 +197,7 @@
"django.contrib.messages.context_processors.messages",
"config.context_processors.version_number.version_number",
"config.context_processors.deployed.deployed",
+ "bidiutils.context_processors.bidi",
],
"libraries": {
"render_html_field": "config.templatetags.render_html_field",
| {"golden_diff": "diff --git a/csunplugged/config/settings/base.py b/csunplugged/config/settings/base.py\n--- a/csunplugged/config/settings/base.py\n+++ b/csunplugged/config/settings/base.py\n@@ -43,6 +43,7 @@\n THIRD_PARTY_APPS = [\n \"django_bootstrap_breadcrumbs\",\n \"modeltranslation\",\n+ \"bidiutils\",\n ]\n \n # Apps specific for this project go here.\n@@ -142,8 +143,11 @@\n django.conf.locale.LANG_INFO.update(EXTRA_LANG_INFO)\n # Add new languages to the list of all django languages\n global_settings.LANGUAGES = global_settings.LANGUAGES + EXTRA_LANGUAGES\n+ global_settings.LANGUAGES_BIDI = (global_settings.LANGUAGES_BIDI +\n+ [INCONTEXT_L10N_PSEUDOLANGUAGE_BIDI.split('-')[0]])\n # Add new languages to the list of languages used for this project\n LANGUAGES += tuple(EXTRA_LANGUAGES)\n+ LANGUAGES_BIDI = global_settings.LANGUAGES_BIDI\n \n \n # See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id\n@@ -193,6 +197,7 @@\n \"django.contrib.messages.context_processors.messages\",\n \"config.context_processors.version_number.version_number\",\n \"config.context_processors.deployed.deployed\",\n+ \"bidiutils.context_processors.bidi\",\n ],\n \"libraries\": {\n \"render_html_field\": \"config.templatetags.render_html_field\",\n", "issue": "Support RTL/BiDi website layout\nCurrently, the html attribute `dir` is set to RTL when required, which gets us part of the way there. However more changes are needed to truly mirror the layout. This essentially boils down to switching 'left' with 'right' in css rules and html classes in all but a few exceptional cases. \r\n\r\nSome good suggestions for how to use `sass`/`scss` features to achieve this are included in this blog: http://matanich.com/2013/09/06/rtl-css-with-sass\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nBase Django settings for CS Unplugged project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/dev/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/dev/ref/settings/\n\"\"\"\n\nimport environ\nimport os.path\n\n# Add custom languages not provided by Django\nimport django.conf.locale\nfrom django.conf import global_settings\nfrom django.utils.translation import ugettext_lazy as _\n\n# cs-unplugged/csunplugged/config/settings/base.py - 3 = csunplugged/\nROOT_DIR = environ.Path(__file__) - 3\n\n# Load operating system environment variables and then prepare to use them\nenv = environ.Env()\n\n# APP CONFIGURATION\n# ----------------------------------------------------------------------------\nDJANGO_APPS = [\n # Default Django apps:\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django.contrib.postgres\",\n\n # Useful template tags\n \"django.contrib.humanize\",\n\n # Admin\n \"django.contrib.admin\",\n]\nTHIRD_PARTY_APPS = [\n \"django_bootstrap_breadcrumbs\",\n \"modeltranslation\",\n]\n\n# Apps specific for this project go here.\nLOCAL_APPS = [\n \"general.apps.GeneralConfig\",\n \"topics.apps.TopicsConfig\",\n \"resources.apps.ResourcesConfig\",\n]\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps\nINSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS\n\n# MIDDLEWARE CONFIGURATION\n# ----------------------------------------------------------------------------\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n 
\"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\n# DEBUG\n# ----------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug\nDEBUG = env.bool(\"DJANGO_DEBUG\", False)\n\n# FIXTURE CONFIGURATION\n# ----------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS\nFIXTURE_DIRS = (\n str(ROOT_DIR.path(\"fixtures\")),\n)\n\n# EMAIL CONFIGURATION\n# -----------------------------------------------------------------------------\n# EMAIL_BACKEND = env(\"DJANGO_EMAIL_BACKEND\",\n# default=\"django.core.mail.backends.smtp.EmailBackend\")\n\n# MANAGER CONFIGURATION\n# ----------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins\n# ADMINS = [\n# (\"University of Canterbury Computer Science Research Group\",\n# \"[email protected]\"),\n# ]\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers\n# MANAGERS = ADMINS\n\n# GENERAL CONFIGURATION\n# ----------------------------------------------------------------------------\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# In a Windows environment this must be set to your system time zone.\nTIME_ZONE = \"UTC\"\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code\nLANGUAGE_CODE = \"en\"\n\nINCONTEXT_L10N_PSEUDOLANGUAGE = \"xx-lr\"\nINCONTEXT_L10N_PSEUDOLANGUAGE_BIDI = \"yy-rl\"\nINCONTEXT_L10N_PSEUDOLANGUAGES = (\n INCONTEXT_L10N_PSEUDOLANGUAGE,\n INCONTEXT_L10N_PSEUDOLANGUAGE_BIDI\n)\n\nLANGUAGES = (\n (\"en\", \"English\"),\n)\n\nif env.bool(\"INCLUDE_INCONTEXT_L10N\", False):\n EXTRA_LANGUAGES = [\n (INCONTEXT_L10N_PSEUDOLANGUAGE, \"Translation mode\"),\n (INCONTEXT_L10N_PSEUDOLANGUAGE_BIDI, \"Translation mode (Bi-directional)\"),\n ]\n\n EXTRA_LANG_INFO = {\n INCONTEXT_L10N_PSEUDOLANGUAGE: {\n 'bidi': False,\n 'code': INCONTEXT_L10N_PSEUDOLANGUAGE,\n 'name': \"Translation mode\",\n 'name_local': _(\"Translation mode\"),\n },\n INCONTEXT_L10N_PSEUDOLANGUAGE_BIDI: {\n 'bidi': True,\n 'code': INCONTEXT_L10N_PSEUDOLANGUAGE_BIDI,\n 'name': \"Translation mode (Bi-directional)\",\n 'name_local': _(\"Translation mode (Bi-directional)\"),\n }\n }\n\n django.conf.locale.LANG_INFO.update(EXTRA_LANG_INFO)\n # Add new languages to the list of all django languages\n global_settings.LANGUAGES = global_settings.LANGUAGES + EXTRA_LANGUAGES\n # Add new languages to the list of languages used for this project\n LANGUAGES += tuple(EXTRA_LANGUAGES)\n\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id\nSITE_ID = 1\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n\nUSE_I18N = True\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n\nUSE_L10N = True\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz\nUSE_TZ = True\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#locale-paths\nLOCALE_PATHS = [\"locale\"]\n\n# 
TEMPLATE CONFIGURATION\n# ----------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates\nTEMPLATES = [\n {\n # See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs\n \"DIRS\": [\n str(ROOT_DIR.path(\"templates\")),\n ],\n \"OPTIONS\": {\n # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug\n \"debug\": DEBUG,\n # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders\n # https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types\n \"loaders\": [\n \"django.template.loaders.filesystem.Loader\",\n \"django.template.loaders.app_directories.Loader\",\n ],\n # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.template.context_processors.i18n\",\n \"django.template.context_processors.media\",\n \"django.template.context_processors.static\",\n \"django.template.context_processors.tz\",\n \"django.contrib.messages.context_processors.messages\",\n \"config.context_processors.version_number.version_number\",\n \"config.context_processors.deployed.deployed\",\n ],\n \"libraries\": {\n \"render_html_field\": \"config.templatetags.render_html_field\",\n \"translate_url\": \"config.templatetags.translate_url\",\n },\n },\n },\n]\n\n# STATIC FILE CONFIGURATION\n# ------------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root\nSTATIC_ROOT = os.path.join(str(ROOT_DIR.path(\"staticfiles\")), \"\")\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url\nBUILD_ROOT = os.path.join(str(ROOT_DIR.path(\"build\")), \"\")\nSTATIC_URL = \"/staticfiles/\"\n\n# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS\nSTATICFILES_DIRS = [\n BUILD_ROOT,\n]\n\n# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders\nSTATICFILES_FINDERS = [\n \"django.contrib.staticfiles.finders.FileSystemFinder\",\n \"django.contrib.staticfiles.finders.AppDirectoriesFinder\",\n]\n\n# MEDIA CONFIGURATION\n# ------------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root\nMEDIA_ROOT = str(ROOT_DIR(\"media\"))\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url\nMEDIA_URL = \"/media/\"\n\n# URL Configuration\n# ------------------------------------------------------------------------------\nROOT_URLCONF = \"config.urls\"\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application\nWSGI_APPLICATION = \"config.wsgi.application\"\n\n# PASSWORD VALIDATION\n# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators\n# ------------------------------------------------------------------------------\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n 
},\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n# OTHER SETTINGS\n# ------------------------------------------------------------------------------\nDJANGO_PRODUCTION = env.bool(\"DJANGO_PRODUCTION\")\nTOPICS_CONTENT_BASE_PATH = os.path.join(str(ROOT_DIR.path(\"topics\")), \"content\")\nRESOURCES_CONTENT_BASE_PATH = os.path.join(str(ROOT_DIR.path(\"resources\")), \"content\")\nRESOURCE_GENERATION_LOCATION = os.path.join(str(ROOT_DIR.path(\"staticfiles\")), \"resources\")\nRESOURCE_GENERATORS_PACKAGE = \"resources.generators\"\nRESOURCE_COPY_AMOUNT = 20\nSCRATCH_GENERATION_LOCATION = str(ROOT_DIR.path(\"temp\"))\nCUSTOM_VERTO_TEMPLATES = os.path.join(str(ROOT_DIR.path(\"utils\")), \"custom_converter_templates\", \"\")\nMODELTRANSLATION_CUSTOM_FIELDS = (\"JSONField\",)\n", "path": "csunplugged/config/settings/base.py"}]} | 3,440 | 334 |
gh_patches_debug_40910 | rasdani/github-patches | git_diff | DDMAL__CantusDB-200 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
currently missing a user list page
Implement it similarly to the indexer list page.
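A hedged sketch of what such a view could look like, mirroring the generic `ListView` pattern already used in the views shown below; the template name, ordering and page size are assumptions:
```python
from django.contrib.auth import get_user_model
from django.views.generic import ListView


class UserListView(ListView):
    """List all users, in the spirit of the existing indexer list page."""
    model = get_user_model()
    context_object_name = "users"
    template_name = "user_list.html"  # assumed template, still to be written
    paginate_by = 100

    def get_queryset(self):
        # Order by the display-name fields on the custom User model below
        return get_user_model().objects.all().order_by("full_name", "username")
```
It would then be exposed with a URL pattern along the lines of `path("users/", UserListView.as_view(), name="user-list")`, which is also what the patch further down adds.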
</issue>
<code>
[start of django/cantusdb_project/main_app/urls.py]
1 from django.urls import path, include
2 from main_app.views import *
3 from main_app.views import views
4 from main_app.views.sequence import SequenceEditView
5 from main_app.views.source import SourceCreateView, SourceEditView
6 from main_app.views.chant import ChantEditVolpianoView
7 from django.contrib.auth import views as auth_views
8 from main_app.views.user import UserDetailView, UserSourceListView, CustomLogoutView
9
10 urlpatterns = [
11 # static pages
12 path("index/", FullIndexView.as_view(), name="chant-index"),
13 path("contact/", views.contact_us, name="contact"),
14 # login/logout/user
15 path('login/', auth_views.LoginView.as_view(redirect_authenticated_user=True), name="login"),
16 path('logout/', CustomLogoutView.as_view(), name="logout"),
17 path("my-sources/", UserSourceListView.as_view(), name="my-sources"),
18 path("users/<int:user_id>", UserDetailView.as_view(), name="user-detail"),
19
20 # chant
21 path("chants/", ChantListView.as_view(), name="chant-list"),
22 path("chant/<int:pk>", ChantDetailView.as_view(), name="chant-detail"),
23 path("chant-search/", ChantSearchView.as_view(), name="chant-search"),
24 path(
25 "chant-create/<int:source_pk>", ChantCreateView.as_view(), name="chant-create"
26 ),
27 path("chant-update/<int:pk>", ChantUpdateView.as_view(), name="chant-update"),
28 path(
29 "id/<str:cantus_id>", ChantByCantusIDView.as_view(), name="chant-by-cantus-id"
30 ),
31 path("chant-delete/<int:pk>", ChantDeleteView.as_view(), name="chant-delete"),
32 path(
33 "edit-volpiano/<int:source_id>",
34 ChantEditVolpianoView.as_view(),
35 name="source-edit-volpiano"
36 ),
37 # feast
38 path("feasts/", FeastListView.as_view(), name="feast-list"),
39 path("feast/<int:pk>", FeastDetailView.as_view(), name="feast-detail"),
40 # genre
41 path("genres/", GenreListView.as_view(), name="genre-list"),
42 path("genre/<int:pk>", GenreDetailView.as_view(), name="genre-detail"),
43 # indexer
44 path("indexers/", IndexerListView.as_view(), name="indexer-list"),
45 path("indexer/<int:pk>", IndexerDetailView.as_view(), name="indexer-detail"),
46 # office
47 path("offices/", OfficeListView.as_view(), name="office-list"),
48 path("office/<int:pk>", OfficeDetailView.as_view(), name="office-detail"),
49 # sequence
50 path("sequences/", SequenceListView.as_view(), name="sequence-list"),
51 path("sequence/<int:pk>", SequenceDetailView.as_view(), name="sequence-detail",),
52 path("edit-sequence/<int:sequence_id>", SequenceEditView.as_view(), name="sequence-edit",),
53 # source
54 path("sources/", SourceListView.as_view(), name="source-list"),
55 path("source/<int:pk>", SourceDetailView.as_view(), name="source-detail"),
56 path(
57 "source-create/",
58 SourceCreateView.as_view(),
59 name="source-create"
60 ),
61 path(
62 "edit-source/<int:source_id>",
63 SourceEditView.as_view(),
64 name="source-edit"
65 ),
66 # melody
67 path("melody/", MelodySearchView.as_view(), name="melody-search"),
68 path("ajax/melody/<str:cantus_id>", views.ajax_melody_list, name="ajax-melody"),
69 path("ajax/melody-search/", views.ajax_melody_search, name="ajax-melody-search",),
70 # json api
71 path("json-sources/", views.json_sources_export, name="json-sources-export"),
72 path("json-node/<str:id>", views.json_node_export, name="json-node-export"),
73 path("json-nextchants/<str:cantus_id>", views.json_nextchants, name="json-nextchants"),
74 path(
75 "json-melody/<str:cantus_id>",
76 views.json_melody_export,
77 name="json-melody-export",
78 ),
79 # misc search
80 path(
81 "chant-search-ms/<int:source_pk>",
82 ChantSearchMSView.as_view(),
83 name="chant-search-ms",
84 ),
85 path("ci-search/<str:search_term>", CISearchView.as_view(), name="ci-search"),
86 path(
87 "ajax/search-bar/<str:search_term>",
88 views.ajax_search_bar,
89 name="ajax-search-bar",
90 ),
91 # misc
92 path("content-statistics", views.items_count, name="items-count"),
93 path("csv/<str:source_id>", views.csv_export, name="csv-export"),
94 path(
95 "ajax/concordance/<str:cantus_id>",
96 views.ajax_concordance_list,
97 name="ajax-concordance",
98 ),
99 ]
100
101 handler404 = 'main_app.views.views.handle404'
102
[end of django/cantusdb_project/main_app/urls.py]
[start of django/cantusdb_project/main_app/views/user.py]
1 from urllib import request
2 from django.views.generic import DetailView
3 from django.contrib.auth import get_user_model
4 from main_app.models import Source
5 from django.views.generic import ListView
6 from django.contrib.auth.mixins import LoginRequiredMixin
7 from django.db.models import Q
8 from django.core.paginator import Paginator
9 from django.contrib.auth.views import LogoutView
10 from django.contrib import messages
11
12 class UserDetailView(DetailView):
13 """Detail view for User model
14
15 Accessed by /users/<user_id>
16 """
17
18 model = get_user_model()
19 context_object_name = "user"
20 template_name = "user_detail.html"
21 pk_url_kwarg = 'user_id'
22
23 class UserSourceListView(LoginRequiredMixin, ListView):
24 model = Source
25 context_object_name = "sources"
26 template_name = "user_source_list.html"
27 paginate_by = 100
28
29 def get_queryset(self):
30 return Source.objects.filter(
31 Q(current_editors=self.request.user)
32 | Q(created_by=self.request.user)
33 # | Q(inventoried_by=self.request.user)
34 # | Q(full_text_entered_by=self.request.user)
35 # | Q(melodies_entered_by=self.request.user)
36 # | Q(proofreaders=self.request.user)
37 # | Q(other_editors=self.request.user)
38 ).order_by("title")
39
40 def get_context_data(self, **kwargs):
41 context = super().get_context_data(**kwargs)
42
43 user_created_sources = Source.objects.filter(created_by=self.request.user)
44 paginator = Paginator(user_created_sources, 10)
45 page_number = self.request.GET.get('page2')
46 page_obj = paginator.get_page(page_number)
47
48 context["user_created_sources_page_obj"] = page_obj
49 return context
50
51 class CustomLogoutView(LogoutView):
52 def get_next_page(self):
53 next_page = super().get_next_page()
54 messages.success(
55 self.request,
56 'You have successfully logged out!'
57 )
58 return next_page
59
[end of django/cantusdb_project/main_app/views/user.py]
[start of django/cantusdb_project/users/models.py]
1 from django.db import models
2 from django.contrib.auth.models import AbstractUser
3
4
5 class User(AbstractUser):
6 institution = models.CharField(max_length=255, blank=True, null=True)
7 city = models.CharField(max_length=255, blank=True, null=True)
8 country = models.CharField(max_length=255, blank=True, null=True)
9 website = models.URLField(blank=True, null=True)
10 sources_user_can_edit = models.ManyToManyField("main_app.Source", related_name="users_who_can_edit_this_source", blank=True)
11 full_name = models.CharField(max_length=255, blank=True, null=True)
12
13 @property
14 def name(self):
15 if self.full_name:
16 return self.full_name
17 elif self.first_name and self.last_name:
18 return f'{self.first_name} {self.last_name}'
19
20 def __str__(self):
21 if self.name:
22 return self.name
23 else:
24 return self.username
[end of django/cantusdb_project/users/models.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/django/cantusdb_project/main_app/urls.py b/django/cantusdb_project/main_app/urls.py
--- a/django/cantusdb_project/main_app/urls.py
+++ b/django/cantusdb_project/main_app/urls.py
@@ -5,7 +5,7 @@
from main_app.views.source import SourceCreateView, SourceEditView
from main_app.views.chant import ChantEditVolpianoView
from django.contrib.auth import views as auth_views
-from main_app.views.user import UserDetailView, UserSourceListView, CustomLogoutView
+from main_app.views.user import UserDetailView, UserSourceListView, CustomLogoutView, UserListView
urlpatterns = [
# static pages
@@ -15,7 +15,8 @@
path('login/', auth_views.LoginView.as_view(redirect_authenticated_user=True), name="login"),
path('logout/', CustomLogoutView.as_view(), name="logout"),
path("my-sources/", UserSourceListView.as_view(), name="my-sources"),
- path("users/<int:user_id>", UserDetailView.as_view(), name="user-detail"),
+ path("users/<int:pk>", UserDetailView.as_view(), name="user-detail"),
+ path("users/", UserListView.as_view(), name="user-list"),
# chant
path("chants/", ChantListView.as_view(), name="chant-list"),
diff --git a/django/cantusdb_project/main_app/views/user.py b/django/cantusdb_project/main_app/views/user.py
--- a/django/cantusdb_project/main_app/views/user.py
+++ b/django/cantusdb_project/main_app/views/user.py
@@ -8,17 +8,17 @@
from django.core.paginator import Paginator
from django.contrib.auth.views import LogoutView
from django.contrib import messages
+from extra_views import SearchableListMixin
class UserDetailView(DetailView):
"""Detail view for User model
- Accessed by /users/<user_id>
+ Accessed by /users/<pk>
"""
model = get_user_model()
context_object_name = "user"
template_name = "user_detail.html"
- pk_url_kwarg = 'user_id'
class UserSourceListView(LoginRequiredMixin, ListView):
model = Source
@@ -56,3 +56,18 @@
'You have successfully logged out!'
)
return next_page
+
+class UserListView(LoginRequiredMixin, SearchableListMixin, ListView):
+ """Searchable List view for User model
+
+ Accessed by /users/
+
+ When passed a ``?q=<query>`` argument in the GET request, it will filter users
+ based on the fields defined in ``search_fields`` with the ``icontains`` lookup
+ """
+
+ model = get_user_model()
+ search_fields = ["first_name", "last_name", "institution", "city", "country"]
+ paginate_by = 100
+ template_name = "user_list.html"
+ context_object_name = "users"
diff --git a/django/cantusdb_project/users/models.py b/django/cantusdb_project/users/models.py
--- a/django/cantusdb_project/users/models.py
+++ b/django/cantusdb_project/users/models.py
@@ -1,5 +1,6 @@
from django.db import models
from django.contrib.auth.models import AbstractUser
+from django.urls.base import reverse
class User(AbstractUser):
@@ -21,4 +22,9 @@
if self.name:
return self.name
else:
- return self.username
\ No newline at end of file
+ return self.username
+
+ def get_absolute_url(self) -> str:
+ """Get the absolute URL for an instance of a model."""
+ detail_name = self.__class__.__name__.lower() + "-detail"
+ return reverse(detail_name, kwargs={"pk": self.pk})
| {"golden_diff": "diff --git a/django/cantusdb_project/main_app/urls.py b/django/cantusdb_project/main_app/urls.py\n--- a/django/cantusdb_project/main_app/urls.py\n+++ b/django/cantusdb_project/main_app/urls.py\n@@ -5,7 +5,7 @@\n from main_app.views.source import SourceCreateView, SourceEditView\n from main_app.views.chant import ChantEditVolpianoView\n from django.contrib.auth import views as auth_views\n-from main_app.views.user import UserDetailView, UserSourceListView, CustomLogoutView\n+from main_app.views.user import UserDetailView, UserSourceListView, CustomLogoutView, UserListView\n \n urlpatterns = [\n # static pages\n@@ -15,7 +15,8 @@\n path('login/', auth_views.LoginView.as_view(redirect_authenticated_user=True), name=\"login\"),\n path('logout/', CustomLogoutView.as_view(), name=\"logout\"),\n path(\"my-sources/\", UserSourceListView.as_view(), name=\"my-sources\"),\n- path(\"users/<int:user_id>\", UserDetailView.as_view(), name=\"user-detail\"),\n+ path(\"users/<int:pk>\", UserDetailView.as_view(), name=\"user-detail\"),\n+ path(\"users/\", UserListView.as_view(), name=\"user-list\"),\n \n # chant\n path(\"chants/\", ChantListView.as_view(), name=\"chant-list\"),\ndiff --git a/django/cantusdb_project/main_app/views/user.py b/django/cantusdb_project/main_app/views/user.py\n--- a/django/cantusdb_project/main_app/views/user.py\n+++ b/django/cantusdb_project/main_app/views/user.py\n@@ -8,17 +8,17 @@\n from django.core.paginator import Paginator\n from django.contrib.auth.views import LogoutView\n from django.contrib import messages\n+from extra_views import SearchableListMixin\n \n class UserDetailView(DetailView):\n \"\"\"Detail view for User model\n \n- Accessed by /users/<user_id>\n+ Accessed by /users/<pk>\n \"\"\"\n \n model = get_user_model()\n context_object_name = \"user\"\n template_name = \"user_detail.html\"\n- pk_url_kwarg = 'user_id'\t\n \n class UserSourceListView(LoginRequiredMixin, ListView):\n model = Source\n@@ -56,3 +56,18 @@\n 'You have successfully logged out!'\n )\n return next_page\n+\n+class UserListView(LoginRequiredMixin, SearchableListMixin, ListView):\n+ \"\"\"Searchable List view for User model\n+\n+ Accessed by /users/\n+\n+ When passed a ``?q=<query>`` argument in the GET request, it will filter users\n+ based on the fields defined in ``search_fields`` with the ``icontains`` lookup\n+ \"\"\"\n+\n+ model = get_user_model()\n+ search_fields = [\"first_name\", \"last_name\", \"institution\", \"city\", \"country\"]\n+ paginate_by = 100\n+ template_name = \"user_list.html\"\n+ context_object_name = \"users\"\ndiff --git a/django/cantusdb_project/users/models.py b/django/cantusdb_project/users/models.py\n--- a/django/cantusdb_project/users/models.py\n+++ b/django/cantusdb_project/users/models.py\n@@ -1,5 +1,6 @@\n from django.db import models\n from django.contrib.auth.models import AbstractUser\n+from django.urls.base import reverse\n \n \n class User(AbstractUser):\n@@ -21,4 +22,9 @@\n if self.name:\n return self.name\n else:\n- return self.username\n\\ No newline at end of file\n+ return self.username\n+\n+ def get_absolute_url(self) -> str:\n+ \"\"\"Get the absolute URL for an instance of a model.\"\"\"\n+ detail_name = self.__class__.__name__.lower() + \"-detail\"\n+ return reverse(detail_name, kwargs={\"pk\": self.pk})\n", "issue": "currently missing a user list page\nimplement it similar to indexer list page\n", "before_files": [{"content": "from django.urls import path, include\nfrom main_app.views import *\nfrom main_app.views import views\nfrom 
main_app.views.sequence import SequenceEditView\nfrom main_app.views.source import SourceCreateView, SourceEditView\nfrom main_app.views.chant import ChantEditVolpianoView\nfrom django.contrib.auth import views as auth_views\nfrom main_app.views.user import UserDetailView, UserSourceListView, CustomLogoutView\n\nurlpatterns = [\n # static pages\n path(\"index/\", FullIndexView.as_view(), name=\"chant-index\"),\n path(\"contact/\", views.contact_us, name=\"contact\"),\n # login/logout/user\n path('login/', auth_views.LoginView.as_view(redirect_authenticated_user=True), name=\"login\"),\n path('logout/', CustomLogoutView.as_view(), name=\"logout\"),\n path(\"my-sources/\", UserSourceListView.as_view(), name=\"my-sources\"),\n path(\"users/<int:user_id>\", UserDetailView.as_view(), name=\"user-detail\"),\n\n # chant\n path(\"chants/\", ChantListView.as_view(), name=\"chant-list\"),\n path(\"chant/<int:pk>\", ChantDetailView.as_view(), name=\"chant-detail\"),\n path(\"chant-search/\", ChantSearchView.as_view(), name=\"chant-search\"),\n path(\n \"chant-create/<int:source_pk>\", ChantCreateView.as_view(), name=\"chant-create\"\n ),\n path(\"chant-update/<int:pk>\", ChantUpdateView.as_view(), name=\"chant-update\"),\n path(\n \"id/<str:cantus_id>\", ChantByCantusIDView.as_view(), name=\"chant-by-cantus-id\"\n ),\n path(\"chant-delete/<int:pk>\", ChantDeleteView.as_view(), name=\"chant-delete\"),\n path(\n \"edit-volpiano/<int:source_id>\", \n ChantEditVolpianoView.as_view(), \n name=\"source-edit-volpiano\"\n ),\n # feast\n path(\"feasts/\", FeastListView.as_view(), name=\"feast-list\"),\n path(\"feast/<int:pk>\", FeastDetailView.as_view(), name=\"feast-detail\"),\n # genre\n path(\"genres/\", GenreListView.as_view(), name=\"genre-list\"),\n path(\"genre/<int:pk>\", GenreDetailView.as_view(), name=\"genre-detail\"),\n # indexer\n path(\"indexers/\", IndexerListView.as_view(), name=\"indexer-list\"),\n path(\"indexer/<int:pk>\", IndexerDetailView.as_view(), name=\"indexer-detail\"),\n # office\n path(\"offices/\", OfficeListView.as_view(), name=\"office-list\"),\n path(\"office/<int:pk>\", OfficeDetailView.as_view(), name=\"office-detail\"),\n # sequence\n path(\"sequences/\", SequenceListView.as_view(), name=\"sequence-list\"),\n path(\"sequence/<int:pk>\", SequenceDetailView.as_view(), name=\"sequence-detail\",),\n path(\"edit-sequence/<int:sequence_id>\", SequenceEditView.as_view(), name=\"sequence-edit\",),\n # source\n path(\"sources/\", SourceListView.as_view(), name=\"source-list\"),\n path(\"source/<int:pk>\", SourceDetailView.as_view(), name=\"source-detail\"),\n path(\n \"source-create/\", \n SourceCreateView.as_view(), \n name=\"source-create\"\n ),\n path(\n \"edit-source/<int:source_id>\", \n SourceEditView.as_view(), \n name=\"source-edit\"\n ),\n # melody\n path(\"melody/\", MelodySearchView.as_view(), name=\"melody-search\"),\n path(\"ajax/melody/<str:cantus_id>\", views.ajax_melody_list, name=\"ajax-melody\"),\n path(\"ajax/melody-search/\", views.ajax_melody_search, name=\"ajax-melody-search\",),\n # json api\n path(\"json-sources/\", views.json_sources_export, name=\"json-sources-export\"),\n path(\"json-node/<str:id>\", views.json_node_export, name=\"json-node-export\"),\n path(\"json-nextchants/<str:cantus_id>\", views.json_nextchants, name=\"json-nextchants\"),\n path(\n \"json-melody/<str:cantus_id>\",\n views.json_melody_export,\n name=\"json-melody-export\",\n ),\n # misc search\n path(\n \"chant-search-ms/<int:source_pk>\",\n ChantSearchMSView.as_view(),\n 
name=\"chant-search-ms\",\n ),\n path(\"ci-search/<str:search_term>\", CISearchView.as_view(), name=\"ci-search\"),\n path(\n \"ajax/search-bar/<str:search_term>\",\n views.ajax_search_bar,\n name=\"ajax-search-bar\",\n ),\n # misc\n path(\"content-statistics\", views.items_count, name=\"items-count\"),\n path(\"csv/<str:source_id>\", views.csv_export, name=\"csv-export\"),\n path(\n \"ajax/concordance/<str:cantus_id>\",\n views.ajax_concordance_list,\n name=\"ajax-concordance\",\n ),\n]\n\nhandler404 = 'main_app.views.views.handle404'\n", "path": "django/cantusdb_project/main_app/urls.py"}, {"content": "from urllib import request\nfrom django.views.generic import DetailView\nfrom django.contrib.auth import get_user_model\nfrom main_app.models import Source\nfrom django.views.generic import ListView\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.db.models import Q\nfrom django.core.paginator import Paginator\nfrom django.contrib.auth.views import LogoutView\nfrom django.contrib import messages\n\nclass UserDetailView(DetailView):\n \"\"\"Detail view for User model\n\n Accessed by /users/<user_id>\n \"\"\"\n\n model = get_user_model()\n context_object_name = \"user\"\n template_name = \"user_detail.html\"\n pk_url_kwarg = 'user_id'\t\n\nclass UserSourceListView(LoginRequiredMixin, ListView):\n model = Source\n context_object_name = \"sources\"\n template_name = \"user_source_list.html\"\n paginate_by = 100\n\n def get_queryset(self):\n return Source.objects.filter(\n Q(current_editors=self.request.user)\n | Q(created_by=self.request.user)\n # | Q(inventoried_by=self.request.user)\n # | Q(full_text_entered_by=self.request.user)\n # | Q(melodies_entered_by=self.request.user)\n # | Q(proofreaders=self.request.user)\n # | Q(other_editors=self.request.user) \n ).order_by(\"title\")\n \n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n user_created_sources = Source.objects.filter(created_by=self.request.user)\n paginator = Paginator(user_created_sources, 10)\n page_number = self.request.GET.get('page2')\n page_obj = paginator.get_page(page_number)\n\n context[\"user_created_sources_page_obj\"] = page_obj\n return context\n\nclass CustomLogoutView(LogoutView):\n def get_next_page(self):\n next_page = super().get_next_page()\n messages.success(\n self.request, \n 'You have successfully logged out!'\n )\n return next_page\n", "path": "django/cantusdb_project/main_app/views/user.py"}, {"content": "from django.db import models\nfrom django.contrib.auth.models import AbstractUser\n\n\nclass User(AbstractUser):\n institution = models.CharField(max_length=255, blank=True, null=True)\n city = models.CharField(max_length=255, blank=True, null=True)\n country = models.CharField(max_length=255, blank=True, null=True)\n website = models.URLField(blank=True, null=True)\n sources_user_can_edit = models.ManyToManyField(\"main_app.Source\", related_name=\"users_who_can_edit_this_source\", blank=True)\n full_name = models.CharField(max_length=255, blank=True, null=True)\n\n @property\n def name(self):\n if self.full_name:\n return self.full_name\n elif self.first_name and self.last_name:\n return f'{self.first_name} {self.last_name}'\n\n def __str__(self):\n if self.name:\n return self.name\n else:\n return self.username", "path": "django/cantusdb_project/users/models.py"}]} | 2,666 | 851 |
gh_patches_debug_5468 | rasdani/github-patches | git_diff | freedomofpress__securedrop-1901 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Wordlists are not being parsed correctly
# Bug
## Description
`crypto_util.{words,nouns,adjectives}` all contain an empty string as their last element.
</issue>
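For context on where that empty string comes from: splitting a newline-terminated file on `'\n'` always produces one final empty element, which `str.splitlines()` (or stripping the trailing newline first) avoids. A quick generic Python illustration, not code from the repository:

```
text = "alpha\nbeta\ngamma\n"             # typical newline-terminated wordlist

print(text.split("\n"))                   # ['alpha', 'beta', 'gamma', ''] <- trailing empty entry
print(text.splitlines())                  # ['alpha', 'beta', 'gamma']
print(text.rstrip("\n").split("\n"))      # ['alpha', 'beta', 'gamma']
```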
<code>
[start of securedrop/crypto_util.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 from base64 import b32encode
5 import os
6 import subprocess
7
8 from Crypto.Random import random
9 import gnupg
10 from gnupg._util import _is_stream, _make_binary_stream
11 import scrypt
12
13 import config
14 import store
15
16 # to fix gpg error #78 on production
17 os.environ['USERNAME'] = 'www-data'
18
19 GPG_KEY_TYPE = "RSA"
20 if os.environ.get('SECUREDROP_ENV') == 'test':
21 # Optimize crypto to speed up tests (at the expense of security - DO NOT
22 # use these settings in production)
23 GPG_KEY_LENGTH = 1024
24 SCRYPT_PARAMS = dict(N=2**1, r=1, p=1)
25 else: # pragma: no cover
26 GPG_KEY_LENGTH = 4096
27 SCRYPT_PARAMS = config.SCRYPT_PARAMS
28
29 SCRYPT_ID_PEPPER = config.SCRYPT_ID_PEPPER
30 SCRYPT_GPG_PEPPER = config.SCRYPT_GPG_PEPPER
31
32 DEFAULT_WORDS_IN_RANDOM_ID = 8
33
34
35 # Make sure these pass before the app can run
36 # TODO: Add more tests
37 def do_runtime_tests():
38 assert(config.SCRYPT_ID_PEPPER != config.SCRYPT_GPG_PEPPER)
39 # crash if we don't have srm:
40 try:
41 subprocess.check_call(['srm'], stdout=subprocess.PIPE)
42 except subprocess.CalledProcessError:
43 pass
44
45 do_runtime_tests()
46
47 gpg = gnupg.GPG(binary='gpg2', homedir=config.GPG_KEY_DIR)
48
49 words = open(config.WORD_LIST).read().split('\n')
50 nouns = open(config.NOUNS).read().split('\n')
51 adjectives = open(config.ADJECTIVES).read().split('\n')
52
53
54 class CryptoException(Exception):
55 pass
56
57
58 def clean(s, also=''):
59 """
60 >>> clean("Hello, world!")
61 Traceback (most recent call last):
62 ...
63 CryptoException: invalid input: Hello, world!
64 >>> clean("Helloworld")
65 'Helloworld'
66 """
67 # safe characters for every possible word in the wordlist includes capital
68 # letters because codename hashes are base32-encoded with capital letters
69 ok = (' !#%$&)(+*-1032547698;:=?@acbedgfihkjmlonqpsrutwvyxzABCDEFGHIJ'
70 'KLMNOPQRSTUVWXYZ')
71 for c in s:
72 if c not in ok and c not in also:
73 raise CryptoException("invalid input: {0}".format(s))
74 # scrypt.hash requires input of type str. Since the wordlist is all ASCII
75 # characters, this conversion is not problematic
76 return str(s)
77
78
79 def genrandomid(words_in_random_id=DEFAULT_WORDS_IN_RANDOM_ID):
80 return ' '.join(random.choice(words) for x in range(words_in_random_id))
81
82
83 def display_id():
84 return ' '.join([random.choice(adjectives), random.choice(nouns)])
85
86
87 def hash_codename(codename, salt=SCRYPT_ID_PEPPER):
88 """Salts and hashes a codename using scrypt.
89
90 :param str codename: A source's codename.
91 :param str salt: The salt to mix with the codename when hashing.
92 :returns: A base32 encoded string; the salted codename hash.
93 """
94 return b32encode(scrypt.hash(clean(codename), salt, **SCRYPT_PARAMS))
95
96
97 def genkeypair(name, secret):
98 """Generate a GPG key through batch file key generation. A source's
99 codename is salted with SCRYPT_GPG_PEPPER and hashed with scrypt to
100 provide the passphrase used to encrypt their private key. Their name
101 should be their filesystem id.
102
103 >>> if not gpg.list_keys(hash_codename('randomid')):
104 ... genkeypair(hash_codename('randomid'), 'randomid').type
105 ... else:
106 ... u'P'
107 u'P'
108
109 :param str name: The source's filesystem id (their codename, salted
110 with SCRYPT_ID_PEPPER, and hashed with scrypt).
111 :param str secret: The source's codename.
112 :returns: a :class:`GenKey <gnupg._parser.GenKey>` object, on which
113 the ``__str__()`` method may be called to return the
114 generated key's fingeprint.
115
116 """
117 name = clean(name)
118 secret = hash_codename(secret, salt=SCRYPT_GPG_PEPPER)
119 return gpg.gen_key(gpg.gen_key_input(
120 key_type=GPG_KEY_TYPE, key_length=GPG_KEY_LENGTH,
121 passphrase=secret,
122 name_email=name
123 ))
124
125
126 def delete_reply_keypair(source_filesystem_id):
127 key = getkey(source_filesystem_id)
128 # If this source was never flagged for review, they won't have a reply
129 # keypair
130 if not key:
131 return
132 # The private key needs to be deleted before the public key can be deleted
133 # http://pythonhosted.org/python-gnupg/#deleting-keys
134 gpg.delete_keys(key, True) # private key
135 gpg.delete_keys(key) # public key
136 # TODO: srm?
137
138
139 def getkey(name):
140 for key in gpg.list_keys():
141 for uid in key['uids']:
142 if name in uid:
143 return key['fingerprint']
144 return None
145
146
147 def encrypt(plaintext, fingerprints, output=None):
148 # Verify the output path
149 if output:
150 store.verify(output)
151
152 if not isinstance(fingerprints, (list, tuple)):
153 fingerprints = [fingerprints, ]
154 # Remove any spaces from provided fingerprints GPG outputs fingerprints
155 # with spaces for readability, but requires the spaces to be removed when
156 # using fingerprints to specify recipients.
157 fingerprints = [fpr.replace(' ', '') for fpr in fingerprints]
158
159 if not _is_stream(plaintext):
160 plaintext = _make_binary_stream(plaintext, "utf_8")
161
162 out = gpg.encrypt(plaintext,
163 *fingerprints,
164 output=output,
165 always_trust=True,
166 armor=False)
167 if out.ok:
168 return out.data
169 else:
170 raise CryptoException(out.stderr)
171
172
173 def decrypt(secret, ciphertext):
174 """
175 >>> key = genkeypair('randomid', 'randomid')
176 >>> decrypt('randomid',
177 ... encrypt('Goodbye, cruel world!', str(key))
178 ... )
179 'Goodbye, cruel world!'
180 """
181 hashed_codename = hash_codename(secret, salt=SCRYPT_GPG_PEPPER)
182 return gpg.decrypt(ciphertext, passphrase=hashed_codename).data
183
184 if __name__ == "__main__": # pragma: no cover
185 import doctest
186 doctest.testmod()
187
[end of securedrop/crypto_util.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/securedrop/crypto_util.py b/securedrop/crypto_util.py
--- a/securedrop/crypto_util.py
+++ b/securedrop/crypto_util.py
@@ -46,9 +46,9 @@
gpg = gnupg.GPG(binary='gpg2', homedir=config.GPG_KEY_DIR)
-words = open(config.WORD_LIST).read().split('\n')
-nouns = open(config.NOUNS).read().split('\n')
-adjectives = open(config.ADJECTIVES).read().split('\n')
+words = open(config.WORD_LIST).read().rstrip('\n').split('\n')
+nouns = open(config.NOUNS).read().rstrip('\n').split('\n')
+adjectives = open(config.ADJECTIVES).read().rstrip('\n').split('\n')
class CryptoException(Exception):
| {"golden_diff": "diff --git a/securedrop/crypto_util.py b/securedrop/crypto_util.py\n--- a/securedrop/crypto_util.py\n+++ b/securedrop/crypto_util.py\n@@ -46,9 +46,9 @@\n \n gpg = gnupg.GPG(binary='gpg2', homedir=config.GPG_KEY_DIR)\n \n-words = open(config.WORD_LIST).read().split('\\n')\n-nouns = open(config.NOUNS).read().split('\\n')\n-adjectives = open(config.ADJECTIVES).read().split('\\n')\n+words = open(config.WORD_LIST).read().rstrip('\\n').split('\\n')\n+nouns = open(config.NOUNS).read().rstrip('\\n').split('\\n')\n+adjectives = open(config.ADJECTIVES).read().rstrip('\\n').split('\\n')\n \n \n class CryptoException(Exception):\n", "issue": "Wordlists are not being parsed correctly\n# Bug\r\n\r\n## Description\r\n\r\n`crypo_util.{words,nouns,adjectives}` all contain an empty string as their last element.\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom base64 import b32encode\nimport os\nimport subprocess\n\nfrom Crypto.Random import random\nimport gnupg\nfrom gnupg._util import _is_stream, _make_binary_stream\nimport scrypt\n\nimport config\nimport store\n\n# to fix gpg error #78 on production\nos.environ['USERNAME'] = 'www-data'\n\nGPG_KEY_TYPE = \"RSA\"\nif os.environ.get('SECUREDROP_ENV') == 'test':\n # Optimize crypto to speed up tests (at the expense of security - DO NOT\n # use these settings in production)\n GPG_KEY_LENGTH = 1024\n SCRYPT_PARAMS = dict(N=2**1, r=1, p=1)\nelse: # pragma: no cover\n GPG_KEY_LENGTH = 4096\n SCRYPT_PARAMS = config.SCRYPT_PARAMS\n\nSCRYPT_ID_PEPPER = config.SCRYPT_ID_PEPPER\nSCRYPT_GPG_PEPPER = config.SCRYPT_GPG_PEPPER\n\nDEFAULT_WORDS_IN_RANDOM_ID = 8\n\n\n# Make sure these pass before the app can run\n# TODO: Add more tests\ndef do_runtime_tests():\n assert(config.SCRYPT_ID_PEPPER != config.SCRYPT_GPG_PEPPER)\n # crash if we don't have srm:\n try:\n subprocess.check_call(['srm'], stdout=subprocess.PIPE)\n except subprocess.CalledProcessError:\n pass\n\ndo_runtime_tests()\n\ngpg = gnupg.GPG(binary='gpg2', homedir=config.GPG_KEY_DIR)\n\nwords = open(config.WORD_LIST).read().split('\\n')\nnouns = open(config.NOUNS).read().split('\\n')\nadjectives = open(config.ADJECTIVES).read().split('\\n')\n\n\nclass CryptoException(Exception):\n pass\n\n\ndef clean(s, also=''):\n \"\"\"\n >>> clean(\"Hello, world!\")\n Traceback (most recent call last):\n ...\n CryptoException: invalid input: Hello, world!\n >>> clean(\"Helloworld\")\n 'Helloworld'\n \"\"\"\n # safe characters for every possible word in the wordlist includes capital\n # letters because codename hashes are base32-encoded with capital letters\n ok = (' !#%$&)(+*-1032547698;:=?@acbedgfihkjmlonqpsrutwvyxzABCDEFGHIJ'\n 'KLMNOPQRSTUVWXYZ')\n for c in s:\n if c not in ok and c not in also:\n raise CryptoException(\"invalid input: {0}\".format(s))\n # scrypt.hash requires input of type str. 
Since the wordlist is all ASCII\n # characters, this conversion is not problematic\n return str(s)\n\n\ndef genrandomid(words_in_random_id=DEFAULT_WORDS_IN_RANDOM_ID):\n return ' '.join(random.choice(words) for x in range(words_in_random_id))\n\n\ndef display_id():\n return ' '.join([random.choice(adjectives), random.choice(nouns)])\n\n\ndef hash_codename(codename, salt=SCRYPT_ID_PEPPER):\n \"\"\"Salts and hashes a codename using scrypt.\n\n :param str codename: A source's codename.\n :param str salt: The salt to mix with the codename when hashing.\n :returns: A base32 encoded string; the salted codename hash.\n \"\"\"\n return b32encode(scrypt.hash(clean(codename), salt, **SCRYPT_PARAMS))\n\n\ndef genkeypair(name, secret):\n \"\"\"Generate a GPG key through batch file key generation. A source's\n codename is salted with SCRYPT_GPG_PEPPER and hashed with scrypt to\n provide the passphrase used to encrypt their private key. Their name\n should be their filesystem id.\n\n >>> if not gpg.list_keys(hash_codename('randomid')):\n ... genkeypair(hash_codename('randomid'), 'randomid').type\n ... else:\n ... u'P'\n u'P'\n\n :param str name: The source's filesystem id (their codename, salted\n with SCRYPT_ID_PEPPER, and hashed with scrypt).\n :param str secret: The source's codename.\n :returns: a :class:`GenKey <gnupg._parser.GenKey>` object, on which\n the ``__str__()`` method may be called to return the\n generated key's fingeprint.\n\n \"\"\"\n name = clean(name)\n secret = hash_codename(secret, salt=SCRYPT_GPG_PEPPER)\n return gpg.gen_key(gpg.gen_key_input(\n key_type=GPG_KEY_TYPE, key_length=GPG_KEY_LENGTH,\n passphrase=secret,\n name_email=name\n ))\n\n\ndef delete_reply_keypair(source_filesystem_id):\n key = getkey(source_filesystem_id)\n # If this source was never flagged for review, they won't have a reply\n # keypair\n if not key:\n return\n # The private key needs to be deleted before the public key can be deleted\n # http://pythonhosted.org/python-gnupg/#deleting-keys\n gpg.delete_keys(key, True) # private key\n gpg.delete_keys(key) # public key\n # TODO: srm?\n\n\ndef getkey(name):\n for key in gpg.list_keys():\n for uid in key['uids']:\n if name in uid:\n return key['fingerprint']\n return None\n\n\ndef encrypt(plaintext, fingerprints, output=None):\n # Verify the output path\n if output:\n store.verify(output)\n\n if not isinstance(fingerprints, (list, tuple)):\n fingerprints = [fingerprints, ]\n # Remove any spaces from provided fingerprints GPG outputs fingerprints\n # with spaces for readability, but requires the spaces to be removed when\n # using fingerprints to specify recipients.\n fingerprints = [fpr.replace(' ', '') for fpr in fingerprints]\n\n if not _is_stream(plaintext):\n plaintext = _make_binary_stream(plaintext, \"utf_8\")\n\n out = gpg.encrypt(plaintext,\n *fingerprints,\n output=output,\n always_trust=True,\n armor=False)\n if out.ok:\n return out.data\n else:\n raise CryptoException(out.stderr)\n\n\ndef decrypt(secret, ciphertext):\n \"\"\"\n >>> key = genkeypair('randomid', 'randomid')\n >>> decrypt('randomid',\n ... encrypt('Goodbye, cruel world!', str(key))\n ... )\n 'Goodbye, cruel world!'\n \"\"\"\n hashed_codename = hash_codename(secret, salt=SCRYPT_GPG_PEPPER)\n return gpg.decrypt(ciphertext, passphrase=hashed_codename).data\n\nif __name__ == \"__main__\": # pragma: no cover\n import doctest\n doctest.testmod()\n", "path": "securedrop/crypto_util.py"}]} | 2,530 | 182 |
gh_patches_debug_36567 | rasdani/github-patches | git_diff | Slicer__ExtensionsIndex-1759 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bad dependencies kill entire extension build
[SlicerVideoCamera name change](https://github.com/Slicer/ExtensionsIndex/commit/93d1942ed51a5c576f477dab77df9529ce788754) introduced this [bug](https://github.com/Slicer/ExtensionsIndex/commit/4181b49933cca4bf1420d1b8f7b54017bbfe131c) where an extension had a non-existent dependency.
Resulting [CMake Error](https://slicer.cdash.org/build/2225046/configure) terminated the whole build process.
</issue>
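One way to catch this class of error before it ever reaches the CMake configure step is to validate every description file's `depends` entries against the set of extensions that actually have description files in the index. A rough sketch of that idea, reusing the `parse_s4ext` helper shown in the script below; the function name and return shape are illustrative assumptions, not the project's actual checker:

```
import os

def find_missing_dependencies(directory):
    """Map each extension to the dependencies that have no matching .s4ext file."""
    descriptions = {}
    for filename in os.listdir(directory):
        if filename.endswith(".s4ext"):
            name = os.path.splitext(filename)[0]
            descriptions[name] = parse_s4ext(os.path.join(directory, filename))

    missing = {}
    for name, metadata in descriptions.items():
        for dependency in (metadata.get("depends") or "").split():
            if dependency == "NA":  # conventional placeholder for "no dependencies"
                continue
            if dependency not in descriptions:
                missing.setdefault(name, []).append(dependency)
    return missing
```

Reporting these as description-file errors would fail only the offending extension's check instead of terminating the whole build.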
<code>
[start of scripts/check_description_files.py]
1 #!/usr/bin/env python
2
3 """
4 Python 3.x CLI for validating extension description files.
5 """
6
7 import argparse
8 import os
9 import sys
10 import textwrap
11 import urllib.parse as urlparse
12
13 from functools import wraps
14
15
16 class ExtensionCheckError(RuntimeError):
17 """Exception raised when a particular extension check failed.
18 """
19 def __init__(self, extension_name, check_name, details):
20 self.extension_name = extension_name
21 self.check_name = check_name
22 self.details = details
23
24 def __str__(self):
25 return self.details
26
27
28 def require_metadata_key(metadata_key):
29 check_name = "require_metadata_key"
30
31 def dec(fun):
32 @wraps(fun)
33 def wrapped(*args, **kwargs):
34 extension_name = args[0]
35 metadata = args[1]
36 if metadata_key not in metadata.keys():
37 raise ExtensionCheckError(extension_name, check_name, "%s key is missing" % metadata_key)
38 return fun(*args, **kwargs)
39 return wrapped
40 return dec
41
42
43 def parse_s4ext(ext_file_path):
44 """Parse a Slicer extension description file.
45 :param ext_file_path: Path to a Slicer extension description file.
46 :return: Dictionary of extension metadata.
47 """
48 ext_metadata = {}
49 with open(ext_file_path) as ext_file:
50 for line in ext_file:
51 if not line.strip() or line.startswith("#"):
52 continue
53 fields = [field.strip() for field in line.split(' ', 1)]
54 assert(len(fields) <= 2)
55 ext_metadata[fields[0]] = fields[1] if len(fields) == 2 else None
56 return ext_metadata
57
58
59 @require_metadata_key("scmurl")
60 def check_scmurl_syntax(extension_name, metadata):
61 check_name = "check_scmurl_syntax"
62
63 if "://" not in metadata["scmurl"]:
64 raise ExtensionCheckError(extension_name, check_name, "scmurl do not match scheme://host/path")
65
66 supported_schemes = ["git", "https", "svn"]
67 scheme = urlparse.urlsplit(metadata["scmurl"]).scheme
68 if scheme not in supported_schemes:
69 raise ExtensionCheckError(
70 extension_name, check_name,
71 "scmurl scheme is '%s' but it should by any of %s" % (scheme, supported_schemes))
72
73
74 @require_metadata_key("scmurl")
75 @require_metadata_key("scm")
76 def check_git_repository_name(extension_name, metadata):
77 """See https://www.slicer.org/wiki/Documentation/Nightly/Developers/FAQ#Should_the_name_of_the_source_repository_match_the_name_of_the_extension_.3F
78 """
79 check_name = "check_git_repository_name"
80
81 if metadata["scm"] != "git":
82 return
83
84 repo_name = os.path.splitext(urlparse.urlsplit(metadata["scmurl"]).path.split("/")[-1])[0]
85
86 if not repo_name.startswith("Slicer"):
87
88 variations = [prefix + repo_name for prefix in ["Slicer-", "Slicer_", "SlicerExtension-", "SlicerExtension_"]]
89
90 raise ExtensionCheckError(
91 extension_name, check_name,
92 textwrap.dedent("""
93 extension repository name is '%s'. Please, consider changing it to 'Slicer%s' or any of
94 these variations %s.
95 """ % (
96 repo_name, repo_name, variations)))
97
98
99 def main():
100 parser = argparse.ArgumentParser(
101 description='Validate extension description files.')
102 parser.add_argument(
103 "--check-git-repository-name", action="store_true",
104 help="Check extension git repository name. Disabled by default.")
105 parser.add_argument("/path/to/description.s4ext", nargs='*')
106 args = parser.parse_args()
107
108 checks = []
109
110 if args.check_git_repository_name:
111 checks.append(check_git_repository_name)
112
113 if not checks:
114 checks = [
115 check_scmurl_syntax,
116 ]
117
118 total_failure_count = 0
119
120 file_paths = getattr(args, "/path/to/description.s4ext")
121 for file_path in file_paths:
122 extension_name = os.path.splitext(os.path.basename(file_path))[0]
123
124 failures = []
125
126 metadata = parse_s4ext(file_path)
127 for check in checks:
128 try:
129 check(extension_name, metadata)
130 except ExtensionCheckError as exc:
131 failures.append(str(exc))
132
133 if failures:
134 total_failure_count += len(failures)
135 print("%s.s4ext" % extension_name)
136 for failure in set(failures):
137 print(" %s" % failure)
138
139 print("Checked %d description files: Found %d errors" % (len(file_paths), total_failure_count))
140 sys.exit(total_failure_count)
141
142
143 if __name__ == "__main__":
144 main()
145
[end of scripts/check_description_files.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scripts/check_description_files.py b/scripts/check_description_files.py
--- a/scripts/check_description_files.py
+++ b/scripts/check_description_files.py
@@ -95,6 +95,38 @@
""" % (
repo_name, repo_name, variations)))
+def check_dependencies(directory):
+ import os
+ required_extensions = {} # for each extension it contains a list of extensions that require it
+ available_extensions = []
+ for filename in os.listdir(directory):
+ f = os.path.join(directory, filename)
+ if not os.path.isfile(f):
+ continue
+ extension_name = os.path.splitext(os.path.basename(filename))[0]
+ available_extensions.append(extension_name)
+ extension_description = parse_s4ext(f)
+ if 'depends' not in extension_description:
+ continue
+ dependencies = extension_description['depends'].split(' ')
+ for dependency in dependencies:
+ if dependency == 'NA':
+ # special value, just a placeholder that must be ignored
+ continue
+ if dependency in required_extensions:
+ required_extensions[dependency].append(extension_name)
+ else:
+ required_extensions[dependency] = [extension_name]
+ print(f"Checked dependency between {len(available_extensions)} extensions.")
+ error_count = 0
+ for extension in required_extensions:
+ if extension in available_extensions:
+ # required extension is found
+ continue
+ required_by_extensions = ', '.join(required_extensions[extension])
+ print(f"{extension} extension is not found. It is required by extension: {required_by_extensions}.")
+ error_count += 1
+ return error_count
def main():
parser = argparse.ArgumentParser(
@@ -102,6 +134,7 @@
parser.add_argument(
"--check-git-repository-name", action="store_true",
help="Check extension git repository name. Disabled by default.")
+ parser.add_argument("-d", "--check-dependencies", help="Check all extension dsecription files in the provided folder.")
parser.add_argument("/path/to/description.s4ext", nargs='*')
args = parser.parse_args()
@@ -136,7 +169,13 @@
for failure in set(failures):
print(" %s" % failure)
- print("Checked %d description files: Found %d errors" % (len(file_paths), total_failure_count))
+ print(f"Checked content of {len(file_paths)} description files.")
+
+
+ if args.check_dependencies:
+ total_failure_count += check_dependencies(args.check_dependencies)
+
+ print(f"Total errors found in extension descriptions: {total_failure_count}")
sys.exit(total_failure_count)
| {"golden_diff": "diff --git a/scripts/check_description_files.py b/scripts/check_description_files.py\n--- a/scripts/check_description_files.py\n+++ b/scripts/check_description_files.py\n@@ -95,6 +95,38 @@\n \"\"\" % (\n repo_name, repo_name, variations)))\n \n+def check_dependencies(directory):\n+ import os\n+ required_extensions = {} # for each extension it contains a list of extensions that require it\n+ available_extensions = []\n+ for filename in os.listdir(directory):\n+ f = os.path.join(directory, filename)\n+ if not os.path.isfile(f):\n+ continue\n+ extension_name = os.path.splitext(os.path.basename(filename))[0]\n+ available_extensions.append(extension_name)\n+ extension_description = parse_s4ext(f)\n+ if 'depends' not in extension_description:\n+ continue\n+ dependencies = extension_description['depends'].split(' ')\n+ for dependency in dependencies:\n+ if dependency == 'NA':\n+ # special value, just a placeholder that must be ignored\n+ continue\n+ if dependency in required_extensions:\n+ required_extensions[dependency].append(extension_name)\n+ else:\n+ required_extensions[dependency] = [extension_name]\n+ print(f\"Checked dependency between {len(available_extensions)} extensions.\")\n+ error_count = 0\n+ for extension in required_extensions:\n+ if extension in available_extensions:\n+ # required extension is found\n+ continue\n+ required_by_extensions = ', '.join(required_extensions[extension])\n+ print(f\"{extension} extension is not found. It is required by extension: {required_by_extensions}.\")\n+ error_count += 1\n+ return error_count\n \n def main():\n parser = argparse.ArgumentParser(\n@@ -102,6 +134,7 @@\n parser.add_argument(\n \"--check-git-repository-name\", action=\"store_true\",\n help=\"Check extension git repository name. 
Disabled by default.\")\n+ parser.add_argument(\"-d\", \"--check-dependencies\", help=\"Check all extension dsecription files in the provided folder.\")\n parser.add_argument(\"/path/to/description.s4ext\", nargs='*')\n args = parser.parse_args()\n \n@@ -136,7 +169,13 @@\n for failure in set(failures):\n print(\" %s\" % failure)\n \n- print(\"Checked %d description files: Found %d errors\" % (len(file_paths), total_failure_count))\n+ print(f\"Checked content of {len(file_paths)} description files.\")\n+\n+\n+ if args.check_dependencies:\n+ total_failure_count += check_dependencies(args.check_dependencies)\n+\n+ print(f\"Total errors found in extension descriptions: {total_failure_count}\")\n sys.exit(total_failure_count)\n", "issue": "Bad dependencies kill entire extension build\n[SlicerVideoCamera name change](https://github.com/Slicer/ExtensionsIndex/commit/93d1942ed51a5c576f477dab77df9529ce788754) introduced this [bug](https://github.com/Slicer/ExtensionsIndex/commit/4181b49933cca4bf1420d1b8f7b54017bbfe131c) where an extension had a non-existent dependency.\r\n\r\nResulting [CMake Error](https://slicer.cdash.org/build/2225046/configure) terminated the whole build process.\n", "before_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"\nPython 3.x CLI for validating extension description files.\n\"\"\"\n\nimport argparse\nimport os\nimport sys\nimport textwrap\nimport urllib.parse as urlparse\n\nfrom functools import wraps\n\n\nclass ExtensionCheckError(RuntimeError):\n \"\"\"Exception raised when a particular extension check failed.\n \"\"\"\n def __init__(self, extension_name, check_name, details):\n self.extension_name = extension_name\n self.check_name = check_name\n self.details = details\n\n def __str__(self):\n return self.details\n\n\ndef require_metadata_key(metadata_key):\n check_name = \"require_metadata_key\"\n\n def dec(fun):\n @wraps(fun)\n def wrapped(*args, **kwargs):\n extension_name = args[0]\n metadata = args[1]\n if metadata_key not in metadata.keys():\n raise ExtensionCheckError(extension_name, check_name, \"%s key is missing\" % metadata_key)\n return fun(*args, **kwargs)\n return wrapped\n return dec\n\n\ndef parse_s4ext(ext_file_path):\n \"\"\"Parse a Slicer extension description file.\n :param ext_file_path: Path to a Slicer extension description file.\n :return: Dictionary of extension metadata.\n \"\"\"\n ext_metadata = {}\n with open(ext_file_path) as ext_file:\n for line in ext_file:\n if not line.strip() or line.startswith(\"#\"):\n continue\n fields = [field.strip() for field in line.split(' ', 1)]\n assert(len(fields) <= 2)\n ext_metadata[fields[0]] = fields[1] if len(fields) == 2 else None\n return ext_metadata\n\n\n@require_metadata_key(\"scmurl\")\ndef check_scmurl_syntax(extension_name, metadata):\n check_name = \"check_scmurl_syntax\"\n\n if \"://\" not in metadata[\"scmurl\"]:\n raise ExtensionCheckError(extension_name, check_name, \"scmurl do not match scheme://host/path\")\n\n supported_schemes = [\"git\", \"https\", \"svn\"]\n scheme = urlparse.urlsplit(metadata[\"scmurl\"]).scheme\n if scheme not in supported_schemes:\n raise ExtensionCheckError(\n extension_name, check_name,\n \"scmurl scheme is '%s' but it should by any of %s\" % (scheme, supported_schemes))\n\n\n@require_metadata_key(\"scmurl\")\n@require_metadata_key(\"scm\")\ndef check_git_repository_name(extension_name, metadata):\n \"\"\"See https://www.slicer.org/wiki/Documentation/Nightly/Developers/FAQ#Should_the_name_of_the_source_repository_match_the_name_of_the_extension_.3F\n 
\"\"\"\n check_name = \"check_git_repository_name\"\n\n if metadata[\"scm\"] != \"git\":\n return\n\n repo_name = os.path.splitext(urlparse.urlsplit(metadata[\"scmurl\"]).path.split(\"/\")[-1])[0]\n\n if not repo_name.startswith(\"Slicer\"):\n\n variations = [prefix + repo_name for prefix in [\"Slicer-\", \"Slicer_\", \"SlicerExtension-\", \"SlicerExtension_\"]]\n\n raise ExtensionCheckError(\n extension_name, check_name,\n textwrap.dedent(\"\"\"\n extension repository name is '%s'. Please, consider changing it to 'Slicer%s' or any of\n these variations %s.\n \"\"\" % (\n repo_name, repo_name, variations)))\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description='Validate extension description files.')\n parser.add_argument(\n \"--check-git-repository-name\", action=\"store_true\",\n help=\"Check extension git repository name. Disabled by default.\")\n parser.add_argument(\"/path/to/description.s4ext\", nargs='*')\n args = parser.parse_args()\n\n checks = []\n\n if args.check_git_repository_name:\n checks.append(check_git_repository_name)\n\n if not checks:\n checks = [\n check_scmurl_syntax,\n ]\n\n total_failure_count = 0\n\n file_paths = getattr(args, \"/path/to/description.s4ext\")\n for file_path in file_paths:\n extension_name = os.path.splitext(os.path.basename(file_path))[0]\n\n failures = []\n \n metadata = parse_s4ext(file_path)\n for check in checks:\n try:\n check(extension_name, metadata)\n except ExtensionCheckError as exc:\n failures.append(str(exc))\n\n if failures:\n total_failure_count += len(failures)\n print(\"%s.s4ext\" % extension_name)\n for failure in set(failures):\n print(\" %s\" % failure)\n\n print(\"Checked %d description files: Found %d errors\" % (len(file_paths), total_failure_count))\n sys.exit(total_failure_count)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "scripts/check_description_files.py"}]} | 2,050 | 591 |
gh_patches_debug_32378 | rasdani/github-patches | git_diff | optuna__optuna-4684 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove experimental label from `_ProgressBar`
### Motivation
Several issues related to `_ProgressBar` have been already addressed (ref: https://github.com/optuna/optuna/issues/2892, https://github.com/optuna/optuna/issues/2957, https://github.com/optuna/optuna/issues/2958). Now we can remove the experimental label from `_ProgressBar`.
### Suggestion
Remove the `@experimental_func` decorator from `_ProgressBar`. Also, `_init_valid` method can be removed as explained in [TODO comment](https://github.com/optuna/optuna/blob/806448420863606c113aeb2e33457acf022be066/optuna/progress_bar.py#L57C28-L58).
### Additional context (optional)
_No response_
</issue>
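For reference, the suggestion mostly amounts to folding the body of `_init_valid` into the constructor and dropping the decorator. A rough sketch of the resulting `__init__` (type hints omitted; an illustration of the suggested shape, not necessarily the exact final code):

```
    def __init__(self, is_valid, n_trials=None, timeout=None):
        self._is_valid = is_valid and (n_trials or timeout) is not None
        self._n_trials = n_trials
        self._timeout = timeout
        self._last_elapsed_seconds = 0.0

        if self._is_valid:
            # formerly the body of the experimental `_init_valid` helper
            if self._n_trials is not None:
                self._progress_bar = tqdm(total=self._n_trials)
            elif self._timeout is not None:
                total = tqdm.format_interval(self._timeout)
                fmt = "{desc} {percentage:3.0f}%|{bar}| {elapsed}/" + total
                self._progress_bar = tqdm(total=self._timeout, bar_format=fmt)
            else:
                assert False
            # ...then install the tqdm-aware logging handler exactly as before
```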
<code>
[start of optuna/progress_bar.py]
1 import logging
2 from typing import Any
3 from typing import Optional
4 from typing import TYPE_CHECKING
5
6 from tqdm.auto import tqdm
7
8 from optuna import logging as optuna_logging
9 from optuna._experimental import experimental_func
10
11
12 if TYPE_CHECKING:
13 from optuna.study import Study
14
15 _tqdm_handler: Optional["_TqdmLoggingHandler"] = None
16
17
18 # Reference: https://gist.github.com/hvy/8b80c2cedf02b15c24f85d1fa17ebe02
19 class _TqdmLoggingHandler(logging.StreamHandler):
20 def emit(self, record: Any) -> None:
21 try:
22 msg = self.format(record)
23 tqdm.write(msg)
24 self.flush()
25 except (KeyboardInterrupt, SystemExit):
26 raise
27 except Exception:
28 self.handleError(record)
29
30
31 class _ProgressBar:
32 """Progress Bar implementation for :func:`~optuna.study.Study.optimize` on the top of `tqdm`.
33
34 Args:
35 is_valid:
36 Whether to show progress bars in :func:`~optuna.study.Study.optimize`.
37 n_trials:
38 The number of trials.
39 timeout:
40 Stop study after the given number of second(s).
41 """
42
43 def __init__(
44 self,
45 is_valid: bool,
46 n_trials: Optional[int] = None,
47 timeout: Optional[float] = None,
48 ) -> None:
49 self._is_valid = is_valid and (n_trials or timeout) is not None
50 self._n_trials = n_trials
51 self._timeout = timeout
52 self._last_elapsed_seconds = 0.0
53
54 if self._is_valid:
55 self._init_valid()
56
57 # TODO(hvy): Remove initialization indirection via this method when the progress bar is no
58 # longer experimental.
59 @experimental_func("1.2.0", name="Progress bar")
60 def _init_valid(self) -> None:
61 if self._n_trials is not None:
62 self._progress_bar = tqdm(total=self._n_trials)
63
64 elif self._timeout is not None:
65 total = tqdm.format_interval(self._timeout)
66 fmt = "{desc} {percentage:3.0f}%|{bar}| {elapsed}/" + total
67 self._progress_bar = tqdm(total=self._timeout, bar_format=fmt)
68 else:
69 assert False
70
71 global _tqdm_handler
72
73 _tqdm_handler = _TqdmLoggingHandler()
74 _tqdm_handler.setLevel(logging.INFO)
75 _tqdm_handler.setFormatter(optuna_logging.create_default_formatter())
76 optuna_logging.disable_default_handler()
77 optuna_logging._get_library_root_logger().addHandler(_tqdm_handler)
78
79 def update(self, elapsed_seconds: float, study: "Study") -> None:
80 """Update the progress bars if ``is_valid`` is :obj:`True`.
81
82 Args:
83 elapsed_seconds:
84 The time past since :func:`~optuna.study.Study.optimize` started.
85 study:
86 The current study object.
87 """
88
89 if self._is_valid:
90 if not study._is_multi_objective():
91 # Not updating the progress bar when there are no complete trial.
92 try:
93 msg = (
94 f"Best trial: {study.best_trial.number}. "
95 f"Best value: {study.best_value:.6g}"
96 )
97
98 self._progress_bar.set_description(msg)
99 except ValueError:
100 pass
101
102 if self._n_trials is not None:
103 self._progress_bar.update(1)
104 if self._timeout is not None:
105 self._progress_bar.set_postfix_str(
106 "{:.02f}/{} seconds".format(elapsed_seconds, self._timeout)
107 )
108
109 elif self._timeout is not None:
110 time_diff = elapsed_seconds - self._last_elapsed_seconds
111 if elapsed_seconds > self._timeout:
112 # Clip elapsed time to avoid tqdm warnings.
113 time_diff -= elapsed_seconds - self._timeout
114
115 self._progress_bar.update(time_diff)
116 self._last_elapsed_seconds = elapsed_seconds
117
118 else:
119 assert False
120
121 def close(self) -> None:
122 """Close progress bars."""
123
124 if self._is_valid:
125 self._progress_bar.close()
126 assert _tqdm_handler is not None
127 optuna_logging._get_library_root_logger().removeHandler(_tqdm_handler)
128 optuna_logging.enable_default_handler()
129
[end of optuna/progress_bar.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/optuna/progress_bar.py b/optuna/progress_bar.py
--- a/optuna/progress_bar.py
+++ b/optuna/progress_bar.py
@@ -6,7 +6,6 @@
from tqdm.auto import tqdm
from optuna import logging as optuna_logging
-from optuna._experimental import experimental_func
if TYPE_CHECKING:
@@ -52,29 +51,22 @@
self._last_elapsed_seconds = 0.0
if self._is_valid:
- self._init_valid()
-
- # TODO(hvy): Remove initialization indirection via this method when the progress bar is no
- # longer experimental.
- @experimental_func("1.2.0", name="Progress bar")
- def _init_valid(self) -> None:
- if self._n_trials is not None:
- self._progress_bar = tqdm(total=self._n_trials)
-
- elif self._timeout is not None:
- total = tqdm.format_interval(self._timeout)
- fmt = "{desc} {percentage:3.0f}%|{bar}| {elapsed}/" + total
- self._progress_bar = tqdm(total=self._timeout, bar_format=fmt)
- else:
- assert False
-
- global _tqdm_handler
-
- _tqdm_handler = _TqdmLoggingHandler()
- _tqdm_handler.setLevel(logging.INFO)
- _tqdm_handler.setFormatter(optuna_logging.create_default_formatter())
- optuna_logging.disable_default_handler()
- optuna_logging._get_library_root_logger().addHandler(_tqdm_handler)
+ if self._n_trials is not None:
+ self._progress_bar = tqdm(total=self._n_trials)
+ elif self._timeout is not None:
+ total = tqdm.format_interval(self._timeout)
+ fmt = "{desc} {percentage:3.0f}%|{bar}| {elapsed}/" + total
+ self._progress_bar = tqdm(total=self._timeout, bar_format=fmt)
+ else:
+ assert False
+
+ global _tqdm_handler
+
+ _tqdm_handler = _TqdmLoggingHandler()
+ _tqdm_handler.setLevel(logging.INFO)
+ _tqdm_handler.setFormatter(optuna_logging.create_default_formatter())
+ optuna_logging.disable_default_handler()
+ optuna_logging._get_library_root_logger().addHandler(_tqdm_handler)
def update(self, elapsed_seconds: float, study: "Study") -> None:
"""Update the progress bars if ``is_valid`` is :obj:`True`.
| {"golden_diff": "diff --git a/optuna/progress_bar.py b/optuna/progress_bar.py\n--- a/optuna/progress_bar.py\n+++ b/optuna/progress_bar.py\n@@ -6,7 +6,6 @@\n from tqdm.auto import tqdm\n \n from optuna import logging as optuna_logging\n-from optuna._experimental import experimental_func\n \n \n if TYPE_CHECKING:\n@@ -52,29 +51,22 @@\n self._last_elapsed_seconds = 0.0\n \n if self._is_valid:\n- self._init_valid()\n-\n- # TODO(hvy): Remove initialization indirection via this method when the progress bar is no\n- # longer experimental.\n- @experimental_func(\"1.2.0\", name=\"Progress bar\")\n- def _init_valid(self) -> None:\n- if self._n_trials is not None:\n- self._progress_bar = tqdm(total=self._n_trials)\n-\n- elif self._timeout is not None:\n- total = tqdm.format_interval(self._timeout)\n- fmt = \"{desc} {percentage:3.0f}%|{bar}| {elapsed}/\" + total\n- self._progress_bar = tqdm(total=self._timeout, bar_format=fmt)\n- else:\n- assert False\n-\n- global _tqdm_handler\n-\n- _tqdm_handler = _TqdmLoggingHandler()\n- _tqdm_handler.setLevel(logging.INFO)\n- _tqdm_handler.setFormatter(optuna_logging.create_default_formatter())\n- optuna_logging.disable_default_handler()\n- optuna_logging._get_library_root_logger().addHandler(_tqdm_handler)\n+ if self._n_trials is not None:\n+ self._progress_bar = tqdm(total=self._n_trials)\n+ elif self._timeout is not None:\n+ total = tqdm.format_interval(self._timeout)\n+ fmt = \"{desc} {percentage:3.0f}%|{bar}| {elapsed}/\" + total\n+ self._progress_bar = tqdm(total=self._timeout, bar_format=fmt)\n+ else:\n+ assert False\n+\n+ global _tqdm_handler\n+\n+ _tqdm_handler = _TqdmLoggingHandler()\n+ _tqdm_handler.setLevel(logging.INFO)\n+ _tqdm_handler.setFormatter(optuna_logging.create_default_formatter())\n+ optuna_logging.disable_default_handler()\n+ optuna_logging._get_library_root_logger().addHandler(_tqdm_handler)\n \n def update(self, elapsed_seconds: float, study: \"Study\") -> None:\n \"\"\"Update the progress bars if ``is_valid`` is :obj:`True`.\n", "issue": "Remove experimental label from `_ProgressBar`\n### Motivation\n\nSeveral issues related to `_ProgressBar` have been already addressed (ref: https://github.com/optuna/optuna/issues/2892, https://github.com/optuna/optuna/issues/2957, https://github.com/optuna/optuna/issues/2958). Now we can remove the experimental label from `_ProgressBar`.\n\n### Suggestion\n\nRemove the `@experimental_func` decorator from `_ProgressBar`. 
Also, `_init_valid` method can be removed as explained in [TODO comment](https://github.com/optuna/optuna/blob/806448420863606c113aeb2e33457acf022be066/optuna/progress_bar.py#L57C28-L58).\n\n### Additional context (optional)\n\n_No response_\n", "before_files": [{"content": "import logging\nfrom typing import Any\nfrom typing import Optional\nfrom typing import TYPE_CHECKING\n\nfrom tqdm.auto import tqdm\n\nfrom optuna import logging as optuna_logging\nfrom optuna._experimental import experimental_func\n\n\nif TYPE_CHECKING:\n from optuna.study import Study\n\n_tqdm_handler: Optional[\"_TqdmLoggingHandler\"] = None\n\n\n# Reference: https://gist.github.com/hvy/8b80c2cedf02b15c24f85d1fa17ebe02\nclass _TqdmLoggingHandler(logging.StreamHandler):\n def emit(self, record: Any) -> None:\n try:\n msg = self.format(record)\n tqdm.write(msg)\n self.flush()\n except (KeyboardInterrupt, SystemExit):\n raise\n except Exception:\n self.handleError(record)\n\n\nclass _ProgressBar:\n \"\"\"Progress Bar implementation for :func:`~optuna.study.Study.optimize` on the top of `tqdm`.\n\n Args:\n is_valid:\n Whether to show progress bars in :func:`~optuna.study.Study.optimize`.\n n_trials:\n The number of trials.\n timeout:\n Stop study after the given number of second(s).\n \"\"\"\n\n def __init__(\n self,\n is_valid: bool,\n n_trials: Optional[int] = None,\n timeout: Optional[float] = None,\n ) -> None:\n self._is_valid = is_valid and (n_trials or timeout) is not None\n self._n_trials = n_trials\n self._timeout = timeout\n self._last_elapsed_seconds = 0.0\n\n if self._is_valid:\n self._init_valid()\n\n # TODO(hvy): Remove initialization indirection via this method when the progress bar is no\n # longer experimental.\n @experimental_func(\"1.2.0\", name=\"Progress bar\")\n def _init_valid(self) -> None:\n if self._n_trials is not None:\n self._progress_bar = tqdm(total=self._n_trials)\n\n elif self._timeout is not None:\n total = tqdm.format_interval(self._timeout)\n fmt = \"{desc} {percentage:3.0f}%|{bar}| {elapsed}/\" + total\n self._progress_bar = tqdm(total=self._timeout, bar_format=fmt)\n else:\n assert False\n\n global _tqdm_handler\n\n _tqdm_handler = _TqdmLoggingHandler()\n _tqdm_handler.setLevel(logging.INFO)\n _tqdm_handler.setFormatter(optuna_logging.create_default_formatter())\n optuna_logging.disable_default_handler()\n optuna_logging._get_library_root_logger().addHandler(_tqdm_handler)\n\n def update(self, elapsed_seconds: float, study: \"Study\") -> None:\n \"\"\"Update the progress bars if ``is_valid`` is :obj:`True`.\n\n Args:\n elapsed_seconds:\n The time past since :func:`~optuna.study.Study.optimize` started.\n study:\n The current study object.\n \"\"\"\n\n if self._is_valid:\n if not study._is_multi_objective():\n # Not updating the progress bar when there are no complete trial.\n try:\n msg = (\n f\"Best trial: {study.best_trial.number}. 
\"\n f\"Best value: {study.best_value:.6g}\"\n )\n\n self._progress_bar.set_description(msg)\n except ValueError:\n pass\n\n if self._n_trials is not None:\n self._progress_bar.update(1)\n if self._timeout is not None:\n self._progress_bar.set_postfix_str(\n \"{:.02f}/{} seconds\".format(elapsed_seconds, self._timeout)\n )\n\n elif self._timeout is not None:\n time_diff = elapsed_seconds - self._last_elapsed_seconds\n if elapsed_seconds > self._timeout:\n # Clip elapsed time to avoid tqdm warnings.\n time_diff -= elapsed_seconds - self._timeout\n\n self._progress_bar.update(time_diff)\n self._last_elapsed_seconds = elapsed_seconds\n\n else:\n assert False\n\n def close(self) -> None:\n \"\"\"Close progress bars.\"\"\"\n\n if self._is_valid:\n self._progress_bar.close()\n assert _tqdm_handler is not None\n optuna_logging._get_library_root_logger().removeHandler(_tqdm_handler)\n optuna_logging.enable_default_handler()\n", "path": "optuna/progress_bar.py"}]} | 1,953 | 570 |
gh_patches_debug_21275 | rasdani/github-patches | git_diff | goauthentik__authentik-5657 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SCIM provider automatic sync not triggering since 2023.5.0
**Describe the bug**
SCIM provider automatic sync is not triggering (hourly & on user/group change).
I just upgraded to 2023.5.0 yesterday and (I think) the sync has been broken since the upgrade. I was previously using 2023.3.1 and 2023.4.1 with the SCIM provider to provision AWS SSO (IAM Identity Center) and those triggers worked (though without the PATCH support added in 2023.5.0, which was the main reason for this upgrade).
**To Reproduce**
Configure a SCIM provider and wait for the hourly full sync, or try to add/remove a member in a group or create a new user.
**Expected behavior**
From [documentation](https://goauthentik.io/docs/providers/scim/#syncing):
```
Data is synchronized in multiple ways:
When a user/group is created/modified/deleted, that action is sent to all SCIM providers
Periodically (once an hour), all SCIM providers are fully synchronized
```
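For reference, this dual trigger is the usual Django-signal-plus-Celery-beat wiring. Below is a minimal sketch of it; the receiver module, arguments and beat entry are assumptions for illustration rather than authentik's actual code, but `scim_signal_direct` and `scim_sync_all` are the real tasks defined in `authentik/providers/scim/tasks.py` (included further down).
```python
# Illustrative wiring only; module paths, arguments and the beat entry are assumptions.
from django.db.models.signals import post_save
from django.dispatch import receiver

from authentik.core.models import User
from authentik.providers.scim.tasks import scim_signal_direct


@receiver(post_save, sender=User)
def user_changed(sender, instance, **kwargs):
    # Per-change trigger: push the modified object to all SCIM providers.
    scim_signal_direct.delay("authentik.core.models.User", instance.pk, "add")


# Periodic trigger: hourly full sync via Celery beat (name and interval assumed).
CELERY_BEAT_SCHEDULE = {
    "scim_sync_all": {
        "task": "authentik.providers.scim.tasks.scim_sync_all",
        "schedule": 60 * 60,
    },
}
```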
**Screenshots**
No screenshots for this case.
**Logs**
There are no sync events in the logs. Logs are configured at trace level.
The only SCIM events I see are:
```
2023-05-17 09:24:58 | {"event": "Task started", "level": "info", "logger": "authentik.root.celery", "pid": 5476, "task_id": "d2f7357b-caf3-40b9-8750-93134c36badf", "task_name": "scim_signal_direct", "timestamp": "2023-05-17T12:24:58.174683"}
2023-05-17 09:24:58 | {"event": "Task finished", "level": "info", "logger": "authentik.root.celery", "pid": 5476, "state": "SUCCESS", "task_id": "d2f7357b-caf3-40b9-8750-93134c36badf", "task_name": "scim_signal_direct", "timestamp": "2023-05-17T12:24:58.210445"}
```
**Version and Deployment (please complete the following information):**
- authentik version: 2023.5.0
- Deployment: Helm chart
**Additional context**
If I run the sync manually it works, but the full sync only adds/replaces objects; it doesn't remove users from a group, since I think it's only incremental and the removal of a member should happen when the group is modified.
</issue>
<code>
[start of authentik/providers/scim/tasks.py]
1 """SCIM Provider tasks"""
2 from typing import Any, Optional
3
4 from celery.result import allow_join_result
5 from django.core.paginator import Paginator
6 from django.db.models import Model, QuerySet
7 from django.utils.text import slugify
8 from django.utils.translation import gettext_lazy as _
9 from pydanticscim.responses import PatchOp
10 from structlog.stdlib import get_logger
11
12 from authentik.core.models import Group, User
13 from authentik.events.monitored_tasks import MonitoredTask, TaskResult, TaskResultStatus
14 from authentik.lib.utils.reflection import path_to_class
15 from authentik.providers.scim.clients import PAGE_SIZE
16 from authentik.providers.scim.clients.base import SCIMClient
17 from authentik.providers.scim.clients.exceptions import SCIMRequestException, StopSync
18 from authentik.providers.scim.clients.group import SCIMGroupClient
19 from authentik.providers.scim.clients.user import SCIMUserClient
20 from authentik.providers.scim.models import SCIMProvider
21 from authentik.root.celery import CELERY_APP
22
23 LOGGER = get_logger(__name__)
24
25
26 def client_for_model(provider: SCIMProvider, model: Model) -> SCIMClient:
27 """Get SCIM client for model"""
28 if isinstance(model, User):
29 return SCIMUserClient(provider)
30 if isinstance(model, Group):
31 return SCIMGroupClient(provider)
32 raise ValueError(f"Invalid model {model}")
33
34
35 @CELERY_APP.task()
36 def scim_sync_all():
37 """Run sync for all providers"""
38 for provider in SCIMProvider.objects.filter(backchannel_application__isnull=False):
39 scim_sync.delay(provider.pk)
40
41
42 @CELERY_APP.task(bind=True, base=MonitoredTask)
43 def scim_sync(self: MonitoredTask, provider_pk: int) -> None:
44 """Run SCIM full sync for provider"""
45 provider: SCIMProvider = SCIMProvider.objects.filter(pk=provider_pk).first()
46 if not provider:
47 return
48 self.set_uid(slugify(provider.name))
49 result = TaskResult(TaskResultStatus.SUCCESSFUL, [])
50 result.messages.append(_("Starting full SCIM sync"))
51 LOGGER.debug("Starting SCIM sync")
52 users_paginator = Paginator(provider.get_user_qs(), PAGE_SIZE)
53 groups_paginator = Paginator(provider.get_group_qs(), PAGE_SIZE)
54 with allow_join_result():
55 try:
56 for page in users_paginator.page_range:
57 result.messages.append(_("Syncing page %(page)d of users" % {"page": page}))
58 for msg in scim_sync_users.delay(page, provider_pk).get():
59 result.messages.append(msg)
60 for page in groups_paginator.page_range:
61 result.messages.append(_("Syncing page %(page)d of groups" % {"page": page}))
62 for msg in scim_sync_group.delay(page, provider_pk).get():
63 result.messages.append(msg)
64 except StopSync as exc:
65 self.set_status(TaskResult(TaskResultStatus.ERROR).with_error(exc))
66 return
67 self.set_status(result)
68
69
70 @CELERY_APP.task()
71 def scim_sync_users(page: int, provider_pk: int):
72 """Sync single or multiple users to SCIM"""
73 messages = []
74 provider: SCIMProvider = SCIMProvider.objects.filter(pk=provider_pk).first()
75 if not provider:
76 return messages
77 try:
78 client = SCIMUserClient(provider)
79 except SCIMRequestException:
80 return messages
81 paginator = Paginator(provider.get_user_qs(), PAGE_SIZE)
82 LOGGER.debug("starting user sync for page", page=page)
83 for user in paginator.page(page).object_list:
84 try:
85 client.write(user)
86 except SCIMRequestException as exc:
87 LOGGER.warning("failed to sync user", exc=exc, user=user)
88 messages.append(
89 _(
90 "Failed to sync user %(user_name)s due to remote error: %(error)s"
91 % {
92 "user_name": user.username,
93 "error": exc.detail(),
94 }
95 )
96 )
97 except StopSync as exc:
98 LOGGER.warning("Stopping sync", exc=exc)
99 messages.append(
100 _(
101 "Stopping sync due to error: %(error)s"
102 % {
103 "error": exc.detail(),
104 }
105 )
106 )
107 break
108 return messages
109
110
111 @CELERY_APP.task()
112 def scim_sync_group(page: int, provider_pk: int):
113 """Sync single or multiple groups to SCIM"""
114 messages = []
115 provider: SCIMProvider = SCIMProvider.objects.filter(pk=provider_pk).first()
116 if not provider:
117 return messages
118 try:
119 client = SCIMGroupClient(provider)
120 except SCIMRequestException:
121 return messages
122 paginator = Paginator(provider.get_group_qs(), PAGE_SIZE)
123 LOGGER.debug("starting group sync for page", page=page)
124 for group in paginator.page(page).object_list:
125 try:
126 client.write(group)
127 except SCIMRequestException as exc:
128 LOGGER.warning("failed to sync group", exc=exc, group=group)
129 messages.append(
130 _(
131 "Failed to sync group %(group_name)s due to remote error: %(error)s"
132 % {
133 "group_name": group.name,
134 "error": exc.detail(),
135 }
136 )
137 )
138 except StopSync as exc:
139 LOGGER.warning("Stopping sync", exc=exc)
140 messages.append(
141 _(
142 "Stopping sync due to error: %(error)s"
143 % {
144 "error": exc.detail(),
145 }
146 )
147 )
148 break
149 return messages
150
151
152 @CELERY_APP.task()
153 def scim_signal_direct(model: str, pk: Any, raw_op: str):
154 """Handler for post_save and pre_delete signal"""
155 model_class: type[Model] = path_to_class(model)
156 instance = model_class.objects.filter(pk=pk).first()
157 if not instance:
158 return
159 operation = PatchOp(raw_op)
160 for provider in SCIMProvider.objects.filter(backchannel_application__isnull=False):
161 client = client_for_model(provider, instance)
162 # Check if the object is allowed within the provider's restrictions
163 queryset: Optional[QuerySet] = None
164 if isinstance(instance, User):
165 queryset = provider.get_user_qs()
166 if isinstance(instance, Group):
167 queryset = provider.get_group_qs()
168 if not queryset:
169 continue
170
171 # The queryset we get from the provider must include the instance we've got given
172 # otherwise ignore this provider
173 if not queryset.filter(pk=instance.pk).exists():
174 continue
175
176 try:
177 if operation == PatchOp.add:
178 client.write(instance)
179 if operation == PatchOp.remove:
180 client.delete(instance)
181 except (StopSync, SCIMRequestException) as exc:
182 LOGGER.warning(exc)
183
184
185 @CELERY_APP.task()
186 def scim_signal_m2m(group_pk: str, action: str, pk_set: list[int]):
187 """Update m2m (group membership)"""
188 group = Group.objects.filter(pk=group_pk).first()
189 if not group:
190 return
191 for provider in SCIMProvider.objects.filter(backchannel_application__isnull=False):
192 # Check if the object is allowed within the provider's restrictions
193 queryset: QuerySet = provider.get_group_qs()
194 # The queryset we get from the provider must include the instance we've got given
195 # otherwise ignore this provider
196 if not queryset.filter(pk=group_pk).exists():
197 continue
198
199 client = SCIMGroupClient(provider)
200 try:
201 operation = None
202 if action == "post_add":
203 operation = PatchOp.add
204 if action == "post_remove":
205 operation = PatchOp.remove
206 client.update_group(group, operation, pk_set)
207 except (StopSync, SCIMRequestException) as exc:
208 LOGGER.warning(exc)
209
[end of authentik/providers/scim/tasks.py]
[start of authentik/providers/scim/api/providers.py]
1 """SCIM Provider API Views"""
2 from django.utils.text import slugify
3 from drf_spectacular.utils import OpenApiResponse, extend_schema
4 from rest_framework.decorators import action
5 from rest_framework.request import Request
6 from rest_framework.response import Response
7 from rest_framework.viewsets import ModelViewSet
8
9 from authentik.admin.api.tasks import TaskSerializer
10 from authentik.core.api.providers import ProviderSerializer
11 from authentik.core.api.used_by import UsedByMixin
12 from authentik.events.monitored_tasks import TaskInfo
13 from authentik.providers.scim.models import SCIMProvider
14
15
16 class SCIMProviderSerializer(ProviderSerializer):
17 """SCIMProvider Serializer"""
18
19 class Meta:
20 model = SCIMProvider
21 fields = [
22 "pk",
23 "name",
24 "property_mappings",
25 "property_mappings_group",
26 "component",
27 "assigned_application_slug",
28 "assigned_application_name",
29 "verbose_name",
30 "verbose_name_plural",
31 "meta_model_name",
32 "url",
33 "token",
34 "exclude_users_service_account",
35 "filter_group",
36 ]
37 extra_kwargs = {}
38
39
40 class SCIMProviderViewSet(UsedByMixin, ModelViewSet):
41 """SCIMProvider Viewset"""
42
43 queryset = SCIMProvider.objects.all()
44 serializer_class = SCIMProviderSerializer
45 filterset_fields = ["name", "exclude_users_service_account", "url", "filter_group"]
46 search_fields = ["name", "url"]
47 ordering = ["name", "url"]
48
49 @extend_schema(
50 responses={
51 200: TaskSerializer(),
52 404: OpenApiResponse(description="Task not found"),
53 }
54 )
55 @action(methods=["GET"], detail=True, pagination_class=None, filter_backends=[])
56 def sync_status(self, request: Request, pk: int) -> Response:
57 """Get provider's sync status"""
58 provider = self.get_object()
59 task = TaskInfo.by_name(f"scim_sync:{slugify(provider.name)}")
60 if not task:
61 return Response(status=404)
62 return Response(TaskSerializer(task).data)
63
[end of authentik/providers/scim/api/providers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/authentik/providers/scim/api/providers.py b/authentik/providers/scim/api/providers.py
--- a/authentik/providers/scim/api/providers.py
+++ b/authentik/providers/scim/api/providers.py
@@ -24,8 +24,8 @@
"property_mappings",
"property_mappings_group",
"component",
- "assigned_application_slug",
- "assigned_application_name",
+ "assigned_backchannel_application_slug",
+ "assigned_backchannel_application_name",
"verbose_name",
"verbose_name_plural",
"meta_model_name",
diff --git a/authentik/providers/scim/tasks.py b/authentik/providers/scim/tasks.py
--- a/authentik/providers/scim/tasks.py
+++ b/authentik/providers/scim/tasks.py
@@ -42,7 +42,9 @@
@CELERY_APP.task(bind=True, base=MonitoredTask)
def scim_sync(self: MonitoredTask, provider_pk: int) -> None:
"""Run SCIM full sync for provider"""
- provider: SCIMProvider = SCIMProvider.objects.filter(pk=provider_pk).first()
+ provider: SCIMProvider = SCIMProvider.objects.filter(
+ pk=provider_pk, backchannel_application__isnull=False
+ ).first()
if not provider:
return
self.set_uid(slugify(provider.name))
| {"golden_diff": "diff --git a/authentik/providers/scim/api/providers.py b/authentik/providers/scim/api/providers.py\n--- a/authentik/providers/scim/api/providers.py\n+++ b/authentik/providers/scim/api/providers.py\n@@ -24,8 +24,8 @@\n \"property_mappings\",\n \"property_mappings_group\",\n \"component\",\n- \"assigned_application_slug\",\n- \"assigned_application_name\",\n+ \"assigned_backchannel_application_slug\",\n+ \"assigned_backchannel_application_name\",\n \"verbose_name\",\n \"verbose_name_plural\",\n \"meta_model_name\",\ndiff --git a/authentik/providers/scim/tasks.py b/authentik/providers/scim/tasks.py\n--- a/authentik/providers/scim/tasks.py\n+++ b/authentik/providers/scim/tasks.py\n@@ -42,7 +42,9 @@\n @CELERY_APP.task(bind=True, base=MonitoredTask)\n def scim_sync(self: MonitoredTask, provider_pk: int) -> None:\n \"\"\"Run SCIM full sync for provider\"\"\"\n- provider: SCIMProvider = SCIMProvider.objects.filter(pk=provider_pk).first()\n+ provider: SCIMProvider = SCIMProvider.objects.filter(\n+ pk=provider_pk, backchannel_application__isnull=False\n+ ).first()\n if not provider:\n return\n self.set_uid(slugify(provider.name))\n", "issue": "SCIM provider automatic sync not triggering since 2023.5.0\n**Describe the bug**\r\nSCIM provider automatic sync is not triggering (hourly & on user/group change).\r\nJust upgraded to 2023.5.0 yesterday and (I think) that sync is broken since upgrade. I was using 2023.3.1 and 2023.4.1 previously with the SCIM provider to provision AWS SSO (IAM Identity Center) and those triggers worked (but without PATCH added in 2023.5.0 so that was the main reason for this upgrade).\r\n\r\n**To Reproduce**\r\nConfigure SCIM provider and wait for the full sync hourly, try to add/remove a member in a group or create a new user.\r\n\r\n**Expected behavior**\r\nFrom [documentation](https://goauthentik.io/docs/providers/scim/#syncing):\r\n\r\n```\r\nData is synchronized in multiple ways:\r\n\r\nWhen a user/group is created/modified/deleted, that action is sent to all SCIM providers\r\nPeriodically (once an hour), all SCIM providers are fully synchronized\r\n```\r\n\r\n**Screenshots**\r\nNo screenshots for this case.\r\n\r\n**Logs**\r\nThere's no sync events events in logs. 
Logs are configured in trace level.\r\nThe only SCIM events I see are:\r\n\r\n```\r\n2023-05-17 09:24:58 | {\"event\": \"Task started\", \"level\": \"info\", \"logger\": \"authentik.root.celery\", \"pid\": 5476, \"task_id\": \"d2f7357b-caf3-40b9-8750-93134c36badf\", \"task_name\": \"scim_signal_direct\", \"timestamp\": \"2023-05-17T12:24:58.174683\"}\r\n2023-05-17 09:24:58 | {\"event\": \"Task finished\", \"level\": \"info\", \"logger\": \"authentik.root.celery\", \"pid\": 5476, \"state\": \"SUCCESS\", \"task_id\": \"d2f7357b-caf3-40b9-8750-93134c36badf\", \"task_name\": \"scim_signal_direct\", \"timestamp\": \"2023-05-17T12:24:58.210445\"}\r\n```\r\n\r\n**Version and Deployment (please complete the following information):**\r\n\r\n- authentik version:2023.5.0\r\n- Deployment: Helm chart\r\n\r\n**Additional context**\r\nIf I run the sync manually it works, but the full sync adds/replace objects it doesn't remove users from a group since I think it's only incremental and the removal of a member should be done whent the group is modified.\n", "before_files": [{"content": "\"\"\"SCIM Provider tasks\"\"\"\nfrom typing import Any, Optional\n\nfrom celery.result import allow_join_result\nfrom django.core.paginator import Paginator\nfrom django.db.models import Model, QuerySet\nfrom django.utils.text import slugify\nfrom django.utils.translation import gettext_lazy as _\nfrom pydanticscim.responses import PatchOp\nfrom structlog.stdlib import get_logger\n\nfrom authentik.core.models import Group, User\nfrom authentik.events.monitored_tasks import MonitoredTask, TaskResult, TaskResultStatus\nfrom authentik.lib.utils.reflection import path_to_class\nfrom authentik.providers.scim.clients import PAGE_SIZE\nfrom authentik.providers.scim.clients.base import SCIMClient\nfrom authentik.providers.scim.clients.exceptions import SCIMRequestException, StopSync\nfrom authentik.providers.scim.clients.group import SCIMGroupClient\nfrom authentik.providers.scim.clients.user import SCIMUserClient\nfrom authentik.providers.scim.models import SCIMProvider\nfrom authentik.root.celery import CELERY_APP\n\nLOGGER = get_logger(__name__)\n\n\ndef client_for_model(provider: SCIMProvider, model: Model) -> SCIMClient:\n \"\"\"Get SCIM client for model\"\"\"\n if isinstance(model, User):\n return SCIMUserClient(provider)\n if isinstance(model, Group):\n return SCIMGroupClient(provider)\n raise ValueError(f\"Invalid model {model}\")\n\n\n@CELERY_APP.task()\ndef scim_sync_all():\n \"\"\"Run sync for all providers\"\"\"\n for provider in SCIMProvider.objects.filter(backchannel_application__isnull=False):\n scim_sync.delay(provider.pk)\n\n\n@CELERY_APP.task(bind=True, base=MonitoredTask)\ndef scim_sync(self: MonitoredTask, provider_pk: int) -> None:\n \"\"\"Run SCIM full sync for provider\"\"\"\n provider: SCIMProvider = SCIMProvider.objects.filter(pk=provider_pk).first()\n if not provider:\n return\n self.set_uid(slugify(provider.name))\n result = TaskResult(TaskResultStatus.SUCCESSFUL, [])\n result.messages.append(_(\"Starting full SCIM sync\"))\n LOGGER.debug(\"Starting SCIM sync\")\n users_paginator = Paginator(provider.get_user_qs(), PAGE_SIZE)\n groups_paginator = Paginator(provider.get_group_qs(), PAGE_SIZE)\n with allow_join_result():\n try:\n for page in users_paginator.page_range:\n result.messages.append(_(\"Syncing page %(page)d of users\" % {\"page\": page}))\n for msg in scim_sync_users.delay(page, provider_pk).get():\n result.messages.append(msg)\n for page in groups_paginator.page_range:\n 
result.messages.append(_(\"Syncing page %(page)d of groups\" % {\"page\": page}))\n for msg in scim_sync_group.delay(page, provider_pk).get():\n result.messages.append(msg)\n except StopSync as exc:\n self.set_status(TaskResult(TaskResultStatus.ERROR).with_error(exc))\n return\n self.set_status(result)\n\n\n@CELERY_APP.task()\ndef scim_sync_users(page: int, provider_pk: int):\n \"\"\"Sync single or multiple users to SCIM\"\"\"\n messages = []\n provider: SCIMProvider = SCIMProvider.objects.filter(pk=provider_pk).first()\n if not provider:\n return messages\n try:\n client = SCIMUserClient(provider)\n except SCIMRequestException:\n return messages\n paginator = Paginator(provider.get_user_qs(), PAGE_SIZE)\n LOGGER.debug(\"starting user sync for page\", page=page)\n for user in paginator.page(page).object_list:\n try:\n client.write(user)\n except SCIMRequestException as exc:\n LOGGER.warning(\"failed to sync user\", exc=exc, user=user)\n messages.append(\n _(\n \"Failed to sync user %(user_name)s due to remote error: %(error)s\"\n % {\n \"user_name\": user.username,\n \"error\": exc.detail(),\n }\n )\n )\n except StopSync as exc:\n LOGGER.warning(\"Stopping sync\", exc=exc)\n messages.append(\n _(\n \"Stopping sync due to error: %(error)s\"\n % {\n \"error\": exc.detail(),\n }\n )\n )\n break\n return messages\n\n\n@CELERY_APP.task()\ndef scim_sync_group(page: int, provider_pk: int):\n \"\"\"Sync single or multiple groups to SCIM\"\"\"\n messages = []\n provider: SCIMProvider = SCIMProvider.objects.filter(pk=provider_pk).first()\n if not provider:\n return messages\n try:\n client = SCIMGroupClient(provider)\n except SCIMRequestException:\n return messages\n paginator = Paginator(provider.get_group_qs(), PAGE_SIZE)\n LOGGER.debug(\"starting group sync for page\", page=page)\n for group in paginator.page(page).object_list:\n try:\n client.write(group)\n except SCIMRequestException as exc:\n LOGGER.warning(\"failed to sync group\", exc=exc, group=group)\n messages.append(\n _(\n \"Failed to sync group %(group_name)s due to remote error: %(error)s\"\n % {\n \"group_name\": group.name,\n \"error\": exc.detail(),\n }\n )\n )\n except StopSync as exc:\n LOGGER.warning(\"Stopping sync\", exc=exc)\n messages.append(\n _(\n \"Stopping sync due to error: %(error)s\"\n % {\n \"error\": exc.detail(),\n }\n )\n )\n break\n return messages\n\n\n@CELERY_APP.task()\ndef scim_signal_direct(model: str, pk: Any, raw_op: str):\n \"\"\"Handler for post_save and pre_delete signal\"\"\"\n model_class: type[Model] = path_to_class(model)\n instance = model_class.objects.filter(pk=pk).first()\n if not instance:\n return\n operation = PatchOp(raw_op)\n for provider in SCIMProvider.objects.filter(backchannel_application__isnull=False):\n client = client_for_model(provider, instance)\n # Check if the object is allowed within the provider's restrictions\n queryset: Optional[QuerySet] = None\n if isinstance(instance, User):\n queryset = provider.get_user_qs()\n if isinstance(instance, Group):\n queryset = provider.get_group_qs()\n if not queryset:\n continue\n\n # The queryset we get from the provider must include the instance we've got given\n # otherwise ignore this provider\n if not queryset.filter(pk=instance.pk).exists():\n continue\n\n try:\n if operation == PatchOp.add:\n client.write(instance)\n if operation == PatchOp.remove:\n client.delete(instance)\n except (StopSync, SCIMRequestException) as exc:\n LOGGER.warning(exc)\n\n\n@CELERY_APP.task()\ndef scim_signal_m2m(group_pk: str, action: str, pk_set: 
list[int]):\n \"\"\"Update m2m (group membership)\"\"\"\n group = Group.objects.filter(pk=group_pk).first()\n if not group:\n return\n for provider in SCIMProvider.objects.filter(backchannel_application__isnull=False):\n # Check if the object is allowed within the provider's restrictions\n queryset: QuerySet = provider.get_group_qs()\n # The queryset we get from the provider must include the instance we've got given\n # otherwise ignore this provider\n if not queryset.filter(pk=group_pk).exists():\n continue\n\n client = SCIMGroupClient(provider)\n try:\n operation = None\n if action == \"post_add\":\n operation = PatchOp.add\n if action == \"post_remove\":\n operation = PatchOp.remove\n client.update_group(group, operation, pk_set)\n except (StopSync, SCIMRequestException) as exc:\n LOGGER.warning(exc)\n", "path": "authentik/providers/scim/tasks.py"}, {"content": "\"\"\"SCIM Provider API Views\"\"\"\nfrom django.utils.text import slugify\nfrom drf_spectacular.utils import OpenApiResponse, extend_schema\nfrom rest_framework.decorators import action\nfrom rest_framework.request import Request\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import ModelViewSet\n\nfrom authentik.admin.api.tasks import TaskSerializer\nfrom authentik.core.api.providers import ProviderSerializer\nfrom authentik.core.api.used_by import UsedByMixin\nfrom authentik.events.monitored_tasks import TaskInfo\nfrom authentik.providers.scim.models import SCIMProvider\n\n\nclass SCIMProviderSerializer(ProviderSerializer):\n \"\"\"SCIMProvider Serializer\"\"\"\n\n class Meta:\n model = SCIMProvider\n fields = [\n \"pk\",\n \"name\",\n \"property_mappings\",\n \"property_mappings_group\",\n \"component\",\n \"assigned_application_slug\",\n \"assigned_application_name\",\n \"verbose_name\",\n \"verbose_name_plural\",\n \"meta_model_name\",\n \"url\",\n \"token\",\n \"exclude_users_service_account\",\n \"filter_group\",\n ]\n extra_kwargs = {}\n\n\nclass SCIMProviderViewSet(UsedByMixin, ModelViewSet):\n \"\"\"SCIMProvider Viewset\"\"\"\n\n queryset = SCIMProvider.objects.all()\n serializer_class = SCIMProviderSerializer\n filterset_fields = [\"name\", \"exclude_users_service_account\", \"url\", \"filter_group\"]\n search_fields = [\"name\", \"url\"]\n ordering = [\"name\", \"url\"]\n\n @extend_schema(\n responses={\n 200: TaskSerializer(),\n 404: OpenApiResponse(description=\"Task not found\"),\n }\n )\n @action(methods=[\"GET\"], detail=True, pagination_class=None, filter_backends=[])\n def sync_status(self, request: Request, pk: int) -> Response:\n \"\"\"Get provider's sync status\"\"\"\n provider = self.get_object()\n task = TaskInfo.by_name(f\"scim_sync:{slugify(provider.name)}\")\n if not task:\n return Response(status=404)\n return Response(TaskSerializer(task).data)\n", "path": "authentik/providers/scim/api/providers.py"}]} | 3,934 | 293 |
gh_patches_debug_19203 | rasdani/github-patches | git_diff | e-valuation__EvaP-1367 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Inline datatables localization files to speed up first paint
Right now, datatables gets the localization file in the form of a URL (see [datatables.html](https://github.com/fsr-itse/EvaP/blob/master/evap/evaluation/templates/datatables.html)); that is, it starts an ajax request when it begins processing the tables and waits with further processing until the result has been received.
Both locales should be included in the compressed javascript or inlined into the html template so they are loaded earlier.
We do something similar for the [bootstrap datetimepicker](https://github.com/fsr-itse/EvaP/blob/028b6301e3eed446d93ae8675030d82c68d46886/evap/evaluation/templates/bootstrap_datetimepicker.html). Unfortunately, it's not that easy in this case, since the localization files are json files, not javascript files.
One approach would be to turn the json files into js files, simply putting the data structure they contain into a variable named after the corresponding locale, for example as sketched below.
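As a rough illustration of that idea (the file locations and the variable naming scheme are assumptions, not a settled design), a small build-time script could rewrite each datatables locale json file into a js file that assigns the parsed object to a per-locale variable; the resulting js files can then be compressed with the rest of the javascript, and the table initialization can pass the object via datatables' `language` option instead of `language.url`:
```python
# Sketch only: turn each datatables locale .json file into a .js file that
# exposes the locale object as a global variable (e.g. datatables_locale_de).
# The directory below is an assumed location, not EvaP's actual layout.
import json
from pathlib import Path

LOCALE_DIR = Path("evap/static/datatables/locales")

for json_file in LOCALE_DIR.glob("*.json"):
    locale = json_file.stem  # e.g. "de" or "en"
    data = json.loads(json_file.read_text(encoding="utf-8"))
    js_source = "var datatables_locale_{} = {};\n".format(
        locale, json.dumps(data, ensure_ascii=False)
    )
    json_file.with_suffix(".js").write_text(js_source, encoding="utf-8")
```
The template would then reference the variable for the active locale directly, so no ajax request has to finish before the tables can be rendered.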
</issue>
<code>
[start of evap/evaluation/templatetags/evaluation_filters.py]
1 from collections import namedtuple
2
3 from django.forms import TypedChoiceField
4 from django.template import Library
5 from django.utils.translation import ugettext_lazy as _
6
7 from evap.evaluation.models import BASE_UNIPOLAR_CHOICES
8 from evap.rewards.tools import can_reward_points_be_used_by
9 from evap.student.forms import HeadingField
10
11
12 # the names displayed for contributors
13 STATE_NAMES = {
14 'new': _('new'),
15 'prepared': _('prepared'),
16 'editor_approved': _('editor approved'),
17 'approved': _('approved'),
18 'in_evaluation': _('in evaluation'),
19 'evaluated': _('evaluated'),
20 'reviewed': _('reviewed'),
21 'published': _('published'),
22 }
23
24
25 # the descriptions used in tooltips for contributors
26 STATE_DESCRIPTIONS = {
27 'new': _('The evaluation was newly created and will be prepared by the evaluation team.'),
28 'prepared': _('The evaluation was prepared by the evaluation team and is now available for editors.'),
29 'editor_approved': _('The evaluation was approved by an editor and will now be checked by the evaluation team.'),
30 'approved': _('All preparations are finished. The evaluation will begin once the defined start date is reached.'),
31 'in_evaluation': _('The evaluation is currently running until the defined end date is reached.'),
32 'evaluated': _('The evaluation has finished and will now be reviewed by the evaluation team.'),
33 'reviewed': _('The evaluation has finished and was reviewed by the evaluation team. You will receive an email when its results are published.'),
34 'published': _('The results for this evaluation have been published.'),
35 }
36
37
38 # values for approval states shown to staff
39 StateValues = namedtuple('StateValues', ('order', 'icon', 'filter', 'description'))
40 APPROVAL_STATES = {
41 'new': StateValues(0, 'fas fa-circle icon-yellow', 'fa-circle icon-yellow', _('In preparation')),
42 'prepared': StateValues(2, 'far fa-square icon-gray', 'fa-square icon-gray', _('Awaiting editor review')),
43 'editor_approved': StateValues(1, 'far fa-check-square icon-yellow', 'fa-check-square icon-yellow', _('Approved by editor, awaiting manager review')),
44 'approved': StateValues(3, 'far fa-check-square icon-green', 'fa-check-square icon-green', _('Approved by manager')),
45 }
46
47
48 register = Library()
49
50
51 @register.filter(name='zip')
52 def _zip(a, b):
53 return zip(a, b)
54
55
56 @register.filter()
57 def zip_choices(counts, choices):
58 return zip(counts, choices.names, choices.colors, choices.values)
59
60
61 @register.filter
62 def ordering_index(evaluation):
63 if evaluation.state in ['new', 'prepared', 'editor_approved', 'approved']:
64 return evaluation.days_until_evaluation
65 elif evaluation.state == "in_evaluation":
66 return 100000 + evaluation.days_left_for_evaluation
67 return 200000 + evaluation.days_left_for_evaluation
68
69
70 # from http://www.jongales.com/blog/2009/10/19/percentage-django-template-tag/
71 @register.filter
72 def percentage(fraction, population):
73 try:
74 return "{0:.0f}%".format(int(float(fraction) / float(population) * 100))
75 except ValueError:
76 return None
77 except ZeroDivisionError:
78 return None
79
80
81 @register.filter
82 def percentage_one_decimal(fraction, population):
83 try:
84 return "{0:.1f}%".format((float(fraction) / float(population)) * 100)
85 except ValueError:
86 return None
87 except ZeroDivisionError:
88 return None
89
90
91 @register.filter
92 def to_colors(choices):
93 if not choices:
94 # When displaying the course distribution, there are no associated voting choices.
95 # In that case, we just use the colors of a unipolar scale.
96 return BASE_UNIPOLAR_CHOICES['colors']
97 return choices.colors
98
99
100 @register.filter
101 def statename(state):
102 return STATE_NAMES.get(state)
103
104
105 @register.filter
106 def statedescription(state):
107 return STATE_DESCRIPTIONS.get(state)
108
109
110 @register.filter
111 def approval_state_values(state):
112 if state in APPROVAL_STATES:
113 return APPROVAL_STATES[state]
114 elif state in ['in_evaluation', 'evaluated', 'reviewed', 'published']:
115 return APPROVAL_STATES['approved']
116 return None
117
118
119 @register.filter
120 def approval_state_icon(state):
121 if state in APPROVAL_STATES:
122 return APPROVAL_STATES[state].icon
123 elif state in ['in_evaluation', 'evaluated', 'reviewed', 'published']:
124 return APPROVAL_STATES['approved'].icon
125 return None
126
127
128 @register.filter
129 def can_results_page_be_seen_by(evaluation, user):
130 return evaluation.can_results_page_be_seen_by(user)
131
132
133 @register.filter(name='can_reward_points_be_used_by')
134 def _can_reward_points_be_used_by(user):
135 return can_reward_points_be_used_by(user)
136
137
138 @register.filter
139 def is_choice_field(field):
140 return isinstance(field.field, TypedChoiceField)
141
142
143 @register.filter
144 def is_heading_field(field):
145 return isinstance(field.field, HeadingField)
146
147
148 @register.filter
149 def is_user_editor_or_delegate(evaluation, user):
150 return evaluation.is_user_editor_or_delegate(user)
151
152
153 @register.filter
154 def is_user_responsible_or_contributor_or_delegate(evaluation, user):
155 return evaluation.is_user_responsible_or_contributor_or_delegate(user)
156
157 @register.filter
158 def message_class(level):
159 return {
160 'debug': 'info',
161 'info': 'info',
162 'success': 'success',
163 'warning': 'warning',
164 'error': 'danger',
165 }.get(level, 'info')
166
167
168 @register.filter
169 def hours_and_minutes(time_left_for_evaluation):
170 hours = time_left_for_evaluation.seconds // 3600
171 minutes = (time_left_for_evaluation.seconds // 60) % 60
172 return "{:02}:{:02}".format(hours, minutes)
173
174
175 @register.filter
176 def has_nonresponsible_editor(evaluation):
177 return evaluation.contributions.filter(can_edit=True).exclude(contributor__in=evaluation.course.responsibles.all()).exists()
178
[end of evap/evaluation/templatetags/evaluation_filters.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/evap/evaluation/templatetags/evaluation_filters.py b/evap/evaluation/templatetags/evaluation_filters.py
--- a/evap/evaluation/templatetags/evaluation_filters.py
+++ b/evap/evaluation/templatetags/evaluation_filters.py
@@ -38,10 +38,10 @@
# values for approval states shown to staff
StateValues = namedtuple('StateValues', ('order', 'icon', 'filter', 'description'))
APPROVAL_STATES = {
- 'new': StateValues(0, 'fas fa-circle icon-yellow', 'fa-circle icon-yellow', _('In preparation')),
- 'prepared': StateValues(2, 'far fa-square icon-gray', 'fa-square icon-gray', _('Awaiting editor review')),
- 'editor_approved': StateValues(1, 'far fa-check-square icon-yellow', 'fa-check-square icon-yellow', _('Approved by editor, awaiting manager review')),
- 'approved': StateValues(3, 'far fa-check-square icon-green', 'fa-check-square icon-green', _('Approved by manager')),
+ 'new': StateValues(0, 'fas fa-circle icon-yellow', 'new', _('In preparation')),
+ 'prepared': StateValues(2, 'far fa-square icon-gray', 'prepared', _('Awaiting editor review')),
+ 'editor_approved': StateValues(1, 'far fa-check-square icon-yellow', 'editor_approved', _('Approved by editor, awaiting manager review')),
+ 'approved': StateValues(3, 'far fa-check-square icon-green', 'approved', _('Approved by manager')),
}
| {"golden_diff": "diff --git a/evap/evaluation/templatetags/evaluation_filters.py b/evap/evaluation/templatetags/evaluation_filters.py\n--- a/evap/evaluation/templatetags/evaluation_filters.py\n+++ b/evap/evaluation/templatetags/evaluation_filters.py\n@@ -38,10 +38,10 @@\n # values for approval states shown to staff\n StateValues = namedtuple('StateValues', ('order', 'icon', 'filter', 'description'))\n APPROVAL_STATES = {\n- 'new': StateValues(0, 'fas fa-circle icon-yellow', 'fa-circle icon-yellow', _('In preparation')),\n- 'prepared': StateValues(2, 'far fa-square icon-gray', 'fa-square icon-gray', _('Awaiting editor review')),\n- 'editor_approved': StateValues(1, 'far fa-check-square icon-yellow', 'fa-check-square icon-yellow', _('Approved by editor, awaiting manager review')),\n- 'approved': StateValues(3, 'far fa-check-square icon-green', 'fa-check-square icon-green', _('Approved by manager')),\n+ 'new': StateValues(0, 'fas fa-circle icon-yellow', 'new', _('In preparation')),\n+ 'prepared': StateValues(2, 'far fa-square icon-gray', 'prepared', _('Awaiting editor review')),\n+ 'editor_approved': StateValues(1, 'far fa-check-square icon-yellow', 'editor_approved', _('Approved by editor, awaiting manager review')),\n+ 'approved': StateValues(3, 'far fa-check-square icon-green', 'approved', _('Approved by manager')),\n }\n", "issue": "Inline datatables localization files to speed up first paint\nRight now, datatables gets the localization file in form of a URL (see [datatables.html](https://github.com/fsr-itse/EvaP/blob/master/evap/evaluation/templates/datatables.html)), that is, it starts an ajax request when it starts processing the tables, and waits with processing until the result has been received.\r\n\r\nboth locales should be included into the compressed javascript or inlined into the html template so they are loaded earlier.\r\n\r\nwe do something similar for the [bootstrap datetimepicker](https://github.com/fsr-itse/EvaP/blob/028b6301e3eed446d93ae8675030d82c68d46886/evap/evaluation/templates/bootstrap_datetimepicker.html). unfortunately, it's not that easy in this case, since the localization files are json files, not javascript files.\r\n\r\none approach would be to turn the json files to js files, and simply putting the datastructure inside into a variable with the name of the corresponding locale.\n", "before_files": [{"content": "from collections import namedtuple\n\nfrom django.forms import TypedChoiceField\nfrom django.template import Library\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom evap.evaluation.models import BASE_UNIPOLAR_CHOICES\nfrom evap.rewards.tools import can_reward_points_be_used_by\nfrom evap.student.forms import HeadingField\n\n\n# the names displayed for contributors\nSTATE_NAMES = {\n 'new': _('new'),\n 'prepared': _('prepared'),\n 'editor_approved': _('editor approved'),\n 'approved': _('approved'),\n 'in_evaluation': _('in evaluation'),\n 'evaluated': _('evaluated'),\n 'reviewed': _('reviewed'),\n 'published': _('published'),\n}\n\n\n# the descriptions used in tooltips for contributors\nSTATE_DESCRIPTIONS = {\n 'new': _('The evaluation was newly created and will be prepared by the evaluation team.'),\n 'prepared': _('The evaluation was prepared by the evaluation team and is now available for editors.'),\n 'editor_approved': _('The evaluation was approved by an editor and will now be checked by the evaluation team.'),\n 'approved': _('All preparations are finished. 
The evaluation will begin once the defined start date is reached.'),\n 'in_evaluation': _('The evaluation is currently running until the defined end date is reached.'),\n 'evaluated': _('The evaluation has finished and will now be reviewed by the evaluation team.'),\n 'reviewed': _('The evaluation has finished and was reviewed by the evaluation team. You will receive an email when its results are published.'),\n 'published': _('The results for this evaluation have been published.'),\n}\n\n\n# values for approval states shown to staff\nStateValues = namedtuple('StateValues', ('order', 'icon', 'filter', 'description'))\nAPPROVAL_STATES = {\n 'new': StateValues(0, 'fas fa-circle icon-yellow', 'fa-circle icon-yellow', _('In preparation')),\n 'prepared': StateValues(2, 'far fa-square icon-gray', 'fa-square icon-gray', _('Awaiting editor review')),\n 'editor_approved': StateValues(1, 'far fa-check-square icon-yellow', 'fa-check-square icon-yellow', _('Approved by editor, awaiting manager review')),\n 'approved': StateValues(3, 'far fa-check-square icon-green', 'fa-check-square icon-green', _('Approved by manager')),\n}\n\n\nregister = Library()\n\n\[email protected](name='zip')\ndef _zip(a, b):\n return zip(a, b)\n\n\[email protected]()\ndef zip_choices(counts, choices):\n return zip(counts, choices.names, choices.colors, choices.values)\n\n\[email protected]\ndef ordering_index(evaluation):\n if evaluation.state in ['new', 'prepared', 'editor_approved', 'approved']:\n return evaluation.days_until_evaluation\n elif evaluation.state == \"in_evaluation\":\n return 100000 + evaluation.days_left_for_evaluation\n return 200000 + evaluation.days_left_for_evaluation\n\n\n# from http://www.jongales.com/blog/2009/10/19/percentage-django-template-tag/\[email protected]\ndef percentage(fraction, population):\n try:\n return \"{0:.0f}%\".format(int(float(fraction) / float(population) * 100))\n except ValueError:\n return None\n except ZeroDivisionError:\n return None\n\n\[email protected]\ndef percentage_one_decimal(fraction, population):\n try:\n return \"{0:.1f}%\".format((float(fraction) / float(population)) * 100)\n except ValueError:\n return None\n except ZeroDivisionError:\n return None\n\n\[email protected]\ndef to_colors(choices):\n if not choices:\n # When displaying the course distribution, there are no associated voting choices.\n # In that case, we just use the colors of a unipolar scale.\n return BASE_UNIPOLAR_CHOICES['colors']\n return choices.colors\n\n\[email protected]\ndef statename(state):\n return STATE_NAMES.get(state)\n\n\[email protected]\ndef statedescription(state):\n return STATE_DESCRIPTIONS.get(state)\n\n\[email protected]\ndef approval_state_values(state):\n if state in APPROVAL_STATES:\n return APPROVAL_STATES[state]\n elif state in ['in_evaluation', 'evaluated', 'reviewed', 'published']:\n return APPROVAL_STATES['approved']\n return None\n\n\[email protected]\ndef approval_state_icon(state):\n if state in APPROVAL_STATES:\n return APPROVAL_STATES[state].icon\n elif state in ['in_evaluation', 'evaluated', 'reviewed', 'published']:\n return APPROVAL_STATES['approved'].icon\n return None\n\n\[email protected]\ndef can_results_page_be_seen_by(evaluation, user):\n return evaluation.can_results_page_be_seen_by(user)\n\n\[email protected](name='can_reward_points_be_used_by')\ndef _can_reward_points_be_used_by(user):\n return can_reward_points_be_used_by(user)\n\n\[email protected]\ndef is_choice_field(field):\n return isinstance(field.field, TypedChoiceField)\n\n\[email 
protected]\ndef is_heading_field(field):\n return isinstance(field.field, HeadingField)\n\n\[email protected]\ndef is_user_editor_or_delegate(evaluation, user):\n return evaluation.is_user_editor_or_delegate(user)\n\n\[email protected]\ndef is_user_responsible_or_contributor_or_delegate(evaluation, user):\n return evaluation.is_user_responsible_or_contributor_or_delegate(user)\n\[email protected]\ndef message_class(level):\n return {\n 'debug': 'info',\n 'info': 'info',\n 'success': 'success',\n 'warning': 'warning',\n 'error': 'danger',\n }.get(level, 'info')\n\n\[email protected]\ndef hours_and_minutes(time_left_for_evaluation):\n hours = time_left_for_evaluation.seconds // 3600\n minutes = (time_left_for_evaluation.seconds // 60) % 60\n return \"{:02}:{:02}\".format(hours, minutes)\n\n\[email protected]\ndef has_nonresponsible_editor(evaluation):\n return evaluation.contributions.filter(can_edit=True).exclude(contributor__in=evaluation.course.responsibles.all()).exists()\n", "path": "evap/evaluation/templatetags/evaluation_filters.py"}]} | 2,526 | 346 |
gh_patches_debug_1918 | rasdani/github-patches | git_diff | projectmesa__mesa-1844 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
jupyterviz checkbox input change is not propagated
</issue>
<code>
[start of mesa/experimental/jupyter_viz.py]
1 import threading
2
3 import matplotlib.pyplot as plt
4 import networkx as nx
5 import reacton.ipywidgets as widgets
6 import solara
7 from matplotlib.figure import Figure
8 from matplotlib.ticker import MaxNLocator
9
10 import mesa
11
12 # Avoid interactive backend
13 plt.switch_backend("agg")
14
15
16 @solara.component
17 def JupyterViz(
18 model_class,
19 model_params,
20 measures=None,
21 name="Mesa Model",
22 agent_portrayal=None,
23 space_drawer="default",
24 play_interval=150,
25 ):
26 """Initialize a component to visualize a model.
27 Args:
28 model_class: class of the model to instantiate
29 model_params: parameters for initializing the model
30 measures: list of callables or data attributes to plot
31 name: name for display
32 agent_portrayal: options for rendering agents (dictionary)
33 space_drawer: method to render the agent space for
34 the model; default implementation is :meth:`make_space`;
35 simulations with no space to visualize should
36 specify `space_drawer=False`
37 play_interval: play interval (default: 150)
38 """
39
40 current_step, set_current_step = solara.use_state(0)
41
42 # 1. Set up model parameters
43 user_params, fixed_params = split_model_params(model_params)
44 model_parameters, set_model_parameters = solara.use_state(
45 {**fixed_params, **{k: v["value"] for k, v in user_params.items()}}
46 )
47
48 # 2. Set up Model
49 def make_model():
50 model = model_class(**model_parameters)
51 set_current_step(0)
52 return model
53
54 reset_counter = solara.use_reactive(0)
55 model = solara.use_memo(
56 make_model, dependencies=[*list(model_parameters.values()), reset_counter.value]
57 )
58
59 def handle_change_model_params(name: str, value: any):
60 set_model_parameters({**model_parameters, name: value})
61
62 # 3. Set up UI
63 solara.Markdown(name)
64 UserInputs(user_params, on_change=handle_change_model_params)
65 ModelController(model, play_interval, current_step, set_current_step, reset_counter)
66
67 with solara.GridFixed(columns=2):
68 # 4. Space
69 if space_drawer == "default":
70 # draw with the default implementation
71 make_space(model, agent_portrayal)
72 elif space_drawer:
73 # if specified, draw agent space with an alternate renderer
74 space_drawer(model, agent_portrayal)
75 # otherwise, do nothing (do not draw space)
76
77 # 5. Plots
78 for measure in measures:
79 if callable(measure):
80 # Is a custom object
81 measure(model)
82 else:
83 make_plot(model, measure)
84
85
86 @solara.component
87 def ModelController(
88 model, play_interval, current_step, set_current_step, reset_counter
89 ):
90 playing = solara.use_reactive(False)
91 thread = solara.use_reactive(None)
92 # We track the previous step to detect if user resets the model via
93 # clicking the reset button or changing the parameters. If previous_step >
94 # current_step, it means a model reset happens while the simulation is
95 # still playing.
96 previous_step = solara.use_reactive(0)
97
98 def on_value_play(change):
99 if previous_step.value > current_step and current_step == 0:
100 # We add extra checks for current_step == 0, just to be sure.
101 # We automatically stop the playing if a model is reset.
102 playing.value = False
103 elif model.running:
104 do_step()
105 else:
106 playing.value = False
107
108 def do_step():
109 model.step()
110 previous_step.value = current_step
111 set_current_step(model.schedule.steps)
112
113 def do_play():
114 model.running = True
115 while model.running:
116 do_step()
117
118 def threaded_do_play():
119 if thread is not None and thread.is_alive():
120 return
121 thread.value = threading.Thread(target=do_play)
122 thread.start()
123
124 def do_pause():
125 if (thread is None) or (not thread.is_alive()):
126 return
127 model.running = False
128 thread.join()
129
130 def do_reset():
131 reset_counter.value += 1
132
133 with solara.Row():
134 solara.Button(label="Step", color="primary", on_click=do_step)
135 # This style is necessary so that the play widget has almost the same
136 # height as typical Solara buttons.
137 solara.Style(
138 """
139 .widget-play {
140 height: 30px;
141 }
142 """
143 )
144 widgets.Play(
145 value=0,
146 interval=play_interval,
147 repeat=True,
148 show_repeat=False,
149 on_value=on_value_play,
150 playing=playing.value,
151 on_playing=playing.set,
152 )
153 solara.Button(label="Reset", color="primary", on_click=do_reset)
154 solara.Markdown(md_text=f"**Step:** {current_step}")
155 # threaded_do_play is not used for now because it
156 # doesn't work in Google colab. We use
157 # ipywidgets.Play until it is fixed. The threading
158 # version is definite a much better implementation,
159 # if it works.
160 # solara.Button(label="▶", color="primary", on_click=viz.threaded_do_play)
161 # solara.Button(label="⏸︎", color="primary", on_click=viz.do_pause)
162 # solara.Button(label="Reset", color="primary", on_click=do_reset)
163
164
165 def split_model_params(model_params):
166 model_params_input = {}
167 model_params_fixed = {}
168 for k, v in model_params.items():
169 if check_param_is_fixed(v):
170 model_params_fixed[k] = v
171 else:
172 model_params_input[k] = v
173 return model_params_input, model_params_fixed
174
175
176 def check_param_is_fixed(param):
177 if not isinstance(param, dict):
178 return True
179 if "type" not in param:
180 return True
181
182
183 @solara.component
184 def UserInputs(user_params, on_change=None):
185 """Initialize user inputs for configurable model parameters.
186 Currently supports :class:`solara.SliderInt`, :class:`solara.SliderFloat`,
187 :class:`solara.Select`, and :class:`solara.Checkbox`.
188
189 Props:
190 user_params: dictionary with options for the input, including label,
191 min and max values, and other fields specific to the input type.
192 on_change: function to be called with (name, value) when the value of an input changes.
193 """
194
195 for name, options in user_params.items():
196 # label for the input is "label" from options or name
197 label = options.get("label", name)
198 input_type = options.get("type")
199
200 def change_handler(value, name=name):
201 on_change(name, value)
202
203 if input_type == "SliderInt":
204 solara.SliderInt(
205 label,
206 value=options.get("value"),
207 on_value=change_handler,
208 min=options.get("min"),
209 max=options.get("max"),
210 step=options.get("step"),
211 )
212 elif input_type == "SliderFloat":
213 solara.SliderFloat(
214 label,
215 value=options.get("value"),
216 on_value=change_handler,
217 min=options.get("min"),
218 max=options.get("max"),
219 step=options.get("step"),
220 )
221 elif input_type == "Select":
222 solara.Select(
223 label,
224 value=options.get("value"),
225 on_value=change_handler,
226 values=options.get("values"),
227 )
228 elif input_type == "Checkbox":
229 solara.Checkbox(
230 label=label,
231 value=options.get("value"),
232 )
233 else:
234 raise ValueError(f"{input_type} is not a supported input type")
235
236
237 def make_space(model, agent_portrayal):
238 space_fig = Figure()
239 space_ax = space_fig.subplots()
240 space = getattr(model, "grid", None)
241 if space is None:
242 # Sometimes the space is defined as model.space instead of model.grid
243 space = model.space
244 if isinstance(space, mesa.space.NetworkGrid):
245 _draw_network_grid(space, space_ax, agent_portrayal)
246 elif isinstance(space, mesa.space.ContinuousSpace):
247 _draw_continuous_space(space, space_ax, agent_portrayal)
248 else:
249 _draw_grid(space, space_ax, agent_portrayal)
250 space_ax.set_axis_off()
251 solara.FigureMatplotlib(space_fig, format="png")
252
253
254 def _draw_grid(space, space_ax, agent_portrayal):
255 def portray(g):
256 x = []
257 y = []
258 s = [] # size
259 c = [] # color
260 for i in range(g.width):
261 for j in range(g.height):
262 content = g._grid[i][j]
263 if not content:
264 continue
265 if not hasattr(content, "__iter__"):
266 # Is a single grid
267 content = [content]
268 for agent in content:
269 data = agent_portrayal(agent)
270 x.append(i)
271 y.append(j)
272 if "size" in data:
273 s.append(data["size"])
274 if "color" in data:
275 c.append(data["color"])
276 out = {"x": x, "y": y}
277 if len(s) > 0:
278 out["s"] = s
279 if len(c) > 0:
280 out["c"] = c
281 return out
282
283 space_ax.scatter(**portray(space))
284
285
286 def _draw_network_grid(space, space_ax, agent_portrayal):
287 graph = space.G
288 pos = nx.spring_layout(graph, seed=0)
289 nx.draw(
290 graph,
291 ax=space_ax,
292 pos=pos,
293 **agent_portrayal(graph),
294 )
295
296
297 def _draw_continuous_space(space, space_ax, agent_portrayal):
298 def portray(space):
299 x = []
300 y = []
301 s = [] # size
302 c = [] # color
303 for agent in space._agent_to_index:
304 data = agent_portrayal(agent)
305 _x, _y = agent.pos
306 x.append(_x)
307 y.append(_y)
308 if "size" in data:
309 s.append(data["size"])
310 if "color" in data:
311 c.append(data["color"])
312 out = {"x": x, "y": y}
313 if len(s) > 0:
314 out["s"] = s
315 if len(c) > 0:
316 out["c"] = c
317 return out
318
319 space_ax.scatter(**portray(space))
320
321
322 def make_plot(model, measure):
323 fig = Figure()
324 ax = fig.subplots()
325 df = model.datacollector.get_model_vars_dataframe()
326 ax.plot(df.loc[:, measure])
327 ax.set_ylabel(measure)
328 # Set integer x axis
329 ax.xaxis.set_major_locator(MaxNLocator(integer=True))
330 solara.FigureMatplotlib(fig)
331
332
333 def make_text(renderer):
334 def function(model):
335 solara.Markdown(renderer(model))
336
337 return function
338
[end of mesa/experimental/jupyter_viz.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mesa/experimental/jupyter_viz.py b/mesa/experimental/jupyter_viz.py
--- a/mesa/experimental/jupyter_viz.py
+++ b/mesa/experimental/jupyter_viz.py
@@ -228,6 +228,7 @@
elif input_type == "Checkbox":
solara.Checkbox(
label=label,
+ on_value=change_handler,
value=options.get("value"),
)
else:
| {"golden_diff": "diff --git a/mesa/experimental/jupyter_viz.py b/mesa/experimental/jupyter_viz.py\n--- a/mesa/experimental/jupyter_viz.py\n+++ b/mesa/experimental/jupyter_viz.py\n@@ -228,6 +228,7 @@\n elif input_type == \"Checkbox\":\n solara.Checkbox(\n label=label,\n+ on_value=change_handler,\n value=options.get(\"value\"),\n )\n else:\n", "issue": "jupyterviz checkbox input change is not propagated\n\n", "before_files": [{"content": "import threading\n\nimport matplotlib.pyplot as plt\nimport networkx as nx\nimport reacton.ipywidgets as widgets\nimport solara\nfrom matplotlib.figure import Figure\nfrom matplotlib.ticker import MaxNLocator\n\nimport mesa\n\n# Avoid interactive backend\nplt.switch_backend(\"agg\")\n\n\[email protected]\ndef JupyterViz(\n model_class,\n model_params,\n measures=None,\n name=\"Mesa Model\",\n agent_portrayal=None,\n space_drawer=\"default\",\n play_interval=150,\n):\n \"\"\"Initialize a component to visualize a model.\n Args:\n model_class: class of the model to instantiate\n model_params: parameters for initializing the model\n measures: list of callables or data attributes to plot\n name: name for display\n agent_portrayal: options for rendering agents (dictionary)\n space_drawer: method to render the agent space for\n the model; default implementation is :meth:`make_space`;\n simulations with no space to visualize should\n specify `space_drawer=False`\n play_interval: play interval (default: 150)\n \"\"\"\n\n current_step, set_current_step = solara.use_state(0)\n\n # 1. Set up model parameters\n user_params, fixed_params = split_model_params(model_params)\n model_parameters, set_model_parameters = solara.use_state(\n {**fixed_params, **{k: v[\"value\"] for k, v in user_params.items()}}\n )\n\n # 2. Set up Model\n def make_model():\n model = model_class(**model_parameters)\n set_current_step(0)\n return model\n\n reset_counter = solara.use_reactive(0)\n model = solara.use_memo(\n make_model, dependencies=[*list(model_parameters.values()), reset_counter.value]\n )\n\n def handle_change_model_params(name: str, value: any):\n set_model_parameters({**model_parameters, name: value})\n\n # 3. Set up UI\n solara.Markdown(name)\n UserInputs(user_params, on_change=handle_change_model_params)\n ModelController(model, play_interval, current_step, set_current_step, reset_counter)\n\n with solara.GridFixed(columns=2):\n # 4. Space\n if space_drawer == \"default\":\n # draw with the default implementation\n make_space(model, agent_portrayal)\n elif space_drawer:\n # if specified, draw agent space with an alternate renderer\n space_drawer(model, agent_portrayal)\n # otherwise, do nothing (do not draw space)\n\n # 5. Plots\n for measure in measures:\n if callable(measure):\n # Is a custom object\n measure(model)\n else:\n make_plot(model, measure)\n\n\[email protected]\ndef ModelController(\n model, play_interval, current_step, set_current_step, reset_counter\n):\n playing = solara.use_reactive(False)\n thread = solara.use_reactive(None)\n # We track the previous step to detect if user resets the model via\n # clicking the reset button or changing the parameters. 
If previous_step >\n # current_step, it means a model reset happens while the simulation is\n # still playing.\n previous_step = solara.use_reactive(0)\n\n def on_value_play(change):\n if previous_step.value > current_step and current_step == 0:\n # We add extra checks for current_step == 0, just to be sure.\n # We automatically stop the playing if a model is reset.\n playing.value = False\n elif model.running:\n do_step()\n else:\n playing.value = False\n\n def do_step():\n model.step()\n previous_step.value = current_step\n set_current_step(model.schedule.steps)\n\n def do_play():\n model.running = True\n while model.running:\n do_step()\n\n def threaded_do_play():\n if thread is not None and thread.is_alive():\n return\n thread.value = threading.Thread(target=do_play)\n thread.start()\n\n def do_pause():\n if (thread is None) or (not thread.is_alive()):\n return\n model.running = False\n thread.join()\n\n def do_reset():\n reset_counter.value += 1\n\n with solara.Row():\n solara.Button(label=\"Step\", color=\"primary\", on_click=do_step)\n # This style is necessary so that the play widget has almost the same\n # height as typical Solara buttons.\n solara.Style(\n \"\"\"\n .widget-play {\n height: 30px;\n }\n \"\"\"\n )\n widgets.Play(\n value=0,\n interval=play_interval,\n repeat=True,\n show_repeat=False,\n on_value=on_value_play,\n playing=playing.value,\n on_playing=playing.set,\n )\n solara.Button(label=\"Reset\", color=\"primary\", on_click=do_reset)\n solara.Markdown(md_text=f\"**Step:** {current_step}\")\n # threaded_do_play is not used for now because it\n # doesn't work in Google colab. We use\n # ipywidgets.Play until it is fixed. The threading\n # version is definite a much better implementation,\n # if it works.\n # solara.Button(label=\"\u25b6\", color=\"primary\", on_click=viz.threaded_do_play)\n # solara.Button(label=\"\u23f8\ufe0e\", color=\"primary\", on_click=viz.do_pause)\n # solara.Button(label=\"Reset\", color=\"primary\", on_click=do_reset)\n\n\ndef split_model_params(model_params):\n model_params_input = {}\n model_params_fixed = {}\n for k, v in model_params.items():\n if check_param_is_fixed(v):\n model_params_fixed[k] = v\n else:\n model_params_input[k] = v\n return model_params_input, model_params_fixed\n\n\ndef check_param_is_fixed(param):\n if not isinstance(param, dict):\n return True\n if \"type\" not in param:\n return True\n\n\[email protected]\ndef UserInputs(user_params, on_change=None):\n \"\"\"Initialize user inputs for configurable model parameters.\n Currently supports :class:`solara.SliderInt`, :class:`solara.SliderFloat`,\n :class:`solara.Select`, and :class:`solara.Checkbox`.\n\n Props:\n user_params: dictionary with options for the input, including label,\n min and max values, and other fields specific to the input type.\n on_change: function to be called with (name, value) when the value of an input changes.\n \"\"\"\n\n for name, options in user_params.items():\n # label for the input is \"label\" from options or name\n label = options.get(\"label\", name)\n input_type = options.get(\"type\")\n\n def change_handler(value, name=name):\n on_change(name, value)\n\n if input_type == \"SliderInt\":\n solara.SliderInt(\n label,\n value=options.get(\"value\"),\n on_value=change_handler,\n min=options.get(\"min\"),\n max=options.get(\"max\"),\n step=options.get(\"step\"),\n )\n elif input_type == \"SliderFloat\":\n solara.SliderFloat(\n label,\n value=options.get(\"value\"),\n on_value=change_handler,\n min=options.get(\"min\"),\n 
max=options.get(\"max\"),\n step=options.get(\"step\"),\n )\n elif input_type == \"Select\":\n solara.Select(\n label,\n value=options.get(\"value\"),\n on_value=change_handler,\n values=options.get(\"values\"),\n )\n elif input_type == \"Checkbox\":\n solara.Checkbox(\n label=label,\n value=options.get(\"value\"),\n )\n else:\n raise ValueError(f\"{input_type} is not a supported input type\")\n\n\ndef make_space(model, agent_portrayal):\n space_fig = Figure()\n space_ax = space_fig.subplots()\n space = getattr(model, \"grid\", None)\n if space is None:\n # Sometimes the space is defined as model.space instead of model.grid\n space = model.space\n if isinstance(space, mesa.space.NetworkGrid):\n _draw_network_grid(space, space_ax, agent_portrayal)\n elif isinstance(space, mesa.space.ContinuousSpace):\n _draw_continuous_space(space, space_ax, agent_portrayal)\n else:\n _draw_grid(space, space_ax, agent_portrayal)\n space_ax.set_axis_off()\n solara.FigureMatplotlib(space_fig, format=\"png\")\n\n\ndef _draw_grid(space, space_ax, agent_portrayal):\n def portray(g):\n x = []\n y = []\n s = [] # size\n c = [] # color\n for i in range(g.width):\n for j in range(g.height):\n content = g._grid[i][j]\n if not content:\n continue\n if not hasattr(content, \"__iter__\"):\n # Is a single grid\n content = [content]\n for agent in content:\n data = agent_portrayal(agent)\n x.append(i)\n y.append(j)\n if \"size\" in data:\n s.append(data[\"size\"])\n if \"color\" in data:\n c.append(data[\"color\"])\n out = {\"x\": x, \"y\": y}\n if len(s) > 0:\n out[\"s\"] = s\n if len(c) > 0:\n out[\"c\"] = c\n return out\n\n space_ax.scatter(**portray(space))\n\n\ndef _draw_network_grid(space, space_ax, agent_portrayal):\n graph = space.G\n pos = nx.spring_layout(graph, seed=0)\n nx.draw(\n graph,\n ax=space_ax,\n pos=pos,\n **agent_portrayal(graph),\n )\n\n\ndef _draw_continuous_space(space, space_ax, agent_portrayal):\n def portray(space):\n x = []\n y = []\n s = [] # size\n c = [] # color\n for agent in space._agent_to_index:\n data = agent_portrayal(agent)\n _x, _y = agent.pos\n x.append(_x)\n y.append(_y)\n if \"size\" in data:\n s.append(data[\"size\"])\n if \"color\" in data:\n c.append(data[\"color\"])\n out = {\"x\": x, \"y\": y}\n if len(s) > 0:\n out[\"s\"] = s\n if len(c) > 0:\n out[\"c\"] = c\n return out\n\n space_ax.scatter(**portray(space))\n\n\ndef make_plot(model, measure):\n fig = Figure()\n ax = fig.subplots()\n df = model.datacollector.get_model_vars_dataframe()\n ax.plot(df.loc[:, measure])\n ax.set_ylabel(measure)\n # Set integer x axis\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n solara.FigureMatplotlib(fig)\n\n\ndef make_text(renderer):\n def function(model):\n solara.Markdown(renderer(model))\n\n return function\n", "path": "mesa/experimental/jupyter_viz.py"}]} | 3,843 | 100 |
gh_patches_debug_12756 | rasdani/github-patches | git_diff | xorbitsai__inference-351 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
FEAT: support WizardMath v1.0
### Is your feature request related to a problem? Please describe
https://huggingface.co/WizardLM/WizardMath-13B-V1.0
</issue>
<code>
[start of xinference/model/llm/utils.py]
1 # Copyright 2022-2023 XProbe Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from typing import Iterator, List
16
17 from xinference.model.llm.llm_family import PromptStyleV1
18
19 from ...types import (
20 ChatCompletion,
21 ChatCompletionChunk,
22 ChatCompletionMessage,
23 Completion,
24 CompletionChunk,
25 )
26
27
28 class ChatModelMixin:
29 @staticmethod
30 def get_prompt(
31 prompt: str,
32 chat_history: List[ChatCompletionMessage],
33 prompt_style: PromptStyleV1,
34 ) -> str:
35 """
36 Inspired by FastChat. Format chat history into a prompt according to the prompty style of
37 different models.
38 """
39 assert prompt_style.roles is not None
40 chat_history.append(
41 ChatCompletionMessage(role=prompt_style.roles[0], content=prompt)
42 )
43 chat_history.append(
44 ChatCompletionMessage(role=prompt_style.roles[1], content="")
45 )
46
47 if prompt_style.style_name == "ADD_COLON_SINGLE":
48 ret = prompt_style.system_prompt + prompt_style.intra_message_sep
49 for message in chat_history:
50 role = message["role"]
51 content = message["content"]
52 if content:
53 ret += role + ": " + content + prompt_style.intra_message_sep
54 else:
55 ret += role + ":"
56 return ret
57 elif prompt_style.style_name == "ADD_COLON_TWO":
58 seps = [prompt_style.intra_message_sep, prompt_style.inter_message_sep]
59 ret = prompt_style.system_prompt + seps[0]
60 for i, message in enumerate(chat_history):
61 role = message["role"]
62 content = message["content"]
63 if content:
64 ret += role + ": " + content + seps[i % 2]
65 else:
66 ret += role + ":"
67 return ret
68 elif prompt_style.style_name == "NO_COLON_TWO":
69 seps = [prompt_style.intra_message_sep, prompt_style.inter_message_sep]
70 ret = prompt_style.system_prompt
71 for i, message in enumerate(chat_history):
72 role = message["role"]
73 content = message["content"]
74 if content:
75 ret += role + content + seps[i % 2]
76 else:
77 ret += role
78 return ret
79 elif prompt_style.style_name == "LLAMA2":
80 seps = [prompt_style.intra_message_sep, prompt_style.inter_message_sep]
81 ret = ""
82 for i, message in enumerate(chat_history):
83 role = message["role"]
84 content = message["content"]
85 if content:
86 if i == 0:
87 ret += prompt_style.system_prompt + content
88 else:
89 ret += role + " " + content + seps[i % 2]
90 else:
91 ret += role
92 return ret
93 elif prompt_style.style_name == "FALCON":
94 ret = prompt_style.system_prompt
95 for message in chat_history:
96 role = message["role"]
97 content = message["content"]
98 if content:
99 ret += (
100 role
101 + ": "
102 + content.replace("\r\n", "\n").replace("\n\n", "\n")
103 )
104 ret += "\n\n"
105 else:
106 ret += role + ":"
107 return ret
108 elif prompt_style.style_name == "CHATGLM":
109 round_add_n = 1 if prompt_style.intra_message_sep == "\n\n" else 0
110 if prompt_style.system_prompt:
111 ret = prompt_style.system_prompt + prompt_style.intra_message_sep
112 else:
113 ret = ""
114 for i, message in enumerate(chat_history):
115 role = message["role"]
116 content = message["content"]
117 if i % 2 == 0:
118 ret += f"[Round {i // 2 + round_add_n}]{prompt_style.intra_message_sep}"
119 if content:
120 ret += role + ":" + content + prompt_style.intra_message_sep
121 else:
122 ret += role + ":"
123 return ret
124 elif prompt_style.style_name == "QWEN":
125 ret = f"<|im_start|>system\n{prompt_style.system_prompt}<|im_end|>"
126 for message in chat_history:
127 role = message["role"]
128 content = message["content"]
129
130 ret += prompt_style.intra_message_sep
131 if content:
132 ret += f"<|im_start|>{role}\n{content}<|im_end|>"
133 else:
134 ret += f"<|im_start|>{role}\n"
135 return ret
136 elif prompt_style.style_name == "CHATML":
137 ret = (
138 ""
139 if prompt_style.system_prompt == ""
140 else prompt_style.system_prompt + prompt_style.intra_message_sep + "\n"
141 )
142 for message in chat_history:
143 role = message["role"]
144 content = message["content"]
145
146 if content:
147 ret += role + "\n" + content + prompt_style.intra_message_sep + "\n"
148 else:
149 ret += role + "\n"
150 return ret
151 elif prompt_style.style_name == "INTERNLM":
152 seps = [prompt_style.intra_message_sep, prompt_style.inter_message_sep]
153 ret = ""
154 for i, message in enumerate(chat_history[:-2]):
155 if i % 2 == 0:
156 ret += "<s>"
157 role = message["role"]
158 content = message["content"]
159 ret += role + ":" + content + seps[i % 2]
160 if len(ret) == 0:
161 ret += "<s>"
162 ret += (
163 chat_history[-2]["role"] + ":" + chat_history[-2]["content"] + seps[0]
164 )
165 ret += chat_history[-1]["role"] + ":"
166 return ret
167 else:
168 raise ValueError(f"Invalid prompt style: {prompt_style.style_name}")
169
170 @staticmethod
171 def _convert_chat_completion_chunks_to_chat(
172 chunks: Iterator[CompletionChunk],
173 ) -> Iterator[ChatCompletionChunk]:
174 for i, chunk in enumerate(chunks):
175 if i == 0:
176 yield {
177 "id": "chat" + chunk["id"],
178 "model": chunk["model"],
179 "created": chunk["created"],
180 "object": "chat.completion.chunk",
181 "choices": [
182 {
183 "index": 0,
184 "delta": {
185 "role": "assistant",
186 },
187 "finish_reason": None,
188 }
189 ],
190 }
191 yield {
192 "id": "chat" + chunk["id"],
193 "model": chunk["model"],
194 "created": chunk["created"],
195 "object": "chat.completion.chunk",
196 "choices": [
197 {
198 "index": 0,
199 "delta": {
200 "content": chunk["choices"][0]["text"],
201 },
202 "finish_reason": chunk["choices"][0]["finish_reason"],
203 }
204 ],
205 }
206
207 @staticmethod
208 def _convert_text_completion_to_chat(completion: Completion) -> ChatCompletion:
209 return {
210 "id": "chat" + completion["id"],
211 "object": "chat.completion",
212 "created": completion["created"],
213 "model": completion["model"],
214 "choices": [
215 {
216 "index": 0,
217 "message": {
218 "role": "assistant",
219 "content": completion["choices"][0]["text"],
220 },
221 "finish_reason": completion["choices"][0]["finish_reason"],
222 }
223 ],
224 "usage": completion["usage"],
225 }
226
227
228 def is_valid_model_name(model_name: str) -> bool:
229 import re
230
231 return re.match(r"^[A-Za-z0-9][A-Za-z0-9_\-]*$", model_name) is not None
232
[end of xinference/model/llm/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/xinference/model/llm/utils.py b/xinference/model/llm/utils.py
--- a/xinference/model/llm/utils.py
+++ b/xinference/model/llm/utils.py
@@ -164,6 +164,16 @@
)
ret += chat_history[-1]["role"] + ":"
return ret
+ elif prompt_style.style_name == "ADD_COLON_SINGLE_COT":
+ ret = prompt_style.system_prompt + prompt_style.intra_message_sep
+ for message in chat_history:
+ role = message["role"]
+ content = message["content"]
+ if content:
+ ret += role + ": " + content + prompt_style.intra_message_sep
+ else:
+ ret += role + ": Let's think step by step."
+ return ret
else:
raise ValueError(f"Invalid prompt style: {prompt_style.style_name}")
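The new `ADD_COLON_SINGLE_COT` branch matches `ADD_COLON_SINGLE` except for the assistant turn, which ends with a chain-of-thought cue instead of a bare colon. A quick way to preview the resulting prompt with a stripped-down stand-in for `PromptStyleV1` (the system prompt, separator and role names below are illustrative, not the registry values shipped for WizardMath):

```python
# Illustrative values only; the real ones come from the LLM family registry.
system_prompt = (
    "Below is an instruction that describes a task. "
    "Write a response that appropriately completes the request."
)
intra_message_sep = "\n\n### "
chat_history = [
    {"role": "Instruction", "content": "What is 12 * 7?"},
    {"role": "Response", "content": ""},  # empty content marks the assistant turn
]

ret = system_prompt + intra_message_sep
for message in chat_history:
    role, content = message["role"], message["content"]
    if content:
        ret += role + ": " + content + intra_message_sep
    else:
        # The COT variant appends the step-by-step cue on the empty assistant turn.
        ret += role + ": Let's think step by step."

print(ret)
```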
| {"golden_diff": "diff --git a/xinference/model/llm/utils.py b/xinference/model/llm/utils.py\n--- a/xinference/model/llm/utils.py\n+++ b/xinference/model/llm/utils.py\n@@ -164,6 +164,16 @@\n )\n ret += chat_history[-1][\"role\"] + \":\"\n return ret\n+ elif prompt_style.style_name == \"ADD_COLON_SINGLE_COT\":\n+ ret = prompt_style.system_prompt + prompt_style.intra_message_sep\n+ for message in chat_history:\n+ role = message[\"role\"]\n+ content = message[\"content\"]\n+ if content:\n+ ret += role + \": \" + content + prompt_style.intra_message_sep\n+ else:\n+ ret += role + \": Let's think step by step.\"\n+ return ret\n else:\n raise ValueError(f\"Invalid prompt style: {prompt_style.style_name}\")\n", "issue": "FEAT: support WizardMath v1.0\n### Is your feature request related to a problem? Please describe\r\nhttps://huggingface.co/WizardLM/WizardMath-13B-V1.0\r\n\r\n\n", "before_files": [{"content": "# Copyright 2022-2023 XProbe Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Iterator, List\n\nfrom xinference.model.llm.llm_family import PromptStyleV1\n\nfrom ...types import (\n ChatCompletion,\n ChatCompletionChunk,\n ChatCompletionMessage,\n Completion,\n CompletionChunk,\n)\n\n\nclass ChatModelMixin:\n @staticmethod\n def get_prompt(\n prompt: str,\n chat_history: List[ChatCompletionMessage],\n prompt_style: PromptStyleV1,\n ) -> str:\n \"\"\"\n Inspired by FastChat. 
Format chat history into a prompt according to the prompty style of\n different models.\n \"\"\"\n assert prompt_style.roles is not None\n chat_history.append(\n ChatCompletionMessage(role=prompt_style.roles[0], content=prompt)\n )\n chat_history.append(\n ChatCompletionMessage(role=prompt_style.roles[1], content=\"\")\n )\n\n if prompt_style.style_name == \"ADD_COLON_SINGLE\":\n ret = prompt_style.system_prompt + prompt_style.intra_message_sep\n for message in chat_history:\n role = message[\"role\"]\n content = message[\"content\"]\n if content:\n ret += role + \": \" + content + prompt_style.intra_message_sep\n else:\n ret += role + \":\"\n return ret\n elif prompt_style.style_name == \"ADD_COLON_TWO\":\n seps = [prompt_style.intra_message_sep, prompt_style.inter_message_sep]\n ret = prompt_style.system_prompt + seps[0]\n for i, message in enumerate(chat_history):\n role = message[\"role\"]\n content = message[\"content\"]\n if content:\n ret += role + \": \" + content + seps[i % 2]\n else:\n ret += role + \":\"\n return ret\n elif prompt_style.style_name == \"NO_COLON_TWO\":\n seps = [prompt_style.intra_message_sep, prompt_style.inter_message_sep]\n ret = prompt_style.system_prompt\n for i, message in enumerate(chat_history):\n role = message[\"role\"]\n content = message[\"content\"]\n if content:\n ret += role + content + seps[i % 2]\n else:\n ret += role\n return ret\n elif prompt_style.style_name == \"LLAMA2\":\n seps = [prompt_style.intra_message_sep, prompt_style.inter_message_sep]\n ret = \"\"\n for i, message in enumerate(chat_history):\n role = message[\"role\"]\n content = message[\"content\"]\n if content:\n if i == 0:\n ret += prompt_style.system_prompt + content\n else:\n ret += role + \" \" + content + seps[i % 2]\n else:\n ret += role\n return ret\n elif prompt_style.style_name == \"FALCON\":\n ret = prompt_style.system_prompt\n for message in chat_history:\n role = message[\"role\"]\n content = message[\"content\"]\n if content:\n ret += (\n role\n + \": \"\n + content.replace(\"\\r\\n\", \"\\n\").replace(\"\\n\\n\", \"\\n\")\n )\n ret += \"\\n\\n\"\n else:\n ret += role + \":\"\n return ret\n elif prompt_style.style_name == \"CHATGLM\":\n round_add_n = 1 if prompt_style.intra_message_sep == \"\\n\\n\" else 0\n if prompt_style.system_prompt:\n ret = prompt_style.system_prompt + prompt_style.intra_message_sep\n else:\n ret = \"\"\n for i, message in enumerate(chat_history):\n role = message[\"role\"]\n content = message[\"content\"]\n if i % 2 == 0:\n ret += f\"[Round {i // 2 + round_add_n}]{prompt_style.intra_message_sep}\"\n if content:\n ret += role + \"\uff1a\" + content + prompt_style.intra_message_sep\n else:\n ret += role + \"\uff1a\"\n return ret\n elif prompt_style.style_name == \"QWEN\":\n ret = f\"<|im_start|>system\\n{prompt_style.system_prompt}<|im_end|>\"\n for message in chat_history:\n role = message[\"role\"]\n content = message[\"content\"]\n\n ret += prompt_style.intra_message_sep\n if content:\n ret += f\"<|im_start|>{role}\\n{content}<|im_end|>\"\n else:\n ret += f\"<|im_start|>{role}\\n\"\n return ret\n elif prompt_style.style_name == \"CHATML\":\n ret = (\n \"\"\n if prompt_style.system_prompt == \"\"\n else prompt_style.system_prompt + prompt_style.intra_message_sep + \"\\n\"\n )\n for message in chat_history:\n role = message[\"role\"]\n content = message[\"content\"]\n\n if content:\n ret += role + \"\\n\" + content + prompt_style.intra_message_sep + \"\\n\"\n else:\n ret += role + \"\\n\"\n return ret\n elif prompt_style.style_name == 
\"INTERNLM\":\n seps = [prompt_style.intra_message_sep, prompt_style.inter_message_sep]\n ret = \"\"\n for i, message in enumerate(chat_history[:-2]):\n if i % 2 == 0:\n ret += \"<s>\"\n role = message[\"role\"]\n content = message[\"content\"]\n ret += role + \":\" + content + seps[i % 2]\n if len(ret) == 0:\n ret += \"<s>\"\n ret += (\n chat_history[-2][\"role\"] + \":\" + chat_history[-2][\"content\"] + seps[0]\n )\n ret += chat_history[-1][\"role\"] + \":\"\n return ret\n else:\n raise ValueError(f\"Invalid prompt style: {prompt_style.style_name}\")\n\n @staticmethod\n def _convert_chat_completion_chunks_to_chat(\n chunks: Iterator[CompletionChunk],\n ) -> Iterator[ChatCompletionChunk]:\n for i, chunk in enumerate(chunks):\n if i == 0:\n yield {\n \"id\": \"chat\" + chunk[\"id\"],\n \"model\": chunk[\"model\"],\n \"created\": chunk[\"created\"],\n \"object\": \"chat.completion.chunk\",\n \"choices\": [\n {\n \"index\": 0,\n \"delta\": {\n \"role\": \"assistant\",\n },\n \"finish_reason\": None,\n }\n ],\n }\n yield {\n \"id\": \"chat\" + chunk[\"id\"],\n \"model\": chunk[\"model\"],\n \"created\": chunk[\"created\"],\n \"object\": \"chat.completion.chunk\",\n \"choices\": [\n {\n \"index\": 0,\n \"delta\": {\n \"content\": chunk[\"choices\"][0][\"text\"],\n },\n \"finish_reason\": chunk[\"choices\"][0][\"finish_reason\"],\n }\n ],\n }\n\n @staticmethod\n def _convert_text_completion_to_chat(completion: Completion) -> ChatCompletion:\n return {\n \"id\": \"chat\" + completion[\"id\"],\n \"object\": \"chat.completion\",\n \"created\": completion[\"created\"],\n \"model\": completion[\"model\"],\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": completion[\"choices\"][0][\"text\"],\n },\n \"finish_reason\": completion[\"choices\"][0][\"finish_reason\"],\n }\n ],\n \"usage\": completion[\"usage\"],\n }\n\n\ndef is_valid_model_name(model_name: str) -> bool:\n import re\n\n return re.match(r\"^[A-Za-z0-9][A-Za-z0-9_\\-]*$\", model_name) is not None\n", "path": "xinference/model/llm/utils.py"}]} | 2,921 | 202 |
gh_patches_debug_20648 | rasdani/github-patches | git_diff | microsoft__ptvsd-1253 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
PTVSD_LOG_DIR doesn't work with VS
No logs are generated even with the environment variable set. It looks like logging initialization is missing on the VS entry point (`debugger.py`).
</issue>
<code>
[start of src/ptvsd/debugger.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License. See LICENSE in the project root
3 # for license information.
4
5 import sys
6
7 from ptvsd._local import run_module, run_file, run_main
8
9
10 # TODO: not needed?
11 DONT_DEBUG = []
12
13 LOCALHOST = 'localhost'
14
15 RUNNERS = {
16 'module': run_module, # python -m spam
17 'script': run_file, # python spam.py
18 'code': run_file, # python -c 'print("spam")'
19 None: run_file, # catchall
20 }
21
22
23 def debug(filename, port_num, debug_id, debug_options, run_as,
24 _runners=RUNNERS, _extra=None, *args, **kwargs):
25 # TODO: docstring
26 if _extra is None:
27 _extra = sys.argv[1:]
28 address = (LOCALHOST, port_num)
29 try:
30 run = _runners[run_as]
31 except KeyError:
32 # TODO: fail?
33 run = _runners[None]
34 if _extra:
35 args = _extra + list(args)
36 kwargs.setdefault('singlesession', True)
37 run(address, filename, *args, **kwargs)
38
39
40 def run(filename, port_num, run_as,
41 *args, **kwargs):
42 address = (LOCALHOST, port_num)
43 run_main(address, filename, run_as, *args, **kwargs)
44
[end of src/ptvsd/debugger.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/ptvsd/debugger.py b/src/ptvsd/debugger.py
--- a/src/ptvsd/debugger.py
+++ b/src/ptvsd/debugger.py
@@ -4,6 +4,7 @@
import sys
+import ptvsd.log
from ptvsd._local import run_module, run_file, run_main
@@ -22,7 +23,10 @@
def debug(filename, port_num, debug_id, debug_options, run_as,
_runners=RUNNERS, _extra=None, *args, **kwargs):
- # TODO: docstring
+
+ ptvsd.log.to_file()
+ ptvsd.log.info('debug{0!r}', (filename, port_num, debug_id, debug_options, run_as))
+
if _extra is None:
_extra = sys.argv[1:]
address = (LOCALHOST, port_num)
@@ -39,5 +43,9 @@
def run(filename, port_num, run_as,
*args, **kwargs):
+
+ ptvsd.log.to_file()
+ ptvsd.log.info('run{0!r}', (filename, port_num, run_as))
+
address = (LOCALHOST, port_num)
run_main(address, filename, run_as, *args, **kwargs)
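Both VS entry points now initialize file logging before recording their arguments, mirroring what the VS Code path already does, so `PTVSD_LOG_DIR` is honoured when the debugger is launched from Visual Studio. A rough smoke test of that behaviour (the log directory, script name and port are made up, and the launch line is left commented out because it would start a debuggee):

```python
import glob
import os

# Point ptvsd's file logging at a scratch directory before the entry point runs.
os.environ["PTVSD_LOG_DIR"] = "/tmp/ptvsd-logs"

import ptvsd.debugger

# Mimics how VS launches a plain script; uncomment with a real example.py present.
# ptvsd.debugger.run("example.py", 5678, "script")

# After a run, log files should now appear here instead of nothing being written.
print(glob.glob("/tmp/ptvsd-logs/*"))
```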
| {"golden_diff": "diff --git a/src/ptvsd/debugger.py b/src/ptvsd/debugger.py\n--- a/src/ptvsd/debugger.py\n+++ b/src/ptvsd/debugger.py\n@@ -4,6 +4,7 @@\n \n import sys\n \n+import ptvsd.log\n from ptvsd._local import run_module, run_file, run_main\n \n \n@@ -22,7 +23,10 @@\n \n def debug(filename, port_num, debug_id, debug_options, run_as,\n _runners=RUNNERS, _extra=None, *args, **kwargs):\n- # TODO: docstring\n+\n+ ptvsd.log.to_file()\n+ ptvsd.log.info('debug{0!r}', (filename, port_num, debug_id, debug_options, run_as))\n+\n if _extra is None:\n _extra = sys.argv[1:]\n address = (LOCALHOST, port_num)\n@@ -39,5 +43,9 @@\n \n def run(filename, port_num, run_as,\n *args, **kwargs):\n+\n+ ptvsd.log.to_file()\n+ ptvsd.log.info('run{0!r}', (filename, port_num, run_as))\n+\n address = (LOCALHOST, port_num)\n run_main(address, filename, run_as, *args, **kwargs)\n", "issue": "PTVSD_LOG_DIR doesn't work with VS\nNo logs are generated even with the environment variable set. It looks like logging initialization is missing on the VS entry point (`debugger.py`).\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See LICENSE in the project root\n# for license information.\n\nimport sys\n\nfrom ptvsd._local import run_module, run_file, run_main\n\n\n# TODO: not needed?\nDONT_DEBUG = []\n\nLOCALHOST = 'localhost'\n\nRUNNERS = {\n 'module': run_module, # python -m spam\n 'script': run_file, # python spam.py\n 'code': run_file, # python -c 'print(\"spam\")'\n None: run_file, # catchall\n}\n\n\ndef debug(filename, port_num, debug_id, debug_options, run_as,\n _runners=RUNNERS, _extra=None, *args, **kwargs):\n # TODO: docstring\n if _extra is None:\n _extra = sys.argv[1:]\n address = (LOCALHOST, port_num)\n try:\n run = _runners[run_as]\n except KeyError:\n # TODO: fail?\n run = _runners[None]\n if _extra:\n args = _extra + list(args)\n kwargs.setdefault('singlesession', True)\n run(address, filename, *args, **kwargs)\n\n\ndef run(filename, port_num, run_as,\n *args, **kwargs):\n address = (LOCALHOST, port_num)\n run_main(address, filename, run_as, *args, **kwargs)\n", "path": "src/ptvsd/debugger.py"}]} | 978 | 295 |
gh_patches_debug_36977 | rasdani/github-patches | git_diff | bridgecrewio__checkov-4942 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Failed to run check CKV_AWS_224: TemplateAttributeError: get is invalid
**Describe the issue**
The error occurs when checking an ECS Cluster using the terraform_plan framework.
**Examples**
```
module "cluster" {
source = "terraform-aws-modules/ecs/aws"
version = "4.1.3"
cluster_name = "foo"
fargate_capacity_providers = {
FARGATE = {}
}
}
```
**Version (please complete the following information):**
- checkov 2.3.165
- terraform 1.4.5
- aws provider 4.63.0
**Additional context**
traceback:
```
2023-04-18 09:53:09,676 [MainThread ] [ERROR] Failed to run check CKV_AWS_224 on /tfplan.json:aws_ecs_cluster.this
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/checkov/common/checks/base_check.py", line 73, in run
check_result["result"] = self.scan_entity_conf(entity_configuration, entity_type)
File "/usr/local/lib/python3.9/site-packages/checkov/terraform/checks/resource/base_resource_check.py", line 43, in scan_entity_conf
return self.scan_resource_conf(conf)
File "/usr/local/lib/python3.9/site-packages/checkov/terraform/checks/resource/aws/ECSClusterLoggingEncryptedWithCMK.py", line 21, in scan_resource_conf
if log_conf.get('cloud_watch_encryption_enabled') == [True] or \
File "/usr/local/lib/python3.9/site-packages/checkov/common/parsers/node.py", line 189, in __getattr__
raise TemplateAttributeError(f'{name} is invalid')
checkov.common.parsers.node.TemplateAttributeError: get is invalid
```
This only occurs when using terraform_plan framework. It works without issue when using vanilla terraform framework.
The plan generation is just `terraform plan -out tfplan.bin && terraform show -json tfplan.bin > tfplan.json`, then running `checkov -f tfplan.json`.
Here is my checkov config file in repo:
```
➜ cat .checkov.yaml
block-list-secret-scan: []
compact: true
download-external-modules: true
evaluate-variables: true
external-modules-download-path: .external_modules
file:
- tfplan.json
framework:
- terraform_plan
mask: []
quiet: true
repo-root-for-plan-enrichment:
- .
secrets-history-timeout: 12h
secrets-scan-file-type: []
skip-check:
- CKV2_AWS_34
summary-position: top
```
</issue>
<code>
[start of checkov/terraform/checks/resource/aws/ECSClusterLoggingEncryptedWithCMK.py]
1 from checkov.common.models.enums import CheckResult, CheckCategories
2 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
3
4
5 class ECSClusterLoggingEncryptedWithCMK(BaseResourceCheck):
6 def __init__(self):
7 name = "Ensure Cluster logging with CMK"
8 id = "CKV_AWS_224"
9 supported_resources = ['aws_ecs_cluster']
10 categories = [CheckCategories.ENCRYPTION]
11 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
12
13 def scan_resource_conf(self, conf):
14 configuration = conf.get("configuration")
15 if configuration and isinstance(configuration[0], dict) and configuration[0].get('execute_command_configuration'):
16 command_conf = configuration[0].get('execute_command_configuration')[0]
17 if not command_conf.get('logging') == ['NONE']:
18 if command_conf.get('kms_key_id'):
19 if command_conf.get('log_configuration'):
20 log_conf = command_conf.get('log_configuration')[0]
21 if log_conf.get('cloud_watch_encryption_enabled') == [True] or \
22 log_conf.get('s3_bucket_encryption_enabled') == [True]:
23 return CheckResult.PASSED
24 return CheckResult.FAILED
25 else:
26 return CheckResult.FAILED
27
28 return CheckResult.UNKNOWN
29
30
31 check = ECSClusterLoggingEncryptedWithCMK()
32
[end of checkov/terraform/checks/resource/aws/ECSClusterLoggingEncryptedWithCMK.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checkov/terraform/checks/resource/aws/ECSClusterLoggingEncryptedWithCMK.py b/checkov/terraform/checks/resource/aws/ECSClusterLoggingEncryptedWithCMK.py
--- a/checkov/terraform/checks/resource/aws/ECSClusterLoggingEncryptedWithCMK.py
+++ b/checkov/terraform/checks/resource/aws/ECSClusterLoggingEncryptedWithCMK.py
@@ -1,28 +1,36 @@
+from __future__ import annotations
+
+from typing import Any
+
from checkov.common.models.enums import CheckResult, CheckCategories
from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
class ECSClusterLoggingEncryptedWithCMK(BaseResourceCheck):
- def __init__(self):
- name = "Ensure Cluster logging with CMK"
+ def __init__(self) -> None:
+ name = "Ensure ECS Cluster logging uses CMK"
id = "CKV_AWS_224"
- supported_resources = ['aws_ecs_cluster']
- categories = [CheckCategories.ENCRYPTION]
+ supported_resources = ("aws_ecs_cluster",)
+ categories = (CheckCategories.ENCRYPTION,)
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
- def scan_resource_conf(self, conf):
+ def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:
configuration = conf.get("configuration")
- if configuration and isinstance(configuration[0], dict) and configuration[0].get('execute_command_configuration'):
- command_conf = configuration[0].get('execute_command_configuration')[0]
- if not command_conf.get('logging') == ['NONE']:
- if command_conf.get('kms_key_id'):
- if command_conf.get('log_configuration'):
- log_conf = command_conf.get('log_configuration')[0]
- if log_conf.get('cloud_watch_encryption_enabled') == [True] or \
- log_conf.get('s3_bucket_encryption_enabled') == [True]:
- return CheckResult.PASSED
- return CheckResult.FAILED
- else:
+ if configuration and isinstance(configuration, list) and isinstance(configuration[0], dict):
+ execute_command = configuration[0].get("execute_command_configuration")
+ if execute_command and isinstance(execute_command, list):
+ execute_command = execute_command[0]
+ if isinstance(execute_command, dict) and not execute_command.get("logging") == ["NONE"]:
+ if execute_command.get("kms_key_id"):
+ log_conf = execute_command.get("log_configuration")
+ if log_conf and isinstance(log_conf, list):
+ log_conf = log_conf[0]
+ if isinstance(log_conf, dict) and (
+ log_conf.get("cloud_watch_encryption_enabled") == [True]
+ or log_conf.get("s3_bucket_encryption_enabled") == [True]
+ ):
+ return CheckResult.PASSED
+
return CheckResult.FAILED
return CheckResult.UNKNOWN
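The hardened check only descends a level after confirming it is a non-empty list wrapping a dict, which is what shields it from the string and list shapes a `terraform show -json` rendering can produce. A quick probe of both shapes against the module-level `check` object (the conf dictionaries below are hand-written approximations, not captured plan output):

```python
from checkov.terraform.checks.resource.aws.ECSClusterLoggingEncryptedWithCMK import check

# HCL-style conf: every level is a list-wrapped dict, CMK and encryption present.
hcl_conf = {
    "configuration": [{
        "execute_command_configuration": [{
            "kms_key_id": ["alias/ecs-exec"],
            "logging": ["OVERRIDE"],
            "log_configuration": [{"cloud_watch_encryption_enabled": [True]}],
        }]
    }]
}

# Plan-style conf: log_configuration collapses to an empty inner list, the kind of
# value the old code dereferenced with .get() and blew up on.
plan_conf = {
    "configuration": [{
        "execute_command_configuration": [{
            "kms_key_id": ["alias/ecs-exec"],
            "logging": ["DEFAULT"],
            "log_configuration": [[]],
        }]
    }]
}

print(check.scan_resource_conf(hcl_conf))   # expected: CheckResult.PASSED
print(check.scan_resource_conf(plan_conf))  # expected: CheckResult.FAILED, no exception
```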
| {"golden_diff": "diff --git a/checkov/terraform/checks/resource/aws/ECSClusterLoggingEncryptedWithCMK.py b/checkov/terraform/checks/resource/aws/ECSClusterLoggingEncryptedWithCMK.py\n--- a/checkov/terraform/checks/resource/aws/ECSClusterLoggingEncryptedWithCMK.py\n+++ b/checkov/terraform/checks/resource/aws/ECSClusterLoggingEncryptedWithCMK.py\n@@ -1,28 +1,36 @@\n+from __future__ import annotations\n+\n+from typing import Any\n+\n from checkov.common.models.enums import CheckResult, CheckCategories\n from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n \n \n class ECSClusterLoggingEncryptedWithCMK(BaseResourceCheck):\n- def __init__(self):\n- name = \"Ensure Cluster logging with CMK\"\n+ def __init__(self) -> None:\n+ name = \"Ensure ECS Cluster logging uses CMK\"\n id = \"CKV_AWS_224\"\n- supported_resources = ['aws_ecs_cluster']\n- categories = [CheckCategories.ENCRYPTION]\n+ supported_resources = (\"aws_ecs_cluster\",)\n+ categories = (CheckCategories.ENCRYPTION,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n \n- def scan_resource_conf(self, conf):\n+ def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:\n configuration = conf.get(\"configuration\")\n- if configuration and isinstance(configuration[0], dict) and configuration[0].get('execute_command_configuration'):\n- command_conf = configuration[0].get('execute_command_configuration')[0]\n- if not command_conf.get('logging') == ['NONE']:\n- if command_conf.get('kms_key_id'):\n- if command_conf.get('log_configuration'):\n- log_conf = command_conf.get('log_configuration')[0]\n- if log_conf.get('cloud_watch_encryption_enabled') == [True] or \\\n- log_conf.get('s3_bucket_encryption_enabled') == [True]:\n- return CheckResult.PASSED\n- return CheckResult.FAILED\n- else:\n+ if configuration and isinstance(configuration, list) and isinstance(configuration[0], dict):\n+ execute_command = configuration[0].get(\"execute_command_configuration\")\n+ if execute_command and isinstance(execute_command, list):\n+ execute_command = execute_command[0]\n+ if isinstance(execute_command, dict) and not execute_command.get(\"logging\") == [\"NONE\"]:\n+ if execute_command.get(\"kms_key_id\"):\n+ log_conf = execute_command.get(\"log_configuration\")\n+ if log_conf and isinstance(log_conf, list):\n+ log_conf = log_conf[0]\n+ if isinstance(log_conf, dict) and (\n+ log_conf.get(\"cloud_watch_encryption_enabled\") == [True]\n+ or log_conf.get(\"s3_bucket_encryption_enabled\") == [True]\n+ ):\n+ return CheckResult.PASSED\n+\n return CheckResult.FAILED\n \n return CheckResult.UNKNOWN\n", "issue": "Failed to run check CKV_AWS_224: TemplateAttributeError: get is invalid\n**Describe the issue**\r\nError occurs when checked ECS Cluster using terraform_plan framework.\r\n\r\n**Examples**\r\n```\r\nmodule \"cluster\" {\r\n source = \"terraform-aws-modules/ecs/aws\"\r\n version = \"4.1.3\"\r\n\r\n cluster_name = \"foo\"\r\n fargate_capacity_providers = {\r\n FARGATE = {}\r\n }\r\n}\r\n```\r\n\r\n**Version (please complete the following information):**\r\n- checkov 2.3.165\r\n- terraform 1.4.5\r\n- aws provider 4.63.0\r\n\r\n**Additional context**\r\ntraceback:\r\n```\r\n2023-04-18 09:53:09,676 [MainThread ] [ERROR] Failed to run check CKV_AWS_224 on /tfplan.json:aws_ecs_cluster.this\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.9/site-packages/checkov/common/checks/base_check.py\", line 73, in run\r\n check_result[\"result\"] = 
self.scan_entity_conf(entity_configuration, entity_type)\r\n File \"/usr/local/lib/python3.9/site-packages/checkov/terraform/checks/resource/base_resource_check.py\", line 43, in scan_entity_conf\r\n return self.scan_resource_conf(conf)\r\n File \"/usr/local/lib/python3.9/site-packages/checkov/terraform/checks/resource/aws/ECSClusterLoggingEncryptedWithCMK.py\", line 21, in scan_resource_conf\r\n if log_conf.get('cloud_watch_encryption_enabled') == [True] or \\\r\n File \"/usr/local/lib/python3.9/site-packages/checkov/common/parsers/node.py\", line 189, in __getattr__\r\n raise TemplateAttributeError(f'{name} is invalid')\r\ncheckov.common.parsers.node.TemplateAttributeError: get is invalid\r\n```\r\n\r\nThis only occurs when using terraform_plan framework. It works without issue when using vanilla terraform framework.\r\n\r\nThe plan generation is just `terraform plan -out tfplan.bin && terraform show -json tfplan.bin > tfplan.json` then running `checkof -f tfplan.json`.\r\n\r\nHere is my checkov config file in repo:\r\n```\r\n\u279c cat .checkov.yaml \r\nblock-list-secret-scan: []\r\ncompact: true\r\ndownload-external-modules: true\r\nevaluate-variables: true\r\nexternal-modules-download-path: .external_modules\r\nfile:\r\n- tfplan.json\r\nframework:\r\n- terraform_plan\r\nmask: []\r\nquiet: true\r\nrepo-root-for-plan-enrichment:\r\n- .\r\nsecrets-history-timeout: 12h\r\nsecrets-scan-file-type: []\r\nskip-check:\r\n- CKV2_AWS_34\r\nsummary-position: top\r\n```\r\n\n", "before_files": [{"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\n\nclass ECSClusterLoggingEncryptedWithCMK(BaseResourceCheck):\n def __init__(self):\n name = \"Ensure Cluster logging with CMK\"\n id = \"CKV_AWS_224\"\n supported_resources = ['aws_ecs_cluster']\n categories = [CheckCategories.ENCRYPTION]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n configuration = conf.get(\"configuration\")\n if configuration and isinstance(configuration[0], dict) and configuration[0].get('execute_command_configuration'):\n command_conf = configuration[0].get('execute_command_configuration')[0]\n if not command_conf.get('logging') == ['NONE']:\n if command_conf.get('kms_key_id'):\n if command_conf.get('log_configuration'):\n log_conf = command_conf.get('log_configuration')[0]\n if log_conf.get('cloud_watch_encryption_enabled') == [True] or \\\n log_conf.get('s3_bucket_encryption_enabled') == [True]:\n return CheckResult.PASSED\n return CheckResult.FAILED\n else:\n return CheckResult.FAILED\n\n return CheckResult.UNKNOWN\n\n\ncheck = ECSClusterLoggingEncryptedWithCMK()\n", "path": "checkov/terraform/checks/resource/aws/ECSClusterLoggingEncryptedWithCMK.py"}]} | 1,525 | 671 |
gh_patches_debug_16904 | rasdani/github-patches | git_diff | saleor__saleor-5443 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Creating a new sale raises error in Celery task
### Steps to reproduce the problem
1. Run the following mutation as an admin user (with `MANAGE_DISCOUNTS` permission):
```
mutation {
saleCreate(input: {name: "Test"}) {
errors {
field
message
}
sale {
id
name
}
}
}
```
The response from the API is successful, but in the Django server console I'm getting the following error:
```
ERROR celery.app.trace Task saleor.product.tasks.update_products_minimal_variant_prices_of_discount_task[4ec46245-d1f1-47ae-ab23-0c0ab73a9981] raised unexpected: ValueError('Provide at least one of the ID lists:\n\tproduct_ids,\n\tcategory_ids,\n\tcollection_ids.') [PID:31316:Thread-175]
Traceback (most recent call last):
File "/Users/marcin/.pyenv/versions/saleor3.8.1/lib/python3.8/site-packages/celery/app/trace.py", line 385, in trace_task
R = retval = fun(*args, **kwargs)
File "/Users/marcin/mirumee/saleor-platform/saleor/saleor/product/tasks.py", line 64, in update_products_minimal_variant_prices_of_discount_task
update_products_minimal_variant_prices_of_discount(discount)
File "/Users/marcin/mirumee/saleor-platform/saleor/saleor/product/utils/variant_prices.py", line 76, in update_products_minimal_variant_prices_of_discount
update_products_minimal_variant_prices_of_catalogues(
File "/Users/marcin/mirumee/saleor-platform/saleor/saleor/product/utils/variant_prices.py", line 62, in update_products_minimal_variant_prices_of_catalogues
raise ValueError(
ValueError: Provide at least one of the ID lists:
product_ids,
category_ids,
collection_ids.
```
I suppose that the Celery task that recalculates minimal variant prices is run even when there are no products to update. Probably an additional check needs to be added to not run the task in this case.
</issue>
<code>
[start of saleor/product/utils/variant_prices.py]
1 import operator
2 from functools import reduce
3
4 from django.db.models.query_utils import Q
5 from prices import Money
6
7 from ...discount.utils import fetch_active_discounts
8 from ..models import Product
9
10
11 def _get_product_minimal_variant_price(product, discounts) -> Money:
12 # Start with the product's price as the minimal one
13 minimal_variant_price = product.price
14 for variant in product.variants.all():
15 variant_price = variant.get_price(discounts=discounts)
16 minimal_variant_price = min(minimal_variant_price, variant_price)
17 return minimal_variant_price
18
19
20 def update_product_minimal_variant_price(product, discounts=None, save=True):
21 if discounts is None:
22 discounts = fetch_active_discounts()
23 minimal_variant_price = _get_product_minimal_variant_price(product, discounts)
24 if product.minimal_variant_price != minimal_variant_price:
25 product.minimal_variant_price_amount = minimal_variant_price.amount
26 if save:
27 product.save(update_fields=["minimal_variant_price_amount", "updated_at"])
28 return product
29
30
31 def update_products_minimal_variant_prices(products, discounts=None):
32 if discounts is None:
33 discounts = fetch_active_discounts()
34 changed_products_to_update = []
35 for product in products:
36 old_minimal_variant_price = product.minimal_variant_price
37 updated_product = update_product_minimal_variant_price(
38 product, discounts, save=False
39 )
40 # Check if the "minimal_variant_price" has changed
41 if updated_product.minimal_variant_price != old_minimal_variant_price:
42 changed_products_to_update.append(updated_product)
43 # Bulk update the changed products
44 Product.objects.bulk_update(
45 changed_products_to_update, ["minimal_variant_price_amount"]
46 )
47
48
49 def update_products_minimal_variant_prices_of_catalogues(
50 product_ids=None, category_ids=None, collection_ids=None
51 ):
52 # Building the matching products query
53 q_list = []
54 if product_ids:
55 q_list.append(Q(pk__in=product_ids))
56 if category_ids:
57 q_list.append(Q(category_id__in=category_ids))
58 if collection_ids:
59 q_list.append(Q(collectionproduct__collection_id__in=collection_ids))
60 # Asserting that the function was called with some ids
61 if not q_list:
62 raise ValueError(
63 "Provide at least one of the ID lists:\n"
64 "\tproduct_ids,\n"
65 "\tcategory_ids,\n"
66 "\tcollection_ids."
67 )
68 # Querying the products
69 q_or = reduce(operator.or_, q_list)
70 products = Product.objects.filter(q_or).distinct()
71
72 update_products_minimal_variant_prices(products)
73
74
75 def update_products_minimal_variant_prices_of_discount(discount):
76 update_products_minimal_variant_prices_of_catalogues(
77 product_ids=discount.products.all().values_list("id", flat=True),
78 category_ids=discount.categories.all().values_list("id", flat=True),
79 collection_ids=discount.collections.all().values_list("id", flat=True),
80 )
81
[end of saleor/product/utils/variant_prices.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/saleor/product/utils/variant_prices.py b/saleor/product/utils/variant_prices.py
--- a/saleor/product/utils/variant_prices.py
+++ b/saleor/product/utils/variant_prices.py
@@ -58,18 +58,12 @@
if collection_ids:
q_list.append(Q(collectionproduct__collection_id__in=collection_ids))
# Asserting that the function was called with some ids
- if not q_list:
- raise ValueError(
- "Provide at least one of the ID lists:\n"
- "\tproduct_ids,\n"
- "\tcategory_ids,\n"
- "\tcollection_ids."
- )
- # Querying the products
- q_or = reduce(operator.or_, q_list)
- products = Product.objects.filter(q_or).distinct()
+ if q_list:
+ # Querying the products
+ q_or = reduce(operator.or_, q_list)
+ products = Product.objects.filter(q_or).distinct()
- update_products_minimal_variant_prices(products)
+ update_products_minimal_variant_prices(products)
def update_products_minimal_variant_prices_of_discount(discount):
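With the guard inverted, a sale that has no products, categories or collections attached simply skips the recalculation instead of raising, which is exactly the state right after `saleCreate` with only a name. An illustrative invocation from a configured Saleor/Django shell, with empty ID lists standing in for the new sale's empty relations:

```python
from saleor.product.utils.variant_prices import (
    update_products_minimal_variant_prices_of_catalogues,
)

# Before the patch this raised ValueError("Provide at least one of the ID lists ...");
# after it, the call returns without building any product query.
update_products_minimal_variant_prices_of_catalogues(
    product_ids=[], category_ids=[], collection_ids=[]
)
```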
| {"golden_diff": "diff --git a/saleor/product/utils/variant_prices.py b/saleor/product/utils/variant_prices.py\n--- a/saleor/product/utils/variant_prices.py\n+++ b/saleor/product/utils/variant_prices.py\n@@ -58,18 +58,12 @@\n if collection_ids:\n q_list.append(Q(collectionproduct__collection_id__in=collection_ids))\n # Asserting that the function was called with some ids\n- if not q_list:\n- raise ValueError(\n- \"Provide at least one of the ID lists:\\n\"\n- \"\\tproduct_ids,\\n\"\n- \"\\tcategory_ids,\\n\"\n- \"\\tcollection_ids.\"\n- )\n- # Querying the products\n- q_or = reduce(operator.or_, q_list)\n- products = Product.objects.filter(q_or).distinct()\n+ if q_list:\n+ # Querying the products\n+ q_or = reduce(operator.or_, q_list)\n+ products = Product.objects.filter(q_or).distinct()\n \n- update_products_minimal_variant_prices(products)\n+ update_products_minimal_variant_prices(products)\n \n \n def update_products_minimal_variant_prices_of_discount(discount):\n", "issue": "Creating a new sale raises error in Celery task\n### Steps to reproduce the problem\r\n1. Run the following mutation as an admin user (with `MANAGE_DISCOUNTS` permission):\r\n```\r\nmutation {\r\n saleCreate(input: {name: \"Test\"}) {\r\n errors {\r\n field\r\n message\r\n }\r\n sale {\r\n id\r\n name\r\n }\r\n }\r\n}\r\n```\r\n\r\nThe response from API is successful, but in the Django server console I'm getting the following error:\r\n\r\n```\r\nERROR celery.app.trace Task saleor.product.tasks.update_products_minimal_variant_prices_of_discount_task[4ec46245-d1f1-47ae-ab23-0c0ab73a9981] raised unexpected: ValueError('Provide at least one of the ID lists:\\n\\tproduct_ids,\\n\\tcategory_ids,\\n\\tcollection_ids.') [PID:31316:Thread-175]\r\nTraceback (most recent call last):\r\n File \"/Users/marcin/.pyenv/versions/saleor3.8.1/lib/python3.8/site-packages/celery/app/trace.py\", line 385, in trace_task\r\n R = retval = fun(*args, **kwargs)\r\n File \"/Users/marcin/mirumee/saleor-platform/saleor/saleor/product/tasks.py\", line 64, in update_products_minimal_variant_prices_of_discount_task\r\n update_products_minimal_variant_prices_of_discount(discount)\r\n File \"/Users/marcin/mirumee/saleor-platform/saleor/saleor/product/utils/variant_prices.py\", line 76, in update_products_minimal_variant_prices_of_discount\r\n update_products_minimal_variant_prices_of_catalogues(\r\n File \"/Users/marcin/mirumee/saleor-platform/saleor/saleor/product/utils/variant_prices.py\", line 62, in update_products_minimal_variant_prices_of_catalogues\r\n raise ValueError(\r\nValueError: Provide at least one of the ID lists:\r\n\tproduct_ids,\r\n\tcategory_ids,\r\n\tcollection_ids.\r\n```\r\n\r\nI suppose that the Celery task that recalculates minimal variant prices is run even there are no products to update. 
Probably an additional check needs to be added to not run the task in this case.\n", "before_files": [{"content": "import operator\nfrom functools import reduce\n\nfrom django.db.models.query_utils import Q\nfrom prices import Money\n\nfrom ...discount.utils import fetch_active_discounts\nfrom ..models import Product\n\n\ndef _get_product_minimal_variant_price(product, discounts) -> Money:\n # Start with the product's price as the minimal one\n minimal_variant_price = product.price\n for variant in product.variants.all():\n variant_price = variant.get_price(discounts=discounts)\n minimal_variant_price = min(minimal_variant_price, variant_price)\n return minimal_variant_price\n\n\ndef update_product_minimal_variant_price(product, discounts=None, save=True):\n if discounts is None:\n discounts = fetch_active_discounts()\n minimal_variant_price = _get_product_minimal_variant_price(product, discounts)\n if product.minimal_variant_price != minimal_variant_price:\n product.minimal_variant_price_amount = minimal_variant_price.amount\n if save:\n product.save(update_fields=[\"minimal_variant_price_amount\", \"updated_at\"])\n return product\n\n\ndef update_products_minimal_variant_prices(products, discounts=None):\n if discounts is None:\n discounts = fetch_active_discounts()\n changed_products_to_update = []\n for product in products:\n old_minimal_variant_price = product.minimal_variant_price\n updated_product = update_product_minimal_variant_price(\n product, discounts, save=False\n )\n # Check if the \"minimal_variant_price\" has changed\n if updated_product.minimal_variant_price != old_minimal_variant_price:\n changed_products_to_update.append(updated_product)\n # Bulk update the changed products\n Product.objects.bulk_update(\n changed_products_to_update, [\"minimal_variant_price_amount\"]\n )\n\n\ndef update_products_minimal_variant_prices_of_catalogues(\n product_ids=None, category_ids=None, collection_ids=None\n):\n # Building the matching products query\n q_list = []\n if product_ids:\n q_list.append(Q(pk__in=product_ids))\n if category_ids:\n q_list.append(Q(category_id__in=category_ids))\n if collection_ids:\n q_list.append(Q(collectionproduct__collection_id__in=collection_ids))\n # Asserting that the function was called with some ids\n if not q_list:\n raise ValueError(\n \"Provide at least one of the ID lists:\\n\"\n \"\\tproduct_ids,\\n\"\n \"\\tcategory_ids,\\n\"\n \"\\tcollection_ids.\"\n )\n # Querying the products\n q_or = reduce(operator.or_, q_list)\n products = Product.objects.filter(q_or).distinct()\n\n update_products_minimal_variant_prices(products)\n\n\ndef update_products_minimal_variant_prices_of_discount(discount):\n update_products_minimal_variant_prices_of_catalogues(\n product_ids=discount.products.all().values_list(\"id\", flat=True),\n category_ids=discount.categories.all().values_list(\"id\", flat=True),\n collection_ids=discount.collections.all().values_list(\"id\", flat=True),\n )\n", "path": "saleor/product/utils/variant_prices.py"}]} | 1,802 | 254 |
gh_patches_debug_3508 | rasdani/github-patches | git_diff | translate__pootle-6497 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Top scorers list includes zero score users
The top scorer list in e.g. `/af/?details` includes a number of users with zero score.
I'm doubtful that these contributed in the last 30 days. So they shouldn't be on the list at all.

</issue>
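One reading of the report: the aggregation itself is fine, but nothing drops rows whose summed score is zero, so dormant users still surface in the sidebar. A hedged sketch of the kind of aggregate filter that would express that intent, written against the `get_top_scorers` queryset shown in the file below (not necessarily the shape of the final fix):

```python
from django.db.models import Sum


def top_scorers_with_activity(scores_qs):
    # Same projection and aggregation as Scores.get_top_scorers, plus a filter on
    # the aggregated alias so zero-score rows never reach the displayed list.
    return (
        scores_qs.values("user__username", "user__email", "user__full_name")
        .annotate(Sum("score"), Sum("suggested"), Sum("reviewed"), Sum("translated"))
        .filter(score__sum__gt=0)
        .order_by("-score__sum")
    )
```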
<code>
[start of pootle/apps/pootle_score/utils.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 from datetime import date, datetime, timedelta
10
11 import pytz
12
13 from django.contrib.auth import get_user_model
14 from django.db.models import Sum
15 from django.utils.functional import cached_property
16
17 from pootle.core.decorators import persistent_property
18 from pootle.core.delegate import display, revision, scores
19 from pootle.core.utils.timezone import localdate, make_aware
20 from pootle_app.models import Directory
21 from pootle_language.models import Language
22
23 from .apps import PootleScoreConfig
24 from .models import UserTPScore
25
26
27 User = get_user_model()
28
29
30 def to_datetime(possible_dt):
31 if possible_dt is None:
32 return
33 if isinstance(possible_dt, datetime):
34 return possible_dt
35 if isinstance(possible_dt, date):
36 return make_aware(
37 datetime.combine(
38 possible_dt,
39 datetime.min.time())).astimezone(
40 pytz.timezone("UTC"))
41
42
43 class Scores(object):
44 ns = "pootle.score"
45 sw_version = PootleScoreConfig.version
46
47 def __init__(self, context):
48 self.context = context
49
50 @property
51 def revision(self):
52 return revision.get(Directory)(
53 self.context.directory).get(key="stats")
54
55 @property
56 def score_model(self):
57 return UserTPScore.objects.exclude(
58 user__username__in=User.objects.META_USERS)
59
60 def get_daterange(self, days):
61 now = localdate()
62 return now - timedelta(days), now
63
64 def scores_within_days(self, days):
65 return self.score_model.filter(
66 date__range=self.get_daterange(days))
67
68 def get_scores(self, days):
69 return self.filter_scores(self.scores_within_days(days))
70
71 def get_top_scorers(self, days=30):
72 """Returns users with the top scores.
73
74 :param days: period of days to account for scores.
75 """
76 return self.get_scores(days).order_by("user__username").values(
77 "user__username", "user__email", "user__full_name").annotate(
78 Sum("score"),
79 Sum("suggested"),
80 Sum("reviewed"),
81 Sum("translated")).order_by("-score__sum")
82
83 def filter_scores(self, qs):
84 return qs
85
86 @persistent_property
87 def top_scorers(self):
88 return tuple(self.get_top_scorers())
89
90 def display(self, offset=0, limit=5, language=None, formatter=None):
91 scorers = self.top_scorers
92 if offset or limit:
93 scorers = list(scorers)
94 if offset:
95 scorers = scorers[offset:]
96 if limit:
97 scorers = scorers[:limit]
98 return display.get(Scores)(
99 top_scores=scorers,
100 formatter=formatter,
101 language=language)
102
103
104 class LanguageScores(Scores):
105 ns = "pootle.score.language"
106
107 @cached_property
108 def cache_key(self):
109 return (
110 "%s.%s.%s"
111 % (self.context.code,
112 localdate(),
113 self.revision))
114
115 def filter_scores(self, qs):
116 return qs.filter(tp__language_id=self.context.id)
117
118
119 class ProjectScores(Scores):
120 ns = "pootle.score.project"
121
122 @cached_property
123 def cache_key(self):
124 return (
125 "%s.%s.%s"
126 % (self.context.code,
127 localdate(),
128 self.revision))
129
130 def filter_scores(self, qs):
131 return qs.filter(tp__project_id=self.context.id)
132
133
134 class ProjectSetScores(Scores):
135 ns = "pootle.score.projects"
136
137 @cached_property
138 def cache_key(self):
139 return (
140 "%s.%s"
141 % (localdate(),
142 self.revision))
143
144
145 class TPScores(Scores):
146 ns = "pootle.score.tp"
147
148 @cached_property
149 def cache_key(self):
150 return (
151 "%s/%s.%s.%s"
152 % (self.context.language.code,
153 self.context.project.code,
154 localdate(),
155 self.revision))
156
157 def filter_scores(self, qs):
158 return qs.filter(tp_id=self.context.id)
159
160
161 class UserScores(Scores):
162 ns = "pootle.score.user"
163
164 @cached_property
165 def cache_key(self):
166 return (
167 "%s.%s.%s"
168 % (self.context.id,
169 localdate(),
170 self.revision))
171
172 @property
173 def revision(self):
174 return revision.get(Directory)(
175 Directory.objects.projects).get(key="stats")
176
177 @property
178 def score_model(self):
179 return self.context.scores
180
181 @property
182 def public_score(self):
183 return self.context.public_score
184
185 @persistent_property
186 def top_language(self):
187 return self.get_top_language()
188
189 def get_top_language_within(self, days):
190 top_lang = self.get_scores_by_language(
191 days).order_by("score__sum").first()
192 if top_lang:
193 return Language.objects.get(id=top_lang["tp__language"])
194
195 def get_scores_by_language(self, days):
196 """Languages that the user has contributed to in the last `days`,
197 and the summary score
198 """
199 return self.get_scores(days).order_by(
200 "tp__language").values("tp__language").annotate(Sum("score"))
201
202 def get_language_top_scores(self, language):
203 return scores.get(language.__class__)(language).top_scorers
204
205 def get_top_language(self, days=30):
206 """Returns the top language the user has contributed to and its
207 position.
208
209 "Top language" is defined as the language with the highest
210 aggregate score delta within the last `days` days.
211
212 :param days: period of days to account for scores.
213 :return: Tuple of `(position, Language)`. If there's no delta in
214 the score for the given period for any of the languages,
215 `(-1, None)` is returned.
216 """
217 language = self.get_top_language_within(days)
218 if language:
219 # this only gets scores for the last 30 days as that is cached
220 language_scores = self.get_language_top_scores(language)
221 for index, user_score in enumerate(language_scores):
222 if user_score['user__username'] == self.context.username:
223 return index + 1, language
224 return -1, language
225
[end of pootle/apps/pootle_score/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pootle/apps/pootle_score/utils.py b/pootle/apps/pootle_score/utils.py
--- a/pootle/apps/pootle_score/utils.py
+++ b/pootle/apps/pootle_score/utils.py
@@ -78,7 +78,8 @@
Sum("score"),
Sum("suggested"),
Sum("reviewed"),
- Sum("translated")).order_by("-score__sum")
+ Sum("translated")).filter(
+ score__sum__gt=0).order_by("-score__sum")
def filter_scores(self, qs):
return qs
| {"golden_diff": "diff --git a/pootle/apps/pootle_score/utils.py b/pootle/apps/pootle_score/utils.py\n--- a/pootle/apps/pootle_score/utils.py\n+++ b/pootle/apps/pootle_score/utils.py\n@@ -78,7 +78,8 @@\n Sum(\"score\"),\n Sum(\"suggested\"),\n Sum(\"reviewed\"),\n- Sum(\"translated\")).order_by(\"-score__sum\")\n+ Sum(\"translated\")).filter(\n+ score__sum__gt=0).order_by(\"-score__sum\")\n \n def filter_scores(self, qs):\n return qs\n", "issue": "Top scorers list includes zero score users\nThe top scorer list in e.g. `/af/?details` includes a number of users with zero score.\r\n\r\nI'm doubtful that these contributed in last 30 days. So they shouldn't be on the list at all.\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom datetime import date, datetime, timedelta\n\nimport pytz\n\nfrom django.contrib.auth import get_user_model\nfrom django.db.models import Sum\nfrom django.utils.functional import cached_property\n\nfrom pootle.core.decorators import persistent_property\nfrom pootle.core.delegate import display, revision, scores\nfrom pootle.core.utils.timezone import localdate, make_aware\nfrom pootle_app.models import Directory\nfrom pootle_language.models import Language\n\nfrom .apps import PootleScoreConfig\nfrom .models import UserTPScore\n\n\nUser = get_user_model()\n\n\ndef to_datetime(possible_dt):\n if possible_dt is None:\n return\n if isinstance(possible_dt, datetime):\n return possible_dt\n if isinstance(possible_dt, date):\n return make_aware(\n datetime.combine(\n possible_dt,\n datetime.min.time())).astimezone(\n pytz.timezone(\"UTC\"))\n\n\nclass Scores(object):\n ns = \"pootle.score\"\n sw_version = PootleScoreConfig.version\n\n def __init__(self, context):\n self.context = context\n\n @property\n def revision(self):\n return revision.get(Directory)(\n self.context.directory).get(key=\"stats\")\n\n @property\n def score_model(self):\n return UserTPScore.objects.exclude(\n user__username__in=User.objects.META_USERS)\n\n def get_daterange(self, days):\n now = localdate()\n return now - timedelta(days), now\n\n def scores_within_days(self, days):\n return self.score_model.filter(\n date__range=self.get_daterange(days))\n\n def get_scores(self, days):\n return self.filter_scores(self.scores_within_days(days))\n\n def get_top_scorers(self, days=30):\n \"\"\"Returns users with the top scores.\n\n :param days: period of days to account for scores.\n \"\"\"\n return self.get_scores(days).order_by(\"user__username\").values(\n \"user__username\", \"user__email\", \"user__full_name\").annotate(\n Sum(\"score\"),\n Sum(\"suggested\"),\n Sum(\"reviewed\"),\n Sum(\"translated\")).order_by(\"-score__sum\")\n\n def filter_scores(self, qs):\n return qs\n\n @persistent_property\n def top_scorers(self):\n return tuple(self.get_top_scorers())\n\n def display(self, offset=0, limit=5, language=None, formatter=None):\n scorers = self.top_scorers\n if offset or limit:\n scorers = list(scorers)\n if offset:\n scorers = scorers[offset:]\n if limit:\n scorers = scorers[:limit]\n return display.get(Scores)(\n top_scores=scorers,\n formatter=formatter,\n language=language)\n\n\nclass LanguageScores(Scores):\n ns = \"pootle.score.language\"\n\n @cached_property\n def cache_key(self):\n return (\n \"%s.%s.%s\"\n % 
(self.context.code,\n localdate(),\n self.revision))\n\n def filter_scores(self, qs):\n return qs.filter(tp__language_id=self.context.id)\n\n\nclass ProjectScores(Scores):\n ns = \"pootle.score.project\"\n\n @cached_property\n def cache_key(self):\n return (\n \"%s.%s.%s\"\n % (self.context.code,\n localdate(),\n self.revision))\n\n def filter_scores(self, qs):\n return qs.filter(tp__project_id=self.context.id)\n\n\nclass ProjectSetScores(Scores):\n ns = \"pootle.score.projects\"\n\n @cached_property\n def cache_key(self):\n return (\n \"%s.%s\"\n % (localdate(),\n self.revision))\n\n\nclass TPScores(Scores):\n ns = \"pootle.score.tp\"\n\n @cached_property\n def cache_key(self):\n return (\n \"%s/%s.%s.%s\"\n % (self.context.language.code,\n self.context.project.code,\n localdate(),\n self.revision))\n\n def filter_scores(self, qs):\n return qs.filter(tp_id=self.context.id)\n\n\nclass UserScores(Scores):\n ns = \"pootle.score.user\"\n\n @cached_property\n def cache_key(self):\n return (\n \"%s.%s.%s\"\n % (self.context.id,\n localdate(),\n self.revision))\n\n @property\n def revision(self):\n return revision.get(Directory)(\n Directory.objects.projects).get(key=\"stats\")\n\n @property\n def score_model(self):\n return self.context.scores\n\n @property\n def public_score(self):\n return self.context.public_score\n\n @persistent_property\n def top_language(self):\n return self.get_top_language()\n\n def get_top_language_within(self, days):\n top_lang = self.get_scores_by_language(\n days).order_by(\"score__sum\").first()\n if top_lang:\n return Language.objects.get(id=top_lang[\"tp__language\"])\n\n def get_scores_by_language(self, days):\n \"\"\"Languages that the user has contributed to in the last `days`,\n and the summary score\n \"\"\"\n return self.get_scores(days).order_by(\n \"tp__language\").values(\"tp__language\").annotate(Sum(\"score\"))\n\n def get_language_top_scores(self, language):\n return scores.get(language.__class__)(language).top_scorers\n\n def get_top_language(self, days=30):\n \"\"\"Returns the top language the user has contributed to and its\n position.\n\n \"Top language\" is defined as the language with the highest\n aggregate score delta within the last `days` days.\n\n :param days: period of days to account for scores.\n :return: Tuple of `(position, Language)`. If there's no delta in\n the score for the given period for any of the languages,\n `(-1, None)` is returned.\n \"\"\"\n language = self.get_top_language_within(days)\n if language:\n # this only gets scores for the last 30 days as that is cached\n language_scores = self.get_language_top_scores(language)\n for index, user_score in enumerate(language_scores):\n if user_score['user__username'] == self.context.username:\n return index + 1, language\n return -1, language\n", "path": "pootle/apps/pootle_score/utils.py"}]} | 2,695 | 132 |
gh_patches_debug_15631 | rasdani/github-patches | git_diff | chainer__chainer-7401 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve error message when ChainerX is unavailable
When a user attempts to use the `cuda:0` device, ChainerX says the device specifier is invalid if `chainerx.is_available()` is `False`. It seems a bit difficult to deduce the actual problem from the message.
```
$ python train_mnist.py -d cuda:0
Traceback (most recent call last):
File "train_mnist.py", line 134, in <module>
main()
File "train_mnist.py", line 56, in main
device = chainer.get_device(args.device)
File "/path/to/chainer/chainer/backend.py", line 157, in get_device
raise ValueError('Invalid device specifier: {}'.format(device_spec))
ValueError: Invalid device specifier: cuda:0
```
</issue>
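As a concrete illustration of the requested behaviour, a guard along these lines would turn the confusing `ValueError` into a targeted message; the helper name and message wording below are illustrative sketches, not Chainer's actual API.
```python
# Sketch only: fail loudly when a ChainerX-style specifier is used without ChainerX.
import chainerx

def parse_chainerx_spec(device_spec):
    if not chainerx.is_available():
        raise RuntimeError(
            'Cannot parse ChainerX device specifier {!r}: '
            'ChainerX is not available in this build.'.format(device_spec))
    return chainerx.get_device(device_spec)
```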
<code>
[start of chainer/backend.py]
1 import numpy
2 import six
3
4 import chainer
5 from chainer.backends import _chainerx
6 from chainer.backends import _cpu
7 from chainer.backends import cuda
8 from chainer.backends import intel64
9 import chainerx
10
11 # Aliases
12 from chainer._backend import Device
13 from chainer.backends._chainerx import ChainerxDevice
14 from chainer.backends._chainerx import from_chx # NOQA
15 from chainer.backends._chainerx import to_chx # NOQA
16 from chainer.backends._cpu import CpuDevice
17 from chainer.backends.cuda import GpuDevice
18 from chainer.backends.intel64 import Intel64Device
19 from chainer import types # NOQA
20
21
22 def _contains_nan(x):
23 """Returns whether the input array has NaN values.
24
25 Args:
26 x (numpy.ndarray or cupy.ndarray): Array to be checked.
27
28 Returns:
29 bool: True if the input has NaN values.
30
31 """
32 if x.dtype.kind in ('f', 'c'):
33 device = get_device_from_array(x)
34 with chainer.using_device(device):
35 return device.xp.isnan(x).any()
36 else:
37 return False
38
39
40 def copyto(dst, src):
41 """Copies the elements of an ndarray to those of another one.
42
43 This function can copy the CPU/GPU arrays to the destination arrays on
44 another device.
45
46 Args:
47 dst (`numpy.ndarray`, `cupy.ndarray` or `ideep4py.mdarray`):
48 Destination array.
49 src (`numpy.ndarray`, `cupy.ndarray` or `ideep4py.mdarray`):
50 Source array.
51
52 """
53 if isinstance(dst, chainerx.ndarray):
54 dst[...] = _chainerx._array_to_chainerx(src, dst.device)
55 elif isinstance(dst, numpy.ndarray):
56 numpy.copyto(dst, _cpu._to_cpu(src))
57 elif isinstance(dst, intel64.mdarray):
58 intel64.ideep.basic_copyto(
59 dst, _cpu._to_cpu(src))
60 elif isinstance(dst, cuda.ndarray):
61 if isinstance(src, chainer.get_cpu_array_types()):
62 src = numpy.asarray(src)
63 if dst.flags.c_contiguous or dst.flags.f_contiguous:
64 dst.set(src)
65 else:
66 cuda.cupy.copyto(dst, cuda.to_gpu(src, device=dst.device))
67 elif isinstance(src, cuda.ndarray):
68 cuda.cupy.copyto(dst, src)
69 else:
70 raise TypeError('cannot copy from non-array object of type {}'
71 .format(type(src)))
72 else:
73 raise TypeError('cannot copy to non-array object of type {}'.format(
74 type(dst)))
75
76
77 def _guess_device_from_array_module(xp):
78 """Returns a plausible device from array module
79
80 .. warning::
81
82 There can be multiple devices for a module
83
84 """
85 if xp is cuda.cupy:
86 return cuda.GpuDevice(cuda.Device())
87 elif xp is chainerx:
88 return _chainerx.ChainerxDevice(chainerx.get_default_device())
89 else:
90 # Cannot detect intel64, because xp of intel64 is numpy.
91 return _cpu.CpuDevice()
92
93
94 def get_device(device_spec):
95 # type: (types.DeviceSpec) -> Device
96 """Returns a device object.
97
98 Args:
99 device_spec (object): Device specifier.
100 If a :class:`chainer.backend.Device` instance is given, it is
101 returned intact. Otherwise the following values are supported:
102
103 * ChainerX devices
104
105 * A string representing a device.
106 (ex. ``'native:0'``, ``'native'``)
107 * A :class:`chainerx.Device` object.
108
109 * CuPy
110
111 * A string starts with ``'@cupy:'``.
112 (ex. ``'@cupy:0'``)
113 * A :class:`cupy.cuda.Device` object.
114
115 * NumPy
116
117 * The string ``'@numpy'``.
118
119 * NumPy with Intel Architecture
120
121 * The string ``'@intel64'``.
122 """
123 if isinstance(device_spec, Device):
124 return device_spec
125
126 if isinstance(device_spec, cuda._integer_types):
127 return _get_device_cupy_or_numpy(device_spec)
128
129 if chainerx.is_available() and isinstance(device_spec, chainerx.Device):
130 return _chainerx.ChainerxDevice(device_spec)
131
132 if cuda.available and isinstance(device_spec, cuda.Device):
133 return cuda.GpuDevice(device_spec)
134
135 if isinstance(device_spec, six.string_types):
136 # '-1', '0', '1', ...
137 try:
138 int_device_spec = int(device_spec)
139 except ValueError:
140 pass
141 else:
142 return _get_device_cupy_or_numpy(int_device_spec)
143
144 if device_spec.startswith('@'):
145 # '@module:...'
146 mod_name, colon, precise_spec = device_spec[1:].partition(':')
147 if mod_name == 'numpy':
148 if not colon:
149 return _cpu.CpuDevice()
150 elif mod_name == 'cupy':
151 if colon:
152 return cuda.GpuDevice.from_device_id(int(precise_spec))
153 elif mod_name == 'intel64':
154 if not colon:
155 return intel64.Intel64Device()
156
157 elif chainerx.is_available():
158 return _chainerx.ChainerxDevice(chainerx.get_device(device_spec))
159
160 raise ValueError('Invalid device specifier: {}'.format(device_spec))
161
162
163 def _get_device_cupy_or_numpy(device_spec):
164 # legacy spec of (gpu) device
165 if device_spec >= 0:
166 return cuda.GpuDevice.from_device_id(device_spec)
167 else:
168 return _cpu.CpuDevice()
169
170
171 def using_device(device_spec):
172 """Context manager to apply the thread-local device state.
173
174 Args:
175 device_spec (object): Device specifier. See :func:`chainer.get_device`
176 for details.
177
178 .. admonition:: Example
179
180 .. testcode::
181 :skipif: doctest_helper.skipif_not_enough_cuda_devices(2)
182
183 with chainer.using_device('@cupy:1'):
184 a = cupy.empty((3, 2))
185
186 assert a.device.id == 1
187
188 """
189
190 # TODO(niboshi): Set default device (once this concept is introduced in
191 # Chainer).
192 device = get_device(device_spec)
193 return device.create_context()
194
195
196 def get_array_module(*args):
197 """Gets an appropriate NumPy-compatible module to process arguments
198
199 This function will return their data arrays' array module for
200 :class:`~chainer.Variable` arguments.
201
202 Args:
203 args: Values to determine whether NumPy, CuPy, or ChainerX should be
204 used.
205
206 Returns:
207 module: :mod:`numpy`, :mod:`cupy`, or :mod:`chainerx` is returned based
208 on the types of the arguments.
209
210 """
211 is_chainerx_available = chainerx.is_available()
212 if is_chainerx_available or cuda.available:
213 arrays = []
214 for arg in args:
215 # Unwrap arrays
216 if isinstance(arg, chainer.variable.Variable):
217 array = arg.data
218 else:
219 array = arg
220 if is_chainerx_available and isinstance(array, chainerx.ndarray):
221 return chainerx
222 arrays.append(array)
223 if cuda.available:
224 return cuda.cupy.get_array_module(*arrays)
225 return numpy
226
227
228 def get_device_from_array(*arrays):
229 """Gets the device from arrays.
230
231 The device on which the given array reside is returned.
232
233 .. note::
234
235 Unlike :func:`get_array_module`, this method does not recognize
236 :class:`~chainer.Variable` objects.
237 If you need to get device from the :class:`~chainer.Variable` instance
238 ``v``, you need to use ``get_device_from_array(v.array)``.
239
240 Args:
241 arrays (array or list of arrays):
242 Arrays to determine the device. If multiple arrays are given, the
243 device correspoinding to the first array which is not NumPy array
244 is returned.
245
246 Returns:
247 chainer.backend.Device: Device instance.
248 """
249 for array in arrays:
250 device = GpuDevice.from_array(array)
251 if device is not None:
252 return device
253
254 if isinstance(array, chainerx.ndarray):
255 return ChainerxDevice(array.device)
256
257 device = Intel64Device.from_array(array)
258 if device is not None:
259 return device
260
261 return CpuDevice()
262
[end of chainer/backend.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/chainer/backend.py b/chainer/backend.py
--- a/chainer/backend.py
+++ b/chainer/backend.py
@@ -154,7 +154,16 @@
if not colon:
return intel64.Intel64Device()
- elif chainerx.is_available():
+ else:
+ # String device specifier without '@' prefix is assumed to be a
+ # ChainerX device.
+ if not chainerx.is_available():
+ raise RuntimeError(
+ 'Tried to parse ChainerX device specifier \'{}\', '
+ 'but ChainerX is not available. '
+ 'Note that device specifiers without \'@\' prefix are '
+ 'assumed to be ChainerX device '
+ 'specifiers.'.format(device_spec))
return _chainerx.ChainerxDevice(chainerx.get_device(device_spec))
raise ValueError('Invalid device specifier: {}'.format(device_spec))
| {"golden_diff": "diff --git a/chainer/backend.py b/chainer/backend.py\n--- a/chainer/backend.py\n+++ b/chainer/backend.py\n@@ -154,7 +154,16 @@\n if not colon:\n return intel64.Intel64Device()\n \n- elif chainerx.is_available():\n+ else:\n+ # String device specifier without '@' prefix is assumed to be a\n+ # ChainerX device.\n+ if not chainerx.is_available():\n+ raise RuntimeError(\n+ 'Tried to parse ChainerX device specifier \\'{}\\', '\n+ 'but ChainerX is not available. '\n+ 'Note that device specifiers without \\'@\\' prefix are '\n+ 'assumed to be ChainerX device '\n+ 'specifiers.'.format(device_spec))\n return _chainerx.ChainerxDevice(chainerx.get_device(device_spec))\n \n raise ValueError('Invalid device specifier: {}'.format(device_spec))\n", "issue": "Improve error message when ChainerX is unavailable\nWhen a user attempt to use `cuda:0` device, ChainerX says the device specifier is invalid if `chainerx.is_available()` is `False`. It seems a bit difficult to deduce the actual problem from the message.\r\n\r\n```\r\n$ python train_mnist.py -d cuda:0\r\nTraceback (most recent call last):\r\n File \"train_mnist.py\", line 134, in <module>\r\n main()\r\n File \"train_mnist.py\", line 56, in main\r\n device = chainer.get_device(args.device)\r\n File \"/path/to/chainer/chainer/backend.py\", line 157, in get_device\r\n raise ValueError('Invalid device specifier: {}'.format(device_spec))\r\nValueError: Invalid device specifier: cuda:0\r\n```\n", "before_files": [{"content": "import numpy\nimport six\n\nimport chainer\nfrom chainer.backends import _chainerx\nfrom chainer.backends import _cpu\nfrom chainer.backends import cuda\nfrom chainer.backends import intel64\nimport chainerx\n\n# Aliases\nfrom chainer._backend import Device\nfrom chainer.backends._chainerx import ChainerxDevice\nfrom chainer.backends._chainerx import from_chx # NOQA\nfrom chainer.backends._chainerx import to_chx # NOQA\nfrom chainer.backends._cpu import CpuDevice\nfrom chainer.backends.cuda import GpuDevice\nfrom chainer.backends.intel64 import Intel64Device\nfrom chainer import types # NOQA\n\n\ndef _contains_nan(x):\n \"\"\"Returns whether the input array has NaN values.\n\n Args:\n x (numpy.ndarray or cupy.ndarray): Array to be checked.\n\n Returns:\n bool: True if the input has NaN values.\n\n \"\"\"\n if x.dtype.kind in ('f', 'c'):\n device = get_device_from_array(x)\n with chainer.using_device(device):\n return device.xp.isnan(x).any()\n else:\n return False\n\n\ndef copyto(dst, src):\n \"\"\"Copies the elements of an ndarray to those of another one.\n\n This function can copy the CPU/GPU arrays to the destination arrays on\n another device.\n\n Args:\n dst (`numpy.ndarray`, `cupy.ndarray` or `ideep4py.mdarray`):\n Destination array.\n src (`numpy.ndarray`, `cupy.ndarray` or `ideep4py.mdarray`):\n Source array.\n\n \"\"\"\n if isinstance(dst, chainerx.ndarray):\n dst[...] 
= _chainerx._array_to_chainerx(src, dst.device)\n elif isinstance(dst, numpy.ndarray):\n numpy.copyto(dst, _cpu._to_cpu(src))\n elif isinstance(dst, intel64.mdarray):\n intel64.ideep.basic_copyto(\n dst, _cpu._to_cpu(src))\n elif isinstance(dst, cuda.ndarray):\n if isinstance(src, chainer.get_cpu_array_types()):\n src = numpy.asarray(src)\n if dst.flags.c_contiguous or dst.flags.f_contiguous:\n dst.set(src)\n else:\n cuda.cupy.copyto(dst, cuda.to_gpu(src, device=dst.device))\n elif isinstance(src, cuda.ndarray):\n cuda.cupy.copyto(dst, src)\n else:\n raise TypeError('cannot copy from non-array object of type {}'\n .format(type(src)))\n else:\n raise TypeError('cannot copy to non-array object of type {}'.format(\n type(dst)))\n\n\ndef _guess_device_from_array_module(xp):\n \"\"\"Returns a plausible device from array module\n\n .. warning::\n\n There can be multiple devices for a module\n\n \"\"\"\n if xp is cuda.cupy:\n return cuda.GpuDevice(cuda.Device())\n elif xp is chainerx:\n return _chainerx.ChainerxDevice(chainerx.get_default_device())\n else:\n # Cannot detect intel64, because xp of intel64 is numpy.\n return _cpu.CpuDevice()\n\n\ndef get_device(device_spec):\n # type: (types.DeviceSpec) -> Device\n \"\"\"Returns a device object.\n\n Args:\n device_spec (object): Device specifier.\n If a :class:`chainer.backend.Device` instance is given, it is\n returned intact. Otherwise the following values are supported:\n\n * ChainerX devices\n\n * A string representing a device.\n (ex. ``'native:0'``, ``'native'``)\n * A :class:`chainerx.Device` object.\n\n * CuPy\n\n * A string starts with ``'@cupy:'``.\n (ex. ``'@cupy:0'``)\n * A :class:`cupy.cuda.Device` object.\n\n * NumPy\n\n * The string ``'@numpy'``.\n\n * NumPy with Intel Architecture\n\n * The string ``'@intel64'``.\n \"\"\"\n if isinstance(device_spec, Device):\n return device_spec\n\n if isinstance(device_spec, cuda._integer_types):\n return _get_device_cupy_or_numpy(device_spec)\n\n if chainerx.is_available() and isinstance(device_spec, chainerx.Device):\n return _chainerx.ChainerxDevice(device_spec)\n\n if cuda.available and isinstance(device_spec, cuda.Device):\n return cuda.GpuDevice(device_spec)\n\n if isinstance(device_spec, six.string_types):\n # '-1', '0', '1', ...\n try:\n int_device_spec = int(device_spec)\n except ValueError:\n pass\n else:\n return _get_device_cupy_or_numpy(int_device_spec)\n\n if device_spec.startswith('@'):\n # '@module:...'\n mod_name, colon, precise_spec = device_spec[1:].partition(':')\n if mod_name == 'numpy':\n if not colon:\n return _cpu.CpuDevice()\n elif mod_name == 'cupy':\n if colon:\n return cuda.GpuDevice.from_device_id(int(precise_spec))\n elif mod_name == 'intel64':\n if not colon:\n return intel64.Intel64Device()\n\n elif chainerx.is_available():\n return _chainerx.ChainerxDevice(chainerx.get_device(device_spec))\n\n raise ValueError('Invalid device specifier: {}'.format(device_spec))\n\n\ndef _get_device_cupy_or_numpy(device_spec):\n # legacy spec of (gpu) device\n if device_spec >= 0:\n return cuda.GpuDevice.from_device_id(device_spec)\n else:\n return _cpu.CpuDevice()\n\n\ndef using_device(device_spec):\n \"\"\"Context manager to apply the thread-local device state.\n\n Args:\n device_spec (object): Device specifier. See :func:`chainer.get_device`\n for details.\n\n .. admonition:: Example\n\n .. 
testcode::\n :skipif: doctest_helper.skipif_not_enough_cuda_devices(2)\n\n with chainer.using_device('@cupy:1'):\n a = cupy.empty((3, 2))\n\n assert a.device.id == 1\n\n \"\"\"\n\n # TODO(niboshi): Set default device (once this concept is introduced in\n # Chainer).\n device = get_device(device_spec)\n return device.create_context()\n\n\ndef get_array_module(*args):\n \"\"\"Gets an appropriate NumPy-compatible module to process arguments\n\n This function will return their data arrays' array module for\n :class:`~chainer.Variable` arguments.\n\n Args:\n args: Values to determine whether NumPy, CuPy, or ChainerX should be\n used.\n\n Returns:\n module: :mod:`numpy`, :mod:`cupy`, or :mod:`chainerx` is returned based\n on the types of the arguments.\n\n \"\"\"\n is_chainerx_available = chainerx.is_available()\n if is_chainerx_available or cuda.available:\n arrays = []\n for arg in args:\n # Unwrap arrays\n if isinstance(arg, chainer.variable.Variable):\n array = arg.data\n else:\n array = arg\n if is_chainerx_available and isinstance(array, chainerx.ndarray):\n return chainerx\n arrays.append(array)\n if cuda.available:\n return cuda.cupy.get_array_module(*arrays)\n return numpy\n\n\ndef get_device_from_array(*arrays):\n \"\"\"Gets the device from arrays.\n\n The device on which the given array reside is returned.\n\n .. note::\n\n Unlike :func:`get_array_module`, this method does not recognize\n :class:`~chainer.Variable` objects.\n If you need to get device from the :class:`~chainer.Variable` instance\n ``v``, you need to use ``get_device_from_array(v.array)``.\n\n Args:\n arrays (array or list of arrays):\n Arrays to determine the device. If multiple arrays are given, the\n device correspoinding to the first array which is not NumPy array\n is returned.\n\n Returns:\n chainer.backend.Device: Device instance.\n \"\"\"\n for array in arrays:\n device = GpuDevice.from_array(array)\n if device is not None:\n return device\n\n if isinstance(array, chainerx.ndarray):\n return ChainerxDevice(array.device)\n\n device = Intel64Device.from_array(array)\n if device is not None:\n return device\n\n return CpuDevice()\n", "path": "chainer/backend.py"}]} | 3,247 | 209 |
gh_patches_debug_34299 | rasdani/github-patches | git_diff | quantumlib__Cirq-4642 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
importlib.abc in Python 3.10
**Description of the issue**
In Python 3.10.0, the command `import cirq` fails with the error:
```
class InstrumentedFinder(importlib.abc.MetaPathFinder):
AttributeError: module 'importlib' has no attribute 'abc'. Did you mean: '_abc'?
```
**Workaround**
If one imports `importlib.abc` prior to importing cirq, no error occurs:
```python
from importlib import abc
import cirq
```
**Suggestion**
You should probably add `from importlib import abc` somewhere in Cirq's code.
Searching on Google, I've found a similar issue in another project: [grpc/issues/26062](https://github.com/grpc/grpc/issues/26062)
**Cirq version**
0.13.1
</issue>
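For illustration, the suggested workaround corresponds to a one-line change at the definition site; the sketch below reuses the class name from `cirq-core/cirq/_import.py` but is only a sketch of the idea, not necessarily the exact patch the project applied.
```python
# Sketch: bind the submodule explicitly so the attribute resolves on Python 3.10.
from importlib import abc

class InstrumentedFinder(abc.MetaPathFinder):
    """Same base class as before, now reached via the explicit submodule import."""
```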
<code>
[start of cirq-core/cirq/_import.py]
1 # Copyright 2019 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from typing import Any, Callable, List, Optional
16
17 from contextlib import contextmanager
18 import importlib
19 import sys
20
21 # Bug workaround: https://github.com/python/mypy/issues/1498
22 ModuleType = Any
23
24
25 class InstrumentedFinder(importlib.abc.MetaPathFinder):
26 """A module finder used to hook the python import statement."""
27
28 def __init__(
29 self,
30 finder: Any,
31 module_name: str,
32 wrap_module: Callable[[ModuleType], Optional[ModuleType]],
33 after_exec: Callable[[ModuleType], None],
34 ):
35 """A module finder that uses an existing module finder to find a python
36 module spec and intercept the execution of matching modules.
37
38 Replace finders in `sys.meta_path` with instances of this class to
39 instrument import statements.
40
41 Args:
42 finder: The original module finder to wrap.
43 module_name: The fully qualified module name to instrument e.g.
44 `'pkg.submodule'`. Submodules of this are also instrumented.
45 wrap_module: A callback function that takes a module object before
46 it is run and either modifies or replaces it before it is run.
47 The module returned by this function will be executed. If None
48 is returned the module is not executed and may be executed
49 later.
50 after_exec: A callback function that is called with the return value
51 of `wrap_module` after that module was executed if `wrap_module`
52 didn't return None.
53 """
54
55 self.finder = finder
56 self.module_name = module_name
57 self.match_components: List[str] = []
58 if self.module_name:
59 self.match_components = self.module_name.split('.')
60 self.wrap_module = wrap_module
61 self.after_exec = after_exec
62
63 def find_spec(self, fullname: str, path: Any = None, target: Any = None) -> Any:
64 components = fullname.split('.')
65 spec = self.finder.find_spec(fullname, path=path, target=target)
66 if spec is None:
67 return None
68 if components[: len(self.match_components)] == self.match_components:
69 spec = self.wrap_spec(spec)
70 return spec
71
72 def wrap_spec(self, spec: Any) -> Any:
73 spec.loader = InstrumentedLoader(spec.loader, self.wrap_module, self.after_exec)
74 return spec
75
76
77 class InstrumentedLoader(importlib.abc.Loader):
78 """A module loader used to hook the python import statement."""
79
80 def __init__(
81 self,
82 loader: Any,
83 wrap_module: Callable[[ModuleType], Optional[ModuleType]],
84 after_exec: Callable[[ModuleType], None],
85 ):
86 """A module loader that uses an existing module loader and intercepts
87 the execution of a module.
88
89 Use `InstrumentedFinder` to instrument modules with instances of this
90 class.
91
92 Args:
93 loader: The original module loader to wrap.
94 module_name: The fully qualified module name to instrument e.g.
95 `'pkg.submodule'`. Submodules of this are also instrumented.
96 wrap_module: A callback function that takes a module object before
97 it is run and either modifies or replaces it before it is run.
98 The module returned by this function will be executed. If None
99 is returned the module is not executed and may be executed
100 later.
101 after_exec: A callback function that is called with the return value
102 of `wrap_module` after that module was executed if `wrap_module`
103 didn't return None.
104 """
105 self.loader = loader
106 self.wrap_module = wrap_module
107 self.after_exec = after_exec
108
109 def create_module(self, spec: ModuleType) -> ModuleType:
110 return self.loader.create_module(spec)
111
112 def exec_module(self, module: ModuleType) -> None:
113 module = self.wrap_module(module)
114 if module is not None:
115 self.loader.exec_module(module)
116 self.after_exec(module)
117
118
119 @contextmanager
120 def wrap_module_executions(
121 module_name: str,
122 wrap_func: Callable[[ModuleType], Optional[ModuleType]],
123 after_exec: Callable[[ModuleType], None] = lambda m: None,
124 assert_meta_path_unchanged: bool = True,
125 ):
126 """A context manager that hooks python's import machinery within the
127 context.
128
129 `wrap_func` is called before executing the module called `module_name` and
130 any of its submodules. The module returned by `wrap_func` will be executed.
131 """
132
133 def wrap(finder: Any) -> Any:
134 if not hasattr(finder, 'find_spec'):
135 return finder
136 return InstrumentedFinder(finder, module_name, wrap_func, after_exec)
137
138 new_meta_path = [wrap(finder) for finder in sys.meta_path]
139
140 try:
141 orig_meta_path, sys.meta_path = sys.meta_path, new_meta_path
142 yield
143 finally:
144 if assert_meta_path_unchanged:
145 assert sys.meta_path == new_meta_path
146 sys.meta_path = orig_meta_path
147
148
149 @contextmanager
150 def delay_import(module_name: str):
151 """A context manager that allows the module or submodule named `module_name`
152 to be imported without the contents of the module executing until the
153 context manager exits.
154 """
155 delay = True
156 execute_list = []
157
158 def wrap_func(module: ModuleType) -> Optional[ModuleType]:
159 if delay:
160 execute_list.append(module)
161 return None # Don't allow the module to be executed yet
162 return module # Now allow the module to be executed
163
164 with wrap_module_executions(module_name, wrap_func):
165 importlib.import_module(module_name)
166
167 yield # Run the body of the context
168
169 delay = False
170 for module in execute_list:
171 module.__loader__.exec_module(module) # Calls back into wrap_func
172
[end of cirq-core/cirq/_import.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cirq-core/cirq/_import.py b/cirq-core/cirq/_import.py
--- a/cirq-core/cirq/_import.py
+++ b/cirq-core/cirq/_import.py
@@ -12,17 +12,18 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import Any, Callable, List, Optional
+from typing import Any, Callable, cast, List, Optional
+from types import ModuleType
+from importlib.machinery import ModuleSpec
+from importlib.abc import Loader
from contextlib import contextmanager
import importlib
+from importlib import abc
import sys
-# Bug workaround: https://github.com/python/mypy/issues/1498
-ModuleType = Any
-
-class InstrumentedFinder(importlib.abc.MetaPathFinder):
+class InstrumentedFinder(abc.MetaPathFinder):
"""A module finder used to hook the python import statement."""
def __init__(
@@ -74,7 +75,7 @@
return spec
-class InstrumentedLoader(importlib.abc.Loader):
+class InstrumentedLoader(abc.Loader):
"""A module loader used to hook the python import statement."""
def __init__(
@@ -106,12 +107,12 @@
self.wrap_module = wrap_module
self.after_exec = after_exec
- def create_module(self, spec: ModuleType) -> ModuleType:
+ def create_module(self, spec: ModuleSpec) -> ModuleType:
return self.loader.create_module(spec)
def exec_module(self, module: ModuleType) -> None:
- module = self.wrap_module(module)
- if module is not None:
+ wrapped_module = self.wrap_module(module)
+ if wrapped_module is not None:
self.loader.exec_module(module)
self.after_exec(module)
@@ -168,4 +169,5 @@
delay = False
for module in execute_list:
- module.__loader__.exec_module(module) # Calls back into wrap_func
+ if module.__loader__ is not None and hasattr(module.__loader__, 'exec_module'):
+ cast(Loader, module.__loader__).exec_module(module) # Calls back into wrap_func
| {"golden_diff": "diff --git a/cirq-core/cirq/_import.py b/cirq-core/cirq/_import.py\n--- a/cirq-core/cirq/_import.py\n+++ b/cirq-core/cirq/_import.py\n@@ -12,17 +12,18 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-from typing import Any, Callable, List, Optional\n+from typing import Any, Callable, cast, List, Optional\n+from types import ModuleType\n+from importlib.machinery import ModuleSpec\n+from importlib.abc import Loader\n \n from contextlib import contextmanager\n import importlib\n+from importlib import abc\n import sys\n \n-# Bug workaround: https://github.com/python/mypy/issues/1498\n-ModuleType = Any\n \n-\n-class InstrumentedFinder(importlib.abc.MetaPathFinder):\n+class InstrumentedFinder(abc.MetaPathFinder):\n \"\"\"A module finder used to hook the python import statement.\"\"\"\n \n def __init__(\n@@ -74,7 +75,7 @@\n return spec\n \n \n-class InstrumentedLoader(importlib.abc.Loader):\n+class InstrumentedLoader(abc.Loader):\n \"\"\"A module loader used to hook the python import statement.\"\"\"\n \n def __init__(\n@@ -106,12 +107,12 @@\n self.wrap_module = wrap_module\n self.after_exec = after_exec\n \n- def create_module(self, spec: ModuleType) -> ModuleType:\n+ def create_module(self, spec: ModuleSpec) -> ModuleType:\n return self.loader.create_module(spec)\n \n def exec_module(self, module: ModuleType) -> None:\n- module = self.wrap_module(module)\n- if module is not None:\n+ wrapped_module = self.wrap_module(module)\n+ if wrapped_module is not None:\n self.loader.exec_module(module)\n self.after_exec(module)\n \n@@ -168,4 +169,5 @@\n \n delay = False\n for module in execute_list:\n- module.__loader__.exec_module(module) # Calls back into wrap_func\n+ if module.__loader__ is not None and hasattr(module.__loader__, 'exec_module'):\n+ cast(Loader, module.__loader__).exec_module(module) # Calls back into wrap_func\n", "issue": "importlib.abc in Python 3.10\n**Description of the issue**\r\n\r\nIn Python 3.10.0, the command `import cirq` fails with the error:\r\n\r\n```\r\nclass InstrumentedFinder(importlib.abc.MetaPathFinder):\r\nAttributeError: module 'importlib' has no attribute 'abc'. Did you mean: '_abc'? 
\r\n```\r\n\r\n**Workaround**\r\n\r\nIf one imports `importlib.abc` prior to importing cirq, no error occurs:\r\n\r\n```python\r\nfrom importlib import abc\r\nimport cirq\r\n```\r\n\r\n**Suggestion**\r\n\r\nProbably you should add `from importlib import abc` somewhere in the \u0421irq's code.\r\n\r\nSearching on Google, I've found a similar issue in another project: [grpc/issues/26062](https://github.com/grpc/grpc/issues/26062)\r\n\r\n**Cirq version**\r\n0.13.1\r\n\r\n\n", "before_files": [{"content": "# Copyright 2019 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Any, Callable, List, Optional\n\nfrom contextlib import contextmanager\nimport importlib\nimport sys\n\n# Bug workaround: https://github.com/python/mypy/issues/1498\nModuleType = Any\n\n\nclass InstrumentedFinder(importlib.abc.MetaPathFinder):\n \"\"\"A module finder used to hook the python import statement.\"\"\"\n\n def __init__(\n self,\n finder: Any,\n module_name: str,\n wrap_module: Callable[[ModuleType], Optional[ModuleType]],\n after_exec: Callable[[ModuleType], None],\n ):\n \"\"\"A module finder that uses an existing module finder to find a python\n module spec and intercept the execution of matching modules.\n\n Replace finders in `sys.meta_path` with instances of this class to\n instrument import statements.\n\n Args:\n finder: The original module finder to wrap.\n module_name: The fully qualified module name to instrument e.g.\n `'pkg.submodule'`. Submodules of this are also instrumented.\n wrap_module: A callback function that takes a module object before\n it is run and either modifies or replaces it before it is run.\n The module returned by this function will be executed. 
If None\n is returned the module is not executed and may be executed\n later.\n after_exec: A callback function that is called with the return value\n of `wrap_module` after that module was executed if `wrap_module`\n didn't return None.\n \"\"\"\n\n self.finder = finder\n self.module_name = module_name\n self.match_components: List[str] = []\n if self.module_name:\n self.match_components = self.module_name.split('.')\n self.wrap_module = wrap_module\n self.after_exec = after_exec\n\n def find_spec(self, fullname: str, path: Any = None, target: Any = None) -> Any:\n components = fullname.split('.')\n spec = self.finder.find_spec(fullname, path=path, target=target)\n if spec is None:\n return None\n if components[: len(self.match_components)] == self.match_components:\n spec = self.wrap_spec(spec)\n return spec\n\n def wrap_spec(self, spec: Any) -> Any:\n spec.loader = InstrumentedLoader(spec.loader, self.wrap_module, self.after_exec)\n return spec\n\n\nclass InstrumentedLoader(importlib.abc.Loader):\n \"\"\"A module loader used to hook the python import statement.\"\"\"\n\n def __init__(\n self,\n loader: Any,\n wrap_module: Callable[[ModuleType], Optional[ModuleType]],\n after_exec: Callable[[ModuleType], None],\n ):\n \"\"\"A module loader that uses an existing module loader and intercepts\n the execution of a module.\n\n Use `InstrumentedFinder` to instrument modules with instances of this\n class.\n\n Args:\n loader: The original module loader to wrap.\n module_name: The fully qualified module name to instrument e.g.\n `'pkg.submodule'`. Submodules of this are also instrumented.\n wrap_module: A callback function that takes a module object before\n it is run and either modifies or replaces it before it is run.\n The module returned by this function will be executed. If None\n is returned the module is not executed and may be executed\n later.\n after_exec: A callback function that is called with the return value\n of `wrap_module` after that module was executed if `wrap_module`\n didn't return None.\n \"\"\"\n self.loader = loader\n self.wrap_module = wrap_module\n self.after_exec = after_exec\n\n def create_module(self, spec: ModuleType) -> ModuleType:\n return self.loader.create_module(spec)\n\n def exec_module(self, module: ModuleType) -> None:\n module = self.wrap_module(module)\n if module is not None:\n self.loader.exec_module(module)\n self.after_exec(module)\n\n\n@contextmanager\ndef wrap_module_executions(\n module_name: str,\n wrap_func: Callable[[ModuleType], Optional[ModuleType]],\n after_exec: Callable[[ModuleType], None] = lambda m: None,\n assert_meta_path_unchanged: bool = True,\n):\n \"\"\"A context manager that hooks python's import machinery within the\n context.\n\n `wrap_func` is called before executing the module called `module_name` and\n any of its submodules. 
The module returned by `wrap_func` will be executed.\n \"\"\"\n\n def wrap(finder: Any) -> Any:\n if not hasattr(finder, 'find_spec'):\n return finder\n return InstrumentedFinder(finder, module_name, wrap_func, after_exec)\n\n new_meta_path = [wrap(finder) for finder in sys.meta_path]\n\n try:\n orig_meta_path, sys.meta_path = sys.meta_path, new_meta_path\n yield\n finally:\n if assert_meta_path_unchanged:\n assert sys.meta_path == new_meta_path\n sys.meta_path = orig_meta_path\n\n\n@contextmanager\ndef delay_import(module_name: str):\n \"\"\"A context manager that allows the module or submodule named `module_name`\n to be imported without the contents of the module executing until the\n context manager exits.\n \"\"\"\n delay = True\n execute_list = []\n\n def wrap_func(module: ModuleType) -> Optional[ModuleType]:\n if delay:\n execute_list.append(module)\n return None # Don't allow the module to be executed yet\n return module # Now allow the module to be executed\n\n with wrap_module_executions(module_name, wrap_func):\n importlib.import_module(module_name)\n\n yield # Run the body of the context\n\n delay = False\n for module in execute_list:\n module.__loader__.exec_module(module) # Calls back into wrap_func\n", "path": "cirq-core/cirq/_import.py"}]} | 2,528 | 496 |
gh_patches_debug_24558 | rasdani/github-patches | git_diff | marshmallow-code__webargs-43 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Pyramid parser use_kwargs throws exception when used
The following code using the pyramid parser throws an exception:
``` python
@parser.use_kwargs({'myvalue': Arg(int)})
def baz(request, myvalue):
return {'myvalue': myvalue}
```
The exception:
```
kwargs['as_kwargs'] = True
> return self.use_args(*args, **kwargs)
E TypeError: use_args() got an unexpected keyword argument 'as_kwargs'
```
</issue>
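As a standalone illustration of what `as_kwargs` support means (parsed values injected as keyword arguments instead of being passed as one positional dict), here is a deliberately simplified toy decorator; it is not the webargs implementation.
```python
import functools

def use_kwargs(parsed_args):
    """Toy stand-in: a real parser would extract `parsed_args` from the request."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(request, *args, **kwargs):
            kwargs.update(parsed_args)          # inject as keyword arguments
            return func(request, *args, **kwargs)
        return wrapper
    return decorator

@use_kwargs({'myvalue': 42})
def baz(request, myvalue):
    return {'myvalue': myvalue}

print(baz(object()))   # {'myvalue': 42}
```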
<code>
[start of webargs/pyramidparser.py]
1 # -*- coding: utf-8 -*-
2 """Pyramid request argument parsing.
3
4 Example usage: ::
5
6 from wsgiref.simple_server import make_server
7 from pyramid.config import Configurator
8 from pyramid.response import Response
9 from webargs import Arg
10 from webargs.pyramidparser import use_args
11
12 hello_args = {
13 'name': Arg(str, default='World')
14 }
15
16 @use_args(hello_args)
17 def hello_world(request, args):
18 return Response('Hello ' + args['name'])
19
20 if __name__ == '__main__':
21 config = Configurator()
22 config.add_route('hello', '/')
23 config.add_view(hello_world, route_name='hello')
24 app = config.make_wsgi_app()
25 server = make_server('0.0.0.0', 6543, app)
26 server.serve_forever()
27 """
28 import functools
29 import logging
30
31 from webob.multidict import MultiDict
32 from pyramid.httpexceptions import exception_response
33
34 from webargs import core
35 from webargs.core import text_type
36
37 logger = logging.getLogger(__name__)
38
39 class PyramidParser(core.Parser):
40 """Pyramid request argument parser."""
41
42 def parse_querystring(self, req, name, arg):
43 """Pull a querystring value from the request."""
44 return core.get_value(req.GET, name, arg.multiple)
45
46 def parse_form(self, req, name, arg):
47 """Pull a form value from the request."""
48 return core.get_value(req.POST, name, arg.multiple)
49
50 def parse_json(self, req, name, arg):
51 """Pull a json value from the request."""
52 try:
53 json_data = req.json_body
54 except ValueError:
55 return core.Missing
56
57 return core.get_value(json_data, name, arg.multiple)
58
59 def parse_cookies(self, req, name, arg):
60 """Pull the value from the cookiejar."""
61 return core.get_value(req.cookies, name, arg.multiple)
62
63 def parse_headers(self, req, name, arg):
64 """Pull a value from the header data."""
65 return core.get_value(req.headers, name, arg.multiple)
66
67 def parse_files(self, req, name, arg):
68 """Pull a file from the request."""
69 files = ((k, v) for k, v in req.POST.items() if hasattr(v, 'file'))
70 return core.get_value(MultiDict(files), name, arg.multiple)
71
72 def handle_error(self, error):
73 """Handles errors during parsing. Aborts the current HTTP request and
74 responds with a 400 error.
75 """
76 logger.error(error)
77 status_code = getattr(error, 'status_code', 400)
78 data = getattr(error, 'data', {})
79 raise exception_response(status_code, detail=text_type(error), **data)
80
81 def use_args(self, argmap, req=None, locations=core.Parser.DEFAULT_LOCATIONS,
82 validate=None):
83 """Decorator that injects parsed arguments into a view callable.
84 Supports the *Class-based View* pattern where `request` is saved as an instance
85 attribute on a view class.
86
87 :param dict argmap: Dictionary of argument_name:Arg object pairs.
88 :param req: The request object to parse
89 :param tuple locations: Where on the request to search for values.
90 :param callable validate:
91 Validation function that receives the dictionary of parsed arguments.
92 If the function returns ``False``, the parser will raise a
93 :exc:`ValidationError`.
94 """
95 def decorator(func):
96 @functools.wraps(func)
97 def wrapper(obj, *args, **kwargs):
98 # The first argument is either `self` or `request`
99 try: # get self.request
100 request = obj.request
101 except AttributeError: # first arg is request
102 request = obj
103 parsed_args = self.parse(argmap, req=request, locations=locations,
104 validate=None)
105 return func(obj, parsed_args, *args, **kwargs)
106 return wrapper
107 return decorator
108
109 parser = PyramidParser()
110 use_args = parser.use_args
111 use_kwargs = parser.use_kwargs
112
[end of webargs/pyramidparser.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/webargs/pyramidparser.py b/webargs/pyramidparser.py
--- a/webargs/pyramidparser.py
+++ b/webargs/pyramidparser.py
@@ -79,7 +79,7 @@
raise exception_response(status_code, detail=text_type(error), **data)
def use_args(self, argmap, req=None, locations=core.Parser.DEFAULT_LOCATIONS,
- validate=None):
+ as_kwargs=False, validate=None):
"""Decorator that injects parsed arguments into a view callable.
Supports the *Class-based View* pattern where `request` is saved as an instance
attribute on a view class.
@@ -102,7 +102,11 @@
request = obj
parsed_args = self.parse(argmap, req=request, locations=locations,
validate=None)
- return func(obj, parsed_args, *args, **kwargs)
+ if as_kwargs:
+ kwargs.update(parsed_args)
+ return func(obj, *args, **kwargs)
+ else:
+ return func(obj, parsed_args, *args, **kwargs)
return wrapper
return decorator
| {"golden_diff": "diff --git a/webargs/pyramidparser.py b/webargs/pyramidparser.py\n--- a/webargs/pyramidparser.py\n+++ b/webargs/pyramidparser.py\n@@ -79,7 +79,7 @@\n raise exception_response(status_code, detail=text_type(error), **data)\n \n def use_args(self, argmap, req=None, locations=core.Parser.DEFAULT_LOCATIONS,\n- validate=None):\n+ as_kwargs=False, validate=None):\n \"\"\"Decorator that injects parsed arguments into a view callable.\n Supports the *Class-based View* pattern where `request` is saved as an instance\n attribute on a view class.\n@@ -102,7 +102,11 @@\n request = obj\n parsed_args = self.parse(argmap, req=request, locations=locations,\n validate=None)\n- return func(obj, parsed_args, *args, **kwargs)\n+ if as_kwargs:\n+ kwargs.update(parsed_args)\n+ return func(obj, *args, **kwargs)\n+ else:\n+ return func(obj, parsed_args, *args, **kwargs)\n return wrapper\n return decorator\n", "issue": "Pyramid parser use_kwargs throws exception when used\nThe following code using the pyramid parser throws an exception:\n\n``` python\[email protected]_kwargs({'myvalue': Arg(int)})\ndef baz(request, myvalue):\n return {'myvalue': myvalue}\n```\n\nThe exception:\n\n```\n kwargs['as_kwargs'] = True\n> return self.use_args(*args, **kwargs)\nE TypeError: use_args() got an unexpected keyword argument 'as_kwargs'\n```\n\nPyramid parser use_kwargs throws exception when used\nThe following code using the pyramid parser throws an exception:\n\n``` python\[email protected]_kwargs({'myvalue': Arg(int)})\ndef baz(request, myvalue):\n return {'myvalue': myvalue}\n```\n\nThe exception:\n\n```\n kwargs['as_kwargs'] = True\n> return self.use_args(*args, **kwargs)\nE TypeError: use_args() got an unexpected keyword argument 'as_kwargs'\n```\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Pyramid request argument parsing.\n\nExample usage: ::\n\n from wsgiref.simple_server import make_server\n from pyramid.config import Configurator\n from pyramid.response import Response\n from webargs import Arg\n from webargs.pyramidparser import use_args\n\n hello_args = {\n 'name': Arg(str, default='World')\n }\n\n @use_args(hello_args)\n def hello_world(request, args):\n return Response('Hello ' + args['name'])\n\n if __name__ == '__main__':\n config = Configurator()\n config.add_route('hello', '/')\n config.add_view(hello_world, route_name='hello')\n app = config.make_wsgi_app()\n server = make_server('0.0.0.0', 6543, app)\n server.serve_forever()\n\"\"\"\nimport functools\nimport logging\n\nfrom webob.multidict import MultiDict\nfrom pyramid.httpexceptions import exception_response\n\nfrom webargs import core\nfrom webargs.core import text_type\n\nlogger = logging.getLogger(__name__)\n\nclass PyramidParser(core.Parser):\n \"\"\"Pyramid request argument parser.\"\"\"\n\n def parse_querystring(self, req, name, arg):\n \"\"\"Pull a querystring value from the request.\"\"\"\n return core.get_value(req.GET, name, arg.multiple)\n\n def parse_form(self, req, name, arg):\n \"\"\"Pull a form value from the request.\"\"\"\n return core.get_value(req.POST, name, arg.multiple)\n\n def parse_json(self, req, name, arg):\n \"\"\"Pull a json value from the request.\"\"\"\n try:\n json_data = req.json_body\n except ValueError:\n return core.Missing\n\n return core.get_value(json_data, name, arg.multiple)\n\n def parse_cookies(self, req, name, arg):\n \"\"\"Pull the value from the cookiejar.\"\"\"\n return core.get_value(req.cookies, name, arg.multiple)\n\n def parse_headers(self, req, name, arg):\n 
\"\"\"Pull a value from the header data.\"\"\"\n return core.get_value(req.headers, name, arg.multiple)\n\n def parse_files(self, req, name, arg):\n \"\"\"Pull a file from the request.\"\"\"\n files = ((k, v) for k, v in req.POST.items() if hasattr(v, 'file'))\n return core.get_value(MultiDict(files), name, arg.multiple)\n\n def handle_error(self, error):\n \"\"\"Handles errors during parsing. Aborts the current HTTP request and\n responds with a 400 error.\n \"\"\"\n logger.error(error)\n status_code = getattr(error, 'status_code', 400)\n data = getattr(error, 'data', {})\n raise exception_response(status_code, detail=text_type(error), **data)\n\n def use_args(self, argmap, req=None, locations=core.Parser.DEFAULT_LOCATIONS,\n validate=None):\n \"\"\"Decorator that injects parsed arguments into a view callable.\n Supports the *Class-based View* pattern where `request` is saved as an instance\n attribute on a view class.\n\n :param dict argmap: Dictionary of argument_name:Arg object pairs.\n :param req: The request object to parse\n :param tuple locations: Where on the request to search for values.\n :param callable validate:\n Validation function that receives the dictionary of parsed arguments.\n If the function returns ``False``, the parser will raise a\n :exc:`ValidationError`.\n \"\"\"\n def decorator(func):\n @functools.wraps(func)\n def wrapper(obj, *args, **kwargs):\n # The first argument is either `self` or `request`\n try: # get self.request\n request = obj.request\n except AttributeError: # first arg is request\n request = obj\n parsed_args = self.parse(argmap, req=request, locations=locations,\n validate=None)\n return func(obj, parsed_args, *args, **kwargs)\n return wrapper\n return decorator\n\nparser = PyramidParser()\nuse_args = parser.use_args\nuse_kwargs = parser.use_kwargs\n", "path": "webargs/pyramidparser.py"}]} | 1,842 | 244 |
gh_patches_debug_2647 | rasdani/github-patches | git_diff | dj-stripe__dj-stripe-1312 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Issue when attempting to sync tiered Price Model in 2.4.2
**Describe the bug**
It looks like 9bd896ffd944e809b95abae884a2149dc8a79f27 introduced a regression when trying to sync a tiered Price model. Probably Price is not the only model affected.
Check out this trace:
```
$ ./manage.py djstripe_sync_models Price
Syncing Price:
INFO stripe.log_info:64- message='Request to Stripe api' method=get path=https://api.stripe.com/v1/prices?expand[0]=data.tiers
INFO stripe.log_info:64- message='Stripe API response' path=https://api.stripe.com/v1/prices?expand[0]=data.tiers response_code=200
id=price_1IFltoFz0jfFqjGsm5fbXWt5, pk=6 (xxx)
id=price_1IFe29Fz0jfFqjGsTpBrPQql, pk=1 (xxx)
id=price_1IFe29Fz0jfFqjGslZM7rvu1, pk=2 (xxx)
id=price_1IFe28Fz0jfFqjGsM0SIOAa6, pk=3 (xxx)
id=price_1IFe27Fz0jfFqjGsEN4c0MxR, pk=4 (xxx)
id=price_1IFe23Fz0jfFqjGsbFrlPDSi, pk=5 (xxx)
INFO stripe.log_info:64- message='Request to Stripe api' method=get path=https://api.stripe.com/v1/prices
INFO stripe.log_info:64- message='Stripe API response' path=https://api.stripe.com/v1/prices response_code=200
id=price_1IFltoFz0jfFqjGsm5fbXWt5, pk=6 (xxx)
id=price_1IFe29Fz0jfFqjGsTpBrPQql, pk=1 (xxx)
id=price_1IFe29Fz0jfFqjGslZM7rvu1, pk=2 (xxx)
id=price_1IFe28Fz0jfFqjGsM0SIOAa6, pk=3 (xxx)
id=price_1IFe27Fz0jfFqjGsEN4c0MxR, pk=4 (xxx)
id=price_1IFe23Fz0jfFqjGsbFrlPDSi, pk=5 (xxx)
Synced 12 Price
```
The Price objects are synced twice: the first time with the `tiers` attribute expanded, and the second time without expanding it, which overwrites the first result, so the final object doesn't include tiers.
**Software versions**
- dj-stripe version: 2.4.2
- Python version: 3.7
- Django version: 3.0.11
- Stripe API version: 2.55
- Database type and version: postgresql 10.10
**Steps To Reproduce**
1. Create tiered Price and add tiers in Stripe Dashboard
2. Sync Price models with manage command
**Can you reproduce the issue with the latest version of master?**
Yes, both 2.4.2 and master are affected (2.4.1 is not affected)
**Expected Behavior**
The Price Model should have the tiers JSONField object populated.
</issue>
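To make the double sync described in the issue concrete, here is a minimal sketch of the kwargs list that `get_list_kwargs` builds for a model with `expand_fields`, such as Price; the `"data.tiers"` value is taken from the expand parameter visible in the trace above, and the rest mirrors the management command shown below.

```python
# Minimal sketch of the kwargs list built for Price (expand_fields = ["tiers"]).
all_list_kwargs = [{"expand": ["data.tiers"]}]  # added because expand_fields is non-empty

# Price is neither PaymentMethod nor SubscriptionItem, so the unconditional
# else-branch appends a second, bare query as well.
all_list_kwargs.append({})

# sync_model() then calls model.api_list(**kwargs) once per entry, so every
# Price is fetched a second time without the expanded "tiers" and overwritten.
print(all_list_kwargs)  # [{'expand': ['data.tiers']}, {}]
```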
<code>
[start of djstripe/management/commands/djstripe_sync_models.py]
1 from typing import List
2
3 from django.apps import apps
4 from django.core.management.base import BaseCommand, CommandError
5
6 from ... import models, settings
7
8
9 class Command(BaseCommand):
10 """Sync models from stripe."""
11
12 help = "Sync models from stripe."
13
14 def add_arguments(self, parser):
15 parser.add_argument(
16 "args",
17 metavar="ModelName",
18 nargs="*",
19 help="restricts sync to these model names (default is to sync all "
20 "supported models)",
21 )
22
23 def handle(self, *args, **options):
24 app_label = "djstripe"
25 app_config = apps.get_app_config(app_label)
26 model_list = [] # type: List[models.StripeModel]
27
28 if args:
29 for model_label in args:
30 try:
31 model = app_config.get_model(model_label)
32 except LookupError:
33 raise CommandError(
34 "Unknown model: {}.{}".format(app_label, model_label)
35 )
36
37 model_list.append(model)
38 else:
39 model_list = app_config.get_models()
40
41 for model in model_list:
42 self.sync_model(model)
43
44 def _should_sync_model(self, model):
45 if not issubclass(model, models.StripeModel):
46 return False, "not a StripeModel"
47
48 if model.stripe_class is None:
49 return False, "no stripe_class"
50
51 if not hasattr(model.stripe_class, "list"):
52 return False, "no stripe_class.list"
53
54 if model is models.UpcomingInvoice:
55 return False, "Upcoming Invoices are virtual only"
56
57 if not settings.STRIPE_LIVE_MODE:
58 if model is models.ScheduledQueryRun:
59 return False, "only available in live mode"
60
61 return True, ""
62
63 def sync_model(self, model):
64 model_name = model.__name__
65
66 should_sync, reason = self._should_sync_model(model)
67 if not should_sync:
68 self.stdout.write(f"Skipping {model}: {reason}")
69 return
70
71 self.stdout.write("Syncing {}:".format(model_name))
72
73 count = 0
74 for list_kwargs in self.get_list_kwargs(model):
75 try:
76 if model is models.Account:
77 # special case, since own account isn't returned by Account.api_list
78 stripe_obj = models.Account.stripe_class.retrieve(
79 api_key=settings.STRIPE_SECRET_KEY
80 )
81 count += 1
82 djstripe_obj = model.sync_from_stripe_data(stripe_obj)
83 self.stdout.write(
84 " id={id}, pk={pk} ({djstripe_obj})".format(
85 id=djstripe_obj.id,
86 pk=djstripe_obj.pk,
87 djstripe_obj=djstripe_obj,
88 )
89 )
90
91 for stripe_obj in model.api_list(**list_kwargs):
92 count += 1
93 djstripe_obj = model.sync_from_stripe_data(stripe_obj)
94 self.stdout.write(
95 " id={id}, pk={pk} ({djstripe_obj})".format(
96 id=djstripe_obj.id,
97 pk=djstripe_obj.pk,
98 djstripe_obj=djstripe_obj,
99 )
100 )
101
102 except Exception as e:
103 self.stderr.write(str(e))
104
105 if count == 0:
106 self.stdout.write(" (no results)")
107 else:
108 self.stdout.write(
109 " Synced {count} {model_name}".format(
110 count=count, model_name=model_name
111 )
112 )
113
114 def get_list_kwargs(self, model):
115 """
116 Returns a sequence of kwargs dicts to pass to model.api_list
117
118 This allows us to sync models that require parameters to api_list
119
120 :param model:
121 :return: Sequence[dict]
122 """
123 all_list_kwargs = (
124 [{"expand": [f"data.{k}" for k in model.expand_fields]}]
125 if model.expand_fields
126 else []
127 )
128 if model is models.PaymentMethod:
129 # special case
130 all_list_kwargs.extend(
131 (
132 {"customer": stripe_customer.id, "type": "card"}
133 for stripe_customer in models.Customer.api_list()
134 )
135 )
136 elif model is models.SubscriptionItem:
137 all_list_kwargs.extend(
138 (
139 {"subscription": subscription.id}
140 for subscription in models.Subscription.api_list()
141 )
142 )
143 else:
144 all_list_kwargs.append({})
145
146 return all_list_kwargs
147
[end of djstripe/management/commands/djstripe_sync_models.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/djstripe/management/commands/djstripe_sync_models.py b/djstripe/management/commands/djstripe_sync_models.py
--- a/djstripe/management/commands/djstripe_sync_models.py
+++ b/djstripe/management/commands/djstripe_sync_models.py
@@ -140,7 +140,7 @@
for subscription in models.Subscription.api_list()
)
)
- else:
+ elif not all_list_kwargs:
all_list_kwargs.append({})
return all_list_kwargs
| {"golden_diff": "diff --git a/djstripe/management/commands/djstripe_sync_models.py b/djstripe/management/commands/djstripe_sync_models.py\n--- a/djstripe/management/commands/djstripe_sync_models.py\n+++ b/djstripe/management/commands/djstripe_sync_models.py\n@@ -140,7 +140,7 @@\n for subscription in models.Subscription.api_list()\n )\n )\n- else:\n+ elif not all_list_kwargs:\n all_list_kwargs.append({})\n \n return all_list_kwargs\n", "issue": "Issue when attempting to sync tiered Price Model in 2.4.2\n**Describe the bug**\r\n\r\nIt looks like 9bd896ffd944e809b95abae884a2149dc8a79f27 introduced a regression when trying to sync a tiered Price model. Probably Price is not the only model affected.\r\n\r\nCheck out this trace:\r\n\r\n```\r\n$ ./manage.py djstripe_sync_models Price\r\nSyncing Price:\r\nINFO stripe.log_info:64- message='Request to Stripe api' method=get path=https://api.stripe.com/v1/prices?expand[0]=data.tiers\r\nINFO stripe.log_info:64- message='Stripe API response' path=https://api.stripe.com/v1/prices?expand[0]=data.tiers response_code=200\r\n id=price_1IFltoFz0jfFqjGsm5fbXWt5, pk=6 (xxx)\r\n id=price_1IFe29Fz0jfFqjGsTpBrPQql, pk=1 (xxx)\r\n id=price_1IFe29Fz0jfFqjGslZM7rvu1, pk=2 (xxx)\r\n id=price_1IFe28Fz0jfFqjGsM0SIOAa6, pk=3 (xxx)\r\n id=price_1IFe27Fz0jfFqjGsEN4c0MxR, pk=4 (xxx)\r\n id=price_1IFe23Fz0jfFqjGsbFrlPDSi, pk=5 (xxx)\r\nINFO stripe.log_info:64- message='Request to Stripe api' method=get path=https://api.stripe.com/v1/prices\r\nINFO stripe.log_info:64- message='Stripe API response' path=https://api.stripe.com/v1/prices response_code=200\r\n id=price_1IFltoFz0jfFqjGsm5fbXWt5, pk=6 (xxx)\r\n id=price_1IFe29Fz0jfFqjGsTpBrPQql, pk=1 (xxx)\r\n id=price_1IFe29Fz0jfFqjGslZM7rvu1, pk=2 (xxx)\r\n id=price_1IFe28Fz0jfFqjGsM0SIOAa6, pk=3 (xxx)\r\n id=price_1IFe27Fz0jfFqjGsEN4c0MxR, pk=4 (xxx)\r\n id=price_1IFe23Fz0jfFqjGsbFrlPDSi, pk=5 (xxx)\r\n Synced 12 Price\r\n```\r\n\r\nThe Price objects are synced twice. The first time with the tiers attribute expanded and the second time without expanding it and overwriting it, so the final object doesn't include tiers.\r\n\r\n**Software versions**\r\n- dj-stripe version: 2.4.2\r\n- Python version: 3.7\r\n- Django version: 3.0.11\r\n- Stripe API version: 2.55\r\n- Database type and version: postgresql 10.10\r\n\r\n**Steps To Reproduce**\r\n\r\n1. Create tiered Price and add tiers in Stripe Dashboard\r\n2. Sync Price models with manage command\r\n\r\n**Can you reproduce the issue with the latest version of master?**\r\n\r\nYes, both 2.4.2 and master are affected (2.4.1 is not affected)\r\n\r\n**Expected Behavior**\r\n\r\nThe Price Model should have the tiers JSONField object populated.\n", "before_files": [{"content": "from typing import List\n\nfrom django.apps import apps\nfrom django.core.management.base import BaseCommand, CommandError\n\nfrom ... 
import models, settings\n\n\nclass Command(BaseCommand):\n \"\"\"Sync models from stripe.\"\"\"\n\n help = \"Sync models from stripe.\"\n\n def add_arguments(self, parser):\n parser.add_argument(\n \"args\",\n metavar=\"ModelName\",\n nargs=\"*\",\n help=\"restricts sync to these model names (default is to sync all \"\n \"supported models)\",\n )\n\n def handle(self, *args, **options):\n app_label = \"djstripe\"\n app_config = apps.get_app_config(app_label)\n model_list = [] # type: List[models.StripeModel]\n\n if args:\n for model_label in args:\n try:\n model = app_config.get_model(model_label)\n except LookupError:\n raise CommandError(\n \"Unknown model: {}.{}\".format(app_label, model_label)\n )\n\n model_list.append(model)\n else:\n model_list = app_config.get_models()\n\n for model in model_list:\n self.sync_model(model)\n\n def _should_sync_model(self, model):\n if not issubclass(model, models.StripeModel):\n return False, \"not a StripeModel\"\n\n if model.stripe_class is None:\n return False, \"no stripe_class\"\n\n if not hasattr(model.stripe_class, \"list\"):\n return False, \"no stripe_class.list\"\n\n if model is models.UpcomingInvoice:\n return False, \"Upcoming Invoices are virtual only\"\n\n if not settings.STRIPE_LIVE_MODE:\n if model is models.ScheduledQueryRun:\n return False, \"only available in live mode\"\n\n return True, \"\"\n\n def sync_model(self, model):\n model_name = model.__name__\n\n should_sync, reason = self._should_sync_model(model)\n if not should_sync:\n self.stdout.write(f\"Skipping {model}: {reason}\")\n return\n\n self.stdout.write(\"Syncing {}:\".format(model_name))\n\n count = 0\n for list_kwargs in self.get_list_kwargs(model):\n try:\n if model is models.Account:\n # special case, since own account isn't returned by Account.api_list\n stripe_obj = models.Account.stripe_class.retrieve(\n api_key=settings.STRIPE_SECRET_KEY\n )\n count += 1\n djstripe_obj = model.sync_from_stripe_data(stripe_obj)\n self.stdout.write(\n \" id={id}, pk={pk} ({djstripe_obj})\".format(\n id=djstripe_obj.id,\n pk=djstripe_obj.pk,\n djstripe_obj=djstripe_obj,\n )\n )\n\n for stripe_obj in model.api_list(**list_kwargs):\n count += 1\n djstripe_obj = model.sync_from_stripe_data(stripe_obj)\n self.stdout.write(\n \" id={id}, pk={pk} ({djstripe_obj})\".format(\n id=djstripe_obj.id,\n pk=djstripe_obj.pk,\n djstripe_obj=djstripe_obj,\n )\n )\n\n except Exception as e:\n self.stderr.write(str(e))\n\n if count == 0:\n self.stdout.write(\" (no results)\")\n else:\n self.stdout.write(\n \" Synced {count} {model_name}\".format(\n count=count, model_name=model_name\n )\n )\n\n def get_list_kwargs(self, model):\n \"\"\"\n Returns a sequence of kwargs dicts to pass to model.api_list\n\n This allows us to sync models that require parameters to api_list\n\n :param model:\n :return: Sequence[dict]\n \"\"\"\n all_list_kwargs = (\n [{\"expand\": [f\"data.{k}\" for k in model.expand_fields]}]\n if model.expand_fields\n else []\n )\n if model is models.PaymentMethod:\n # special case\n all_list_kwargs.extend(\n (\n {\"customer\": stripe_customer.id, \"type\": \"card\"}\n for stripe_customer in models.Customer.api_list()\n )\n )\n elif model is models.SubscriptionItem:\n all_list_kwargs.extend(\n (\n {\"subscription\": subscription.id}\n for subscription in models.Subscription.api_list()\n )\n )\n else:\n all_list_kwargs.append({})\n\n return all_list_kwargs\n", "path": "djstripe/management/commands/djstripe_sync_models.py"}]} | 2,632 | 117 |
gh_patches_debug_58825 | rasdani/github-patches | git_diff | modin-project__modin-3390 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Do not check ASV benchmarks on test data, where the number of rows is much less than the number of columns
These sizes can be removed because such cases are not used in benchmarking: https://github.com/modin-project/modin/blob/dd91a78ad3f4b8e3e569215e9c8e540ad099d4a8/asv_bench/benchmarks/utils/data_shapes.py#L33 and https://github.com/modin-project/modin/blob/dd91a78ad3f4b8e3e569215e9c8e540ad099d4a8/asv_bench/benchmarks/utils/data_shapes.py#L46
</issue>
<code>
[start of asv_bench/benchmarks/utils/data_shapes.py]
1 # Licensed to Modin Development Team under one or more contributor license agreements.
2 # See the NOTICE file distributed with this work for additional information regarding
3 # copyright ownership. The Modin Development Team licenses this file to you under the
4 # Apache License, Version 2.0 (the "License"); you may not use this file except in
5 # compliance with the License. You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software distributed under
10 # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific language
12 # governing permissions and limitations under the License.
13
14 """Define data shapes."""
15
16 import os
17 import json
18
19 from .compatibility import ASV_USE_BACKEND, ASV_DATASET_SIZE
20
21 RAND_LOW = 0
22 RAND_HIGH = 1_000_000_000 if ASV_USE_BACKEND == "omnisci" else 100
23
24 BINARY_OP_DATA_SIZE = {
25 "big": [
26 [[5000, 5000], [5000, 5000]],
27 # the case extremely inefficient
28 # [[20, 500_000], [10, 1_000_000]],
29 [[500_000, 20], [1_000_000, 10]],
30 ],
31 "small": [
32 [[250, 250], [250, 250]],
33 [[20, 10_000], [10, 25_000]],
34 [[10_000, 20], [25_000, 10]],
35 ],
36 }
37 UNARY_OP_DATA_SIZE = {
38 "big": [
39 [5000, 5000],
40 # the case extremely inefficient
41 # [10, 1_000_000],
42 [1_000_000, 10],
43 ],
44 "small": [
45 [250, 250],
46 [10, 10_000],
47 [10_000, 10],
48 ],
49 }
50 SERIES_DATA_SIZE = {
51 "big": [
52 (100_000, 1),
53 ],
54 "small": [
55 (10_000, 1),
56 ],
57 }
58
59
60 OMNISCI_BINARY_OP_DATA_SIZE = {
61 "big": [
62 [[500_000, 20], [1_000_000, 10]],
63 ],
64 "small": [
65 [[10_000, 20], [25_000, 10]],
66 ],
67 }
68 OMNISCI_UNARY_OP_DATA_SIZE = {
69 "big": [
70 [1_000_000, 10],
71 ],
72 "small": [
73 [10_000, 10],
74 ],
75 }
76 OMNISCI_SERIES_DATA_SIZE = {
77 "big": [
78 [10_000_000, 1],
79 ],
80 "small": [
81 [100_000, 1],
82 ],
83 }
84
85 BINARY_SHAPES = (
86 OMNISCI_BINARY_OP_DATA_SIZE[ASV_DATASET_SIZE]
87 if ASV_USE_BACKEND == "omnisci"
88 else BINARY_OP_DATA_SIZE[ASV_DATASET_SIZE]
89 )
90 UNARY_SHAPES = (
91 OMNISCI_UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE]
92 if ASV_USE_BACKEND == "omnisci"
93 else UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE]
94 )
95 SERIES_SHAPES = (
96 OMNISCI_SERIES_DATA_SIZE[ASV_DATASET_SIZE]
97 if ASV_USE_BACKEND == "omnisci"
98 else SERIES_DATA_SIZE[ASV_DATASET_SIZE]
99 )
100
101 DEFAULT_GROUPBY_NGROUPS = {
102 "big": [100, "huge_amount_groups"],
103 "small": [5],
104 }
105 GROUPBY_NGROUPS = DEFAULT_GROUPBY_NGROUPS[ASV_DATASET_SIZE]
106
107 _DEFAULT_CONFIG_T = [
108 (
109 UNARY_SHAPES,
110 [
111 # Pandas backend benchmarks
112 "TimeGroupByMultiColumn",
113 "TimeGroupByDefaultAggregations",
114 "TimeGroupByDictionaryAggregation",
115 "TimeSetItem",
116 "TimeInsert",
117 "TimeArithmetic",
118 "TimeSortValues",
119 "TimeDrop",
120 "TimeHead",
121 "TimeFillna",
122 "TimeFillnaDataFrame",
123 "TimeValueCountsFrame",
124 "TimeValueCountsSeries",
125 "TimeIndexing",
126 "TimeMultiIndexing",
127 "TimeResetIndex",
128 "TimeAstype",
129 "TimeDescribe",
130 "TimeProperties",
131 # IO benchmarks
132 "TimeReadCsvSkiprows",
133 "TimeReadCsvTrueFalseValues",
134 "TimeReadCsvNamesDtype",
135 # Scalability benchmarks
136 "TimeFromPandas",
137 "TimeToPandas",
138 # OmniSci backend benchmarks
139 "omnisci.TimeJoin",
140 "omnisci.TimeBinaryOpDataFrame",
141 "omnisci.TimeArithmetic",
142 "omnisci.TimeSortValues",
143 "omnisci.TimeDrop",
144 "omnisci.TimeHead",
145 "omnisci.TimeFillna",
146 "omnisci.TimeIndexing",
147 "omnisci.TimeResetIndex",
148 "omnisci.TimeAstype",
149 "omnisci.TimeDescribe",
150 "omnisci.TimeProperties",
151 "omnisci.TimeGroupByDefaultAggregations",
152 "omnisci.TimeGroupByMultiColumn",
153 # OmniSci backend IO benchmarks
154 "omnisci.TimeReadCsvNames",
155 ],
156 ),
157 (
158 BINARY_SHAPES,
159 [
160 # Pandas backend benchmarks
161 "TimeJoin",
162 "TimeMerge",
163 "TimeConcat",
164 "TimeAppend",
165 "TimeBinaryOp",
166 # OmniSci backend benchmarks
167 "omnisci.TimeMerge",
168 "omnisci.TimeAppend",
169 ],
170 ),
171 (
172 SERIES_SHAPES,
173 [
174 # Pandas backend benchmarks
175 "TimeFillnaSeries",
176 # OmniSci backend benchmarks
177 "omnisci.TimeBinaryOpSeries",
178 "omnisci.TimeValueCountsSeries",
179 ],
180 ),
181 ]
182 DEFAULT_CONFIG = {}
183 for _shape, _names in _DEFAULT_CONFIG_T:
184 DEFAULT_CONFIG.update({_name: _shape for _name in _names})
185
186 CONFIG_FROM_FILE = None
187
188
189 def get_benchmark_shapes(bench_id: str):
190 """
191 Get custom benchmark shapes from a json file stored in MODIN_ASV_DATASIZE_CONFIG.
192
193 If `bench_id` benchmark is not found in the file, then the default value will
194 be used.
195
196 Parameters
197 ----------
198 bench_id : str
199 Unique benchmark identifier that is used to get shapes.
200
201 Returns
202 -------
203 list
204 Benchmark shapes.
205 """
206 global CONFIG_FROM_FILE
207 if not CONFIG_FROM_FILE:
208 try:
209 from modin.config import AsvDataSizeConfig
210
211 filename = AsvDataSizeConfig.get()
212 except ImportError:
213 filename = os.environ.get("MODIN_ASV_DATASIZE_CONFIG", None)
214 if filename:
215 # should be json
216 with open(filename) as _f:
217 CONFIG_FROM_FILE = json.load(_f)
218
219 if CONFIG_FROM_FILE and bench_id in CONFIG_FROM_FILE:
220 # example: "omnisci.TimeReadCsvNames": [[5555, 55], [3333, 33]]
221 return CONFIG_FROM_FILE[bench_id]
222 return DEFAULT_CONFIG[bench_id]
223
[end of asv_bench/benchmarks/utils/data_shapes.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/asv_bench/benchmarks/utils/data_shapes.py b/asv_bench/benchmarks/utils/data_shapes.py
--- a/asv_bench/benchmarks/utils/data_shapes.py
+++ b/asv_bench/benchmarks/utils/data_shapes.py
@@ -30,7 +30,6 @@
],
"small": [
[[250, 250], [250, 250]],
- [[20, 10_000], [10, 25_000]],
[[10_000, 20], [25_000, 10]],
],
}
@@ -43,7 +42,6 @@
],
"small": [
[250, 250],
- [10, 10_000],
[10_000, 10],
],
}
| {"golden_diff": "diff --git a/asv_bench/benchmarks/utils/data_shapes.py b/asv_bench/benchmarks/utils/data_shapes.py\n--- a/asv_bench/benchmarks/utils/data_shapes.py\n+++ b/asv_bench/benchmarks/utils/data_shapes.py\n@@ -30,7 +30,6 @@\n ],\n \"small\": [\n [[250, 250], [250, 250]],\n- [[20, 10_000], [10, 25_000]],\n [[10_000, 20], [25_000, 10]],\n ],\n }\n@@ -43,7 +42,6 @@\n ],\n \"small\": [\n [250, 250],\n- [10, 10_000],\n [10_000, 10],\n ],\n }\n", "issue": "Do not check ASV benchmarks on test data, where the number of rows is much less than the number of columns\nThese sizes can be removed because such cases are not used in benchmarking: https://github.com/modin-project/modin/blob/dd91a78ad3f4b8e3e569215e9c8e540ad099d4a8/asv_bench/benchmarks/utils/data_shapes.py#L33 and https://github.com/modin-project/modin/blob/dd91a78ad3f4b8e3e569215e9c8e540ad099d4a8/asv_bench/benchmarks/utils/data_shapes.py#L46\n", "before_files": [{"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\n\"\"\"Define data shapes.\"\"\"\n\nimport os\nimport json\n\nfrom .compatibility import ASV_USE_BACKEND, ASV_DATASET_SIZE\n\nRAND_LOW = 0\nRAND_HIGH = 1_000_000_000 if ASV_USE_BACKEND == \"omnisci\" else 100\n\nBINARY_OP_DATA_SIZE = {\n \"big\": [\n [[5000, 5000], [5000, 5000]],\n # the case extremely inefficient\n # [[20, 500_000], [10, 1_000_000]],\n [[500_000, 20], [1_000_000, 10]],\n ],\n \"small\": [\n [[250, 250], [250, 250]],\n [[20, 10_000], [10, 25_000]],\n [[10_000, 20], [25_000, 10]],\n ],\n}\nUNARY_OP_DATA_SIZE = {\n \"big\": [\n [5000, 5000],\n # the case extremely inefficient\n # [10, 1_000_000],\n [1_000_000, 10],\n ],\n \"small\": [\n [250, 250],\n [10, 10_000],\n [10_000, 10],\n ],\n}\nSERIES_DATA_SIZE = {\n \"big\": [\n (100_000, 1),\n ],\n \"small\": [\n (10_000, 1),\n ],\n}\n\n\nOMNISCI_BINARY_OP_DATA_SIZE = {\n \"big\": [\n [[500_000, 20], [1_000_000, 10]],\n ],\n \"small\": [\n [[10_000, 20], [25_000, 10]],\n ],\n}\nOMNISCI_UNARY_OP_DATA_SIZE = {\n \"big\": [\n [1_000_000, 10],\n ],\n \"small\": [\n [10_000, 10],\n ],\n}\nOMNISCI_SERIES_DATA_SIZE = {\n \"big\": [\n [10_000_000, 1],\n ],\n \"small\": [\n [100_000, 1],\n ],\n}\n\nBINARY_SHAPES = (\n OMNISCI_BINARY_OP_DATA_SIZE[ASV_DATASET_SIZE]\n if ASV_USE_BACKEND == \"omnisci\"\n else BINARY_OP_DATA_SIZE[ASV_DATASET_SIZE]\n)\nUNARY_SHAPES = (\n OMNISCI_UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE]\n if ASV_USE_BACKEND == \"omnisci\"\n else UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE]\n)\nSERIES_SHAPES = (\n OMNISCI_SERIES_DATA_SIZE[ASV_DATASET_SIZE]\n if ASV_USE_BACKEND == \"omnisci\"\n else SERIES_DATA_SIZE[ASV_DATASET_SIZE]\n)\n\nDEFAULT_GROUPBY_NGROUPS = {\n \"big\": [100, \"huge_amount_groups\"],\n \"small\": [5],\n}\nGROUPBY_NGROUPS = DEFAULT_GROUPBY_NGROUPS[ASV_DATASET_SIZE]\n\n_DEFAULT_CONFIG_T = [\n (\n 
UNARY_SHAPES,\n [\n # Pandas backend benchmarks\n \"TimeGroupByMultiColumn\",\n \"TimeGroupByDefaultAggregations\",\n \"TimeGroupByDictionaryAggregation\",\n \"TimeSetItem\",\n \"TimeInsert\",\n \"TimeArithmetic\",\n \"TimeSortValues\",\n \"TimeDrop\",\n \"TimeHead\",\n \"TimeFillna\",\n \"TimeFillnaDataFrame\",\n \"TimeValueCountsFrame\",\n \"TimeValueCountsSeries\",\n \"TimeIndexing\",\n \"TimeMultiIndexing\",\n \"TimeResetIndex\",\n \"TimeAstype\",\n \"TimeDescribe\",\n \"TimeProperties\",\n # IO benchmarks\n \"TimeReadCsvSkiprows\",\n \"TimeReadCsvTrueFalseValues\",\n \"TimeReadCsvNamesDtype\",\n # Scalability benchmarks\n \"TimeFromPandas\",\n \"TimeToPandas\",\n # OmniSci backend benchmarks\n \"omnisci.TimeJoin\",\n \"omnisci.TimeBinaryOpDataFrame\",\n \"omnisci.TimeArithmetic\",\n \"omnisci.TimeSortValues\",\n \"omnisci.TimeDrop\",\n \"omnisci.TimeHead\",\n \"omnisci.TimeFillna\",\n \"omnisci.TimeIndexing\",\n \"omnisci.TimeResetIndex\",\n \"omnisci.TimeAstype\",\n \"omnisci.TimeDescribe\",\n \"omnisci.TimeProperties\",\n \"omnisci.TimeGroupByDefaultAggregations\",\n \"omnisci.TimeGroupByMultiColumn\",\n # OmniSci backend IO benchmarks\n \"omnisci.TimeReadCsvNames\",\n ],\n ),\n (\n BINARY_SHAPES,\n [\n # Pandas backend benchmarks\n \"TimeJoin\",\n \"TimeMerge\",\n \"TimeConcat\",\n \"TimeAppend\",\n \"TimeBinaryOp\",\n # OmniSci backend benchmarks\n \"omnisci.TimeMerge\",\n \"omnisci.TimeAppend\",\n ],\n ),\n (\n SERIES_SHAPES,\n [\n # Pandas backend benchmarks\n \"TimeFillnaSeries\",\n # OmniSci backend benchmarks\n \"omnisci.TimeBinaryOpSeries\",\n \"omnisci.TimeValueCountsSeries\",\n ],\n ),\n]\nDEFAULT_CONFIG = {}\nfor _shape, _names in _DEFAULT_CONFIG_T:\n DEFAULT_CONFIG.update({_name: _shape for _name in _names})\n\nCONFIG_FROM_FILE = None\n\n\ndef get_benchmark_shapes(bench_id: str):\n \"\"\"\n Get custom benchmark shapes from a json file stored in MODIN_ASV_DATASIZE_CONFIG.\n\n If `bench_id` benchmark is not found in the file, then the default value will\n be used.\n\n Parameters\n ----------\n bench_id : str\n Unique benchmark identifier that is used to get shapes.\n\n Returns\n -------\n list\n Benchmark shapes.\n \"\"\"\n global CONFIG_FROM_FILE\n if not CONFIG_FROM_FILE:\n try:\n from modin.config import AsvDataSizeConfig\n\n filename = AsvDataSizeConfig.get()\n except ImportError:\n filename = os.environ.get(\"MODIN_ASV_DATASIZE_CONFIG\", None)\n if filename:\n # should be json\n with open(filename) as _f:\n CONFIG_FROM_FILE = json.load(_f)\n\n if CONFIG_FROM_FILE and bench_id in CONFIG_FROM_FILE:\n # example: \"omnisci.TimeReadCsvNames\": [[5555, 55], [3333, 33]]\n return CONFIG_FROM_FILE[bench_id]\n return DEFAULT_CONFIG[bench_id]\n", "path": "asv_bench/benchmarks/utils/data_shapes.py"}]} | 3,012 | 210 |
gh_patches_debug_10387 | rasdani/github-patches | git_diff | WordPress__openverse-api-727 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Possibly make `thumbnail` null for audio files without artwork
## Description
<!-- Concisely describe the bug. -->
Currently the frontend tries to fetch thumbnails for all audio files regardless of whether the audio file in question has one or not.
I noticed that the API returns the thumbnail URL for all tracks. That makes sense, but could we improve this to be `null` for audio tracks without artwork? Then we could check the field in the frontend before making a network request.
</issue>
<code>
[start of api/catalog/api/serializers/audio_serializers.py]
1 from rest_framework import serializers
2
3 from elasticsearch_dsl.response import Hit
4
5 from catalog.api.constants.field_order import field_position_map
6 from catalog.api.constants.field_values import AUDIO_CATEGORIES, LENGTHS
7 from catalog.api.docs.media_docs import fields_to_md
8 from catalog.api.models import Audio, AudioReport, AudioSet
9 from catalog.api.serializers.fields import (
10 EnumCharField,
11 SchemableHyperlinkedIdentityField,
12 )
13 from catalog.api.serializers.media_serializers import (
14 MediaReportRequestSerializer,
15 MediaSearchRequestSerializer,
16 MediaSearchSerializer,
17 MediaSerializer,
18 get_hyperlinks_serializer,
19 get_search_request_source_serializer,
20 )
21
22
23 #######################
24 # Request serializers #
25 #######################
26
27
28 AudioSearchRequestSourceSerializer = get_search_request_source_serializer("audio")
29
30
31 class AudioSearchRequestSerializer(
32 AudioSearchRequestSourceSerializer,
33 MediaSearchRequestSerializer,
34 ):
35 """Parse and validate search query string parameters."""
36
37 fields_names = [
38 *MediaSearchRequestSerializer.fields_names,
39 *AudioSearchRequestSourceSerializer.field_names,
40 "category",
41 "length",
42 ]
43 """
44 Keep the fields names in sync with the actual fields below as this list is
45 used to generate Swagger documentation.
46 """
47
48 category = EnumCharField(
49 plural="categories",
50 enum_class=AUDIO_CATEGORIES,
51 required=False,
52 )
53 length = EnumCharField(
54 plural="lengths",
55 enum_class=LENGTHS,
56 required=False,
57 )
58
59
60 class AudioReportRequestSerializer(MediaReportRequestSerializer):
61 class Meta(MediaReportRequestSerializer.Meta):
62 model = AudioReport
63
64
65 ########################
66 # Response serializers #
67 ########################
68
69
70 class AudioSetSerializer(serializers.ModelSerializer):
71 """An audio set, rendered as a part of the ``AudioSerializer`` output."""
72
73 class Meta:
74 model = AudioSet
75 fields = [
76 "title",
77 "foreign_landing_url",
78 "creator",
79 "creator_url",
80 "url",
81 "filesize",
82 "filetype",
83 ]
84
85
86 AudioHyperlinksSerializer = get_hyperlinks_serializer("audio")
87
88
89 class AudioSerializer(AudioHyperlinksSerializer, MediaSerializer):
90 """A single audio file. Used in search results."""
91
92 class Meta:
93 model = Audio
94 fields = sorted( # keep this list ordered logically
95 [
96 *MediaSerializer.Meta.fields,
97 *AudioHyperlinksSerializer.field_names,
98 "genres",
99 "alt_files",
100 "audio_set",
101 "duration",
102 "bit_rate",
103 "sample_rate",
104 "waveform", # hyperlink to the endpoint that generates the waveform
105 "peaks", # waveform peaks, if they have already been generated
106 ],
107 key=lambda val: field_position_map.get(val, 999),
108 )
109 """
110 Keep the fields names in sync with the actual fields below as this list is
111 used to generate Swagger documentation.
112 """
113
114 audio_set = AudioSetSerializer(
115 allow_null=True,
116 help_text="Reference to set of which this track is a part.",
117 read_only=True,
118 )
119
120 waveform = SchemableHyperlinkedIdentityField(
121 read_only=True,
122 view_name="audio-waveform",
123 lookup_field="identifier",
124 help_text="A direct link to the waveform peaks.",
125 )
126
127 # Add-on data
128 peaks = serializers.SerializerMethodField(
129 help_text="The list of peaks used to generate the waveform for the audio."
130 )
131
132 @staticmethod
133 def get_peaks(obj) -> list[int]:
134 if isinstance(obj, Hit):
135 obj = Audio.objects.get(identifier=obj.identifier)
136 return obj.get_waveform()
137
138
139 class AudioSearchSerializer(MediaSearchSerializer):
140 """
141 The full audio search response.
142 This serializer is purely representational and not actually used to
143 serialize the response.
144 """
145
146 results = AudioSerializer(
147 many=True,
148 help_text=(
149 "An array of audios and their details such as "
150 f"{fields_to_md(AudioSerializer.Meta.fields)}."
151 ),
152 )
153
154
155 ##########################
156 # Additional serializers #
157 ##########################
158
159
160 class AudioWaveformSerializer(serializers.Serializer):
161 len = serializers.SerializerMethodField()
162 points = serializers.ListField(
163 child=serializers.FloatField(min_value=0, max_value=1)
164 )
165
166 @staticmethod
167 def get_len(obj) -> int:
168 return len(obj.get("points", []))
169
[end of api/catalog/api/serializers/audio_serializers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/api/catalog/api/serializers/audio_serializers.py b/api/catalog/api/serializers/audio_serializers.py
--- a/api/catalog/api/serializers/audio_serializers.py
+++ b/api/catalog/api/serializers/audio_serializers.py
@@ -135,6 +135,18 @@
obj = Audio.objects.get(identifier=obj.identifier)
return obj.get_waveform()
+ def to_representation(self, instance):
+ # Get the original representation
+ output = super().to_representation(instance)
+
+ if isinstance(instance, Hit):
+ # TODO: Remove when updating ES indexes
+ audio = Audio.objects.get(identifier=instance.identifier)
+ if not audio.thumbnail:
+ output["thumbnail"] = None
+
+ return output
+
class AudioSearchSerializer(MediaSearchSerializer):
"""
| {"golden_diff": "diff --git a/api/catalog/api/serializers/audio_serializers.py b/api/catalog/api/serializers/audio_serializers.py\n--- a/api/catalog/api/serializers/audio_serializers.py\n+++ b/api/catalog/api/serializers/audio_serializers.py\n@@ -135,6 +135,18 @@\n obj = Audio.objects.get(identifier=obj.identifier)\n return obj.get_waveform()\n \n+ def to_representation(self, instance):\n+ # Get the original representation\n+ output = super().to_representation(instance)\n+\n+ if isinstance(instance, Hit):\n+ # TODO: Remove when updating ES indexes\n+ audio = Audio.objects.get(identifier=instance.identifier)\n+ if not audio.thumbnail:\n+ output[\"thumbnail\"] = None\n+\n+ return output\n+\n \n class AudioSearchSerializer(MediaSearchSerializer):\n \"\"\"\n", "issue": "Possibly make `thumbnail` null for audio files without artwork\n## Description\r\n<!-- Concisely describe the bug. -->\r\n\r\nCurrently the frontend tries to fetch thumbnails for all audio files regardless of whether the audio file in question has one or not. \r\nI noticed that the API returns the thumbnail URL for all tracks. That makes sense, but could we improve this to be `null` for audio tracks without artwork? Then we could check the field in the frontend before making a network request.\n", "before_files": [{"content": "from rest_framework import serializers\n\nfrom elasticsearch_dsl.response import Hit\n\nfrom catalog.api.constants.field_order import field_position_map\nfrom catalog.api.constants.field_values import AUDIO_CATEGORIES, LENGTHS\nfrom catalog.api.docs.media_docs import fields_to_md\nfrom catalog.api.models import Audio, AudioReport, AudioSet\nfrom catalog.api.serializers.fields import (\n EnumCharField,\n SchemableHyperlinkedIdentityField,\n)\nfrom catalog.api.serializers.media_serializers import (\n MediaReportRequestSerializer,\n MediaSearchRequestSerializer,\n MediaSearchSerializer,\n MediaSerializer,\n get_hyperlinks_serializer,\n get_search_request_source_serializer,\n)\n\n\n#######################\n# Request serializers #\n#######################\n\n\nAudioSearchRequestSourceSerializer = get_search_request_source_serializer(\"audio\")\n\n\nclass AudioSearchRequestSerializer(\n AudioSearchRequestSourceSerializer,\n MediaSearchRequestSerializer,\n):\n \"\"\"Parse and validate search query string parameters.\"\"\"\n\n fields_names = [\n *MediaSearchRequestSerializer.fields_names,\n *AudioSearchRequestSourceSerializer.field_names,\n \"category\",\n \"length\",\n ]\n \"\"\"\n Keep the fields names in sync with the actual fields below as this list is\n used to generate Swagger documentation.\n \"\"\"\n\n category = EnumCharField(\n plural=\"categories\",\n enum_class=AUDIO_CATEGORIES,\n required=False,\n )\n length = EnumCharField(\n plural=\"lengths\",\n enum_class=LENGTHS,\n required=False,\n )\n\n\nclass AudioReportRequestSerializer(MediaReportRequestSerializer):\n class Meta(MediaReportRequestSerializer.Meta):\n model = AudioReport\n\n\n########################\n# Response serializers #\n########################\n\n\nclass AudioSetSerializer(serializers.ModelSerializer):\n \"\"\"An audio set, rendered as a part of the ``AudioSerializer`` output.\"\"\"\n\n class Meta:\n model = AudioSet\n fields = [\n \"title\",\n \"foreign_landing_url\",\n \"creator\",\n \"creator_url\",\n \"url\",\n \"filesize\",\n \"filetype\",\n ]\n\n\nAudioHyperlinksSerializer = get_hyperlinks_serializer(\"audio\")\n\n\nclass AudioSerializer(AudioHyperlinksSerializer, MediaSerializer):\n \"\"\"A single audio file. 
Used in search results.\"\"\"\n\n class Meta:\n model = Audio\n fields = sorted( # keep this list ordered logically\n [\n *MediaSerializer.Meta.fields,\n *AudioHyperlinksSerializer.field_names,\n \"genres\",\n \"alt_files\",\n \"audio_set\",\n \"duration\",\n \"bit_rate\",\n \"sample_rate\",\n \"waveform\", # hyperlink to the endpoint that generates the waveform\n \"peaks\", # waveform peaks, if they have already been generated\n ],\n key=lambda val: field_position_map.get(val, 999),\n )\n \"\"\"\n Keep the fields names in sync with the actual fields below as this list is\n used to generate Swagger documentation.\n \"\"\"\n\n audio_set = AudioSetSerializer(\n allow_null=True,\n help_text=\"Reference to set of which this track is a part.\",\n read_only=True,\n )\n\n waveform = SchemableHyperlinkedIdentityField(\n read_only=True,\n view_name=\"audio-waveform\",\n lookup_field=\"identifier\",\n help_text=\"A direct link to the waveform peaks.\",\n )\n\n # Add-on data\n peaks = serializers.SerializerMethodField(\n help_text=\"The list of peaks used to generate the waveform for the audio.\"\n )\n\n @staticmethod\n def get_peaks(obj) -> list[int]:\n if isinstance(obj, Hit):\n obj = Audio.objects.get(identifier=obj.identifier)\n return obj.get_waveform()\n\n\nclass AudioSearchSerializer(MediaSearchSerializer):\n \"\"\"\n The full audio search response.\n This serializer is purely representational and not actually used to\n serialize the response.\n \"\"\"\n\n results = AudioSerializer(\n many=True,\n help_text=(\n \"An array of audios and their details such as \"\n f\"{fields_to_md(AudioSerializer.Meta.fields)}.\"\n ),\n )\n\n\n##########################\n# Additional serializers #\n##########################\n\n\nclass AudioWaveformSerializer(serializers.Serializer):\n len = serializers.SerializerMethodField()\n points = serializers.ListField(\n child=serializers.FloatField(min_value=0, max_value=1)\n )\n\n @staticmethod\n def get_len(obj) -> int:\n return len(obj.get(\"points\", []))\n", "path": "api/catalog/api/serializers/audio_serializers.py"}]} | 1,968 | 178 |
gh_patches_debug_29543 | rasdani/github-patches | git_diff | pyro-ppl__numpyro-304 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Clean up docs/source/conf.py file
I think we can change the names `Numpyro` -> `NumPyro` there, but I am not sure if the changes will affect the build of the website, so I am opening this issue.
cc @jpchen @neerajprad
</issue>
<code>
[start of docs/source/conf.py]
1 import os
2 import sys
3
4 import sphinx_rtd_theme
5
6
7 # import pkg_resources
8
9 # -*- coding: utf-8 -*-
10 #
11 # Configuration file for the Sphinx documentation builder.
12 #
13 # This file does only contain a selection of the most common options. For a
14 # full list see the documentation:
15 # http://www.sphinx-doc.org/en/master/config
16
17 # -- Path setup --------------------------------------------------------------
18
19 # If extensions (or modules to document with autodoc) are in another directory,
20 # add these directories to sys.path here. If the directory is relative to the
21 # documentation root, use os.path.abspath to make it absolute, like shown here.
22 #
23 sys.path.insert(0, os.path.abspath('../..'))
24
25
26 os.environ['SPHINX_BUILD'] = '1'
27
28 # HACK: This is to ensure that local functions are documented by sphinx.
29 from numpyro.mcmc import hmc # noqa: E402
30 from numpyro.svi import svi # noqa: E402
31 hmc(None, None)
32 svi(None, None, None, None)
33
34 # -- Project information -----------------------------------------------------
35
36 project = u'Numpyro'
37 copyright = u'2019, Uber Technologies, Inc'
38 author = u'Uber AI Labs'
39
40 # The short X.Y version
41 version = u'0.0'
42 # The full version, including alpha/beta/rc tags
43 release = u'0.0'
44
45
46 # -- General configuration ---------------------------------------------------
47
48 # If your documentation needs a minimal Sphinx version, state it here.
49 #
50 # needs_sphinx = '1.0'
51
52 # Add any Sphinx extension module names here, as strings. They can be
53 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
54 # ones.
55 extensions = [
56 'sphinx.ext.autodoc',
57 'sphinx.ext.doctest',
58 'sphinx.ext.intersphinx',
59 'sphinx.ext.mathjax',
60 'sphinx.ext.viewcode',
61 ]
62
63 # Disable documentation inheritance so as to avoid inheriting
64 # docstrings in a different format, e.g. when the parent class
65 # is a PyTorch class.
66
67 autodoc_inherit_docstrings = False
68
69 # autodoc_default_options = {
70 # 'member-order': 'bysource',
71 # 'show-inheritance': True,
72 # 'special-members': True,
73 # 'undoc-members': True,
74 # 'exclude-members': '__dict__,__module__,__weakref__',
75 # }
76
77 # Add any paths that contain templates here, relative to this directory.
78 templates_path = ['_templates']
79
80 # The suffix(es) of source filenames.
81 # You can specify multiple suffix as a list of string:
82 #
83 # source_suffix = ['.rst', '.md']
84 source_suffix = '.rst'
85
86 # The master toctree document.
87 master_doc = 'index'
88
89 # The language for content autogenerated by Sphinx. Refer to documentation
90 # for a list of supported languages.
91 #
92 # This is also used if you do content translation via gettext catalogs.
93 # Usually you set "language" from the command line for these cases.
94 language = None
95
96 # List of patterns, relative to source directory, that match files and
97 # directories to ignore when looking for source files.
98 # This pattern also affects html_static_path and html_extra_path .
99 exclude_patterns = []
100
101 # The name of the Pygments (syntax highlighting) style to use.
102 pygments_style = 'sphinx'
103
104
105 # do not prepend module name to functions
106 add_module_names = False
107
108 # -- Options for HTML output -------------------------------------------------
109
110 # The theme to use for HTML and HTML Help pages. See the documentation for
111 # a list of builtin themes.
112 #
113 html_theme = "sphinx_rtd_theme"
114 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
115
116 # Theme options are theme-specific and customize the look and feel of a theme
117 # further. For a list of options available for each theme, see the
118 # documentation.
119 #
120 # html_theme_options = {}
121
122 # Add any paths that contain custom static files (such as style sheets) here,
123 # relative to this directory. They are copied after the builtin static files,
124 # so a file named "default.css" will overwrite the builtin "default.css".
125 html_static_path = []
126
127 # Custom sidebar templates, must be a dictionary that maps document names
128 # to template names.
129 #
130 # The default sidebars (for documents that don't match any pattern) are
131 # defined by theme itself. Builtin themes are using these templates by
132 # default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
133 # 'searchbox.html']``.
134 #
135 # html_sidebars = {}
136
137
138 # -- Options for HTMLHelp output ---------------------------------------------
139
140 # Output file base name for HTML help builder.
141 htmlhelp_basename = 'numpyrodoc'
142
143
144 # -- Options for LaTeX output ------------------------------------------------
145
146 latex_elements = {
147 # The paper size ('letterpaper' or 'a4paper').
148 #
149 # 'papersize': 'letterpaper',
150
151 # The font size ('10pt', '11pt' or '12pt').
152 #
153 # 'pointsize': '10pt',
154
155 # Additional stuff for the LaTeX preamble.
156 #
157 # 'preamble': '',
158
159 # Latex figure (float) alignment
160 #
161 # 'figure_align': 'htbp',
162 }
163
164 # Grouping the document tree into LaTeX files. List of tuples
165 # (source start file, target name, title,
166 # author, documentclass [howto, manual, or own class]).
167 latex_documents = [
168 (master_doc, 'Numpyro.tex', u'Numpyro Documentation', u'Uber AI Labs', 'manual'),
169 ]
170
171 # -- Options for manual page output ------------------------------------------
172
173 # One entry per manual page. List of tuples
174 # (source start file, name, description, authors, manual section).
175 man_pages = [
176 (master_doc, 'Numpyro', u'Numpyro Documentation',
177 [author], 1)
178 ]
179
180 # -- Options for Texinfo output ----------------------------------------------
181
182 # Grouping the document tree into Texinfo files. List of tuples
183 # (source start file, target name, title, author,
184 # dir menu entry, description, category)
185 texinfo_documents = [
186 (master_doc, 'Numpyro', u'Numpyro Documentation',
187 author, 'Numpyro', 'Pyro PPL on Numpy',
188 'Miscellaneous'),
189 ]
190
191
192 # -- Extension configuration -------------------------------------------------
193
194 # -- Options for intersphinx extension ---------------------------------------
195
196 # Example configuration for intersphinx: refer to the Python standard library.
197 intersphinx_mapping = {
198 'python': ('https://docs.python.org/3/', None),
199 'numpy': ('http://docs.scipy.org/doc/numpy/', None),
200 'jax': ('https://jax.readthedocs.io/en/latest/', None),
201 'pyro': ('http://docs.pyro.ai/en/stable/', None),
202 }
203
[end of docs/source/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/source/conf.py b/docs/source/conf.py
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -33,7 +33,7 @@
# -- Project information -----------------------------------------------------
-project = u'Numpyro'
+project = u'NumPyro'
copyright = u'2019, Uber Technologies, Inc'
author = u'Uber AI Labs'
@@ -165,7 +165,7 @@
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
- (master_doc, 'Numpyro.tex', u'Numpyro Documentation', u'Uber AI Labs', 'manual'),
+ (master_doc, 'NumPyro.tex', u'NumPyro Documentation', u'Uber AI Labs', 'manual'),
]
# -- Options for manual page output ------------------------------------------
@@ -173,7 +173,7 @@
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
- (master_doc, 'Numpyro', u'Numpyro Documentation',
+ (master_doc, 'NumPyro', u'NumPyro Documentation',
[author], 1)
]
@@ -183,8 +183,8 @@
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
- (master_doc, 'Numpyro', u'Numpyro Documentation',
- author, 'Numpyro', 'Pyro PPL on Numpy',
+ (master_doc, 'NumPyro', u'NumPyro Documentation',
+ author, 'NumPyro', 'Pyro PPL on Numpy',
'Miscellaneous'),
]
| {"golden_diff": "diff --git a/docs/source/conf.py b/docs/source/conf.py\n--- a/docs/source/conf.py\n+++ b/docs/source/conf.py\n@@ -33,7 +33,7 @@\n \n # -- Project information -----------------------------------------------------\n \n-project = u'Numpyro'\n+project = u'NumPyro'\n copyright = u'2019, Uber Technologies, Inc'\n author = u'Uber AI Labs'\n \n@@ -165,7 +165,7 @@\n # (source start file, target name, title,\n # author, documentclass [howto, manual, or own class]).\n latex_documents = [\n- (master_doc, 'Numpyro.tex', u'Numpyro Documentation', u'Uber AI Labs', 'manual'),\n+ (master_doc, 'NumPyro.tex', u'NumPyro Documentation', u'Uber AI Labs', 'manual'),\n ]\n \n # -- Options for manual page output ------------------------------------------\n@@ -173,7 +173,7 @@\n # One entry per manual page. List of tuples\n # (source start file, name, description, authors, manual section).\n man_pages = [\n- (master_doc, 'Numpyro', u'Numpyro Documentation',\n+ (master_doc, 'NumPyro', u'NumPyro Documentation',\n [author], 1)\n ]\n \n@@ -183,8 +183,8 @@\n # (source start file, target name, title, author,\n # dir menu entry, description, category)\n texinfo_documents = [\n- (master_doc, 'Numpyro', u'Numpyro Documentation',\n- author, 'Numpyro', 'Pyro PPL on Numpy',\n+ (master_doc, 'NumPyro', u'NumPyro Documentation',\n+ author, 'NumPyro', 'Pyro PPL on Numpy',\n 'Miscellaneous'),\n ]\n", "issue": "Clean up docs/source/conf.py file\nI think we can change the names `Numpyro` -> `NumPyro` there, but I am not sure if the changes will affect the build of the website. So I make this issue.\r\n\r\ncc @jpchen @neerajprad \n", "before_files": [{"content": "import os\nimport sys\n\nimport sphinx_rtd_theme\n\n\n# import pkg_resources\n\n# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nsys.path.insert(0, os.path.abspath('../..'))\n\n\nos.environ['SPHINX_BUILD'] = '1'\n\n# HACK: This is to ensure that local functions are documented by sphinx.\nfrom numpyro.mcmc import hmc # noqa: E402\nfrom numpyro.svi import svi # noqa: E402\nhmc(None, None)\nsvi(None, None, None, None)\n\n# -- Project information -----------------------------------------------------\n\nproject = u'Numpyro'\ncopyright = u'2019, Uber Technologies, Inc'\nauthor = u'Uber AI Labs'\n\n# The short X.Y version\nversion = u'0.0'\n# The full version, including alpha/beta/rc tags\nrelease = u'0.0'\n\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.doctest',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.viewcode',\n]\n\n# Disable documentation inheritance so as to avoid inheriting\n# docstrings in a different format, e.g. 
when the parent class\n# is a PyTorch class.\n\nautodoc_inherit_docstrings = False\n\n# autodoc_default_options = {\n# 'member-order': 'bysource',\n# 'show-inheritance': True,\n# 'special-members': True,\n# 'undoc-members': True,\n# 'exclude-members': '__dict__,__module__,__weakref__',\n# }\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path .\nexclude_patterns = []\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n\n# do not prepend module name to functions\nadd_module_names = False\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_rtd_theme\"\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\n# html_theme_options = {}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = []\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\n# html_sidebars = {}\n\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'numpyrodoc'\n\n\n# -- Options for LaTeX output ------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'Numpyro.tex', u'Numpyro Documentation', u'Uber AI Labs', 'manual'),\n]\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. 
List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'Numpyro', u'Numpyro Documentation',\n [author], 1)\n]\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'Numpyro', u'Numpyro Documentation',\n author, 'Numpyro', 'Pyro PPL on Numpy',\n 'Miscellaneous'),\n]\n\n\n# -- Extension configuration -------------------------------------------------\n\n# -- Options for intersphinx extension ---------------------------------------\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/3/', None),\n 'numpy': ('http://docs.scipy.org/doc/numpy/', None),\n 'jax': ('https://jax.readthedocs.io/en/latest/', None),\n 'pyro': ('http://docs.pyro.ai/en/stable/', None),\n}\n", "path": "docs/source/conf.py"}]} | 2,584 | 409 |
gh_patches_debug_20491 | rasdani/github-patches | git_diff | OpenNMT__OpenNMT-py-461 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Explicit check when training with share_embeddings and not share_vocab
Hey, whenever I run training with the share_embeddings flag I get the following error:
```RuntimeError: cuda runtime error (59) : device-side assert triggered at /opt/conda/conda-bld/pytorch_1502009910772/work/torch/lib/THC/THCCachingHostAllocator.cpp:258```
Any idea what can cause this? How can I fix it?
Thanks.
</issue>
<code>
[start of onmt/ModelConstructor.py]
1 """
2 This file is for models creation, which consults options
3 and creates each encoder and decoder accordingly.
4 """
5 import torch.nn as nn
6
7 import onmt
8 import onmt.io
9 import onmt.Models
10 import onmt.modules
11 from onmt.Models import NMTModel, MeanEncoder, RNNEncoder, \
12 StdRNNDecoder, InputFeedRNNDecoder
13 from onmt.modules import Embeddings, ImageEncoder, CopyGenerator, \
14 TransformerEncoder, TransformerDecoder, \
15 CNNEncoder, CNNDecoder, AudioEncoder
16
17
18 def make_embeddings(opt, word_dict, feature_dicts, for_encoder=True):
19 """
20 Make an Embeddings instance.
21 Args:
22 opt: the option in current environment.
23 word_dict(Vocab): words dictionary.
24 feature_dicts([Vocab], optional): a list of feature dictionary.
25 for_encoder(bool): make Embeddings for encoder or decoder?
26 """
27 if for_encoder:
28 embedding_dim = opt.src_word_vec_size
29 else:
30 embedding_dim = opt.tgt_word_vec_size
31
32 word_padding_idx = word_dict.stoi[onmt.io.PAD_WORD]
33 num_word_embeddings = len(word_dict)
34
35 feats_padding_idx = [feat_dict.stoi[onmt.io.PAD_WORD]
36 for feat_dict in feature_dicts]
37 num_feat_embeddings = [len(feat_dict) for feat_dict in
38 feature_dicts]
39
40 return Embeddings(embedding_dim,
41 opt.position_encoding,
42 opt.feat_merge,
43 opt.feat_vec_exponent,
44 opt.feat_vec_size,
45 opt.dropout,
46 word_padding_idx,
47 feats_padding_idx,
48 num_word_embeddings,
49 num_feat_embeddings)
50
51
52 def make_encoder(opt, embeddings):
53 """
54 Various encoder dispatcher function.
55 Args:
56 opt: the option in current environment.
57 embeddings (Embeddings): vocab embeddings for this encoder.
58 """
59 if opt.encoder_type == "transformer":
60 return TransformerEncoder(opt.enc_layers, opt.rnn_size,
61 opt.dropout, embeddings)
62 elif opt.encoder_type == "cnn":
63 return CNNEncoder(opt.enc_layers, opt.rnn_size,
64 opt.cnn_kernel_width,
65 opt.dropout, embeddings)
66 elif opt.encoder_type == "mean":
67 return MeanEncoder(opt.enc_layers, embeddings)
68 else:
69 # "rnn" or "brnn"
70 return RNNEncoder(opt.rnn_type, opt.brnn, opt.enc_layers,
71 opt.rnn_size, opt.dropout, embeddings)
72
73
74 def make_decoder(opt, embeddings):
75 """
76 Various decoder dispatcher function.
77 Args:
78 opt: the option in current environment.
79 embeddings (Embeddings): vocab embeddings for this decoder.
80 """
81 if opt.decoder_type == "transformer":
82 return TransformerDecoder(opt.dec_layers, opt.rnn_size,
83 opt.global_attention, opt.copy_attn,
84 opt.dropout, embeddings)
85 elif opt.decoder_type == "cnn":
86 return CNNDecoder(opt.dec_layers, opt.rnn_size,
87 opt.global_attention, opt.copy_attn,
88 opt.cnn_kernel_width, opt.dropout,
89 embeddings)
90 elif opt.input_feed:
91 return InputFeedRNNDecoder(opt.rnn_type, opt.brnn,
92 opt.dec_layers, opt.rnn_size,
93 opt.global_attention,
94 opt.coverage_attn,
95 opt.context_gate,
96 opt.copy_attn,
97 opt.dropout,
98 embeddings)
99 else:
100 return StdRNNDecoder(opt.rnn_type, opt.brnn,
101 opt.dec_layers, opt.rnn_size,
102 opt.global_attention,
103 opt.coverage_attn,
104 opt.context_gate,
105 opt.copy_attn,
106 opt.dropout,
107 embeddings)
108
109
110 def make_base_model(model_opt, fields, gpu, checkpoint=None):
111 """
112 Args:
113 model_opt: the option loaded from checkpoint.
114 fields: `Field` objects for the model.
115 gpu(bool): whether to use gpu.
116 checkpoint: the model gnerated by train phase, or a resumed snapshot
117 model from a stopped training.
118 Returns:
119 the NMTModel.
120 """
121 assert model_opt.model_type in ["text", "img", "audio"], \
122 ("Unsupported model type %s" % (model_opt.model_type))
123
124 # Make encoder.
125 if model_opt.model_type == "text":
126 src_dict = fields["src"].vocab
127 feature_dicts = onmt.io.collect_feature_vocabs(fields, 'src')
128 src_embeddings = make_embeddings(model_opt, src_dict,
129 feature_dicts)
130 encoder = make_encoder(model_opt, src_embeddings)
131 elif model_opt.model_type == "img":
132 encoder = ImageEncoder(model_opt.enc_layers,
133 model_opt.brnn,
134 model_opt.rnn_size,
135 model_opt.dropout)
136 elif model_opt.model_type == "audio":
137 encoder = AudioEncoder(model_opt.enc_layers,
138 model_opt.brnn,
139 model_opt.rnn_size,
140 model_opt.dropout,
141 model_opt.sample_rate,
142 model_opt.window_size)
143
144 # Make decoder.
145 tgt_dict = fields["tgt"].vocab
146 # TODO: prepare for a future where tgt features are possible.
147 feature_dicts = onmt.io.collect_feature_vocabs(fields, 'tgt')
148 tgt_embeddings = make_embeddings(model_opt, tgt_dict,
149 feature_dicts, for_encoder=False)
150
151 # Share the embedding matrix - preprocess with share_vocab required
152 if model_opt.share_embeddings:
153 tgt_embeddings.word_lut.weight = src_embeddings.word_lut.weight
154
155 decoder = make_decoder(model_opt, tgt_embeddings)
156
157 # Make NMTModel(= encoder + decoder).
158 model = NMTModel(encoder, decoder)
159 model.model_type = model_opt.model_type
160
161 # Make Generator.
162 if not model_opt.copy_attn:
163 generator = nn.Sequential(
164 nn.Linear(model_opt.rnn_size, len(fields["tgt"].vocab)),
165 nn.LogSoftmax())
166 if model_opt.share_decoder_embeddings:
167 generator[0].weight = decoder.embeddings.word_lut.weight
168 else:
169 generator = CopyGenerator(model_opt, fields["src"].vocab,
170 fields["tgt"].vocab)
171
172 # Load the model states from checkpoint or initialize them.
173 if checkpoint is not None:
174 print('Loading model parameters.')
175 model.load_state_dict(checkpoint['model'])
176 generator.load_state_dict(checkpoint['generator'])
177 else:
178 if model_opt.param_init != 0.0:
179 print('Intializing model parameters.')
180 for p in model.parameters():
181 p.data.uniform_(-model_opt.param_init, model_opt.param_init)
182 for p in generator.parameters():
183 p.data.uniform_(-model_opt.param_init, model_opt.param_init)
184 if hasattr(model.encoder, 'embeddings'):
185 model.encoder.embeddings.load_pretrained_vectors(
186 model_opt.pre_word_vecs_enc, model_opt.fix_word_vecs_enc)
187 if hasattr(model.decoder, 'embeddings'):
188 model.decoder.embeddings.load_pretrained_vectors(
189 model_opt.pre_word_vecs_dec, model_opt.fix_word_vecs_dec)
190
191 # Add generator to model (this registers it as parameter of model).
192 model.generator = generator
193
194 # Make the whole model leverage GPU if indicated to do so.
195 if gpu:
196 model.cuda()
197 else:
198 model.cpu()
199
200 return model
201
[end of onmt/ModelConstructor.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/onmt/ModelConstructor.py b/onmt/ModelConstructor.py
--- a/onmt/ModelConstructor.py
+++ b/onmt/ModelConstructor.py
@@ -143,13 +143,17 @@
# Make decoder.
tgt_dict = fields["tgt"].vocab
- # TODO: prepare for a future where tgt features are possible.
feature_dicts = onmt.io.collect_feature_vocabs(fields, 'tgt')
tgt_embeddings = make_embeddings(model_opt, tgt_dict,
feature_dicts, for_encoder=False)
- # Share the embedding matrix - preprocess with share_vocab required
+ # Share the embedding matrix - preprocess with share_vocab required.
if model_opt.share_embeddings:
+ # src/tgt vocab should be the same if `-share_vocab` is specified.
+ if src_dict != tgt_dict:
+ raise AssertionError('The `-share_vocab` should be set during '
+ 'preprocess if you use share_embeddings!')
+
tgt_embeddings.word_lut.weight = src_embeddings.word_lut.weight
decoder = make_decoder(model_opt, tgt_embeddings)
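
In plain terms, the patch above replaces a cryptic failure with an explicit one: tying the encoder and decoder embedding matrices only makes sense when both sides index the same vocabulary (i.e. preprocessing was run with `-share_vocab`); otherwise target token ids can fall outside the shared table, which is the most likely source of the device-side assert reported in the issue. A minimal standalone sketch of that guard, using simplified, assumed names rather than the full OpenNMT API:

```python
# Illustrative only: `src_vocab`/`tgt_vocab` are vocab objects supporting equality,
# and the embedding modules expose a `word_lut` lookup table as in the code above.
def tie_embeddings(src_vocab, tgt_vocab, src_emb, tgt_emb, share_embeddings):
    if share_embeddings:
        if src_vocab != tgt_vocab:
            raise AssertionError(
                'The `-share_vocab` option must be set during preprocessing '
                'if you use share_embeddings!')
        # Safe to tie: both embeddings now index the same symbol set.
        tgt_emb.word_lut.weight = src_emb.word_lut.weight
    return src_emb, tgt_emb
```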
| {"golden_diff": "diff --git a/onmt/ModelConstructor.py b/onmt/ModelConstructor.py\n--- a/onmt/ModelConstructor.py\n+++ b/onmt/ModelConstructor.py\n@@ -143,13 +143,17 @@\n \n # Make decoder.\n tgt_dict = fields[\"tgt\"].vocab\n- # TODO: prepare for a future where tgt features are possible.\n feature_dicts = onmt.io.collect_feature_vocabs(fields, 'tgt')\n tgt_embeddings = make_embeddings(model_opt, tgt_dict,\n feature_dicts, for_encoder=False)\n \n- # Share the embedding matrix - preprocess with share_vocab required\n+ # Share the embedding matrix - preprocess with share_vocab required.\n if model_opt.share_embeddings:\n+ # src/tgt vocab should be the same if `-share_vocab` is specified.\n+ if src_dict != tgt_dict:\n+ raise AssertionError('The `-share_vocab` should be set during '\n+ 'preprocess if you use share_embeddings!')\n+\n tgt_embeddings.word_lut.weight = src_embeddings.word_lut.weight\n \n decoder = make_decoder(model_opt, tgt_embeddings)\n", "issue": "Explicit check when training with share_embeddings and not share_vocab\nHey, Whenever I run training with share_embedding flat I get the following error:\r\n\r\n```RuntimeError: cuda runtime error (59) : device-side assert triggered at /opt/conda/conda-bld/pytorch_1502009910772/work/torch/lib/THC/THCCachingHostAllocator.cpp:258```\r\n\r\nAny idea what can cause this? how can fix this!\r\n\r\nThank.\n", "before_files": [{"content": "\"\"\"\nThis file is for models creation, which consults options\nand creates each encoder and decoder accordingly.\n\"\"\"\nimport torch.nn as nn\n\nimport onmt\nimport onmt.io\nimport onmt.Models\nimport onmt.modules\nfrom onmt.Models import NMTModel, MeanEncoder, RNNEncoder, \\\n StdRNNDecoder, InputFeedRNNDecoder\nfrom onmt.modules import Embeddings, ImageEncoder, CopyGenerator, \\\n TransformerEncoder, TransformerDecoder, \\\n CNNEncoder, CNNDecoder, AudioEncoder\n\n\ndef make_embeddings(opt, word_dict, feature_dicts, for_encoder=True):\n \"\"\"\n Make an Embeddings instance.\n Args:\n opt: the option in current environment.\n word_dict(Vocab): words dictionary.\n feature_dicts([Vocab], optional): a list of feature dictionary.\n for_encoder(bool): make Embeddings for encoder or decoder?\n \"\"\"\n if for_encoder:\n embedding_dim = opt.src_word_vec_size\n else:\n embedding_dim = opt.tgt_word_vec_size\n\n word_padding_idx = word_dict.stoi[onmt.io.PAD_WORD]\n num_word_embeddings = len(word_dict)\n\n feats_padding_idx = [feat_dict.stoi[onmt.io.PAD_WORD]\n for feat_dict in feature_dicts]\n num_feat_embeddings = [len(feat_dict) for feat_dict in\n feature_dicts]\n\n return Embeddings(embedding_dim,\n opt.position_encoding,\n opt.feat_merge,\n opt.feat_vec_exponent,\n opt.feat_vec_size,\n opt.dropout,\n word_padding_idx,\n feats_padding_idx,\n num_word_embeddings,\n num_feat_embeddings)\n\n\ndef make_encoder(opt, embeddings):\n \"\"\"\n Various encoder dispatcher function.\n Args:\n opt: the option in current environment.\n embeddings (Embeddings): vocab embeddings for this encoder.\n \"\"\"\n if opt.encoder_type == \"transformer\":\n return TransformerEncoder(opt.enc_layers, opt.rnn_size,\n opt.dropout, embeddings)\n elif opt.encoder_type == \"cnn\":\n return CNNEncoder(opt.enc_layers, opt.rnn_size,\n opt.cnn_kernel_width,\n opt.dropout, embeddings)\n elif opt.encoder_type == \"mean\":\n return MeanEncoder(opt.enc_layers, embeddings)\n else:\n # \"rnn\" or \"brnn\"\n return RNNEncoder(opt.rnn_type, opt.brnn, opt.enc_layers,\n opt.rnn_size, opt.dropout, embeddings)\n\n\ndef make_decoder(opt, embeddings):\n 
\"\"\"\n Various decoder dispatcher function.\n Args:\n opt: the option in current environment.\n embeddings (Embeddings): vocab embeddings for this decoder.\n \"\"\"\n if opt.decoder_type == \"transformer\":\n return TransformerDecoder(opt.dec_layers, opt.rnn_size,\n opt.global_attention, opt.copy_attn,\n opt.dropout, embeddings)\n elif opt.decoder_type == \"cnn\":\n return CNNDecoder(opt.dec_layers, opt.rnn_size,\n opt.global_attention, opt.copy_attn,\n opt.cnn_kernel_width, opt.dropout,\n embeddings)\n elif opt.input_feed:\n return InputFeedRNNDecoder(opt.rnn_type, opt.brnn,\n opt.dec_layers, opt.rnn_size,\n opt.global_attention,\n opt.coverage_attn,\n opt.context_gate,\n opt.copy_attn,\n opt.dropout,\n embeddings)\n else:\n return StdRNNDecoder(opt.rnn_type, opt.brnn,\n opt.dec_layers, opt.rnn_size,\n opt.global_attention,\n opt.coverage_attn,\n opt.context_gate,\n opt.copy_attn,\n opt.dropout,\n embeddings)\n\n\ndef make_base_model(model_opt, fields, gpu, checkpoint=None):\n \"\"\"\n Args:\n model_opt: the option loaded from checkpoint.\n fields: `Field` objects for the model.\n gpu(bool): whether to use gpu.\n checkpoint: the model gnerated by train phase, or a resumed snapshot\n model from a stopped training.\n Returns:\n the NMTModel.\n \"\"\"\n assert model_opt.model_type in [\"text\", \"img\", \"audio\"], \\\n (\"Unsupported model type %s\" % (model_opt.model_type))\n\n # Make encoder.\n if model_opt.model_type == \"text\":\n src_dict = fields[\"src\"].vocab\n feature_dicts = onmt.io.collect_feature_vocabs(fields, 'src')\n src_embeddings = make_embeddings(model_opt, src_dict,\n feature_dicts)\n encoder = make_encoder(model_opt, src_embeddings)\n elif model_opt.model_type == \"img\":\n encoder = ImageEncoder(model_opt.enc_layers,\n model_opt.brnn,\n model_opt.rnn_size,\n model_opt.dropout)\n elif model_opt.model_type == \"audio\":\n encoder = AudioEncoder(model_opt.enc_layers,\n model_opt.brnn,\n model_opt.rnn_size,\n model_opt.dropout,\n model_opt.sample_rate,\n model_opt.window_size)\n\n # Make decoder.\n tgt_dict = fields[\"tgt\"].vocab\n # TODO: prepare for a future where tgt features are possible.\n feature_dicts = onmt.io.collect_feature_vocabs(fields, 'tgt')\n tgt_embeddings = make_embeddings(model_opt, tgt_dict,\n feature_dicts, for_encoder=False)\n\n # Share the embedding matrix - preprocess with share_vocab required\n if model_opt.share_embeddings:\n tgt_embeddings.word_lut.weight = src_embeddings.word_lut.weight\n\n decoder = make_decoder(model_opt, tgt_embeddings)\n\n # Make NMTModel(= encoder + decoder).\n model = NMTModel(encoder, decoder)\n model.model_type = model_opt.model_type\n\n # Make Generator.\n if not model_opt.copy_attn:\n generator = nn.Sequential(\n nn.Linear(model_opt.rnn_size, len(fields[\"tgt\"].vocab)),\n nn.LogSoftmax())\n if model_opt.share_decoder_embeddings:\n generator[0].weight = decoder.embeddings.word_lut.weight\n else:\n generator = CopyGenerator(model_opt, fields[\"src\"].vocab,\n fields[\"tgt\"].vocab)\n\n # Load the model states from checkpoint or initialize them.\n if checkpoint is not None:\n print('Loading model parameters.')\n model.load_state_dict(checkpoint['model'])\n generator.load_state_dict(checkpoint['generator'])\n else:\n if model_opt.param_init != 0.0:\n print('Intializing model parameters.')\n for p in model.parameters():\n p.data.uniform_(-model_opt.param_init, model_opt.param_init)\n for p in generator.parameters():\n p.data.uniform_(-model_opt.param_init, model_opt.param_init)\n if hasattr(model.encoder, 'embeddings'):\n 
model.encoder.embeddings.load_pretrained_vectors(\n model_opt.pre_word_vecs_enc, model_opt.fix_word_vecs_enc)\n if hasattr(model.decoder, 'embeddings'):\n model.decoder.embeddings.load_pretrained_vectors(\n model_opt.pre_word_vecs_dec, model_opt.fix_word_vecs_dec)\n\n # Add generator to model (this registers it as parameter of model).\n model.generator = generator\n\n # Make the whole model leverage GPU if indicated to do so.\n if gpu:\n model.cuda()\n else:\n model.cpu()\n\n return model\n", "path": "onmt/ModelConstructor.py"}]} | 2,654 | 237 |
gh_patches_debug_26809 | rasdani/github-patches | git_diff | edgedb__edgedb-7143 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Group by - ProtocolError: cannot decode Object: expected 3 elements, got 2110812590
Hi
```
# this is working
group EntryItem
by .account;
# this is not working
group EntryItem
using accountCode := .account
by accountCode;
-> ProtocolError: cannot decode Object: expected 3 elements, got 2110812590
```
- EdgeDB Version: 2.9
- EdgeDB CLI Version: 2.2.6+7eabbf9
- OS Version: macOS 12.1
Schema:
```
module default {
type Account {
required link book -> Book;
required property code -> str {
constraint exclusive;
};
property displayCode := .code[0:5] ++ ' ' ++ .code[5:];
required property name -> str;
required link currency -> Currency;
constraint exclusive on ((.book, .code));
}
type EntryItem {
required link entry -> Entry;
required property lineNumber -> int16;
required link account -> Account;
...
constraint exclusive on ((.entry, .lineNumber))
}
}
```
</issue>
<code>
[start of edb/edgeql/desugar_group.py]
1 #
2 # This source file is part of the EdgeDB open source project.
3 #
4 # Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License");
7 # you may not use this file except in compliance with the License.
8 # You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS,
14 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 # See the License for the specific language governing permissions and
16 # limitations under the License.
17 #
18
19 """Desugar GROUP queries into internal FOR GROUP queries.
20
21 This code is called by both the model and the real implementation,
22 though if that starts becoming a problem it should just be abandoned.
23 """
24
25 from __future__ import annotations
26
27
28 from typing import Optional, Tuple, AbstractSet, Dict, List
29
30 from edb.common import ast
31 from edb.common import ordered
32 from edb.common.compiler import AliasGenerator
33
34 from edb.edgeql import ast as qlast
35 from edb.edgeql.compiler import astutils
36
37
38 def key_name(s: str) -> str:
39 return s.split('~')[0]
40
41
42 def name_path(name: str) -> qlast.Path:
43 return qlast.Path(steps=[qlast.ObjectRef(name=name)])
44
45
46 def make_free_object(els: Dict[str, qlast.Expr]) -> qlast.Shape:
47 return qlast.Shape(
48 expr=None,
49 elements=[
50 qlast.ShapeElement(
51 expr=qlast.Path(steps=[qlast.Ptr(name=name)]),
52 compexpr=expr
53 )
54 for name, expr in els.items()
55 ],
56 )
57
58
59 def collect_grouping_atoms(
60 els: List[qlast.GroupingElement],
61 ) -> AbstractSet[str]:
62 atoms: ordered.OrderedSet[str] = ordered.OrderedSet()
63
64 def _collect_atom(el: qlast.GroupingAtom) -> None:
65 if isinstance(el, qlast.GroupingIdentList):
66 for at in el.elements:
67 _collect_atom(at)
68
69 else:
70 assert isinstance(el, qlast.ObjectRef)
71 atoms.add(el.name)
72
73 def _collect_el(el: qlast.GroupingElement) -> None:
74 if isinstance(el, qlast.GroupingSets):
75 for sub in el.sets:
76 _collect_el(sub)
77 elif isinstance(el, qlast.GroupingOperation):
78 for at in el.elements:
79 _collect_atom(at)
80 elif isinstance(el, qlast.GroupingSimple):
81 _collect_atom(el.element)
82 else:
83 raise AssertionError('Unknown GroupingElement')
84
85 for el in els:
86 _collect_el(el)
87
88 return atoms
89
90
91 def desugar_group(
92 node: qlast.GroupQuery,
93 aliases: AliasGenerator,
94 ) -> qlast.InternalGroupQuery:
95 assert not isinstance(node, qlast.InternalGroupQuery)
96 alias_map: Dict[str, Tuple[str, qlast.Expr]] = {}
97
98 def rewrite_atom(el: qlast.GroupingAtom) -> qlast.GroupingAtom:
99 if isinstance(el, qlast.ObjectRef):
100 return el
101 elif isinstance(el, qlast.Path):
102 assert isinstance(el.steps[0], qlast.Ptr)
103 ptrname = el.steps[0].name
104 if ptrname not in alias_map:
105 alias = aliases.get(ptrname)
106 alias_map[ptrname] = (alias, el)
107 alias = alias_map[ptrname][0]
108 return qlast.ObjectRef(name=alias)
109 else:
110 return qlast.GroupingIdentList(
111 span=el.span,
112 elements=tuple(rewrite_atom(at) for at in el.elements),
113 )
114
115 def rewrite(el: qlast.GroupingElement) -> qlast.GroupingElement:
116 if isinstance(el, qlast.GroupingSimple):
117 return qlast.GroupingSimple(
118 span=el.span, element=rewrite_atom(el.element))
119 elif isinstance(el, qlast.GroupingSets):
120 return qlast.GroupingSets(
121 span=el.span, sets=[rewrite(s) for s in el.sets])
122 elif isinstance(el, qlast.GroupingOperation):
123 return qlast.GroupingOperation(
124 span=el.span,
125 oper=el.oper,
126 elements=[rewrite_atom(a) for a in el.elements])
127 raise AssertionError
128
129 for using_clause in (node.using or ()):
130 alias_map[using_clause.alias] = (using_clause.alias, using_clause.expr)
131
132 using = node.using[:] if node.using else []
133 by = [rewrite(by_el) for by_el in node.by]
134 for alias, path in alias_map.values():
135 using.append(qlast.AliasedExpr(alias=alias, expr=path))
136
137 actual_keys = collect_grouping_atoms(by)
138
139 g_alias = aliases.get('g')
140 grouping_alias = aliases.get('grouping')
141 output_dict = {
142 'key': make_free_object({
143 name: name_path(alias)
144 for name, (alias, _) in alias_map.items()
145 if alias in actual_keys
146 }),
147 'grouping': qlast.FunctionCall(
148 func='array_unpack',
149 args=[name_path(grouping_alias)],
150 ),
151 'elements': name_path(g_alias),
152 }
153 output_shape = make_free_object(output_dict)
154
155 return qlast.InternalGroupQuery(
156 span=node.span,
157 aliases=node.aliases,
158 subject_alias=node.subject_alias,
159 subject=node.subject,
160 # rewritten parts!
161 using=using,
162 by=by,
163 group_alias=g_alias,
164 grouping_alias=grouping_alias,
165 result=output_shape,
166 from_desugaring=True,
167 )
168
169
170 def _count_alias_uses(
171 node: qlast.Expr,
172 alias: str,
173 ) -> int:
174 uses = 0
175 for child in ast.find_children(node, qlast.Path):
176 match child:
177 case astutils.alias_view((alias2, _)) if alias == alias2:
178 uses += 1
179 return uses
180
181
182 def try_group_rewrite(
183 node: qlast.Query,
184 aliases: AliasGenerator,
185 ) -> Optional[qlast.Query]:
186 """
187 Try to apply some syntactic rewrites of GROUP expressions so we
188 can generate better code.
189
190 The two key desugarings are:
191
192 * Sink a shape into the internal group result
193
194 SELECT (GROUP ...) <shape>
195 [filter-clause] [order-clause] [other clauses]
196 =>
197 SELECT (
198 FOR GROUP ...
199 UNION <igroup-body> <shape>
200 [filter-clause]
201 [order-clause]
202 ) [other clauses]
203
204 * Convert a FOR over a group into just an internal group (and
205 a trivial FOR)
206
207 FOR g in (GROUP ...) UNION <body>
208 =>
209 FOR GROUP ...
210 UNION (
211 FOR g IN (<group-body>)
212 UNION <body>
213 )
214 """
215
216 # Inline trivial uses of aliases bound to a group and then
217 # immediately used, so that we can apply the other optimizations.
218 match node:
219 case qlast.SelectQuery(
220 aliases=[
221 *_,
222 qlast.AliasedExpr(alias=alias, expr=qlast.GroupQuery() as grp)
223 ] as qaliases,
224 result=qlast.Shape(
225 expr=astutils.alias_view((alias2, [])),
226 elements=elements,
227 ) as result,
228 ) if alias == alias2 and _count_alias_uses(result, alias) == 1:
229 node = node.replace(
230 aliases=qaliases[:-1],
231 result=qlast.Shape(expr=grp, elements=elements),
232 )
233
234 case qlast.ForQuery(
235 aliases=[
236 *_,
237 qlast.AliasedExpr(alias=alias, expr=qlast.GroupQuery() as grp)
238 ] as qaliases,
239 iterator=astutils.alias_view((alias2, [])),
240 result=result,
241 ) if alias == alias2 and _count_alias_uses(result, alias) == 0:
242 node = node.replace(
243 aliases=qaliases[:-1],
244 iterator=grp,
245 )
246
247 # Sink shapes into the GROUP
248 if (
249 isinstance(node, qlast.SelectQuery)
250 and isinstance(node.result, qlast.Shape)
251 and isinstance(node.result.expr, qlast.GroupQuery)
252 ):
253 igroup = desugar_group(node.result.expr, aliases)
254 igroup = igroup.replace(result=qlast.Shape(
255 expr=igroup.result, elements=node.result.elements))
256
257 # FILTER gets sunk into the body of the FOR GROUP
258 if node.where or node.orderby:
259 igroup = igroup.replace(
260 # We need to move the result_alias in case
261 # the FILTER depends on it.
262 result_alias=node.result_alias,
263 where=node.where,
264 orderby=node.orderby,
265 )
266
267 return node.replace(
268 result=igroup, result_alias=None, where=None, orderby=None)
269
270 # Eliminate FORs over GROUPs
271 if (
272 isinstance(node, qlast.ForQuery)
273 and isinstance(node.iterator, qlast.GroupQuery)
274 ):
275 igroup = desugar_group(node.iterator, aliases)
276 new_result = qlast.ForQuery(
277 iterator_alias=node.iterator_alias,
278 iterator=igroup.result,
279 result=node.result,
280 )
281 return igroup.replace(result=new_result, aliases=node.aliases)
282
283 return None
284
[end of edb/edgeql/desugar_group.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/edb/edgeql/desugar_group.py b/edb/edgeql/desugar_group.py
--- a/edb/edgeql/desugar_group.py
+++ b/edb/edgeql/desugar_group.py
@@ -27,6 +27,8 @@
from typing import Optional, Tuple, AbstractSet, Dict, List
+from edb import errors
+
from edb.common import ast
from edb.common import ordered
from edb.common.compiler import AliasGenerator
@@ -126,11 +128,22 @@
elements=[rewrite_atom(a) for a in el.elements])
raise AssertionError
+ # The rewrite calls on the grouping elements populate alias_map
+ # with any bindings for pointers the by clause refers to directly.
+ by = [rewrite(by_el) for by_el in node.by]
+
for using_clause in (node.using or ()):
+ if using_clause.alias in alias_map:
+ # TODO: This would be a great place to allow multiple spans!
+ raise errors.QueryError(
+ f"USING clause binds a variable '{using_clause.alias}' "
+ f"but a property with that name is used directly in the BY "
+ f"clause",
+ span=alias_map[using_clause.alias][1].span,
+ )
alias_map[using_clause.alias] = (using_clause.alias, using_clause.expr)
- using = node.using[:] if node.using else []
- by = [rewrite(by_el) for by_el in node.by]
+ using = []
for alias, path in alias_map.values():
using.append(qlast.AliasedExpr(alias=alias, expr=path))
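
The fix has two parts: the BY clause is rewritten first, so any pointer it references directly (e.g. `.account`) is recorded in `alias_map`, and a USING binding that reuses one of those names is rejected up front with a QueryError instead of silently producing an output shape the client protocol cannot decode. A rough, self-contained sketch of just that collision check (hypothetical helper, not the real compiler types):

```python
# Hypothetical simplification: `using_clauses` is a list of (alias, expr) pairs and
# `alias_map` maps pointer names used directly in BY to (generated_alias, expr).
def merge_using_clauses(using_clauses, alias_map):
    for alias, expr in using_clauses:
        if alias in alias_map:
            raise ValueError(
                f"USING clause binds a variable '{alias}' but a property with "
                f"that name is used directly in the BY clause")
        alias_map[alias] = (alias, expr)
    # The final USING list is rebuilt purely from alias_map, as in the patch above.
    return list(alias_map.values())
```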
| {"golden_diff": "diff --git a/edb/edgeql/desugar_group.py b/edb/edgeql/desugar_group.py\n--- a/edb/edgeql/desugar_group.py\n+++ b/edb/edgeql/desugar_group.py\n@@ -27,6 +27,8 @@\n \n from typing import Optional, Tuple, AbstractSet, Dict, List\n \n+from edb import errors\n+\n from edb.common import ast\n from edb.common import ordered\n from edb.common.compiler import AliasGenerator\n@@ -126,11 +128,22 @@\n elements=[rewrite_atom(a) for a in el.elements])\n raise AssertionError\n \n+ # The rewrite calls on the grouping elements populate alias_map\n+ # with any bindings for pointers the by clause refers to directly.\n+ by = [rewrite(by_el) for by_el in node.by]\n+\n for using_clause in (node.using or ()):\n+ if using_clause.alias in alias_map:\n+ # TODO: This would be a great place to allow multiple spans!\n+ raise errors.QueryError(\n+ f\"USING clause binds a variable '{using_clause.alias}' \"\n+ f\"but a property with that name is used directly in the BY \"\n+ f\"clause\",\n+ span=alias_map[using_clause.alias][1].span,\n+ )\n alias_map[using_clause.alias] = (using_clause.alias, using_clause.expr)\n \n- using = node.using[:] if node.using else []\n- by = [rewrite(by_el) for by_el in node.by]\n+ using = []\n for alias, path in alias_map.values():\n using.append(qlast.AliasedExpr(alias=alias, expr=path))\n", "issue": "Group by - ProtocolError: cannot decode Object: expected 3 elements, got 2110812590\nHi\r\n\r\n```\r\n# this is working\r\ngroup EntryItem\r\nby .account;\r\n\r\n# this is not working\r\ngroup EntryItem\r\nusing accountCode := .account\r\nby accountCode;\r\n-> ProtocolError: cannot decode Object: expected 3 elements, got 2110812590\r\n```\r\n\r\n- EdgeDB Version: 2.9\r\n- EdgeDB CLI Version: 2.2.6+7eabbf9\r\n- OS Version: macOS 12.1\r\n\r\nSchema:\r\n\r\n```\r\nmodule default {\r\n type Account {\r\n required link book -> Book;\r\n required property code -> str {\r\n constraint exclusive;\r\n };\r\n\r\n property displayCode := .code[0:5] ++ ' ' ++ .code[5:];\r\n required property name -> str;\r\n required link currency -> Currency;\r\n\r\n constraint exclusive on ((.book, .code));\r\n }\r\n\r\n type EntryItem {\r\n required link entry -> Entry;\r\n required property lineNumber -> int16;\r\n\r\n required link account -> Account;\r\n ...\r\n\r\n constraint exclusive on ((.entry, .lineNumber))\r\n }\r\n}\r\n```\r\n\n", "before_files": [{"content": "#\n# This source file is part of the EdgeDB open source project.\n#\n# Copyright 2008-present MagicStack Inc. 
and the EdgeDB authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"Desugar GROUP queries into internal FOR GROUP queries.\n\nThis code is called by both the model and the real implementation,\nthough if that starts becoming a problem it should just be abandoned.\n\"\"\"\n\nfrom __future__ import annotations\n\n\nfrom typing import Optional, Tuple, AbstractSet, Dict, List\n\nfrom edb.common import ast\nfrom edb.common import ordered\nfrom edb.common.compiler import AliasGenerator\n\nfrom edb.edgeql import ast as qlast\nfrom edb.edgeql.compiler import astutils\n\n\ndef key_name(s: str) -> str:\n return s.split('~')[0]\n\n\ndef name_path(name: str) -> qlast.Path:\n return qlast.Path(steps=[qlast.ObjectRef(name=name)])\n\n\ndef make_free_object(els: Dict[str, qlast.Expr]) -> qlast.Shape:\n return qlast.Shape(\n expr=None,\n elements=[\n qlast.ShapeElement(\n expr=qlast.Path(steps=[qlast.Ptr(name=name)]),\n compexpr=expr\n )\n for name, expr in els.items()\n ],\n )\n\n\ndef collect_grouping_atoms(\n els: List[qlast.GroupingElement],\n) -> AbstractSet[str]:\n atoms: ordered.OrderedSet[str] = ordered.OrderedSet()\n\n def _collect_atom(el: qlast.GroupingAtom) -> None:\n if isinstance(el, qlast.GroupingIdentList):\n for at in el.elements:\n _collect_atom(at)\n\n else:\n assert isinstance(el, qlast.ObjectRef)\n atoms.add(el.name)\n\n def _collect_el(el: qlast.GroupingElement) -> None:\n if isinstance(el, qlast.GroupingSets):\n for sub in el.sets:\n _collect_el(sub)\n elif isinstance(el, qlast.GroupingOperation):\n for at in el.elements:\n _collect_atom(at)\n elif isinstance(el, qlast.GroupingSimple):\n _collect_atom(el.element)\n else:\n raise AssertionError('Unknown GroupingElement')\n\n for el in els:\n _collect_el(el)\n\n return atoms\n\n\ndef desugar_group(\n node: qlast.GroupQuery,\n aliases: AliasGenerator,\n) -> qlast.InternalGroupQuery:\n assert not isinstance(node, qlast.InternalGroupQuery)\n alias_map: Dict[str, Tuple[str, qlast.Expr]] = {}\n\n def rewrite_atom(el: qlast.GroupingAtom) -> qlast.GroupingAtom:\n if isinstance(el, qlast.ObjectRef):\n return el\n elif isinstance(el, qlast.Path):\n assert isinstance(el.steps[0], qlast.Ptr)\n ptrname = el.steps[0].name\n if ptrname not in alias_map:\n alias = aliases.get(ptrname)\n alias_map[ptrname] = (alias, el)\n alias = alias_map[ptrname][0]\n return qlast.ObjectRef(name=alias)\n else:\n return qlast.GroupingIdentList(\n span=el.span,\n elements=tuple(rewrite_atom(at) for at in el.elements),\n )\n\n def rewrite(el: qlast.GroupingElement) -> qlast.GroupingElement:\n if isinstance(el, qlast.GroupingSimple):\n return qlast.GroupingSimple(\n span=el.span, element=rewrite_atom(el.element))\n elif isinstance(el, qlast.GroupingSets):\n return qlast.GroupingSets(\n span=el.span, sets=[rewrite(s) for s in el.sets])\n elif isinstance(el, qlast.GroupingOperation):\n return qlast.GroupingOperation(\n span=el.span,\n oper=el.oper,\n elements=[rewrite_atom(a) for a in el.elements])\n raise AssertionError\n\n for using_clause in (node.using 
or ()):\n alias_map[using_clause.alias] = (using_clause.alias, using_clause.expr)\n\n using = node.using[:] if node.using else []\n by = [rewrite(by_el) for by_el in node.by]\n for alias, path in alias_map.values():\n using.append(qlast.AliasedExpr(alias=alias, expr=path))\n\n actual_keys = collect_grouping_atoms(by)\n\n g_alias = aliases.get('g')\n grouping_alias = aliases.get('grouping')\n output_dict = {\n 'key': make_free_object({\n name: name_path(alias)\n for name, (alias, _) in alias_map.items()\n if alias in actual_keys\n }),\n 'grouping': qlast.FunctionCall(\n func='array_unpack',\n args=[name_path(grouping_alias)],\n ),\n 'elements': name_path(g_alias),\n }\n output_shape = make_free_object(output_dict)\n\n return qlast.InternalGroupQuery(\n span=node.span,\n aliases=node.aliases,\n subject_alias=node.subject_alias,\n subject=node.subject,\n # rewritten parts!\n using=using,\n by=by,\n group_alias=g_alias,\n grouping_alias=grouping_alias,\n result=output_shape,\n from_desugaring=True,\n )\n\n\ndef _count_alias_uses(\n node: qlast.Expr,\n alias: str,\n) -> int:\n uses = 0\n for child in ast.find_children(node, qlast.Path):\n match child:\n case astutils.alias_view((alias2, _)) if alias == alias2:\n uses += 1\n return uses\n\n\ndef try_group_rewrite(\n node: qlast.Query,\n aliases: AliasGenerator,\n) -> Optional[qlast.Query]:\n \"\"\"\n Try to apply some syntactic rewrites of GROUP expressions so we\n can generate better code.\n\n The two key desugarings are:\n\n * Sink a shape into the internal group result\n\n SELECT (GROUP ...) <shape>\n [filter-clause] [order-clause] [other clauses]\n =>\n SELECT (\n FOR GROUP ...\n UNION <igroup-body> <shape>\n [filter-clause]\n [order-clause]\n ) [other clauses]\n\n * Convert a FOR over a group into just an internal group (and\n a trivial FOR)\n\n FOR g in (GROUP ...) 
UNION <body>\n =>\n FOR GROUP ...\n UNION (\n FOR g IN (<group-body>)\n UNION <body>\n )\n \"\"\"\n\n # Inline trivial uses of aliases bound to a group and then\n # immediately used, so that we can apply the other optimizations.\n match node:\n case qlast.SelectQuery(\n aliases=[\n *_,\n qlast.AliasedExpr(alias=alias, expr=qlast.GroupQuery() as grp)\n ] as qaliases,\n result=qlast.Shape(\n expr=astutils.alias_view((alias2, [])),\n elements=elements,\n ) as result,\n ) if alias == alias2 and _count_alias_uses(result, alias) == 1:\n node = node.replace(\n aliases=qaliases[:-1],\n result=qlast.Shape(expr=grp, elements=elements),\n )\n\n case qlast.ForQuery(\n aliases=[\n *_,\n qlast.AliasedExpr(alias=alias, expr=qlast.GroupQuery() as grp)\n ] as qaliases,\n iterator=astutils.alias_view((alias2, [])),\n result=result,\n ) if alias == alias2 and _count_alias_uses(result, alias) == 0:\n node = node.replace(\n aliases=qaliases[:-1],\n iterator=grp,\n )\n\n # Sink shapes into the GROUP\n if (\n isinstance(node, qlast.SelectQuery)\n and isinstance(node.result, qlast.Shape)\n and isinstance(node.result.expr, qlast.GroupQuery)\n ):\n igroup = desugar_group(node.result.expr, aliases)\n igroup = igroup.replace(result=qlast.Shape(\n expr=igroup.result, elements=node.result.elements))\n\n # FILTER gets sunk into the body of the FOR GROUP\n if node.where or node.orderby:\n igroup = igroup.replace(\n # We need to move the result_alias in case\n # the FILTER depends on it.\n result_alias=node.result_alias,\n where=node.where,\n orderby=node.orderby,\n )\n\n return node.replace(\n result=igroup, result_alias=None, where=None, orderby=None)\n\n # Eliminate FORs over GROUPs\n if (\n isinstance(node, qlast.ForQuery)\n and isinstance(node.iterator, qlast.GroupQuery)\n ):\n igroup = desugar_group(node.iterator, aliases)\n new_result = qlast.ForQuery(\n iterator_alias=node.iterator_alias,\n iterator=igroup.result,\n result=node.result,\n )\n return igroup.replace(result=new_result, aliases=node.aliases)\n\n return None\n", "path": "edb/edgeql/desugar_group.py"}]} | 3,576 | 372 |
gh_patches_debug_30873 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-5355 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[tensor] fix some unittests
[tensor] fix some unittests
[tensor] fix some unittests
</issue>
<code>
[start of colossalai/tensor/param_op_hook.py]
1 from abc import ABC, abstractmethod
2 from contextlib import contextmanager
3 from typing import Any, List, Tuple
4
5 import torch
6 from torch.utils._pytree import TreeSpec, tree_flatten, tree_unflatten
7
8
9 class ColoParamOpHook(ABC):
10 """
11 Hook which is triggered by each operation when operands contain ColoParameter.
12 To customize it, you must inherit this abstract class, and implement ``pre_forward``,
13 ``post_forward``, ``pre_backward`` and ``post_backward``.
14 These four methods apply a list of ColoParameter as input args.
15 """
16
17 @abstractmethod
18 def pre_forward(self, params: List[torch.Tensor]) -> None:
19 pass
20
21 @abstractmethod
22 def post_forward(self, params: List[torch.Tensor]) -> None:
23 pass
24
25 @abstractmethod
26 def pre_backward(self, params: List[torch.Tensor]) -> None:
27 pass
28
29 @abstractmethod
30 def post_backward(self, params: List[torch.Tensor]) -> None:
31 pass
32
33
34 class ColoParamOpHookManager:
35 """
36 Manage your param op hooks. It only has static methods.
37 The only static method you should call is ``use_hooks(*hooks)``.
38 """
39
40 hooks: Tuple[ColoParamOpHook, ...] = tuple()
41
42 @staticmethod
43 @contextmanager
44 def use_hooks(*hooks: ColoParamOpHook):
45 """Change the param op hooks you use. Nested calling is allowed.
46
47 Example:
48 >>> with ColoParamOpHookManager.use_hooks(*hooks):
49 >>> do_something()
50 >>> with ColoParamOpHookManager.use_hooks():
51 >>> // clear hooks
52 >>> do_something()
53 """
54 try:
55 old_param_op_hooks = ColoParamOpHookManager.hooks
56 ColoParamOpHookManager.hooks = hooks
57 yield
58 finally:
59 ColoParamOpHookManager.hooks = old_param_op_hooks
60
61 @staticmethod
62 def _trigger_pre_forward(params: List[torch.Tensor]) -> None:
63 for hook in ColoParamOpHookManager.hooks:
64 hook.pre_forward(params)
65
66 @staticmethod
67 def _trigger_post_forward(params: List[torch.Tensor]) -> None:
68 for hook in ColoParamOpHookManager.hooks:
69 hook.post_forward(params)
70
71 @staticmethod
72 def _trigger_pre_backward(params: List[torch.Tensor]) -> None:
73 for hook in ColoParamOpHookManager.hooks:
74 hook.pre_backward(params)
75
76 @staticmethod
77 def _trigger_post_backward(params: List[torch.Tensor]) -> None:
78 for hook in ColoParamOpHookManager.hooks:
79 hook.post_backward(params)
80
81 @staticmethod
82 def pre_op(params: List[torch.Tensor], *args: Any) -> list:
83 ColoParamOpHookManager._trigger_pre_forward(params)
84 # auto grad function can only recognize torch.Tensor, thus we have to flatten the input
85 # if one of the input requires grad, all the output will be treated as requires grad
86 # and will have grad fn even the corresponding input does not require grad
87 # we have to extract tensors requiring grad into flat list and then merge them back
88 grad_args, other_args, grad_flags, spec = _flatten_grad_args(args)
89 new_grad_args = PreFwdPostBwd.apply(params, *grad_args)
90 return _merge_args(new_grad_args, other_args, grad_flags, spec)
91
92 @staticmethod
93 def post_op(params: List[torch.Tensor], arg: Any) -> Any:
94 ColoParamOpHookManager._trigger_post_forward(params)
95 return PostFwdPreBwd.apply(params, arg)
96
97 @staticmethod
98 def has_hook() -> bool:
99 return len(ColoParamOpHookManager.hooks) > 0
100
101
102 class PreFwdPostBwd(torch.autograd.Function):
103 @staticmethod
104 def forward(ctx, params, *args):
105 ctx.params = params
106 return args
107
108 @staticmethod
109 def backward(ctx, *grads):
110 ColoParamOpHookManager._trigger_post_backward(ctx.params)
111 return (None,) + grads
112
113
114 class PostFwdPreBwd(torch.autograd.Function):
115 @staticmethod
116 def forward(ctx, params, args):
117 ctx.params = params
118 return args
119
120 @staticmethod
121 def backward(ctx, *grads):
122 ColoParamOpHookManager._trigger_pre_backward(ctx.params)
123 return (None,) + grads
124
125
126 def _is_grad_tensor(obj) -> bool:
127 if torch.is_tensor(obj):
128 if obj.grad_fn is not None or obj.requires_grad:
129 return True
130 return False
131
132
133 def _flatten_grad_args(args) -> Tuple[list, list, List[bool], TreeSpec]:
134 flat_args, spec = tree_flatten(args)
135 grad_args = []
136 other_args = []
137 grad_flags = []
138 for arg in flat_args:
139 flag = _is_grad_tensor(arg)
140 grad_flags.append(flag)
141 if flag:
142 grad_args.append(arg)
143 else:
144 other_args.append(arg)
145 assert len(grad_args) > 0
146 return grad_args, other_args, grad_flags, spec
147
148
149 def _merge_args(grad_args, other_args, grad_flags, spec):
150 grad_iter = iter(grad_args)
151 other_iter = iter(other_args)
152 flat_args = [next(grad_iter) if flag else next(other_iter) for flag in grad_flags]
153 return tree_unflatten(flat_args, spec)
154
[end of colossalai/tensor/param_op_hook.py]
[start of colossalai/tensor/colo_parameter.py]
1 from typing import Optional
2
3 import torch
4
5 from colossalai.tensor.colo_tensor import ColoTensor
6 from colossalai.tensor.param_op_hook import ColoParamOpHookManager
7
8 from .colo_tensor import _convert_output
9
10 WHITE_LIST_FUNCS = {torch.Tensor.__getitem__, torch.Tensor.is_floating_point}
11
12
13 def is_no_hook_op(func) -> bool:
14 return func.__name__.startswith("__") and func not in WHITE_LIST_FUNCS
15
16
17 def filter_colo_parameters(*args, **kwargs):
18 param_list = []
19
20 def get_colo_parameters(element) -> None:
21 if isinstance(element, list) or isinstance(element, tuple):
22 for e in element:
23 get_colo_parameters(e)
24 elif isinstance(element, dict):
25 raise RuntimeError("Found Dict: ColoParameter can't deal with complicated arguments.")
26 elif isinstance(element, ColoParameter):
27 param_list.append(element)
28 return
29
30 for a in args:
31 get_colo_parameters(a)
32 for v in kwargs.values():
33 get_colo_parameters(v)
34
35 return param_list
36
37
38 def replace_args(args, kwargs, new_args):
39 args = new_args[: len(args)]
40 for k, v in zip(kwargs.keys(), new_args[len(args) :]):
41 kwargs[k] = v
42 return tuple(args), kwargs
43
44
45 class ColoParameter(ColoTensor, torch.nn.Parameter):
46 r"""A kind of ColoTensor to be considered as a module parameter."""
47
48 def __new__(cls, data: Optional[torch.Tensor] = None, requires_grad: bool = True) -> "ColoParameter":
49 if data is None:
50 data = torch.empty(0)
51 return torch.Tensor._make_subclass(cls, data, requires_grad)
52
53 @classmethod
54 def __torch_function__(cls, func, types, args=..., kwargs=None):
55 if kwargs is None:
56 kwargs = {}
57 if ColoParamOpHookManager.has_hook() and not is_no_hook_op(func):
58 params = filter_colo_parameters(*args, **kwargs)
59 if len(params) > 0:
60 with torch._C.DisableTorchFunction():
61 new_args = ColoParamOpHookManager.pre_op(params, *args, *kwargs.values())
62 args, kwargs = replace_args(args, kwargs, new_args)
63 ret = super().__torch_function__(func, types, args, kwargs)
64 with torch._C.DisableTorchFunction():
65 ret = ColoParamOpHookManager.post_op(params, ret)
66 return _convert_output(ret, func)
67 return super().__torch_function__(func, types, args, kwargs)
68
69 def __deepcopy__(self, memo):
70 if id(self) in memo:
71 return memo[id(self)]
72 else:
73 with torch._C.DisableTorchFunction():
74 data = self.data.clone()
75 tensor = ColoParameter(data, self.requires_grad)
76 memo[id(self)] = tensor
77 return tensor
78
79 def __reduce_ex__(self, proto):
80 # Adapted from torch._utils._rebuild_parameter
81 # def _rebuild_colo_parameter(data, requires_grad, backward_hooks):
82 # colo_param = ColoParameter(data, requires_grad)
83 # colo_param._backward_hooks = backward_hooks
84 # return colo_param
85
86 # return (
87 # _rebuild_colo_parameter,
88 # (self.data, self.requires_grad, OrderedDict())
89 # )
90
91 # TODO(jzy) we don't support object reflection now.
92 # distspec cannot be pickled or rebuilt because it's tightly connected to runtime attribute `process_group`.
93 raise NotImplementedError
94
[end of colossalai/tensor/colo_parameter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/colossalai/tensor/colo_parameter.py b/colossalai/tensor/colo_parameter.py
--- a/colossalai/tensor/colo_parameter.py
+++ b/colossalai/tensor/colo_parameter.py
@@ -7,11 +7,12 @@
from .colo_tensor import _convert_output
-WHITE_LIST_FUNCS = {torch.Tensor.__getitem__, torch.Tensor.is_floating_point}
+WHITE_LIST_FUNCS = {torch.Tensor.__getitem__}
+NO_HOOK_FUNCS = {torch.Tensor.is_floating_point}
def is_no_hook_op(func) -> bool:
- return func.__name__.startswith("__") and func not in WHITE_LIST_FUNCS
+ return (func.__name__.startswith("__") and func not in WHITE_LIST_FUNCS) or func in NO_HOOK_FUNCS
def filter_colo_parameters(*args, **kwargs):
diff --git a/colossalai/tensor/param_op_hook.py b/colossalai/tensor/param_op_hook.py
--- a/colossalai/tensor/param_op_hook.py
+++ b/colossalai/tensor/param_op_hook.py
@@ -92,7 +92,10 @@
@staticmethod
def post_op(params: List[torch.Tensor], arg: Any) -> Any:
ColoParamOpHookManager._trigger_post_forward(params)
- return PostFwdPreBwd.apply(params, arg)
+ # incase the output is a tuple, we have to flatten it
+ grad_args, other_args, grad_flags, spec = _flatten_grad_args(arg)
+ new_grad_args = PostFwdPreBwd.apply(params, *grad_args)
+ return _merge_args(new_grad_args, other_args, grad_flags, spec)
@staticmethod
def has_hook() -> bool:
@@ -113,7 +116,7 @@
class PostFwdPreBwd(torch.autograd.Function):
@staticmethod
- def forward(ctx, params, args):
+ def forward(ctx, params, *args):
ctx.params = params
return args
@@ -142,7 +145,6 @@
grad_args.append(arg)
else:
other_args.append(arg)
- assert len(grad_args) > 0
return grad_args, other_args, grad_flags, spec
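
The key observation behind the change is that `torch.autograd.Function` only tracks plain tensors, so the hook's output path needs the same flatten/split/merge treatment as its input path: a tuple result (or a result with no differentiable tensors at all, such as the bool from `Tensor.is_floating_point`) must be pulled apart before going through `PostFwdPreBwd` and reassembled afterwards, which is also why the old `assert len(grad_args) > 0` had to go. A small self-contained illustration of that split-and-merge step, reusing the same pytree helpers the module already imports:

```python
import torch
from torch.utils._pytree import tree_flatten, tree_unflatten

def split_grad_tensors(output):
    # Separate tensors that participate in autograd from everything else.
    flat, spec = tree_flatten(output)
    grads, others, flags = [], [], []
    for item in flat:
        is_grad = torch.is_tensor(item) and (item.requires_grad or item.grad_fn is not None)
        flags.append(is_grad)
        (grads if is_grad else others).append(item)
    return grads, others, flags, spec

def merge_outputs(grads, others, flags, spec):
    g, o = iter(grads), iter(others)
    return tree_unflatten([next(g) if f else next(o) for f in flags], spec)
```

A non-differentiable result (for example, a plain bool) simply ends up in `others` and round-trips untouched.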
| {"golden_diff": "diff --git a/colossalai/tensor/colo_parameter.py b/colossalai/tensor/colo_parameter.py\n--- a/colossalai/tensor/colo_parameter.py\n+++ b/colossalai/tensor/colo_parameter.py\n@@ -7,11 +7,12 @@\n \n from .colo_tensor import _convert_output\n \n-WHITE_LIST_FUNCS = {torch.Tensor.__getitem__, torch.Tensor.is_floating_point}\n+WHITE_LIST_FUNCS = {torch.Tensor.__getitem__}\n+NO_HOOK_FUNCS = {torch.Tensor.is_floating_point}\n \n \n def is_no_hook_op(func) -> bool:\n- return func.__name__.startswith(\"__\") and func not in WHITE_LIST_FUNCS\n+ return (func.__name__.startswith(\"__\") and func not in WHITE_LIST_FUNCS) or func in NO_HOOK_FUNCS\n \n \n def filter_colo_parameters(*args, **kwargs):\ndiff --git a/colossalai/tensor/param_op_hook.py b/colossalai/tensor/param_op_hook.py\n--- a/colossalai/tensor/param_op_hook.py\n+++ b/colossalai/tensor/param_op_hook.py\n@@ -92,7 +92,10 @@\n @staticmethod\n def post_op(params: List[torch.Tensor], arg: Any) -> Any:\n ColoParamOpHookManager._trigger_post_forward(params)\n- return PostFwdPreBwd.apply(params, arg)\n+ # incase the output is a tuple, we have to flatten it\n+ grad_args, other_args, grad_flags, spec = _flatten_grad_args(arg)\n+ new_grad_args = PostFwdPreBwd.apply(params, *grad_args)\n+ return _merge_args(new_grad_args, other_args, grad_flags, spec)\n \n @staticmethod\n def has_hook() -> bool:\n@@ -113,7 +116,7 @@\n \n class PostFwdPreBwd(torch.autograd.Function):\n @staticmethod\n- def forward(ctx, params, args):\n+ def forward(ctx, params, *args):\n ctx.params = params\n return args\n \n@@ -142,7 +145,6 @@\n grad_args.append(arg)\n else:\n other_args.append(arg)\n- assert len(grad_args) > 0\n return grad_args, other_args, grad_flags, spec\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "from abc import ABC, abstractmethod\nfrom contextlib import contextmanager\nfrom typing import Any, List, Tuple\n\nimport torch\nfrom torch.utils._pytree import TreeSpec, tree_flatten, tree_unflatten\n\n\nclass ColoParamOpHook(ABC):\n \"\"\"\n Hook which is triggered by each operation when operands contain ColoParameter.\n To customize it, you must inherit this abstract class, and implement ``pre_forward``,\n ``post_forward``, ``pre_backward`` and ``post_backward``.\n These four methods apply a list of ColoParameter as input args.\n \"\"\"\n\n @abstractmethod\n def pre_forward(self, params: List[torch.Tensor]) -> None:\n pass\n\n @abstractmethod\n def post_forward(self, params: List[torch.Tensor]) -> None:\n pass\n\n @abstractmethod\n def pre_backward(self, params: List[torch.Tensor]) -> None:\n pass\n\n @abstractmethod\n def post_backward(self, params: List[torch.Tensor]) -> None:\n pass\n\n\nclass ColoParamOpHookManager:\n \"\"\"\n Manage your param op hooks. It only has static methods.\n The only static method you should call is ``use_hooks(*hooks)``.\n \"\"\"\n\n hooks: Tuple[ColoParamOpHook, ...] = tuple()\n\n @staticmethod\n @contextmanager\n def use_hooks(*hooks: ColoParamOpHook):\n \"\"\"Change the param op hooks you use. 
Nested calling is allowed.\n\n Example:\n >>> with ColoParamOpHookManager.use_hooks(*hooks):\n >>> do_something()\n >>> with ColoParamOpHookManager.use_hooks():\n >>> // clear hooks\n >>> do_something()\n \"\"\"\n try:\n old_param_op_hooks = ColoParamOpHookManager.hooks\n ColoParamOpHookManager.hooks = hooks\n yield\n finally:\n ColoParamOpHookManager.hooks = old_param_op_hooks\n\n @staticmethod\n def _trigger_pre_forward(params: List[torch.Tensor]) -> None:\n for hook in ColoParamOpHookManager.hooks:\n hook.pre_forward(params)\n\n @staticmethod\n def _trigger_post_forward(params: List[torch.Tensor]) -> None:\n for hook in ColoParamOpHookManager.hooks:\n hook.post_forward(params)\n\n @staticmethod\n def _trigger_pre_backward(params: List[torch.Tensor]) -> None:\n for hook in ColoParamOpHookManager.hooks:\n hook.pre_backward(params)\n\n @staticmethod\n def _trigger_post_backward(params: List[torch.Tensor]) -> None:\n for hook in ColoParamOpHookManager.hooks:\n hook.post_backward(params)\n\n @staticmethod\n def pre_op(params: List[torch.Tensor], *args: Any) -> list:\n ColoParamOpHookManager._trigger_pre_forward(params)\n # auto grad function can only recognize torch.Tensor, thus we have to flatten the input\n # if one of the input requires grad, all the output will be treated as requires grad\n # and will have grad fn even the corresponding input does not require grad\n # we have to extract tensors requiring grad into flat list and then merge them back\n grad_args, other_args, grad_flags, spec = _flatten_grad_args(args)\n new_grad_args = PreFwdPostBwd.apply(params, *grad_args)\n return _merge_args(new_grad_args, other_args, grad_flags, spec)\n\n @staticmethod\n def post_op(params: List[torch.Tensor], arg: Any) -> Any:\n ColoParamOpHookManager._trigger_post_forward(params)\n return PostFwdPreBwd.apply(params, arg)\n\n @staticmethod\n def has_hook() -> bool:\n return len(ColoParamOpHookManager.hooks) > 0\n\n\nclass PreFwdPostBwd(torch.autograd.Function):\n @staticmethod\n def forward(ctx, params, *args):\n ctx.params = params\n return args\n\n @staticmethod\n def backward(ctx, *grads):\n ColoParamOpHookManager._trigger_post_backward(ctx.params)\n return (None,) + grads\n\n\nclass PostFwdPreBwd(torch.autograd.Function):\n @staticmethod\n def forward(ctx, params, args):\n ctx.params = params\n return args\n\n @staticmethod\n def backward(ctx, *grads):\n ColoParamOpHookManager._trigger_pre_backward(ctx.params)\n return (None,) + grads\n\n\ndef _is_grad_tensor(obj) -> bool:\n if torch.is_tensor(obj):\n if obj.grad_fn is not None or obj.requires_grad:\n return True\n return False\n\n\ndef _flatten_grad_args(args) -> Tuple[list, list, List[bool], TreeSpec]:\n flat_args, spec = tree_flatten(args)\n grad_args = []\n other_args = []\n grad_flags = []\n for arg in flat_args:\n flag = _is_grad_tensor(arg)\n grad_flags.append(flag)\n if flag:\n grad_args.append(arg)\n else:\n other_args.append(arg)\n assert len(grad_args) > 0\n return grad_args, other_args, grad_flags, spec\n\n\ndef _merge_args(grad_args, other_args, grad_flags, spec):\n grad_iter = iter(grad_args)\n other_iter = iter(other_args)\n flat_args = [next(grad_iter) if flag else next(other_iter) for flag in grad_flags]\n return tree_unflatten(flat_args, spec)\n", "path": "colossalai/tensor/param_op_hook.py"}, {"content": "from typing import Optional\n\nimport torch\n\nfrom colossalai.tensor.colo_tensor import ColoTensor\nfrom colossalai.tensor.param_op_hook import ColoParamOpHookManager\n\nfrom .colo_tensor import 
_convert_output\n\nWHITE_LIST_FUNCS = {torch.Tensor.__getitem__, torch.Tensor.is_floating_point}\n\n\ndef is_no_hook_op(func) -> bool:\n return func.__name__.startswith(\"__\") and func not in WHITE_LIST_FUNCS\n\n\ndef filter_colo_parameters(*args, **kwargs):\n param_list = []\n\n def get_colo_parameters(element) -> None:\n if isinstance(element, list) or isinstance(element, tuple):\n for e in element:\n get_colo_parameters(e)\n elif isinstance(element, dict):\n raise RuntimeError(\"Found Dict: ColoParameter can't deal with complicated arguments.\")\n elif isinstance(element, ColoParameter):\n param_list.append(element)\n return\n\n for a in args:\n get_colo_parameters(a)\n for v in kwargs.values():\n get_colo_parameters(v)\n\n return param_list\n\n\ndef replace_args(args, kwargs, new_args):\n args = new_args[: len(args)]\n for k, v in zip(kwargs.keys(), new_args[len(args) :]):\n kwargs[k] = v\n return tuple(args), kwargs\n\n\nclass ColoParameter(ColoTensor, torch.nn.Parameter):\n r\"\"\"A kind of ColoTensor to be considered as a module parameter.\"\"\"\n\n def __new__(cls, data: Optional[torch.Tensor] = None, requires_grad: bool = True) -> \"ColoParameter\":\n if data is None:\n data = torch.empty(0)\n return torch.Tensor._make_subclass(cls, data, requires_grad)\n\n @classmethod\n def __torch_function__(cls, func, types, args=..., kwargs=None):\n if kwargs is None:\n kwargs = {}\n if ColoParamOpHookManager.has_hook() and not is_no_hook_op(func):\n params = filter_colo_parameters(*args, **kwargs)\n if len(params) > 0:\n with torch._C.DisableTorchFunction():\n new_args = ColoParamOpHookManager.pre_op(params, *args, *kwargs.values())\n args, kwargs = replace_args(args, kwargs, new_args)\n ret = super().__torch_function__(func, types, args, kwargs)\n with torch._C.DisableTorchFunction():\n ret = ColoParamOpHookManager.post_op(params, ret)\n return _convert_output(ret, func)\n return super().__torch_function__(func, types, args, kwargs)\n\n def __deepcopy__(self, memo):\n if id(self) in memo:\n return memo[id(self)]\n else:\n with torch._C.DisableTorchFunction():\n data = self.data.clone()\n tensor = ColoParameter(data, self.requires_grad)\n memo[id(self)] = tensor\n return tensor\n\n def __reduce_ex__(self, proto):\n # Adapted from torch._utils._rebuild_parameter\n # def _rebuild_colo_parameter(data, requires_grad, backward_hooks):\n # colo_param = ColoParameter(data, requires_grad)\n # colo_param._backward_hooks = backward_hooks\n # return colo_param\n\n # return (\n # _rebuild_colo_parameter,\n # (self.data, self.requires_grad, OrderedDict())\n # )\n\n # TODO(jzy) we don't support object reflection now.\n # distspec cannot be pickled or rebuilt because it's tightly connected to runtime attribute `process_group`.\n raise NotImplementedError\n", "path": "colossalai/tensor/colo_parameter.py"}]} | 3,061 | 508 |
gh_patches_debug_25250 | rasdani/github-patches | git_diff | pre-commit__pre-commit-193 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
^C^C during installation may leave pre-commit in a bad state
There's code which handles the first ^C; however, I think the second one (during execution of the finally block) may not be handled well. I probably need to make the cleanup atomic somehow...
</issue>
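One way to make the cleanup robust, and the same idea used in the patch shown further down, is to write a marker file only after installation finishes and to rebuild any environment whose marker is missing. The sketch below is illustrative only; `install_environment` and `do_install` are hypothetical stand-ins, not pre-commit's actual API.

```python
import os
import shutil


def install_environment(env_dir, do_install):
    """Install into ``env_dir`` with a completion marker (illustrative sketch)."""
    marker = os.path.join(env_dir, '.installed')
    if os.path.exists(marker):
        return  # a previous run finished successfully
    if os.path.exists(env_dir):
        shutil.rmtree(env_dir)  # wipe leftovers from an interrupted run
    os.makedirs(env_dir)
    do_install(env_dir)
    # Creating the marker is the last step, so a ^C anywhere above simply
    # leaves an unmarked directory that the next run removes and rebuilds.
    open(marker, 'w').close()
```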
<code>
[start of pre_commit/repository.py]
1 from __future__ import unicode_literals
2
3 from cached_property import cached_property
4
5 from pre_commit.languages.all import languages
6 from pre_commit.manifest import Manifest
7 from pre_commit.prefixed_command_runner import PrefixedCommandRunner
8
9
10 class Repository(object):
11 def __init__(self, repo_config, repo_path_getter):
12 self.repo_config = repo_config
13 self.repo_path_getter = repo_path_getter
14 self.__installed = False
15
16 @classmethod
17 def create(cls, config, store):
18 repo_path_getter = store.get_repo_path_getter(
19 config['repo'], config['sha']
20 )
21 return cls(config, repo_path_getter)
22
23 @cached_property
24 def repo_url(self):
25 return self.repo_config['repo']
26
27 @cached_property
28 def sha(self):
29 return self.repo_config['sha']
30
31 @cached_property
32 def languages(self):
33 return set(
34 (hook['language'], hook['language_version'])
35 for _, hook in self.hooks
36 )
37
38 @cached_property
39 def hooks(self):
40 # TODO: merging in manifest dicts is a smell imo
41 return tuple(
42 (hook['id'], dict(self.manifest.hooks[hook['id']], **hook))
43 for hook in self.repo_config['hooks']
44 )
45
46 @cached_property
47 def manifest(self):
48 return Manifest(self.repo_path_getter)
49
50 @cached_property
51 def cmd_runner(self):
52 return PrefixedCommandRunner(self.repo_path_getter.repo_path)
53
54 def require_installed(self):
55 if self.__installed:
56 return
57
58 self.install()
59 self.__installed = True
60
61 def install(self):
62 """Install the hook repository."""
63 for language_name, language_version in self.languages:
64 language = languages[language_name]
65 if (
66 language.ENVIRONMENT_DIR is None or
67 self.cmd_runner.exists(language.ENVIRONMENT_DIR)
68 ):
69 # The language is already installed
70 continue
71 language.install_environment(self.cmd_runner, language_version)
72
73 def run_hook(self, hook, file_args):
74 """Run a hook.
75
76 Args:
77 hook - Hook dictionary
78 file_args - List of files to run
79 """
80 self.require_installed()
81 return languages[hook['language']].run_hook(
82 self.cmd_runner, hook, file_args,
83 )
84
[end of pre_commit/repository.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pre_commit/repository.py b/pre_commit/repository.py
--- a/pre_commit/repository.py
+++ b/pre_commit/repository.py
@@ -1,5 +1,7 @@
from __future__ import unicode_literals
+import shutil
+
from cached_property import cached_property
from pre_commit.languages.all import languages
@@ -64,11 +66,21 @@
language = languages[language_name]
if (
language.ENVIRONMENT_DIR is None or
- self.cmd_runner.exists(language.ENVIRONMENT_DIR)
+ self.cmd_runner.exists(language.ENVIRONMENT_DIR, '.installed')
):
# The language is already installed
continue
+ # There's potentially incomplete cleanup from previous runs
+ # Clean it up!
+ if self.cmd_runner.exists(language.ENVIRONMENT_DIR):
+ shutil.rmtree(self.cmd_runner.path(language.ENVIRONMENT_DIR))
+
language.install_environment(self.cmd_runner, language_version)
+ # Touch the .installed file (atomic) to indicate we've installed
+ open(
+ self.cmd_runner.path(language.ENVIRONMENT_DIR, '.installed'),
+ 'w',
+ ).close()
def run_hook(self, hook, file_args):
"""Run a hook.
| {"golden_diff": "diff --git a/pre_commit/repository.py b/pre_commit/repository.py\n--- a/pre_commit/repository.py\n+++ b/pre_commit/repository.py\n@@ -1,5 +1,7 @@\n from __future__ import unicode_literals\n \n+import shutil\n+\n from cached_property import cached_property\n \n from pre_commit.languages.all import languages\n@@ -64,11 +66,21 @@\n language = languages[language_name]\n if (\n language.ENVIRONMENT_DIR is None or\n- self.cmd_runner.exists(language.ENVIRONMENT_DIR)\n+ self.cmd_runner.exists(language.ENVIRONMENT_DIR, '.installed')\n ):\n # The language is already installed\n continue\n+ # There's potentially incomplete cleanup from previous runs\n+ # Clean it up!\n+ if self.cmd_runner.exists(language.ENVIRONMENT_DIR):\n+ shutil.rmtree(self.cmd_runner.path(language.ENVIRONMENT_DIR))\n+\n language.install_environment(self.cmd_runner, language_version)\n+ # Touch the .installed file (atomic) to indicate we've installed\n+ open(\n+ self.cmd_runner.path(language.ENVIRONMENT_DIR, '.installed'),\n+ 'w',\n+ ).close()\n \n def run_hook(self, hook, file_args):\n \"\"\"Run a hook.\n", "issue": "^C^C during installation may leave pre-commit in a bad state\nThere's code which handles the first ^C, however I think the second one (during execution of the finally block) may not be handled well. I probably need to make the cleanup atomic somehow...\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nfrom cached_property import cached_property\n\nfrom pre_commit.languages.all import languages\nfrom pre_commit.manifest import Manifest\nfrom pre_commit.prefixed_command_runner import PrefixedCommandRunner\n\n\nclass Repository(object):\n def __init__(self, repo_config, repo_path_getter):\n self.repo_config = repo_config\n self.repo_path_getter = repo_path_getter\n self.__installed = False\n\n @classmethod\n def create(cls, config, store):\n repo_path_getter = store.get_repo_path_getter(\n config['repo'], config['sha']\n )\n return cls(config, repo_path_getter)\n\n @cached_property\n def repo_url(self):\n return self.repo_config['repo']\n\n @cached_property\n def sha(self):\n return self.repo_config['sha']\n\n @cached_property\n def languages(self):\n return set(\n (hook['language'], hook['language_version'])\n for _, hook in self.hooks\n )\n\n @cached_property\n def hooks(self):\n # TODO: merging in manifest dicts is a smell imo\n return tuple(\n (hook['id'], dict(self.manifest.hooks[hook['id']], **hook))\n for hook in self.repo_config['hooks']\n )\n\n @cached_property\n def manifest(self):\n return Manifest(self.repo_path_getter)\n\n @cached_property\n def cmd_runner(self):\n return PrefixedCommandRunner(self.repo_path_getter.repo_path)\n\n def require_installed(self):\n if self.__installed:\n return\n\n self.install()\n self.__installed = True\n\n def install(self):\n \"\"\"Install the hook repository.\"\"\"\n for language_name, language_version in self.languages:\n language = languages[language_name]\n if (\n language.ENVIRONMENT_DIR is None or\n self.cmd_runner.exists(language.ENVIRONMENT_DIR)\n ):\n # The language is already installed\n continue\n language.install_environment(self.cmd_runner, language_version)\n\n def run_hook(self, hook, file_args):\n \"\"\"Run a hook.\n\n Args:\n hook - Hook dictionary\n file_args - List of files to run\n \"\"\"\n self.require_installed()\n return languages[hook['language']].run_hook(\n self.cmd_runner, hook, file_args,\n )\n", "path": "pre_commit/repository.py"}]} | 1,249 | 263 |
gh_patches_debug_13090 | rasdani/github-patches | git_diff | nltk__nltk-782 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
BLEU score returns 1 (perfect match) instead of zero
Hi, taken from the BLEU implementation:
``` python
@staticmethod
def compute(candidate, references, weights):
candidate = [c.lower() for c in candidate]
references = [[r.lower() for r in reference] for reference in references]
p_ns = (BLEU.modified_precision(candidate, references, i) for i, _ in enumerate(weights, start=1))
s = math.fsum(w * math.log(p_n) for w, p_n in zip(weights, p_ns) if p_n)
bp = BLEU.brevity_penalty(candidate, references)
return bp * math.exp(s)
```
This function incorrectly returns a BLEU score of 1 when the candidate has no alignment to any of the references. In this case, `p_ns` will be all zeros because there is no overlap for any n-grams, which makes `s` zero, so the function returns `1` at the end.
There should be a special-case check for zero alignment so that the score is correctly returned as zero.
</issue>
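A minimal standalone snippet, mirroring the computation quoted above, shows how the `if p_n` guard turns an all-zero precision list into a perfect score: every term is filtered out, the sum over nothing is 0, and `exp(0)` is 1.

```python
import math

weights = [0.25, 0.25, 0.25, 0.25]
p_ns = [0.0, 0.0, 0.0, 0.0]  # modified precisions when no n-gram overlaps

# The `if p_n` filter drops every zero term, so nothing is summed at all.
s = math.fsum(w * math.log(p_n) for w, p_n in zip(weights, p_ns) if p_n)
print(s)            # 0.0
print(math.exp(s))  # 1.0, a "perfect" score for a candidate with no overlap
```

Whenever the brevity penalty evaluates to 1, `compute` therefore returns 1.0 overall.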
<code>
[start of nltk/align/bleu.py]
1 # -*- coding: utf-8 -*-
2 # Natural Language Toolkit: BLEU
3 #
4 # Copyright (C) 2001-2013 NLTK Project
5 # Authors: Chin Yee Lee, Hengfeng Li, Ruxin Hou, Calvin Tanujaya Lim
6 # URL: <http://nltk.org/>
7 # For license information, see LICENSE.TXT
8
9 from __future__ import division
10
11 import math
12
13 from nltk import word_tokenize
14 from nltk.compat import Counter
15 from nltk.util import ngrams
16
17
18 class BLEU(object):
19 """
20 This class implements the BLEU method, which is used to evaluate
21 the quality of machine translation. [1]
22
23 Consider an example:
24
25 >>> weights = [0.25, 0.25, 0.25, 0.25]
26 >>> candidate1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
27 ... 'ensures', 'that', 'the', 'military', 'always',
28 ... 'obeys', 'the', 'commands', 'of', 'the', 'party']
29
30 >>> candidate2 = ['It', 'is', 'to', 'insure', 'the', 'troops',
31 ... 'forever', 'hearing', 'the', 'activity', 'guidebook',
32 ... 'that', 'party', 'direct']
33
34 >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
35 ... 'ensures', 'that', 'the', 'military', 'will', 'forever',
36 ... 'heed', 'Party', 'commands']
37
38 >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',
39 ... 'guarantees', 'the', 'military', 'forces', 'always',
40 ... 'being', 'under', 'the', 'command', 'of', 'the',
41 ... 'Party']
42
43 >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
44 ... 'army', 'always', 'to', 'heed', 'the', 'directions',
45 ... 'of', 'the', 'party']
46
47 The BLEU method mainly consists of two parts:
48
49 Part 1 - modified n-gram precision
50
51 The normal precision method may lead to some wrong translations with
52 high-precision, e.g., the translation, in which a word of reference
53 repeats several times, has very high precision. So in the modified
54 n-gram precision, a reference word will be considered exhausted after
55 a matching candidate word is identified.
56
57 Unigrams:
58
59 >>> BLEU.modified_precision(
60 ... candidate1,
61 ... [reference1, reference2, reference3],
62 ... n=1,
63 ... )
64 0.94...
65
66 >>> BLEU.modified_precision(
67 ... candidate2,
68 ... [reference1, reference2, reference3],
69 ... n=1,
70 ... )
71 0.57...
72
73 Bigrmas:
74
75 >>> BLEU.modified_precision(
76 ... candidate1,
77 ... [reference1, reference2, reference3],
78 ... n=2,
79 ... )
80 0.58...
81
82 >>> BLEU.modified_precision(
83 ... candidate2,
84 ... [reference1, reference2, reference3],
85 ... n=2,
86 ... )
87 0.07...
88
89
90 Part 2 - brevity penalty
91
92 As the modified n-gram precision still has the problem from the short
93 length sentence, brevity penalty is used to modify the overall BLEU
94 score according to length.
95
96 >>> BLEU.compute(candidate1, [reference1, reference2, reference3], weights)
97 0.504...
98
99 >>> BLEU.compute(candidate2, [reference1, reference2, reference3], weights)
100 0.457...
101
102 2. Test with two corpus that one is a reference and another is
103 an output from translation system:
104
105 >>> weights = [0.25, 0.25, 0.25, 0.25]
106 >>> ref_file = open('newstest2012-ref.en') # doctest: +SKIP
107 >>> candidate_file = open('newstest2012.fr-en.cmu-avenue') # doctest: +SKIP
108
109 >>> total = 0.0
110 >>> count = 0
111
112 >>> for candi_raw in candidate_file: # doctest: +SKIP
113 ... ref_raw = ref_file.readline()
114 ... ref_tokens = word_tokenize(ref_raw)
115 ... candi_tokens = word_tokenize(candi_raw)
116 ... total = BLEU.compute(candi_tokens, [ref_tokens], weights)
117 ... count += 1
118
119 >>> total / count # doctest: +SKIP
120 2.787504437460048e-05
121
122 [1] Papineni, Kishore, et al. "BLEU: a method for automatic evaluation of
123 machine translation." Proceedings of the 40th annual meeting on
124 association for computational linguistics. Association for Computational
125 Linguistics, 2002.
126
127 """
128
129 @staticmethod
130 def compute(candidate, references, weights):
131 candidate = [c.lower() for c in candidate]
132 references = [[r.lower() for r in reference] for reference in references]
133
134 p_ns = (BLEU.modified_precision(candidate, references, i) for i, _ in enumerate(weights, start=1))
135 s = math.fsum(w * math.log(p_n) for w, p_n in zip(weights, p_ns) if p_n)
136
137 bp = BLEU.brevity_penalty(candidate, references)
138 return bp * math.exp(s)
139
140 @staticmethod
141 def modified_precision(candidate, references, n):
142 """ Calculate modified ngram precision.
143
144 >>> BLEU.modified_precision(
145 ... 'the the the the the the the'.split(),
146 ... ['the cat is on the mat'.split(), 'there is a cat on the mat'.split()],
147 ... n=1,
148 ... )
149 0.28...
150
151 >>> BLEU.modified_precision(
152 ... 'the the the the the the the'.split(),
153 ... ['the cat is on the mat'.split(), 'there is a cat on the mat'.split()],
154 ... n=2,
155 ... )
156 0.0
157
158 >>> BLEU.modified_precision(
159 ... 'of the'.split(),
160 ... [
161 ... 'It is a guide to action that ensures that the military will forever heed Party commands.'.split(),
162 ... 'It is the guiding principle which guarantees the military forces always being under the command of the Party.'.split(),
163 ... 'It is the practical guide for the army always to heed the directions of the party'.split(),
164 ... ],
165 ... n=1,
166 ... )
167 1.0
168
169 >>> BLEU.modified_precision(
170 ... 'of the'.split(),
171 ... [
172 ... 'It is a guide to action that ensures that the military will forever heed Party commands.'.split(),
173 ... 'It is the guiding principle which guarantees the military forces always being under the command of the Party.'.split(),
174 ... 'It is the practical guide for the army always to heed the directions of the party'.split(),
175 ... ],
176 ... n=2,
177 ... )
178 1.0
179
180 """
181 counts = Counter(ngrams(candidate, n))
182
183 if not counts:
184 return 0
185
186 max_counts = {}
187 for reference in references:
188 reference_counts = Counter(ngrams(reference, n))
189 for ngram in counts:
190 max_counts[ngram] = max(max_counts.get(ngram, 0), reference_counts[ngram])
191
192 clipped_counts = dict((ngram, min(count, max_counts[ngram])) for ngram, count in counts.items())
193
194 return sum(clipped_counts.values()) / sum(counts.values())
195
196 @staticmethod
197 def brevity_penalty(candidate, references):
198 c = len(candidate)
199 r = min(abs(len(r) - c) for r in references)
200
201 if c > r:
202 return 1
203 else:
204 return math.exp(1 - r / c)
205
206 # run doctests
207 if __name__ == "__main__":
208 import doctest
209 doctest.testmod()
210
[end of nltk/align/bleu.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nltk/align/bleu.py b/nltk/align/bleu.py
--- a/nltk/align/bleu.py
+++ b/nltk/align/bleu.py
@@ -132,7 +132,13 @@
references = [[r.lower() for r in reference] for reference in references]
p_ns = (BLEU.modified_precision(candidate, references, i) for i, _ in enumerate(weights, start=1))
- s = math.fsum(w * math.log(p_n) for w, p_n in zip(weights, p_ns) if p_n)
+ p_ns_nonzero = list(filter(None, p_ns))
+
+ if not p_ns_nonzero:
+ # There is zero aliment, so the score is 0
+ return 0
+
+ s = math.fsum(w * math.log(p_n) for w, p_n in zip(weights, p_ns_nonzero))
bp = BLEU.brevity_penalty(candidate, references)
return bp * math.exp(s)
| {"golden_diff": "diff --git a/nltk/align/bleu.py b/nltk/align/bleu.py\n--- a/nltk/align/bleu.py\n+++ b/nltk/align/bleu.py\n@@ -132,7 +132,13 @@\n references = [[r.lower() for r in reference] for reference in references]\n \n p_ns = (BLEU.modified_precision(candidate, references, i) for i, _ in enumerate(weights, start=1))\n- s = math.fsum(w * math.log(p_n) for w, p_n in zip(weights, p_ns) if p_n)\n+ p_ns_nonzero = list(filter(None, p_ns))\n+\n+ if not p_ns_nonzero:\n+ # There is zero aliment, so the score is 0\n+ return 0\n+\n+ s = math.fsum(w * math.log(p_n) for w, p_n in zip(weights, p_ns_nonzero))\n \n bp = BLEU.brevity_penalty(candidate, references)\n return bp * math.exp(s)\n", "issue": "BLEU score returns 1 (perfect match) instead of zero\nHi, taken from bleu implementation:\n\n``` python\n @staticmethod\n def compute(candidate, references, weights):\n candidate = [c.lower() for c in candidate]\n references = [[r.lower() for r in reference] for reference in references]\n\n p_ns = (BLEU.modified_precision(candidate, references, i) for i, _ in enumerate(weights, start=1))\n s = math.fsum(w * math.log(p_n) for w, p_n in zip(weights, p_ns) if p_n)\n\n bp = BLEU.brevity_penalty(candidate, references)\n return bp * math.exp(s)\n```\n\nThis function incorrectly returns BLEU score 1 when the candidate has no alignment to any of the references. In this case, `p_ns` will be all zeros because there is no overlap for any n-grams, which will make `s` zero, which will return `1` at the end.\n\nThere should be a special case check for the case when there is zero alignment and return zero correctly. \n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Natural Language Toolkit: BLEU\n#\n# Copyright (C) 2001-2013 NLTK Project\n# Authors: Chin Yee Lee, Hengfeng Li, Ruxin Hou, Calvin Tanujaya Lim\n# URL: <http://nltk.org/>\n# For license information, see LICENSE.TXT\n\nfrom __future__ import division\n\nimport math\n\nfrom nltk import word_tokenize\nfrom nltk.compat import Counter\nfrom nltk.util import ngrams\n\n\nclass BLEU(object):\n \"\"\"\n This class implements the BLEU method, which is used to evaluate\n the quality of machine translation. [1]\n\n Consider an example:\n\n >>> weights = [0.25, 0.25, 0.25, 0.25]\n >>> candidate1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'military', 'always',\n ... 'obeys', 'the', 'commands', 'of', 'the', 'party']\n\n >>> candidate2 = ['It', 'is', 'to', 'insure', 'the', 'troops',\n ... 'forever', 'hearing', 'the', 'activity', 'guidebook',\n ... 'that', 'party', 'direct']\n\n >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'military', 'will', 'forever',\n ... 'heed', 'Party', 'commands']\n\n >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'military', 'forces', 'always',\n ... 'being', 'under', 'the', 'command', 'of', 'the',\n ... 'Party']\n\n >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'army', 'always', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'party']\n\n The BLEU method mainly consists of two parts:\n\n Part 1 - modified n-gram precision\n\n The normal precision method may lead to some wrong translations with\n high-precision, e.g., the translation, in which a word of reference\n repeats several times, has very high precision. 
So in the modified\n n-gram precision, a reference word will be considered exhausted after\n a matching candidate word is identified.\n\n Unigrams:\n\n >>> BLEU.modified_precision(\n ... candidate1,\n ... [reference1, reference2, reference3],\n ... n=1,\n ... )\n 0.94...\n\n >>> BLEU.modified_precision(\n ... candidate2,\n ... [reference1, reference2, reference3],\n ... n=1,\n ... )\n 0.57...\n\n Bigrmas:\n\n >>> BLEU.modified_precision(\n ... candidate1,\n ... [reference1, reference2, reference3],\n ... n=2,\n ... )\n 0.58...\n\n >>> BLEU.modified_precision(\n ... candidate2,\n ... [reference1, reference2, reference3],\n ... n=2,\n ... )\n 0.07...\n\n\n Part 2 - brevity penalty\n\n As the modified n-gram precision still has the problem from the short\n length sentence, brevity penalty is used to modify the overall BLEU\n score according to length.\n\n >>> BLEU.compute(candidate1, [reference1, reference2, reference3], weights)\n 0.504...\n\n >>> BLEU.compute(candidate2, [reference1, reference2, reference3], weights)\n 0.457...\n\n 2. Test with two corpus that one is a reference and another is\n an output from translation system:\n\n >>> weights = [0.25, 0.25, 0.25, 0.25]\n >>> ref_file = open('newstest2012-ref.en') # doctest: +SKIP\n >>> candidate_file = open('newstest2012.fr-en.cmu-avenue') # doctest: +SKIP\n\n >>> total = 0.0\n >>> count = 0\n\n >>> for candi_raw in candidate_file: # doctest: +SKIP\n ...\t\tref_raw = ref_file.readline()\n ...\t\tref_tokens = word_tokenize(ref_raw)\n ...\t\tcandi_tokens = word_tokenize(candi_raw)\n ...\t\ttotal = BLEU.compute(candi_tokens, [ref_tokens], weights)\n ...\t\tcount += 1\n\n >>> total / count # doctest: +SKIP\n 2.787504437460048e-05\n\n [1] Papineni, Kishore, et al. \"BLEU: a method for automatic evaluation of\n machine translation.\" Proceedings of the 40th annual meeting on\n association for computational linguistics. Association for Computational\n Linguistics, 2002.\n\n \"\"\"\n\n @staticmethod\n def compute(candidate, references, weights):\n candidate = [c.lower() for c in candidate]\n references = [[r.lower() for r in reference] for reference in references]\n\n p_ns = (BLEU.modified_precision(candidate, references, i) for i, _ in enumerate(weights, start=1))\n s = math.fsum(w * math.log(p_n) for w, p_n in zip(weights, p_ns) if p_n)\n\n bp = BLEU.brevity_penalty(candidate, references)\n return bp * math.exp(s)\n\n @staticmethod\n def modified_precision(candidate, references, n):\n \"\"\" Calculate modified ngram precision.\n\n >>> BLEU.modified_precision(\n ... 'the the the the the the the'.split(),\n ... ['the cat is on the mat'.split(), 'there is a cat on the mat'.split()],\n ... n=1,\n ... )\n 0.28...\n\n >>> BLEU.modified_precision(\n ... 'the the the the the the the'.split(),\n ... ['the cat is on the mat'.split(), 'there is a cat on the mat'.split()],\n ... n=2,\n ... )\n 0.0\n\n >>> BLEU.modified_precision(\n ... 'of the'.split(),\n ... [\n ... 'It is a guide to action that ensures that the military will forever heed Party commands.'.split(),\n ... 'It is the guiding principle which guarantees the military forces always being under the command of the Party.'.split(),\n ... 'It is the practical guide for the army always to heed the directions of the party'.split(),\n ... ],\n ... n=1,\n ... )\n 1.0\n\n >>> BLEU.modified_precision(\n ... 'of the'.split(),\n ... [\n ... 'It is a guide to action that ensures that the military will forever heed Party commands.'.split(),\n ... 
'It is the guiding principle which guarantees the military forces always being under the command of the Party.'.split(),\n ... 'It is the practical guide for the army always to heed the directions of the party'.split(),\n ... ],\n ... n=2,\n ... )\n 1.0\n\n \"\"\"\n counts = Counter(ngrams(candidate, n))\n\n if not counts:\n return 0\n\n max_counts = {}\n for reference in references:\n reference_counts = Counter(ngrams(reference, n))\n for ngram in counts:\n max_counts[ngram] = max(max_counts.get(ngram, 0), reference_counts[ngram])\n\n clipped_counts = dict((ngram, min(count, max_counts[ngram])) for ngram, count in counts.items())\n\n return sum(clipped_counts.values()) / sum(counts.values())\n\n @staticmethod\n def brevity_penalty(candidate, references):\n c = len(candidate)\n r = min(abs(len(r) - c) for r in references)\n\n if c > r:\n return 1\n else:\n return math.exp(1 - r / c)\n\n# run doctests\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()\n", "path": "nltk/align/bleu.py"}]} | 3,200 | 230 |
gh_patches_debug_15990 | rasdani/github-patches | git_diff | open-mmlab__mmdetection-9151 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
run image_demo.py with cascade_rpn model show error
### Prerequisite
- [X] I have searched [the existing and past issues](https://github.com/open-mmlab/mmdetection/issues) but cannot get the expected help.
- [X] I have read the [FAQ documentation](https://mmdetection.readthedocs.io/en/latest/faq.html) but cannot get the expected help.
- [X] The bug has not been fixed in the [latest version](https://github.com/open-mmlab/mmdetection).
### 🐞 Describe the bug
``` python demo/image_demo.py demo/demo.jpg configs/cascade_rpn/crpn_r50_caffe_fpn_1x_coco.py checkpoints/cascade_rpn_r50_caffe_fpn_1x_coco-7aa93cef.pth```
```
UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at ../aten/src/ATen/native/TensorShape.cpp:2895.)
return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined]
Traceback (most recent call last):
File "demo/image_demo.py", line 68, in <module>
main(args)
File "demo/image_demo.py", line 38, in main
show_result_pyplot(
File "/home/ngi/IdeaProjects/mmlab/mmdetection/mmdet/apis/inference.py", line 241, in show_result_pyplot
model.show_result(
File "/home/ngi/IdeaProjects/mmlab/mmdetection/mmdet/models/detectors/rpn.py", line 159, in show_result
mmcv.imshow_bboxes(data, result, top_k=top_k, **kwargs)
TypeError: imshow_bboxes() got an unexpected keyword argument 'mask_color'
```
### Environment
```
Python: 3.8.13 (default, Mar 28 2022, 11:38:47) [GCC 7.5.0]
CUDA available: True
GPU 0: GeForce RTX 2080 SUPER
CUDA_HOME: /usr/local/cuda-10.2
NVCC: Cuda compilation tools, release 10.2, V10.2.8
GCC: gcc (Ubuntu 5.4.0-6ubuntu1~16.04.12) 5.4.0 20160609
PyTorch: 1.12.1+cu102
PyTorch compiling details: PyTorch built with:
- GCC 7.3
- C++ Version: 201402
- Intel(R) Math Kernel Library Version 2020.0.0 Product Build 20191122 for Intel(R) 64 architecture applications
- Intel(R) MKL-DNN v2.6.0 (Git Hash 52b5f107dd9cf10910aaa19cb47f3abf9b349815)
- OpenMP 201511 (a.k.a. OpenMP 4.5)
- LAPACK is enabled (usually provided by MKL)
- NNPACK is enabled
- CPU capability usage: AVX2
- CUDA Runtime 10.2
- NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70
- CuDNN 7.6.5
- Magma 2.5.2
- Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=10.2, CUDNN_VERSION=7.6.5, CXX_COMPILER=/opt/rh/devtoolset-7/root/usr/bin/c++, CXX_FLAGS= -fabi-version=11 -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -DEDGE_PROFILER_USE_KINETO -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-unused-parameter -Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.12.1, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=OFF, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, USE_ROCM=OFF,
TorchVision: 0.13.1+cu102
OpenCV: 4.6.0
MMCV: 1.6.2
MMCV Compiler: GCC 7.3
MMCV CUDA Compiler: 10.2
MMDetection: 2.25.2+9d3e162
```
### Additional information
_No response_
</issue>
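The failure is that `RPN.show_result` forwards leftover keyword arguments such as `mask_color` to `mmcv.imshow_bboxes`, which does not accept them. A generic way to avoid this class of error is to filter the kwargs against the target function's signature before calling it, which is also the direction the patch at the end of this entry takes. The snippet below is a self-contained sketch of that idea; `imshow_bboxes` here is a simplified stand-in, not mmcv's real implementation.

```python
from inspect import signature


def imshow_bboxes(img, bboxes, colors='green', top_k=-1, thickness=1):
    """Stand-in drawing helper; note that it accepts no ``mask_color``."""
    print('drawing %d boxes on %s in %s' % (len(bboxes), img, colors))


def call_with_supported_kwargs(func, *args, **kwargs):
    # Keep only keyword arguments the target function actually declares,
    # instead of hard-coding pop() calls that go stale as callers change.
    allowed = signature(func).parameters
    filtered = {k: v for k, v in kwargs.items() if k in allowed}
    return func(*args, **filtered)


# 'mask_color' and 'score_thr' are dropped instead of raising
# TypeError: imshow_bboxes() got an unexpected keyword argument 'mask_color'
call_with_supported_kwargs(
    imshow_bboxes, 'demo.jpg', [[0, 0, 10, 10]],
    colors='green', top_k=20, mask_color=None, score_thr=0.3)
```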
<code>
[start of mmdet/models/detectors/rpn.py]
1 # Copyright (c) OpenMMLab. All rights reserved.
2 import warnings
3
4 import mmcv
5 import torch
6 from mmcv.image import tensor2imgs
7
8 from mmdet.core import bbox_mapping
9 from ..builder import DETECTORS, build_backbone, build_head, build_neck
10 from .base import BaseDetector
11
12
13 @DETECTORS.register_module()
14 class RPN(BaseDetector):
15 """Implementation of Region Proposal Network."""
16
17 def __init__(self,
18 backbone,
19 neck,
20 rpn_head,
21 train_cfg,
22 test_cfg,
23 pretrained=None,
24 init_cfg=None):
25 super(RPN, self).__init__(init_cfg)
26 if pretrained:
27 warnings.warn('DeprecationWarning: pretrained is deprecated, '
28 'please use "init_cfg" instead')
29 backbone.pretrained = pretrained
30 self.backbone = build_backbone(backbone)
31 self.neck = build_neck(neck) if neck is not None else None
32 rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None
33 rpn_head.update(train_cfg=rpn_train_cfg)
34 rpn_head.update(test_cfg=test_cfg.rpn)
35 self.rpn_head = build_head(rpn_head)
36 self.train_cfg = train_cfg
37 self.test_cfg = test_cfg
38
39 def extract_feat(self, img):
40 """Extract features.
41
42 Args:
43 img (torch.Tensor): Image tensor with shape (n, c, h ,w).
44
45 Returns:
46 list[torch.Tensor]: Multi-level features that may have
47 different resolutions.
48 """
49 x = self.backbone(img)
50 if self.with_neck:
51 x = self.neck(x)
52 return x
53
54 def forward_dummy(self, img):
55 """Dummy forward function."""
56 x = self.extract_feat(img)
57 rpn_outs = self.rpn_head(x)
58 return rpn_outs
59
60 def forward_train(self,
61 img,
62 img_metas,
63 gt_bboxes=None,
64 gt_bboxes_ignore=None):
65 """
66 Args:
67 img (Tensor): Input images of shape (N, C, H, W).
68 Typically these should be mean centered and std scaled.
69 img_metas (list[dict]): A List of image info dict where each dict
70 has: 'img_shape', 'scale_factor', 'flip', and may also contain
71 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
72 For details on the values of these keys see
73 :class:`mmdet.datasets.pipelines.Collect`.
74 gt_bboxes (list[Tensor]): Each item are the truth boxes for each
75 image in [tl_x, tl_y, br_x, br_y] format.
76 gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
77 boxes can be ignored when computing the loss.
78
79 Returns:
80 dict[str, Tensor]: A dictionary of loss components.
81 """
82 if (isinstance(self.train_cfg.rpn, dict)
83 and self.train_cfg.rpn.get('debug', False)):
84 self.rpn_head.debug_imgs = tensor2imgs(img)
85
86 x = self.extract_feat(img)
87 losses = self.rpn_head.forward_train(x, img_metas, gt_bboxes, None,
88 gt_bboxes_ignore)
89 return losses
90
91 def simple_test(self, img, img_metas, rescale=False):
92 """Test function without test time augmentation.
93
94 Args:
95 imgs (list[torch.Tensor]): List of multiple images
96 img_metas (list[dict]): List of image information.
97 rescale (bool, optional): Whether to rescale the results.
98 Defaults to False.
99
100 Returns:
101 list[np.ndarray]: proposals
102 """
103 x = self.extract_feat(img)
104 # get origin input shape to onnx dynamic input shape
105 if torch.onnx.is_in_onnx_export():
106 img_shape = torch._shape_as_tensor(img)[2:]
107 img_metas[0]['img_shape_for_onnx'] = img_shape
108 proposal_list = self.rpn_head.simple_test_rpn(x, img_metas)
109 if rescale:
110 for proposals, meta in zip(proposal_list, img_metas):
111 proposals[:, :4] /= proposals.new_tensor(meta['scale_factor'])
112 if torch.onnx.is_in_onnx_export():
113 return proposal_list
114
115 return [proposal.cpu().numpy() for proposal in proposal_list]
116
117 def aug_test(self, imgs, img_metas, rescale=False):
118 """Test function with test time augmentation.
119
120 Args:
121 imgs (list[torch.Tensor]): List of multiple images
122 img_metas (list[dict]): List of image information.
123 rescale (bool, optional): Whether to rescale the results.
124 Defaults to False.
125
126 Returns:
127 list[np.ndarray]: proposals
128 """
129 proposal_list = self.rpn_head.aug_test_rpn(
130 self.extract_feats(imgs), img_metas)
131 if not rescale:
132 for proposals, img_meta in zip(proposal_list, img_metas[0]):
133 img_shape = img_meta['img_shape']
134 scale_factor = img_meta['scale_factor']
135 flip = img_meta['flip']
136 flip_direction = img_meta['flip_direction']
137 proposals[:, :4] = bbox_mapping(proposals[:, :4], img_shape,
138 scale_factor, flip,
139 flip_direction)
140 return [proposal.cpu().numpy() for proposal in proposal_list]
141
142 def show_result(self, data, result, top_k=20, **kwargs):
143 """Show RPN proposals on the image.
144
145 Args:
146 data (str or np.ndarray): Image filename or loaded image.
147 result (Tensor or tuple): The results to draw over `img`
148 bbox_result or (bbox_result, segm_result).
149 top_k (int): Plot the first k bboxes only
150 if set positive. Default: 20
151
152 Returns:
153 np.ndarray: The image with bboxes drawn on it.
154 """
155 if kwargs is not None:
156 kwargs.pop('score_thr', None)
157 kwargs.pop('text_color', None)
158 kwargs['colors'] = kwargs.pop('bbox_color', 'green')
159 mmcv.imshow_bboxes(data, result, top_k=top_k, **kwargs)
160
[end of mmdet/models/detectors/rpn.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mmdet/models/detectors/rpn.py b/mmdet/models/detectors/rpn.py
--- a/mmdet/models/detectors/rpn.py
+++ b/mmdet/models/detectors/rpn.py
@@ -1,5 +1,6 @@
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
+from inspect import signature
import mmcv
import torch
@@ -153,7 +154,9 @@
np.ndarray: The image with bboxes drawn on it.
"""
if kwargs is not None:
- kwargs.pop('score_thr', None)
- kwargs.pop('text_color', None)
- kwargs['colors'] = kwargs.pop('bbox_color', 'green')
+ kwargs['colors'] = 'green'
+ sig = signature(mmcv.imshow_bboxes)
+ for k in list(kwargs.keys()):
+ if k not in sig.parameters:
+ kwargs.pop(k)
mmcv.imshow_bboxes(data, result, top_k=top_k, **kwargs)
| {"golden_diff": "diff --git a/mmdet/models/detectors/rpn.py b/mmdet/models/detectors/rpn.py\n--- a/mmdet/models/detectors/rpn.py\n+++ b/mmdet/models/detectors/rpn.py\n@@ -1,5 +1,6 @@\n # Copyright (c) OpenMMLab. All rights reserved.\n import warnings\n+from inspect import signature\n \n import mmcv\n import torch\n@@ -153,7 +154,9 @@\n np.ndarray: The image with bboxes drawn on it.\n \"\"\"\n if kwargs is not None:\n- kwargs.pop('score_thr', None)\n- kwargs.pop('text_color', None)\n- kwargs['colors'] = kwargs.pop('bbox_color', 'green')\n+ kwargs['colors'] = 'green'\n+ sig = signature(mmcv.imshow_bboxes)\n+ for k in list(kwargs.keys()):\n+ if k not in sig.parameters:\n+ kwargs.pop(k)\n mmcv.imshow_bboxes(data, result, top_k=top_k, **kwargs)\n", "issue": "run image_demo.py with cascade_rpn model show error\n### Prerequisite\n\n- [X] I have searched [the existing and past issues](https://github.com/open-mmlab/mmdetection/issues) but cannot get the expected help.\n- [X] I have read the [FAQ documentation](https://mmdetection.readthedocs.io/en/latest/faq.html) but cannot get the expected help.\n- [X] The bug has not been fixed in the [latest version](https://github.com/open-mmlab/mmdetection).\n\n### \ud83d\udc1e Describe the bug\n\n``` python demo/image_demo.py demo/demo.jpg configs/cascade_rpn/crpn_r50_caffe_fpn_1x_coco.py checkpoints/cascade_rpn_r50_caffe_fpn_1x_coco-7aa93cef.pth```\r\n\r\n```\r\nUserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at ../aten/src/ATen/native/TensorShape.cpp:2895.)\r\n return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined]\r\nTraceback (most recent call last):\r\n File \"demo/image_demo.py\", line 68, in <module>\r\n main(args)\r\n File \"demo/image_demo.py\", line 38, in main\r\n show_result_pyplot(\r\n File \"/home/ngi/IdeaProjects/mmlab/mmdetection/mmdet/apis/inference.py\", line 241, in show_result_pyplot\r\n model.show_result(\r\n File \"/home/ngi/IdeaProjects/mmlab/mmdetection/mmdet/models/detectors/rpn.py\", line 159, in show_result\r\n mmcv.imshow_bboxes(data, result, top_k=top_k, **kwargs)\r\nTypeError: imshow_bboxes() got an unexpected keyword argument 'mask_color'\r\n\r\n```\n\n### Environment\n\n```\r\nPython: 3.8.13 (default, Mar 28 2022, 11:38:47) [GCC 7.5.0]\r\nCUDA available: True\r\nGPU 0: GeForce RTX 2080 SUPER\r\nCUDA_HOME: /usr/local/cuda-10.2\r\nNVCC: Cuda compilation tools, release 10.2, V10.2.8\r\nGCC: gcc (Ubuntu 5.4.0-6ubuntu1~16.04.12) 5.4.0 20160609\r\nPyTorch: 1.12.1+cu102\r\nPyTorch compiling details: PyTorch built with:\r\n - GCC 7.3\r\n - C++ Version: 201402\r\n - Intel(R) Math Kernel Library Version 2020.0.0 Product Build 20191122 for Intel(R) 64 architecture applications\r\n - Intel(R) MKL-DNN v2.6.0 (Git Hash 52b5f107dd9cf10910aaa19cb47f3abf9b349815)\r\n - OpenMP 201511 (a.k.a. 
OpenMP 4.5)\r\n - LAPACK is enabled (usually provided by MKL)\r\n - NNPACK is enabled\r\n - CPU capability usage: AVX2\r\n - CUDA Runtime 10.2\r\n - NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70\r\n - CuDNN 7.6.5\r\n - Magma 2.5.2\r\n - Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=10.2, CUDNN_VERSION=7.6.5, CXX_COMPILER=/opt/rh/devtoolset-7/root/usr/bin/c++, CXX_FLAGS= -fabi-version=11 -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -DEDGE_PROFILER_USE_KINETO -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-unused-parameter -Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.12.1, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=OFF, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, USE_ROCM=OFF, \r\n\r\nTorchVision: 0.13.1+cu102\r\nOpenCV: 4.6.0\r\nMMCV: 1.6.2\r\nMMCV Compiler: GCC 7.3\r\nMMCV CUDA Compiler: 10.2\r\nMMDetection: 2.25.2+9d3e162\r\n```\n\n### Additional information\n\n_No response_\n", "before_files": [{"content": "# Copyright (c) OpenMMLab. 
All rights reserved.\nimport warnings\n\nimport mmcv\nimport torch\nfrom mmcv.image import tensor2imgs\n\nfrom mmdet.core import bbox_mapping\nfrom ..builder import DETECTORS, build_backbone, build_head, build_neck\nfrom .base import BaseDetector\n\n\[email protected]_module()\nclass RPN(BaseDetector):\n \"\"\"Implementation of Region Proposal Network.\"\"\"\n\n def __init__(self,\n backbone,\n neck,\n rpn_head,\n train_cfg,\n test_cfg,\n pretrained=None,\n init_cfg=None):\n super(RPN, self).__init__(init_cfg)\n if pretrained:\n warnings.warn('DeprecationWarning: pretrained is deprecated, '\n 'please use \"init_cfg\" instead')\n backbone.pretrained = pretrained\n self.backbone = build_backbone(backbone)\n self.neck = build_neck(neck) if neck is not None else None\n rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None\n rpn_head.update(train_cfg=rpn_train_cfg)\n rpn_head.update(test_cfg=test_cfg.rpn)\n self.rpn_head = build_head(rpn_head)\n self.train_cfg = train_cfg\n self.test_cfg = test_cfg\n\n def extract_feat(self, img):\n \"\"\"Extract features.\n\n Args:\n img (torch.Tensor): Image tensor with shape (n, c, h ,w).\n\n Returns:\n list[torch.Tensor]: Multi-level features that may have\n different resolutions.\n \"\"\"\n x = self.backbone(img)\n if self.with_neck:\n x = self.neck(x)\n return x\n\n def forward_dummy(self, img):\n \"\"\"Dummy forward function.\"\"\"\n x = self.extract_feat(img)\n rpn_outs = self.rpn_head(x)\n return rpn_outs\n\n def forward_train(self,\n img,\n img_metas,\n gt_bboxes=None,\n gt_bboxes_ignore=None):\n \"\"\"\n Args:\n img (Tensor): Input images of shape (N, C, H, W).\n Typically these should be mean centered and std scaled.\n img_metas (list[dict]): A List of image info dict where each dict\n has: 'img_shape', 'scale_factor', 'flip', and may also contain\n 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n For details on the values of these keys see\n :class:`mmdet.datasets.pipelines.Collect`.\n gt_bboxes (list[Tensor]): Each item are the truth boxes for each\n image in [tl_x, tl_y, br_x, br_y] format.\n gt_bboxes_ignore (None | list[Tensor]): Specify which bounding\n boxes can be ignored when computing the loss.\n\n Returns:\n dict[str, Tensor]: A dictionary of loss components.\n \"\"\"\n if (isinstance(self.train_cfg.rpn, dict)\n and self.train_cfg.rpn.get('debug', False)):\n self.rpn_head.debug_imgs = tensor2imgs(img)\n\n x = self.extract_feat(img)\n losses = self.rpn_head.forward_train(x, img_metas, gt_bboxes, None,\n gt_bboxes_ignore)\n return losses\n\n def simple_test(self, img, img_metas, rescale=False):\n \"\"\"Test function without test time augmentation.\n\n Args:\n imgs (list[torch.Tensor]): List of multiple images\n img_metas (list[dict]): List of image information.\n rescale (bool, optional): Whether to rescale the results.\n Defaults to False.\n\n Returns:\n list[np.ndarray]: proposals\n \"\"\"\n x = self.extract_feat(img)\n # get origin input shape to onnx dynamic input shape\n if torch.onnx.is_in_onnx_export():\n img_shape = torch._shape_as_tensor(img)[2:]\n img_metas[0]['img_shape_for_onnx'] = img_shape\n proposal_list = self.rpn_head.simple_test_rpn(x, img_metas)\n if rescale:\n for proposals, meta in zip(proposal_list, img_metas):\n proposals[:, :4] /= proposals.new_tensor(meta['scale_factor'])\n if torch.onnx.is_in_onnx_export():\n return proposal_list\n\n return [proposal.cpu().numpy() for proposal in proposal_list]\n\n def aug_test(self, imgs, img_metas, rescale=False):\n \"\"\"Test function with test time 
augmentation.\n\n Args:\n imgs (list[torch.Tensor]): List of multiple images\n img_metas (list[dict]): List of image information.\n rescale (bool, optional): Whether to rescale the results.\n Defaults to False.\n\n Returns:\n list[np.ndarray]: proposals\n \"\"\"\n proposal_list = self.rpn_head.aug_test_rpn(\n self.extract_feats(imgs), img_metas)\n if not rescale:\n for proposals, img_meta in zip(proposal_list, img_metas[0]):\n img_shape = img_meta['img_shape']\n scale_factor = img_meta['scale_factor']\n flip = img_meta['flip']\n flip_direction = img_meta['flip_direction']\n proposals[:, :4] = bbox_mapping(proposals[:, :4], img_shape,\n scale_factor, flip,\n flip_direction)\n return [proposal.cpu().numpy() for proposal in proposal_list]\n\n def show_result(self, data, result, top_k=20, **kwargs):\n \"\"\"Show RPN proposals on the image.\n\n Args:\n data (str or np.ndarray): Image filename or loaded image.\n result (Tensor or tuple): The results to draw over `img`\n bbox_result or (bbox_result, segm_result).\n top_k (int): Plot the first k bboxes only\n if set positive. Default: 20\n\n Returns:\n np.ndarray: The image with bboxes drawn on it.\n \"\"\"\n if kwargs is not None:\n kwargs.pop('score_thr', None)\n kwargs.pop('text_color', None)\n kwargs['colors'] = kwargs.pop('bbox_color', 'green')\n mmcv.imshow_bboxes(data, result, top_k=top_k, **kwargs)\n", "path": "mmdet/models/detectors/rpn.py"}]} | 3,641 | 228 |
gh_patches_debug_43438 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-2780 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
E0002: Unknown exception while processing rule E1024: 'list' object has no attribute 'path'
### CloudFormation Lint Version
cfn-lint 0.77.10
### What operating system are you using?
Mac
### Describe the bug
When validation checks run on the Cidr function `!Cidr [ ipBlock, count, cidrBits ]`, the count and cidrBits arguments don't validate more complex functions like the `!If` in the example.
### Expected behavior
Doesn't throw an unknown exception
### Reproduction template
```yaml
PublicSubnet1:
Type: AWS::EC2::Subnet
Properties:
VpcId: !Ref VPC
AvailabilityZone: !Select [0, !GetAZs ""]
CidrBlock: !If
- defineYourOwnSubnetCIDR
- !Ref FirstTierSubnet1CIDR
- !Select
- 0
- !Cidr
- 10.0.0.0/24
- !If [3AZ, 16, 8]
- 4
```
</issue>
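The `'list' object has no attribute 'path'` message appears to come from the `Fn::If` handling in this rule: `check_count` and `check_size_mask` return a `(parameters, matches)` tuple, but their recursive `Fn::If` branches call `matches.extend(self.check_count(...))` without unpacking, so bare lists end up where `RuleMatch` objects are expected. The stripped-down sketch below reproduces the same AttributeError; `RuleMatch` and `check_count` here are simplified stand-ins, not cfn-lint's real classes.

```python
class RuleMatch:
    def __init__(self, path, message):
        self.path = path
        self.message = message


def check_count(value):
    """Return a (parameters, matches) tuple, like the rule's check_count."""
    parameters, matches = [], []
    if not isinstance(value, int):
        matches.append(RuleMatch(['Fn::Cidr', 1], 'count should be an int'))
    return parameters, matches


matches = []
# Buggy Fn::If branch: the whole tuple is extended into the match list
# instead of being unpacked into (new_parameters, new_matches) first.
matches.extend(check_count({'Fn::If': ['3AZ', 16, 8]}))

for match in matches:
    print(match.path)  # AttributeError: 'list' object has no attribute 'path'
```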
<code>
[start of src/cfnlint/rules/functions/Cidr.py]
1 """
2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5 import regex as re
6
7 from cfnlint.helpers import REGEX_CIDR
8 from cfnlint.rules import CloudFormationLintRule, RuleMatch
9
10
11 class Cidr(CloudFormationLintRule):
12 """Check if Cidr values are correct"""
13
14 id = "E1024"
15 shortdesc = "Cidr validation of parameters"
16 description = "Making sure the function CIDR is a list with valid values"
17 source_url = "https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-cidr.html"
18 tags = ["functions", "cidr"]
19
20 supported_functions = [
21 "Fn::FindInMap",
22 "Fn::Select",
23 "Ref",
24 "Fn::GetAtt",
25 "Fn::Sub",
26 "Fn::ImportValue",
27 ]
28
29 def check_ip_block(self, value, path):
30 matches = []
31 if isinstance(value, dict):
32 if len(value) == 1:
33 for index_key, _ in value.items():
34 if index_key not in self.supported_functions:
35 if index_key == "Fn::If":
36 if len(value.get("Fn::If")) == 3 and isinstance(
37 value.get("Fn::If"), list
38 ):
39 matches.extend(
40 self.check_ip_block(
41 value.get("Fn::If")[1],
42 path=path[:] + [index_key, 1],
43 )
44 )
45 matches.extend(
46 self.check_ip_block(
47 value.get("Fn::If")[2],
48 path=path[:] + [index_key, 2],
49 )
50 )
51 else:
52 message = "Cidr ipBlock should be Cidr Range, Ref, GetAtt, Sub or Select for {0}"
53 matches.append(
54 RuleMatch(
55 path, message.format("/".join(map(str, value)))
56 )
57 )
58 elif isinstance(value, (str)):
59 if not re.match(REGEX_CIDR, value):
60 message = "Cidr ipBlock should be a Cidr Range based string for {0}"
61 matches.append(
62 RuleMatch(path, message.format("/".join(map(str, path))))
63 )
64 else:
65 message = "Cidr ipBlock should be a string for {0}"
66 matches.append(RuleMatch(path, message.format("/".join(map(str, path)))))
67
68 return matches
69
70 def check_count(self, value, path):
71 matches = []
72 count_parameters = []
73 if isinstance(value, dict):
74 if len(value) == 1:
75 for index_key, index_value in value.items():
76 if index_key not in self.supported_functions:
77 if index_key == "Fn::If":
78 if len(value.get("Fn::If")) == 3 and isinstance(
79 value.get("Fn::If"), list
80 ):
81 matches.extend(
82 self.check_count(
83 value.get("Fn::If")[1],
84 path=path[:] + [index_key, 1],
85 )
86 )
87 matches.extend(
88 self.check_count(
89 value.get("Fn::If")[2],
90 path=path[:] + [index_key, 2],
91 )
92 )
93 else:
94 message = "Cidr count should be Int, Ref, or Select for {0}"
95 matches.append(
96 RuleMatch(
97 path, message.format("/".join(map(str, path)))
98 )
99 )
100 if index_key == "Ref":
101 count_parameters.append(index_value)
102 elif not isinstance(value, int):
103 message = "Cidr count should be a int for {0}"
104 extra_args = {
105 "actual_type": type(value).__name__,
106 "expected_type": int.__name__,
107 }
108 matches.append(
109 RuleMatch(path, message.format("/".join(map(str, path))), **extra_args)
110 )
111
112 return count_parameters, matches
113
114 def check_size_mask(self, value, path):
115 matches = []
116 size_mask_parameters = []
117 if isinstance(value, dict):
118 if len(value) == 1:
119 for index_key, index_value in value.items():
120 if index_key not in self.supported_functions:
121 if index_key == "Fn::If":
122 if len(value.get("Fn::If")) == 3 and isinstance(
123 value.get("Fn::If"), list
124 ):
125 matches.extend(
126 self.check_size_mask(
127 value.get("Fn::If")[1],
128 path=path[:] + [index_key, 1],
129 )
130 )
131 matches.extend(
132 self.check_size_mask(
133 value.get("Fn::If")[2],
134 path=path[:] + [index_key, 2],
135 )
136 )
137 else:
138 message = (
139 "Cidr sizeMask should be Int, Ref, or Select for {0}"
140 )
141 matches.append(
142 RuleMatch(
143 path, message.format("/".join(map(str, path)))
144 )
145 )
146 if index_key == "Ref":
147 size_mask_parameters.append(index_value)
148 elif not isinstance(value, int):
149 message = "Cidr sizeMask should be a int for {0}"
150 extra_args = {
151 "actual_type": type(value).__name__,
152 "expected_type": int.__name__,
153 }
154 matches.append(
155 RuleMatch(path, message.format("/".join(map(str, path))), **extra_args)
156 )
157
158 return size_mask_parameters, matches
159
160 def check_parameter_count(self, cfn, parameter_name):
161 """Check Count Parameter if used"""
162 matches = []
163 parameter_obj = cfn.get_parameters().get(parameter_name, {})
164 if parameter_obj:
165 tree = ["Parameters", parameter_name]
166 parameter_type = parameter_obj.get("Type")
167 if parameter_type == "Number":
168 max_value = parameter_obj.get("MaxValue")
169 min_value = parameter_obj.get("MinValue")
170 if (not min_value) or min_value < 1 or min_value > 256:
171 message = "Parameter for Cidr count have MinValue between 1 and 256 at {0}"
172 matches.append(
173 RuleMatch(
174 tree + ["MinValue"],
175 message.format("/".join(map(str, tree + ["MinValue"]))),
176 )
177 )
178 if (not max_value) or max_value < 1 or max_value > 256:
179 message = "Parameter for Cidr count have MaxValue between 1 and 256 at {0}"
180 matches.append(
181 RuleMatch(
182 tree + ["MaxValue"],
183 message.format("/".join(map(str, tree + ["MaxValue"]))),
184 )
185 )
186 else:
187 message = "Parameter for Cidr count have be of Type Number at {0}"
188 matches.append(
189 RuleMatch(tree, message.format("/".join(map(str, tree))))
190 )
191
192 return matches
193
194 def check_parameter_size_mask(self, cfn, parameter_name):
195 """Check SizeMask Parameter if used"""
196 matches = []
197 parameter_obj = cfn.get_parameters().get(parameter_name, {})
198 if parameter_obj:
199 tree = ["Parameters", parameter_name]
200 parameter_type = parameter_obj.get("Type")
201 if parameter_type == "Number":
202 max_value = parameter_obj.get("MaxValue")
203 min_value = parameter_obj.get("MinValue")
204 if (not min_value) or min_value < 1 or min_value > 256:
205 message = (
206 "Parameter for Cidr sizeMask have MinValue between 1 and "
207 "128 (for ipv6) and 32 (for ipv4) at {0}"
208 )
209 matches.append(
210 RuleMatch(
211 tree + ["MinValue"],
212 message.format("/".join(map(str, tree + ["MinValue"]))),
213 )
214 )
215 if (not max_value) or max_value < 1 or max_value > 256:
216 message = (
217 "Parameter for Cidr count have MaxValue between 1 and "
218 "128 (for ipv6) and 32 (for ipv4) at {0}"
219 )
220 matches.append(
221 RuleMatch(
222 tree + ["MaxValue"],
223 message.format("/".join(map(str, tree + ["MaxValue"]))),
224 )
225 )
226 else:
227 message = "Parameter for Cidr count have be of Type Number at {0}"
228 matches.append(
229 RuleMatch(tree, message.format("/".join(map(str, tree))))
230 )
231
232 return matches
233
234 def match(self, cfn):
235 matches = []
236
237 cidr_objs = cfn.search_deep_keys("Fn::Cidr")
238
239 count_parameters = []
240 size_mask_parameters = []
241
242 for cidr_obj in cidr_objs:
243 cidr_value_obj = cidr_obj[-1]
244 tree = cidr_obj[:-1]
245 if isinstance(cidr_value_obj, list):
246 if len(cidr_value_obj) in [2, 3]:
247 ip_block_obj = cidr_value_obj[0]
248 count_obj = cidr_value_obj[1]
249 if len(cidr_value_obj) == 3:
250 size_mask_obj = cidr_value_obj[2]
251 else:
252 size_mask_obj = None
253
254 matches.extend(self.check_ip_block(ip_block_obj, tree[:] + [0]))
255
256 new_count_parameters, new_matches = self.check_count(
257 count_obj, tree[:] + [1]
258 )
259 count_parameters.extend(new_count_parameters)
260 matches.extend(new_matches)
261
262 new_size_mask_parameters, new_matches = self.check_size_mask(
263 size_mask_obj, tree[:] + [2]
264 )
265 size_mask_parameters.extend(new_size_mask_parameters)
266 matches.extend(new_matches)
267
268 else:
269 message = "Cidr should be a list of 2 or 3 elements for {0}"
270 matches.append(
271 RuleMatch(tree, message.format("/".join(map(str, tree))))
272 )
273 else:
274 message = "Cidr should be a list of 2 or 3 elements for {0}"
275 matches.append(
276 RuleMatch(tree, message.format("/".join(map(str, tree))))
277 )
278
279 for count_parameter in set(count_parameters):
280 matches.extend(self.check_parameter_count(cfn, count_parameter))
281 for size_mask_parameter in set(size_mask_parameters):
282 matches.extend(self.check_parameter_size_mask(cfn, size_mask_parameter))
283
284 return matches
285
[end of src/cfnlint/rules/functions/Cidr.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cfnlint/rules/functions/Cidr.py b/src/cfnlint/rules/functions/Cidr.py
--- a/src/cfnlint/rules/functions/Cidr.py
+++ b/src/cfnlint/rules/functions/Cidr.py
@@ -49,7 +49,10 @@
)
)
else:
- message = "Cidr ipBlock should be Cidr Range, Ref, GetAtt, Sub or Select for {0}"
+ message = (
+ "Cidr ipBlock should be Cidr Range, Ref, GetAtt, Sub or"
+ " Select for {0}"
+ )
matches.append(
RuleMatch(
path, message.format("/".join(map(str, value)))
@@ -78,18 +81,16 @@
if len(value.get("Fn::If")) == 3 and isinstance(
value.get("Fn::If"), list
):
- matches.extend(
- self.check_count(
- value.get("Fn::If")[1],
- path=path[:] + [index_key, 1],
- )
- )
- matches.extend(
- self.check_count(
- value.get("Fn::If")[2],
- path=path[:] + [index_key, 2],
+ for i in [1, 2]:
+ (
+ new_count_parameters,
+ new_matches,
+ ) = self.check_count(
+ value.get("Fn::If")[i],
+ path=path[:] + [index_key, i],
)
- )
+ count_parameters.extend(new_count_parameters)
+ matches.extend(new_matches)
else:
message = "Cidr count should be Int, Ref, or Select for {0}"
matches.append(
@@ -168,7 +169,10 @@
max_value = parameter_obj.get("MaxValue")
min_value = parameter_obj.get("MinValue")
if (not min_value) or min_value < 1 or min_value > 256:
- message = "Parameter for Cidr count have MinValue between 1 and 256 at {0}"
+ message = (
+ "Parameter for Cidr count have MinValue between 1 and 256"
+ " at {0}"
+ )
matches.append(
RuleMatch(
tree + ["MinValue"],
@@ -176,7 +180,10 @@
)
)
if (not max_value) or max_value < 1 or max_value > 256:
- message = "Parameter for Cidr count have MaxValue between 1 and 256 at {0}"
+ message = (
+ "Parameter for Cidr count have MaxValue between 1 and 256"
+ " at {0}"
+ )
matches.append(
RuleMatch(
tree + ["MaxValue"],
@@ -258,7 +265,6 @@
)
count_parameters.extend(new_count_parameters)
matches.extend(new_matches)
-
new_size_mask_parameters, new_matches = self.check_size_mask(
size_mask_obj, tree[:] + [2]
)
| {"golden_diff": "diff --git a/src/cfnlint/rules/functions/Cidr.py b/src/cfnlint/rules/functions/Cidr.py\n--- a/src/cfnlint/rules/functions/Cidr.py\n+++ b/src/cfnlint/rules/functions/Cidr.py\n@@ -49,7 +49,10 @@\n )\n )\n else:\n- message = \"Cidr ipBlock should be Cidr Range, Ref, GetAtt, Sub or Select for {0}\"\n+ message = (\n+ \"Cidr ipBlock should be Cidr Range, Ref, GetAtt, Sub or\"\n+ \" Select for {0}\"\n+ )\n matches.append(\n RuleMatch(\n path, message.format(\"/\".join(map(str, value)))\n@@ -78,18 +81,16 @@\n if len(value.get(\"Fn::If\")) == 3 and isinstance(\n value.get(\"Fn::If\"), list\n ):\n- matches.extend(\n- self.check_count(\n- value.get(\"Fn::If\")[1],\n- path=path[:] + [index_key, 1],\n- )\n- )\n- matches.extend(\n- self.check_count(\n- value.get(\"Fn::If\")[2],\n- path=path[:] + [index_key, 2],\n+ for i in [1, 2]:\n+ (\n+ new_count_parameters,\n+ new_matches,\n+ ) = self.check_count(\n+ value.get(\"Fn::If\")[i],\n+ path=path[:] + [index_key, i],\n )\n- )\n+ count_parameters.extend(new_count_parameters)\n+ matches.extend(new_matches)\n else:\n message = \"Cidr count should be Int, Ref, or Select for {0}\"\n matches.append(\n@@ -168,7 +169,10 @@\n max_value = parameter_obj.get(\"MaxValue\")\n min_value = parameter_obj.get(\"MinValue\")\n if (not min_value) or min_value < 1 or min_value > 256:\n- message = \"Parameter for Cidr count have MinValue between 1 and 256 at {0}\"\n+ message = (\n+ \"Parameter for Cidr count have MinValue between 1 and 256\"\n+ \" at {0}\"\n+ )\n matches.append(\n RuleMatch(\n tree + [\"MinValue\"],\n@@ -176,7 +180,10 @@\n )\n )\n if (not max_value) or max_value < 1 or max_value > 256:\n- message = \"Parameter for Cidr count have MaxValue between 1 and 256 at {0}\"\n+ message = (\n+ \"Parameter for Cidr count have MaxValue between 1 and 256\"\n+ \" at {0}\"\n+ )\n matches.append(\n RuleMatch(\n tree + [\"MaxValue\"],\n@@ -258,7 +265,6 @@\n )\n count_parameters.extend(new_count_parameters)\n matches.extend(new_matches)\n-\n new_size_mask_parameters, new_matches = self.check_size_mask(\n size_mask_obj, tree[:] + [2]\n )\n", "issue": "E0002: Unknown exception while processing rule E1024: 'list' object has no attribute 'path'\n### CloudFormation Lint Version\n\ncfn-lint 0.77.10\n\n### What operating system are you using?\n\nMac\n\n### Describe the bug\n\nWhen validation checks run on a the Cidr function `!Cidr [ ipBlock, count, cidrBits ]`, the count and cidrBits doesn't validate more complex functions like the `!If` in the example.\n\n### Expected behavior\n\nDoesn't throw unknown exception\n\n### Reproduction template\n\n```yaml\r\nPublicSubnet1:\r\n Type: AWS::EC2::Subnet\r\n Properties:\r\n VpcId: !Ref VPC\r\n AvailabilityZone: !Select [0, !GetAZs \"\"]\r\n CidrBlock: !If\r\n - defineYourOwnSubnetCIDR\r\n - !Ref FirstTierSubnet1CIDR\r\n - !Select\r\n - 0\r\n - !Cidr\r\n - 10.0.0.0/24\r\n - !If [3AZ, 16, 8]\r\n - 4\r\n```\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport regex as re\n\nfrom cfnlint.helpers import REGEX_CIDR\nfrom cfnlint.rules import CloudFormationLintRule, RuleMatch\n\n\nclass Cidr(CloudFormationLintRule):\n \"\"\"Check if Cidr values are correct\"\"\"\n\n id = \"E1024\"\n shortdesc = \"Cidr validation of parameters\"\n description = \"Making sure the function CIDR is a list with valid values\"\n source_url = \"https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-cidr.html\"\n tags = [\"functions\", \"cidr\"]\n\n supported_functions = [\n \"Fn::FindInMap\",\n \"Fn::Select\",\n \"Ref\",\n \"Fn::GetAtt\",\n \"Fn::Sub\",\n \"Fn::ImportValue\",\n ]\n\n def check_ip_block(self, value, path):\n matches = []\n if isinstance(value, dict):\n if len(value) == 1:\n for index_key, _ in value.items():\n if index_key not in self.supported_functions:\n if index_key == \"Fn::If\":\n if len(value.get(\"Fn::If\")) == 3 and isinstance(\n value.get(\"Fn::If\"), list\n ):\n matches.extend(\n self.check_ip_block(\n value.get(\"Fn::If\")[1],\n path=path[:] + [index_key, 1],\n )\n )\n matches.extend(\n self.check_ip_block(\n value.get(\"Fn::If\")[2],\n path=path[:] + [index_key, 2],\n )\n )\n else:\n message = \"Cidr ipBlock should be Cidr Range, Ref, GetAtt, Sub or Select for {0}\"\n matches.append(\n RuleMatch(\n path, message.format(\"/\".join(map(str, value)))\n )\n )\n elif isinstance(value, (str)):\n if not re.match(REGEX_CIDR, value):\n message = \"Cidr ipBlock should be a Cidr Range based string for {0}\"\n matches.append(\n RuleMatch(path, message.format(\"/\".join(map(str, path))))\n )\n else:\n message = \"Cidr ipBlock should be a string for {0}\"\n matches.append(RuleMatch(path, message.format(\"/\".join(map(str, path)))))\n\n return matches\n\n def check_count(self, value, path):\n matches = []\n count_parameters = []\n if isinstance(value, dict):\n if len(value) == 1:\n for index_key, index_value in value.items():\n if index_key not in self.supported_functions:\n if index_key == \"Fn::If\":\n if len(value.get(\"Fn::If\")) == 3 and isinstance(\n value.get(\"Fn::If\"), list\n ):\n matches.extend(\n self.check_count(\n value.get(\"Fn::If\")[1],\n path=path[:] + [index_key, 1],\n )\n )\n matches.extend(\n self.check_count(\n value.get(\"Fn::If\")[2],\n path=path[:] + [index_key, 2],\n )\n )\n else:\n message = \"Cidr count should be Int, Ref, or Select for {0}\"\n matches.append(\n RuleMatch(\n path, message.format(\"/\".join(map(str, path)))\n )\n )\n if index_key == \"Ref\":\n count_parameters.append(index_value)\n elif not isinstance(value, int):\n message = \"Cidr count should be a int for {0}\"\n extra_args = {\n \"actual_type\": type(value).__name__,\n \"expected_type\": int.__name__,\n }\n matches.append(\n RuleMatch(path, message.format(\"/\".join(map(str, path))), **extra_args)\n )\n\n return count_parameters, matches\n\n def check_size_mask(self, value, path):\n matches = []\n size_mask_parameters = []\n if isinstance(value, dict):\n if len(value) == 1:\n for index_key, index_value in value.items():\n if index_key not in self.supported_functions:\n if index_key == \"Fn::If\":\n if len(value.get(\"Fn::If\")) == 3 and isinstance(\n value.get(\"Fn::If\"), list\n ):\n matches.extend(\n self.check_size_mask(\n value.get(\"Fn::If\")[1],\n path=path[:] + [index_key, 1],\n )\n )\n matches.extend(\n self.check_size_mask(\n value.get(\"Fn::If\")[2],\n path=path[:] + [index_key, 2],\n )\n )\n else:\n message = (\n \"Cidr sizeMask should be Int, 
Ref, or Select for {0}\"\n )\n matches.append(\n RuleMatch(\n path, message.format(\"/\".join(map(str, path)))\n )\n )\n if index_key == \"Ref\":\n size_mask_parameters.append(index_value)\n elif not isinstance(value, int):\n message = \"Cidr sizeMask should be a int for {0}\"\n extra_args = {\n \"actual_type\": type(value).__name__,\n \"expected_type\": int.__name__,\n }\n matches.append(\n RuleMatch(path, message.format(\"/\".join(map(str, path))), **extra_args)\n )\n\n return size_mask_parameters, matches\n\n def check_parameter_count(self, cfn, parameter_name):\n \"\"\"Check Count Parameter if used\"\"\"\n matches = []\n parameter_obj = cfn.get_parameters().get(parameter_name, {})\n if parameter_obj:\n tree = [\"Parameters\", parameter_name]\n parameter_type = parameter_obj.get(\"Type\")\n if parameter_type == \"Number\":\n max_value = parameter_obj.get(\"MaxValue\")\n min_value = parameter_obj.get(\"MinValue\")\n if (not min_value) or min_value < 1 or min_value > 256:\n message = \"Parameter for Cidr count have MinValue between 1 and 256 at {0}\"\n matches.append(\n RuleMatch(\n tree + [\"MinValue\"],\n message.format(\"/\".join(map(str, tree + [\"MinValue\"]))),\n )\n )\n if (not max_value) or max_value < 1 or max_value > 256:\n message = \"Parameter for Cidr count have MaxValue between 1 and 256 at {0}\"\n matches.append(\n RuleMatch(\n tree + [\"MaxValue\"],\n message.format(\"/\".join(map(str, tree + [\"MaxValue\"]))),\n )\n )\n else:\n message = \"Parameter for Cidr count have be of Type Number at {0}\"\n matches.append(\n RuleMatch(tree, message.format(\"/\".join(map(str, tree))))\n )\n\n return matches\n\n def check_parameter_size_mask(self, cfn, parameter_name):\n \"\"\"Check SizeMask Parameter if used\"\"\"\n matches = []\n parameter_obj = cfn.get_parameters().get(parameter_name, {})\n if parameter_obj:\n tree = [\"Parameters\", parameter_name]\n parameter_type = parameter_obj.get(\"Type\")\n if parameter_type == \"Number\":\n max_value = parameter_obj.get(\"MaxValue\")\n min_value = parameter_obj.get(\"MinValue\")\n if (not min_value) or min_value < 1 or min_value > 256:\n message = (\n \"Parameter for Cidr sizeMask have MinValue between 1 and \"\n \"128 (for ipv6) and 32 (for ipv4) at {0}\"\n )\n matches.append(\n RuleMatch(\n tree + [\"MinValue\"],\n message.format(\"/\".join(map(str, tree + [\"MinValue\"]))),\n )\n )\n if (not max_value) or max_value < 1 or max_value > 256:\n message = (\n \"Parameter for Cidr count have MaxValue between 1 and \"\n \"128 (for ipv6) and 32 (for ipv4) at {0}\"\n )\n matches.append(\n RuleMatch(\n tree + [\"MaxValue\"],\n message.format(\"/\".join(map(str, tree + [\"MaxValue\"]))),\n )\n )\n else:\n message = \"Parameter for Cidr count have be of Type Number at {0}\"\n matches.append(\n RuleMatch(tree, message.format(\"/\".join(map(str, tree))))\n )\n\n return matches\n\n def match(self, cfn):\n matches = []\n\n cidr_objs = cfn.search_deep_keys(\"Fn::Cidr\")\n\n count_parameters = []\n size_mask_parameters = []\n\n for cidr_obj in cidr_objs:\n cidr_value_obj = cidr_obj[-1]\n tree = cidr_obj[:-1]\n if isinstance(cidr_value_obj, list):\n if len(cidr_value_obj) in [2, 3]:\n ip_block_obj = cidr_value_obj[0]\n count_obj = cidr_value_obj[1]\n if len(cidr_value_obj) == 3:\n size_mask_obj = cidr_value_obj[2]\n else:\n size_mask_obj = None\n\n matches.extend(self.check_ip_block(ip_block_obj, tree[:] + [0]))\n\n new_count_parameters, new_matches = self.check_count(\n count_obj, tree[:] + [1]\n )\n count_parameters.extend(new_count_parameters)\n 
matches.extend(new_matches)\n\n new_size_mask_parameters, new_matches = self.check_size_mask(\n size_mask_obj, tree[:] + [2]\n )\n size_mask_parameters.extend(new_size_mask_parameters)\n matches.extend(new_matches)\n\n else:\n message = \"Cidr should be a list of 2 or 3 elements for {0}\"\n matches.append(\n RuleMatch(tree, message.format(\"/\".join(map(str, tree))))\n )\n else:\n message = \"Cidr should be a list of 2 or 3 elements for {0}\"\n matches.append(\n RuleMatch(tree, message.format(\"/\".join(map(str, tree))))\n )\n\n for count_parameter in set(count_parameters):\n matches.extend(self.check_parameter_count(cfn, count_parameter))\n for size_mask_parameter in set(size_mask_parameters):\n matches.extend(self.check_parameter_size_mask(cfn, size_mask_parameter))\n\n return matches\n", "path": "src/cfnlint/rules/functions/Cidr.py"}]} | 3,773 | 695 |
gh_patches_debug_29073 | rasdani/github-patches | git_diff | python-discord__bot-680 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Write unit tests for `bot/utils/time.py`
Write unit tests for [`bot/utils/time.py`](../blob/master/bot/utils/time.py). This file already has some unit tests, but they are written for `pytest`. The tests are currently located in [`tests/utils/test_time.py`](../blob/master/tests/utils/test_time.py), but should be moved to the appropriate location in the folder hierarchy, `tests/bot/utils/test_time.py` after they have been migrated to the `unittest` framework.
## Implementation details
Please make sure to read the general information in the [meta issue](553) and the [testing README](../blob/master/tests/README.md). We are aiming for a 100% [branch coverage](https://coverage.readthedocs.io/en/stable/branch.html) for this file, but if you think that is not possible, please discuss that in this issue.
## Additional information
If you want to work on this issue, **please make sure that you get assigned to it** by one of the core devs before starting to work on it. We would like to prevent a situation where multiple people are working on the same issue. To get assigned, leave a comment showing your interest in tackling this issue.
</issue>
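As a point of reference for the requested migration, below is a minimal sketch of what one `unittest`-style test case for `humanize_delta` could look like. The class and test names are illustrative assumptions (not part of the existing test suite), and it assumes the module is importable as `bot.utils.time`.

```python
import unittest

from dateutil.relativedelta import relativedelta

from bot.utils import time


class HumanizeDeltaTests(unittest.TestCase):
    """Illustrative unittest-style checks for bot.utils.time.humanize_delta."""

    def test_days_only_delta(self):
        # Only the days unit is non-zero, so only it should appear in the output.
        self.assertEqual(time.humanize_delta(relativedelta(days=2), precision="days"), "2 days")

    def test_empty_delta_falls_back_to_precision_unit(self):
        # With no non-zero units, the fallback phrase for the precision unit is used.
        self.assertEqual(time.humanize_delta(relativedelta(), precision="days"), "less than a day")
```

Once placed under `tests/bot/utils/test_time.py`, such tests can be run with `python -m unittest`.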
<code>
[start of bot/utils/time.py]
1 import asyncio
2 import datetime
3 from typing import Optional
4
5 import dateutil.parser
6 from dateutil.relativedelta import relativedelta
7
8 RFC1123_FORMAT = "%a, %d %b %Y %H:%M:%S GMT"
9 INFRACTION_FORMAT = "%Y-%m-%d %H:%M"
10
11
12 def _stringify_time_unit(value: int, unit: str) -> str:
13 """
14 Returns a string to represent a value and time unit, ensuring that it uses the right plural form of the unit.
15
16 >>> _stringify_time_unit(1, "seconds")
17 "1 second"
18 >>> _stringify_time_unit(24, "hours")
19 "24 hours"
20 >>> _stringify_time_unit(0, "minutes")
21 "less than a minute"
22 """
23 if value == 1:
24 return f"{value} {unit[:-1]}"
25 elif value == 0:
26 return f"less than a {unit[:-1]}"
27 else:
28 return f"{value} {unit}"
29
30
31 def humanize_delta(delta: relativedelta, precision: str = "seconds", max_units: int = 6) -> str:
32 """
33 Returns a human-readable version of the relativedelta.
34
35 precision specifies the smallest unit of time to include (e.g. "seconds", "minutes").
36 max_units specifies the maximum number of units of time to include (e.g. 1 may include days but not hours).
37 """
38 if max_units <= 0:
39 raise ValueError("max_units must be positive")
40
41 units = (
42 ("years", delta.years),
43 ("months", delta.months),
44 ("days", delta.days),
45 ("hours", delta.hours),
46 ("minutes", delta.minutes),
47 ("seconds", delta.seconds),
48 )
49
50 # Add the time units that are >0, but stop at accuracy or max_units.
51 time_strings = []
52 unit_count = 0
53 for unit, value in units:
54 if value:
55 time_strings.append(_stringify_time_unit(value, unit))
56 unit_count += 1
57
58 if unit == precision or unit_count >= max_units:
59 break
60
61 # Add the 'and' between the last two units, if necessary
62 if len(time_strings) > 1:
63 time_strings[-1] = f"{time_strings[-2]} and {time_strings[-1]}"
64 del time_strings[-2]
65
66 # If nothing has been found, just make the value 0 precision, e.g. `0 days`.
67 if not time_strings:
68 humanized = _stringify_time_unit(0, precision)
69 else:
70 humanized = ", ".join(time_strings)
71
72 return humanized
73
74
75 def time_since(past_datetime: datetime.datetime, precision: str = "seconds", max_units: int = 6) -> str:
76 """
77 Takes a datetime and returns a human-readable string that describes how long ago that datetime was.
78
79 precision specifies the smallest unit of time to include (e.g. "seconds", "minutes").
80 max_units specifies the maximum number of units of time to include (e.g. 1 may include days but not hours).
81 """
82 now = datetime.datetime.utcnow()
83 delta = abs(relativedelta(now, past_datetime))
84
85 humanized = humanize_delta(delta, precision, max_units)
86
87 return f"{humanized} ago"
88
89
90 def parse_rfc1123(stamp: str) -> datetime.datetime:
91 """Parse RFC1123 time string into datetime."""
92 return datetime.datetime.strptime(stamp, RFC1123_FORMAT).replace(tzinfo=datetime.timezone.utc)
93
94
95 # Hey, this could actually be used in the off_topic_names and reddit cogs :)
96 async def wait_until(time: datetime.datetime, start: Optional[datetime.datetime] = None) -> None:
97 """
98 Wait until a given time.
99
100 :param time: A datetime.datetime object to wait until.
101 :param start: The start from which to calculate the waiting duration. Defaults to UTC time.
102 """
103 delay = time - (start or datetime.datetime.utcnow())
104 delay_seconds = delay.total_seconds()
105
106 # Incorporate a small delay so we don't rapid-fire the event due to time precision errors
107 if delay_seconds > 1.0:
108 await asyncio.sleep(delay_seconds)
109
110
111 def format_infraction(timestamp: str) -> str:
112 """Format an infraction timestamp to a more readable ISO 8601 format."""
113 return dateutil.parser.isoparse(timestamp).strftime(INFRACTION_FORMAT)
114
115
116 def format_infraction_with_duration(
117 expiry: Optional[str],
118 date_from: datetime.datetime = None,
119 max_units: int = 2
120 ) -> Optional[str]:
121 """
122 Format an infraction timestamp to a more readable ISO 8601 format WITH the duration.
123
124 Returns a human-readable version of the duration between datetime.utcnow() and an expiry.
125 Unlike `humanize_delta`, this function will force the `precision` to be `seconds` by not passing it.
126 `max_units` specifies the maximum number of units of time to include (e.g. 1 may include days but not hours).
127 By default, max_units is 2.
128 """
129 if not expiry:
130 return None
131
132 date_from = date_from or datetime.datetime.utcnow()
133 date_to = dateutil.parser.isoparse(expiry).replace(tzinfo=None, microsecond=0)
134
135 expiry_formatted = format_infraction(expiry)
136
137 duration = humanize_delta(relativedelta(date_to, date_from), max_units=max_units)
138 duration_formatted = f" ({duration})" if duration else ''
139
140 return f"{expiry_formatted}{duration_formatted}"
141
142
143 def until_expiration(expiry: Optional[str], max_units: int = 2) -> Optional[str]:
144 """
145 Get the remaining time until infraction's expiration, in a human-readable version of the relativedelta.
146
147 Unlike `humanize_delta`, this function will force the `precision` to be `seconds` by not passing it.
148 `max_units` specifies the maximum number of units of time to include (e.g. 1 may include days but not hours).
149 By default, max_units is 2.
150 """
151 if not expiry:
152 return None
153
154 now = datetime.datetime.utcnow()
155 since = dateutil.parser.isoparse(expiry).replace(tzinfo=None, microsecond=0)
156
157 if since < now:
158 return None
159
160 return humanize_delta(relativedelta(since, now), max_units=max_units)
161
[end of bot/utils/time.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bot/utils/time.py b/bot/utils/time.py
--- a/bot/utils/time.py
+++ b/bot/utils/time.py
@@ -115,7 +115,7 @@
def format_infraction_with_duration(
expiry: Optional[str],
- date_from: datetime.datetime = None,
+ date_from: Optional[datetime.datetime] = None,
max_units: int = 2
) -> Optional[str]:
"""
@@ -140,10 +140,15 @@
return f"{expiry_formatted}{duration_formatted}"
-def until_expiration(expiry: Optional[str], max_units: int = 2) -> Optional[str]:
+def until_expiration(
+ expiry: Optional[str],
+ now: Optional[datetime.datetime] = None,
+ max_units: int = 2
+) -> Optional[str]:
"""
Get the remaining time until infraction's expiration, in a human-readable version of the relativedelta.
+ Returns a human-readable version of the remaining duration between datetime.utcnow() and an expiry.
Unlike `humanize_delta`, this function will force the `precision` to be `seconds` by not passing it.
`max_units` specifies the maximum number of units of time to include (e.g. 1 may include days but not hours).
By default, max_units is 2.
@@ -151,7 +156,7 @@
if not expiry:
return None
- now = datetime.datetime.utcnow()
+ now = now or datetime.datetime.utcnow()
since = dateutil.parser.isoparse(expiry).replace(tzinfo=None, microsecond=0)
if since < now:
| {"golden_diff": "diff --git a/bot/utils/time.py b/bot/utils/time.py\n--- a/bot/utils/time.py\n+++ b/bot/utils/time.py\n@@ -115,7 +115,7 @@\n \n def format_infraction_with_duration(\n expiry: Optional[str],\n- date_from: datetime.datetime = None,\n+ date_from: Optional[datetime.datetime] = None,\n max_units: int = 2\n ) -> Optional[str]:\n \"\"\"\n@@ -140,10 +140,15 @@\n return f\"{expiry_formatted}{duration_formatted}\"\n \n \n-def until_expiration(expiry: Optional[str], max_units: int = 2) -> Optional[str]:\n+def until_expiration(\n+ expiry: Optional[str],\n+ now: Optional[datetime.datetime] = None,\n+ max_units: int = 2\n+) -> Optional[str]:\n \"\"\"\n Get the remaining time until infraction's expiration, in a human-readable version of the relativedelta.\n \n+ Returns a human-readable version of the remaining duration between datetime.utcnow() and an expiry.\n Unlike `humanize_delta`, this function will force the `precision` to be `seconds` by not passing it.\n `max_units` specifies the maximum number of units of time to include (e.g. 1 may include days but not hours).\n By default, max_units is 2.\n@@ -151,7 +156,7 @@\n if not expiry:\n return None\n \n- now = datetime.datetime.utcnow()\n+ now = now or datetime.datetime.utcnow()\n since = dateutil.parser.isoparse(expiry).replace(tzinfo=None, microsecond=0)\n \n if since < now:\n", "issue": "Write unit tests for `bot/utils/time.py`\nWrite unit tests for [`bot/utils/time.py`](../blob/master/bot/utils/time.py). This file already has some unit tests, but they are written for `pytest`. The tests are currently located in [`tests/utils/test_time.py`](../blob/master/tests/utils/test_time.py), but should be moved to the appropriate location in the folder hierarchy, `tests/bot/utils/test_time.py` after they have been migrated to the `unittest` framework.\r\n\r\n## Implementation details\r\nPlease make sure to read the general information in the [meta issue](553) and the [testing README](../blob/master/tests/README.md). We are aiming for a 100% [branch coverage](https://coverage.readthedocs.io/en/stable/branch.html) for this file, but if you think that is not possible, please discuss that in this issue.\r\n\r\n## Additional information\r\nIf you want to work on this issue, **please make sure that you get assigned to it** by one of the core devs before starting to work on it. We would like to prevent the situation that multiple people are working on the same issue. 
To get assigned, leave a comment showing your interesting in tackling this issue.\r\n\n", "before_files": [{"content": "import asyncio\nimport datetime\nfrom typing import Optional\n\nimport dateutil.parser\nfrom dateutil.relativedelta import relativedelta\n\nRFC1123_FORMAT = \"%a, %d %b %Y %H:%M:%S GMT\"\nINFRACTION_FORMAT = \"%Y-%m-%d %H:%M\"\n\n\ndef _stringify_time_unit(value: int, unit: str) -> str:\n \"\"\"\n Returns a string to represent a value and time unit, ensuring that it uses the right plural form of the unit.\n\n >>> _stringify_time_unit(1, \"seconds\")\n \"1 second\"\n >>> _stringify_time_unit(24, \"hours\")\n \"24 hours\"\n >>> _stringify_time_unit(0, \"minutes\")\n \"less than a minute\"\n \"\"\"\n if value == 1:\n return f\"{value} {unit[:-1]}\"\n elif value == 0:\n return f\"less than a {unit[:-1]}\"\n else:\n return f\"{value} {unit}\"\n\n\ndef humanize_delta(delta: relativedelta, precision: str = \"seconds\", max_units: int = 6) -> str:\n \"\"\"\n Returns a human-readable version of the relativedelta.\n\n precision specifies the smallest unit of time to include (e.g. \"seconds\", \"minutes\").\n max_units specifies the maximum number of units of time to include (e.g. 1 may include days but not hours).\n \"\"\"\n if max_units <= 0:\n raise ValueError(\"max_units must be positive\")\n\n units = (\n (\"years\", delta.years),\n (\"months\", delta.months),\n (\"days\", delta.days),\n (\"hours\", delta.hours),\n (\"minutes\", delta.minutes),\n (\"seconds\", delta.seconds),\n )\n\n # Add the time units that are >0, but stop at accuracy or max_units.\n time_strings = []\n unit_count = 0\n for unit, value in units:\n if value:\n time_strings.append(_stringify_time_unit(value, unit))\n unit_count += 1\n\n if unit == precision or unit_count >= max_units:\n break\n\n # Add the 'and' between the last two units, if necessary\n if len(time_strings) > 1:\n time_strings[-1] = f\"{time_strings[-2]} and {time_strings[-1]}\"\n del time_strings[-2]\n\n # If nothing has been found, just make the value 0 precision, e.g. `0 days`.\n if not time_strings:\n humanized = _stringify_time_unit(0, precision)\n else:\n humanized = \", \".join(time_strings)\n\n return humanized\n\n\ndef time_since(past_datetime: datetime.datetime, precision: str = \"seconds\", max_units: int = 6) -> str:\n \"\"\"\n Takes a datetime and returns a human-readable string that describes how long ago that datetime was.\n\n precision specifies the smallest unit of time to include (e.g. \"seconds\", \"minutes\").\n max_units specifies the maximum number of units of time to include (e.g. 1 may include days but not hours).\n \"\"\"\n now = datetime.datetime.utcnow()\n delta = abs(relativedelta(now, past_datetime))\n\n humanized = humanize_delta(delta, precision, max_units)\n\n return f\"{humanized} ago\"\n\n\ndef parse_rfc1123(stamp: str) -> datetime.datetime:\n \"\"\"Parse RFC1123 time string into datetime.\"\"\"\n return datetime.datetime.strptime(stamp, RFC1123_FORMAT).replace(tzinfo=datetime.timezone.utc)\n\n\n# Hey, this could actually be used in the off_topic_names and reddit cogs :)\nasync def wait_until(time: datetime.datetime, start: Optional[datetime.datetime] = None) -> None:\n \"\"\"\n Wait until a given time.\n\n :param time: A datetime.datetime object to wait until.\n :param start: The start from which to calculate the waiting duration. 
Defaults to UTC time.\n \"\"\"\n delay = time - (start or datetime.datetime.utcnow())\n delay_seconds = delay.total_seconds()\n\n # Incorporate a small delay so we don't rapid-fire the event due to time precision errors\n if delay_seconds > 1.0:\n await asyncio.sleep(delay_seconds)\n\n\ndef format_infraction(timestamp: str) -> str:\n \"\"\"Format an infraction timestamp to a more readable ISO 8601 format.\"\"\"\n return dateutil.parser.isoparse(timestamp).strftime(INFRACTION_FORMAT)\n\n\ndef format_infraction_with_duration(\n expiry: Optional[str],\n date_from: datetime.datetime = None,\n max_units: int = 2\n) -> Optional[str]:\n \"\"\"\n Format an infraction timestamp to a more readable ISO 8601 format WITH the duration.\n\n Returns a human-readable version of the duration between datetime.utcnow() and an expiry.\n Unlike `humanize_delta`, this function will force the `precision` to be `seconds` by not passing it.\n `max_units` specifies the maximum number of units of time to include (e.g. 1 may include days but not hours).\n By default, max_units is 2.\n \"\"\"\n if not expiry:\n return None\n\n date_from = date_from or datetime.datetime.utcnow()\n date_to = dateutil.parser.isoparse(expiry).replace(tzinfo=None, microsecond=0)\n\n expiry_formatted = format_infraction(expiry)\n\n duration = humanize_delta(relativedelta(date_to, date_from), max_units=max_units)\n duration_formatted = f\" ({duration})\" if duration else ''\n\n return f\"{expiry_formatted}{duration_formatted}\"\n\n\ndef until_expiration(expiry: Optional[str], max_units: int = 2) -> Optional[str]:\n \"\"\"\n Get the remaining time until infraction's expiration, in a human-readable version of the relativedelta.\n\n Unlike `humanize_delta`, this function will force the `precision` to be `seconds` by not passing it.\n `max_units` specifies the maximum number of units of time to include (e.g. 1 may include days but not hours).\n By default, max_units is 2.\n \"\"\"\n if not expiry:\n return None\n\n now = datetime.datetime.utcnow()\n since = dateutil.parser.isoparse(expiry).replace(tzinfo=None, microsecond=0)\n\n if since < now:\n return None\n\n return humanize_delta(relativedelta(since, now), max_units=max_units)\n", "path": "bot/utils/time.py"}]} | 2,576 | 366 |
gh_patches_debug_7613 | rasdani/github-patches | git_diff | OpenNMT__OpenNMT-py-969 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error when using ./tools/embeddings_to_torch.py
**I am getting the following error.
Is it harmful and anyone know how to solve it?**
[2018-09-24 21:06:09,964 INFO] From: ./glove_experiment/data.vocab.pt
[2018-09-24 21:06:09,964 INFO] * source vocab: 50002 words
[2018-09-24 21:06:09,964 INFO] * target vocab: 50004 words
[2018-09-24 21:06:42,008 INFO] Got 400000 encryption embeddings from ./glove/original.txt
[2018-09-24 21:08:21,394 INFO] Got 1142358 decryption embeddings from ./glove/wiki.fr.vec
[2018-09-24 21:08:21,699 INFO]
Matching:
[2018-09-24 21:08:21,699 INFO] * enc: 19625 match, 30377 missing, (39.25%)
[2018-09-24 21:08:21,699 INFO] * dec: 1071 match, 48933 missing, (2.14%)
[2018-09-24 21:08:21,699 INFO]
Filtered embeddings:
--- Logging error ---
Traceback (most recent call last):
File "/home/eeb439/anaconda3/envs/pytorch0.4text0.3/lib/python3.6/logging/__init__.py", line 993, in emit
msg = self.format(record)
File "/home/eeb439/anaconda3/envs/pytorch0.4text0.3/lib/python3.6/logging/__init__.py", line 839, in format
return fmt.format(record)
File "/home/eeb439/anaconda3/envs/pytorch0.4text0.3/lib/python3.6/logging/__init__.py", line 576, in format
record.message = record.getMessage()
File "/home/eeb439/anaconda3/envs/pytorch0.4text0.3/lib/python3.6/logging/__init__.py", line 338, in getMessage
msg = msg % self.args
TypeError: not all arguments converted during string formatting
Call stack:
File "./tools/embeddings_to_torch.py", line 148, in <module>
main()
File "./tools/embeddings_to_torch.py", line 134, in main
logger.info("\t* enc: ", filtered_enc_embeddings.size())
Message: '\t* enc: '
Arguments: (torch.Size([50002, 300]),)
--- Logging error ---
Traceback (most recent call last):
File "/home/eeb439/anaconda3/envs/pytorch0.4text0.3/lib/python3.6/logging/__init__.py", line 993, in emit
msg = self.format(record)
File "/home/eeb439/anaconda3/envs/pytorch0.4text0.3/lib/python3.6/logging/__init__.py", line 839, in format
return fmt.format(record)
File "/home/eeb439/anaconda3/envs/pytorch0.4text0.3/lib/python3.6/logging/__init__.py", line 576, in format
record.message = record.getMessage()
File "/home/eeb439/anaconda3/envs/pytorch0.4text0.3/lib/python3.6/logging/__init__.py", line 338, in getMessage
msg = msg % self.args
TypeError: not all arguments converted during string formatting
Call stack:
File "./tools/embeddings_to_torch.py", line 148, in <module>
main()
File "./tools/embeddings_to_torch.py", line 134, in main
logger.info("\t* enc: ", filtered_enc_embeddings.size())
Message: '\t* enc: '
Arguments: (torch.Size([50002, 300]),)
--- Logging error ---
Traceback (most recent call last):
File "/home/eeb439/anaconda3/envs/pytorch0.4text0.3/lib/python3.6/logging/__init__.py", line 993, in emit
msg = self.format(record)
File "/home/eeb439/anaconda3/envs/pytorch0.4text0.3/lib/python3.6/logging/__init__.py", line 839, in format
return fmt.format(record)
File "/home/eeb439/anaconda3/envs/pytorch0.4text0.3/lib/python3.6/logging/__init__.py", line 576, in format
record.message = record.getMessage()
File "/home/eeb439/anaconda3/envs/pytorch0.4text0.3/lib/python3.6/logging/__init__.py", line 338, in getMessage
msg = msg % self.args
TypeError: not all arguments converted during string formatting
Call stack:
File "./tools/embeddings_to_torch.py", line 148, in <module>
main()
File "./tools/embeddings_to_torch.py", line 135, in main
logger.info("\t* dec: ", filtered_dec_embeddings.size())
Message: '\t* dec: '
Arguments: (torch.Size([50004, 300]),)
--- Logging error ---
Traceback (most recent call last):
File "/home/eeb439/anaconda3/envs/pytorch0.4text0.3/lib/python3.6/logging/__init__.py", line 993, in emit
msg = self.format(record)
File "/home/eeb439/anaconda3/envs/pytorch0.4text0.3/lib/python3.6/logging/__init__.py", line 839, in format
return fmt.format(record)
File "/home/eeb439/anaconda3/envs/pytorch0.4text0.3/lib/python3.6/logging/__init__.py", line 576, in format
record.message = record.getMessage()
File "/home/eeb439/anaconda3/envs/pytorch0.4text0.3/lib/python3.6/logging/__init__.py", line 338, in getMessage
msg = msg % self.args
TypeError: not all arguments converted during string formatting
Call stack:
File "./tools/embeddings_to_torch.py", line 148, in <module>
main()
File "./tools/embeddings_to_torch.py", line 135, in main
logger.info("\t* dec: ", filtered_dec_embeddings.size())
Message: '\t* dec: '
Arguments: (torch.Size([50004, 300]),)
[2018-09-24 21:08:21,701 INFO]
Saving embedding as:
* enc: ./glove_experiment/embeddings.enc.pt
* dec: ./glove_experiment/embeddings.dec.pt
[2018-09-24 21:08:22,065 INFO]
Done.
</issue>
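For context on the `--- Logging error ---` blocks above: the calls of the form `logger.info("\t* enc: ", ...)` pass the tensor size as an extra positional argument while the message string contains no `%` placeholder, so the logging module's internal `msg % self.args` step raises the `TypeError` shown; logging catches it and only prints the error, which is why the script still finishes with "Done.". A minimal standalone reproduction (the `size` tuple is just a stand-in for `filtered_enc_embeddings.size()`):

```python
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("embeddings_demo")

size = (50002, 300)  # stand-in for filtered_enc_embeddings.size()

# Triggers "--- Logging error ---": an extra argument but no %-placeholder.
logger.info("\t* enc: ", size)

# Works: interpolate into the message first, the style the rest of the script uses...
logger.info("\t* enc: %s" % str(size))

# ...or let logging perform the (lazy) interpolation itself.
logger.info("\t* enc: %s", size)
```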
<code>
[start of tools/embeddings_to_torch.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 from __future__ import print_function
4 from __future__ import division
5 import six
6 import sys
7 import numpy as np
8 import argparse
9 import torch
10 from onmt.utils.logging import init_logger, logger
11
12
13 def get_vocabs(dict_file):
14 vocabs = torch.load(dict_file)
15
16 enc_vocab, dec_vocab = None, None
17
18 # the vocab object is a list of tuple (name, torchtext.Vocab)
19 # we iterate over this list and associate vocabularies based on the name
20 for vocab in vocabs:
21 if vocab[0] == 'src':
22 enc_vocab = vocab[1]
23 if vocab[0] == 'tgt':
24 dec_vocab = vocab[1]
25 assert enc_vocab is not None and dec_vocab is not None
26
27 logger.info("From: %s" % dict_file)
28 logger.info("\t* source vocab: %d words" % len(enc_vocab))
29 logger.info("\t* target vocab: %d words" % len(dec_vocab))
30
31 return enc_vocab, dec_vocab
32
33
34 def get_embeddings(file_enc, opt, flag):
35 embs = dict()
36 if flag == 'enc':
37 for (i, l) in enumerate(open(file_enc, 'rb')):
38 if i < opt.skip_lines:
39 continue
40 if not l:
41 break
42 if len(l) == 0:
43 continue
44
45 l_split = l.decode('utf8').strip().split(' ')
46 if len(l_split) == 2:
47 continue
48 embs[l_split[0]] = [float(em) for em in l_split[1:]]
49 logger.info("Got {} encryption embeddings from {}".format(len(embs),
50 file_enc))
51 else:
52
53 for (i, l) in enumerate(open(file_enc, 'rb')):
54 if not l:
55 break
56 if len(l) == 0:
57 continue
58
59 l_split = l.decode('utf8').strip().split(' ')
60 if len(l_split) == 2:
61 continue
62 embs[l_split[0]] = [float(em) for em in l_split[1:]]
63 logger.info("Got {} decryption embeddings from {}".format(len(embs),
64 file_enc))
65 return embs
66
67
68 def match_embeddings(vocab, emb, opt):
69 dim = len(six.next(six.itervalues(emb)))
70 filtered_embeddings = np.zeros((len(vocab), dim))
71 count = {"match": 0, "miss": 0}
72 for w, w_id in vocab.stoi.items():
73 if w in emb:
74 filtered_embeddings[w_id] = emb[w]
75 count['match'] += 1
76 else:
77 if opt.verbose:
78 logger.info(u"not found:\t{}".format(w), file=sys.stderr)
79 count['miss'] += 1
80
81 return torch.Tensor(filtered_embeddings), count
82
83
84 TYPES = ["GloVe", "word2vec"]
85
86
87 def main():
88
89 parser = argparse.ArgumentParser(description='embeddings_to_torch.py')
90 parser.add_argument('-emb_file_enc', required=True,
91 help="source Embeddings from this file")
92 parser.add_argument('-emb_file_dec', required=True,
93 help="target Embeddings from this file")
94 parser.add_argument('-output_file', required=True,
95 help="Output file for the prepared data")
96 parser.add_argument('-dict_file', required=True,
97 help="Dictionary file")
98 parser.add_argument('-verbose', action="store_true", default=False)
99 parser.add_argument('-skip_lines', type=int, default=0,
100 help="Skip first lines of the embedding file")
101 parser.add_argument('-type', choices=TYPES, default="GloVe")
102 opt = parser.parse_args()
103
104 enc_vocab, dec_vocab = get_vocabs(opt.dict_file)
105 if opt.type == "word2vec":
106 opt.skip_lines = 1
107
108 embeddings_enc = get_embeddings(opt.emb_file_enc, opt, flag='enc')
109 embeddings_dec = get_embeddings(opt.emb_file_dec, opt, flag='dec')
110
111 filtered_enc_embeddings, enc_count = match_embeddings(enc_vocab,
112 embeddings_enc,
113 opt)
114 filtered_dec_embeddings, dec_count = match_embeddings(dec_vocab,
115 embeddings_dec,
116 opt)
117 logger.info("\nMatching: ")
118 match_percent = [_['match'] / (_['match'] + _['miss']) * 100
119 for _ in [enc_count, dec_count]]
120 logger.info("\t* enc: %d match, %d missing, (%.2f%%)"
121 % (enc_count['match'],
122 enc_count['miss'],
123 match_percent[0]))
124 logger.info("\t* dec: %d match, %d missing, (%.2f%%)"
125 % (dec_count['match'],
126 dec_count['miss'],
127 match_percent[1]))
128
129 logger.info("\nFiltered embeddings:")
130 logger.info("\t* enc: ", filtered_enc_embeddings.size())
131 logger.info("\t* dec: ", filtered_dec_embeddings.size())
132
133 enc_output_file = opt.output_file + ".enc.pt"
134 dec_output_file = opt.output_file + ".dec.pt"
135 logger.info("\nSaving embedding as:\n\t* enc: %s\n\t* dec: %s"
136 % (enc_output_file, dec_output_file))
137 torch.save(filtered_enc_embeddings, enc_output_file)
138 torch.save(filtered_dec_embeddings, dec_output_file)
139 logger.info("\nDone.")
140
141
142 if __name__ == "__main__":
143 init_logger('embeddings_to_torch.log')
144 main()
145
[end of tools/embeddings_to_torch.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/tools/embeddings_to_torch.py b/tools/embeddings_to_torch.py
--- a/tools/embeddings_to_torch.py
+++ b/tools/embeddings_to_torch.py
@@ -127,8 +127,8 @@
match_percent[1]))
logger.info("\nFiltered embeddings:")
- logger.info("\t* enc: ", filtered_enc_embeddings.size())
- logger.info("\t* dec: ", filtered_dec_embeddings.size())
+ logger.info("\t* enc: %s" % str(filtered_enc_embeddings.size()))
+ logger.info("\t* dec: %s" % str(filtered_dec_embeddings.size()))
enc_output_file = opt.output_file + ".enc.pt"
dec_output_file = opt.output_file + ".dec.pt"
| {"golden_diff": "diff --git a/tools/embeddings_to_torch.py b/tools/embeddings_to_torch.py\n--- a/tools/embeddings_to_torch.py\n+++ b/tools/embeddings_to_torch.py\n@@ -127,8 +127,8 @@\n match_percent[1]))\n \n logger.info(\"\\nFiltered embeddings:\")\n- logger.info(\"\\t* enc: \", filtered_enc_embeddings.size())\n- logger.info(\"\\t* dec: \", filtered_dec_embeddings.size())\n+ logger.info(\"\\t* enc: %s\" % str(filtered_enc_embeddings.size()))\n+ logger.info(\"\\t* dec: %s\" % str(filtered_dec_embeddings.size()))\n \n enc_output_file = opt.output_file + \".enc.pt\"\n dec_output_file = opt.output_file + \".dec.pt\"\n", "issue": "Error when using ./tools/embeddings_to_torch.py\n**I am getting the following error.\r\nIs it harmful and anyone know how to solve it?**\r\n\r\n\r\n[2018-09-24 21:06:09,964 INFO] From: ./glove_experiment/data.vocab.pt\r\n[2018-09-24 21:06:09,964 INFO] \t* source vocab: 50002 words\r\n[2018-09-24 21:06:09,964 INFO] \t* target vocab: 50004 words\r\n[2018-09-24 21:06:42,008 INFO] Got 400000 encryption embeddings from ./glove/original.txt\r\n[2018-09-24 21:08:21,394 INFO] Got 1142358 decryption embeddings from ./glove/wiki.fr.vec\r\n[2018-09-24 21:08:21,699 INFO] \r\nMatching: \r\n[2018-09-24 21:08:21,699 INFO] \t* enc: 19625 match, 30377 missing, (39.25%)\r\n[2018-09-24 21:08:21,699 INFO] \t* dec: 1071 match, 48933 missing, (2.14%)\r\n[2018-09-24 21:08:21,699 INFO] \r\nFiltered embeddings:\r\n--- Logging error ---\r\nTraceback (most recent call last):\r\n File \"/home/eeb439/anaconda3/envs/pytorch0.4text0.3/lib/python3.6/logging/__init__.py\", line 993, in emit\r\n msg = self.format(record)\r\n File \"/home/eeb439/anaconda3/envs/pytorch0.4text0.3/lib/python3.6/logging/__init__.py\", line 839, in format\r\n return fmt.format(record)\r\n File \"/home/eeb439/anaconda3/envs/pytorch0.4text0.3/lib/python3.6/logging/__init__.py\", line 576, in format\r\n record.message = record.getMessage()\r\n File \"/home/eeb439/anaconda3/envs/pytorch0.4text0.3/lib/python3.6/logging/__init__.py\", line 338, in getMessage\r\n msg = msg % self.args\r\nTypeError: not all arguments converted during string formatting\r\nCall stack:\r\n File \"./tools/embeddings_to_torch.py\", line 148, in <module>\r\n main()\r\n File \"./tools/embeddings_to_torch.py\", line 134, in main\r\n logger.info(\"\\t* enc: \", filtered_enc_embeddings.size())\r\nMessage: '\\t* enc: '\r\nArguments: (torch.Size([50002, 300]),)\r\n--- Logging error ---\r\nTraceback (most recent call last):\r\n File \"/home/eeb439/anaconda3/envs/pytorch0.4text0.3/lib/python3.6/logging/__init__.py\", line 993, in emit\r\n msg = self.format(record)\r\n File \"/home/eeb439/anaconda3/envs/pytorch0.4text0.3/lib/python3.6/logging/__init__.py\", line 839, in format\r\n return fmt.format(record)\r\n File \"/home/eeb439/anaconda3/envs/pytorch0.4text0.3/lib/python3.6/logging/__init__.py\", line 576, in format\r\n record.message = record.getMessage()\r\n File \"/home/eeb439/anaconda3/envs/pytorch0.4text0.3/lib/python3.6/logging/__init__.py\", line 338, in getMessage\r\n msg = msg % self.args\r\nTypeError: not all arguments converted during string formatting\r\nCall stack:\r\n File \"./tools/embeddings_to_torch.py\", line 148, in <module>\r\n main()\r\n File \"./tools/embeddings_to_torch.py\", line 134, in main\r\n logger.info(\"\\t* enc: \", filtered_enc_embeddings.size())\r\nMessage: '\\t* enc: '\r\nArguments: (torch.Size([50002, 300]),)\r\n--- Logging error ---\r\nTraceback (most recent call last):\r\n File 
\"/home/eeb439/anaconda3/envs/pytorch0.4text0.3/lib/python3.6/logging/__init__.py\", line 993, in emit\r\n msg = self.format(record)\r\n File \"/home/eeb439/anaconda3/envs/pytorch0.4text0.3/lib/python3.6/logging/__init__.py\", line 839, in format\r\n return fmt.format(record)\r\n File \"/home/eeb439/anaconda3/envs/pytorch0.4text0.3/lib/python3.6/logging/__init__.py\", line 576, in format\r\n record.message = record.getMessage()\r\n File \"/home/eeb439/anaconda3/envs/pytorch0.4text0.3/lib/python3.6/logging/__init__.py\", line 338, in getMessage\r\n msg = msg % self.args\r\nTypeError: not all arguments converted during string formatting\r\nCall stack:\r\n File \"./tools/embeddings_to_torch.py\", line 148, in <module>\r\n main()\r\n File \"./tools/embeddings_to_torch.py\", line 135, in main\r\n logger.info(\"\\t* dec: \", filtered_dec_embeddings.size())\r\nMessage: '\\t* dec: '\r\nArguments: (torch.Size([50004, 300]),)\r\n--- Logging error ---\r\nTraceback (most recent call last):\r\n File \"/home/eeb439/anaconda3/envs/pytorch0.4text0.3/lib/python3.6/logging/__init__.py\", line 993, in emit\r\n msg = self.format(record)\r\n File \"/home/eeb439/anaconda3/envs/pytorch0.4text0.3/lib/python3.6/logging/__init__.py\", line 839, in format\r\n return fmt.format(record)\r\n File \"/home/eeb439/anaconda3/envs/pytorch0.4text0.3/lib/python3.6/logging/__init__.py\", line 576, in format\r\n record.message = record.getMessage()\r\n File \"/home/eeb439/anaconda3/envs/pytorch0.4text0.3/lib/python3.6/logging/__init__.py\", line 338, in getMessage\r\n msg = msg % self.args\r\nTypeError: not all arguments converted during string formatting\r\nCall stack:\r\n File \"./tools/embeddings_to_torch.py\", line 148, in <module>\r\n main()\r\n File \"./tools/embeddings_to_torch.py\", line 135, in main\r\n logger.info(\"\\t* dec: \", filtered_dec_embeddings.size())\r\nMessage: '\\t* dec: '\r\nArguments: (torch.Size([50004, 300]),)\r\n[2018-09-24 21:08:21,701 INFO] \r\nSaving embedding as:\r\n\t* enc: ./glove_experiment/embeddings.enc.pt\r\n\t* dec: ./glove_experiment/embeddings.dec.pt\r\n[2018-09-24 21:08:22,065 INFO] \r\nDone.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nfrom __future__ import division\nimport six\nimport sys\nimport numpy as np\nimport argparse\nimport torch\nfrom onmt.utils.logging import init_logger, logger\n\n\ndef get_vocabs(dict_file):\n vocabs = torch.load(dict_file)\n\n enc_vocab, dec_vocab = None, None\n\n # the vocab object is a list of tuple (name, torchtext.Vocab)\n # we iterate over this list and associate vocabularies based on the name\n for vocab in vocabs:\n if vocab[0] == 'src':\n enc_vocab = vocab[1]\n if vocab[0] == 'tgt':\n dec_vocab = vocab[1]\n assert enc_vocab is not None and dec_vocab is not None\n\n logger.info(\"From: %s\" % dict_file)\n logger.info(\"\\t* source vocab: %d words\" % len(enc_vocab))\n logger.info(\"\\t* target vocab: %d words\" % len(dec_vocab))\n\n return enc_vocab, dec_vocab\n\n\ndef get_embeddings(file_enc, opt, flag):\n embs = dict()\n if flag == 'enc':\n for (i, l) in enumerate(open(file_enc, 'rb')):\n if i < opt.skip_lines:\n continue\n if not l:\n break\n if len(l) == 0:\n continue\n\n l_split = l.decode('utf8').strip().split(' ')\n if len(l_split) == 2:\n continue\n embs[l_split[0]] = [float(em) for em in l_split[1:]]\n logger.info(\"Got {} encryption embeddings from {}\".format(len(embs),\n file_enc))\n else:\n\n for (i, l) in enumerate(open(file_enc, 'rb')):\n if not l:\n 
break\n if len(l) == 0:\n continue\n\n l_split = l.decode('utf8').strip().split(' ')\n if len(l_split) == 2:\n continue\n embs[l_split[0]] = [float(em) for em in l_split[1:]]\n logger.info(\"Got {} decryption embeddings from {}\".format(len(embs),\n file_enc))\n return embs\n\n\ndef match_embeddings(vocab, emb, opt):\n dim = len(six.next(six.itervalues(emb)))\n filtered_embeddings = np.zeros((len(vocab), dim))\n count = {\"match\": 0, \"miss\": 0}\n for w, w_id in vocab.stoi.items():\n if w in emb:\n filtered_embeddings[w_id] = emb[w]\n count['match'] += 1\n else:\n if opt.verbose:\n logger.info(u\"not found:\\t{}\".format(w), file=sys.stderr)\n count['miss'] += 1\n\n return torch.Tensor(filtered_embeddings), count\n\n\nTYPES = [\"GloVe\", \"word2vec\"]\n\n\ndef main():\n\n parser = argparse.ArgumentParser(description='embeddings_to_torch.py')\n parser.add_argument('-emb_file_enc', required=True,\n help=\"source Embeddings from this file\")\n parser.add_argument('-emb_file_dec', required=True,\n help=\"target Embeddings from this file\")\n parser.add_argument('-output_file', required=True,\n help=\"Output file for the prepared data\")\n parser.add_argument('-dict_file', required=True,\n help=\"Dictionary file\")\n parser.add_argument('-verbose', action=\"store_true\", default=False)\n parser.add_argument('-skip_lines', type=int, default=0,\n help=\"Skip first lines of the embedding file\")\n parser.add_argument('-type', choices=TYPES, default=\"GloVe\")\n opt = parser.parse_args()\n\n enc_vocab, dec_vocab = get_vocabs(opt.dict_file)\n if opt.type == \"word2vec\":\n opt.skip_lines = 1\n\n embeddings_enc = get_embeddings(opt.emb_file_enc, opt, flag='enc')\n embeddings_dec = get_embeddings(opt.emb_file_dec, opt, flag='dec')\n\n filtered_enc_embeddings, enc_count = match_embeddings(enc_vocab,\n embeddings_enc,\n opt)\n filtered_dec_embeddings, dec_count = match_embeddings(dec_vocab,\n embeddings_dec,\n opt)\n logger.info(\"\\nMatching: \")\n match_percent = [_['match'] / (_['match'] + _['miss']) * 100\n for _ in [enc_count, dec_count]]\n logger.info(\"\\t* enc: %d match, %d missing, (%.2f%%)\"\n % (enc_count['match'],\n enc_count['miss'],\n match_percent[0]))\n logger.info(\"\\t* dec: %d match, %d missing, (%.2f%%)\"\n % (dec_count['match'],\n dec_count['miss'],\n match_percent[1]))\n\n logger.info(\"\\nFiltered embeddings:\")\n logger.info(\"\\t* enc: \", filtered_enc_embeddings.size())\n logger.info(\"\\t* dec: \", filtered_dec_embeddings.size())\n\n enc_output_file = opt.output_file + \".enc.pt\"\n dec_output_file = opt.output_file + \".dec.pt\"\n logger.info(\"\\nSaving embedding as:\\n\\t* enc: %s\\n\\t* dec: %s\"\n % (enc_output_file, dec_output_file))\n torch.save(filtered_enc_embeddings, enc_output_file)\n torch.save(filtered_dec_embeddings, dec_output_file)\n logger.info(\"\\nDone.\")\n\n\nif __name__ == \"__main__\":\n init_logger('embeddings_to_torch.log')\n main()\n", "path": "tools/embeddings_to_torch.py"}]} | 3,837 | 164 |
gh_patches_debug_398 | rasdani/github-patches | git_diff | optuna__optuna-1882 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove the document for `with_traceback` method of Optuna's exception classes
Currently, Optuna's exception classes have documentation for the `with_traceback` method, which is inherited from `Exception`. I don't think it is informative for readers, and it can be removed from the reference.

The following exception classes have the `with_traceback` method.
- [ ] `optuna.exceptions.CLIUsageError`
- [ ] `optuna.exceptions.OptunaError`
- [ ] `optuna.exceptions.TrialPruned`
- [ ] `optuna.exceptions.CLIUsageError`
- [ ] `optuna.exceptions.StorageInternalError`
- [ ] `optuna.exceptions.DuplicatedStudyError`
CC @keisuke-umezawa Please let me know if you have any comments.
</issue>
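One way to hide the inherited member, sketched below purely as an assumption about a possible fix (the maintainers may prefer a different mechanism, such as customised autosummary templates), is to set autodoc's default options in `docs/source/conf.py`:

```python
# Hypothetical addition to docs/source/conf.py: ask autodoc to skip the
# inherited member for every documented object.
autodoc_default_options = {
    "exclude-members": "with_traceback",
}
```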
<code>
[start of docs/source/conf.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Configuration file for the Sphinx documentation builder.
4 #
5 # This file does only contain a selection of the most common options. For a
6 # full list see the documentation:
7 # http://www.sphinx-doc.org/en/master/config
8
9 # -- Path setup --------------------------------------------------------------
10
11 # If extensions (or modules to document with autodoc) are in another directory,
12 # add these directories to sys.path here. If the directory is relative to the
13 # documentation root, use os.path.abspath to make it absolute, like shown here.
14 #
15 # import os
16 # import sys
17 # sys.path.insert(0, os.path.abspath('.'))
18
19 import pkg_resources
20
21 from sphinx_gallery.sorting import FileNameSortKey
22
23 __version__ = pkg_resources.get_distribution('optuna').version
24
25 # -- Project information -----------------------------------------------------
26
27 project = 'Optuna'
28 copyright = '2018, Optuna Contributors.'
29 author = 'Optuna Contributors.'
30
31 # The short X.Y version
32 version = __version__
33 # The full version, including alpha/beta/rc tags
34 release = __version__
35
36 # -- General configuration ---------------------------------------------------
37
38 # If your documentation needs a minimal Sphinx version, state it here.
39 #
40 # needs_sphinx = '1.0'
41
42 # Add any Sphinx extension module names here, as strings. They can be
43 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
44 # ones.
45 extensions = [
46 'sphinx.ext.autodoc',
47 'sphinx.ext.autosummary',
48 'sphinx.ext.doctest',
49 'sphinx.ext.intersphinx',
50 'sphinx.ext.mathjax',
51 'sphinx.ext.napoleon',
52 'sphinx.ext.viewcode',
53 'sphinx.ext.githubpages',
54 'cliff.sphinxext',
55 'sphinx_gallery.gen_gallery',
56 ]
57
58 # Add any paths that contain templates here, relative to this directory.
59 templates_path = ['_templates']
60
61 # The suffix(es) of source filenames.
62 # You can specify multiple suffix as a list of string:
63 #
64 # source_suffix = ['.rst', '.md']
65 source_suffix = '.rst'
66
67 # The master toctree document.
68 master_doc = 'index'
69
70 # The language for content autogenerated by Sphinx. Refer to documentation
71 # for a list of supported languages.
72 #
73 # This is also used if you do content translation via gettext catalogs.
74 # Usually you set "language" from the command line for these cases.
75 language = None
76
77 # List of patterns, relative to source directory, that match files and
78 # directories to ignore when looking for source files.
79 # This pattern also affects html_static_path and html_extra_path .
80 exclude_patterns = []
81
82 # The name of the Pygments (syntax highlighting) style to use.
83 pygments_style = 'sphinx'
84
85 # -- Options for HTML output -------------------------------------------------
86
87 # The theme to use for HTML and HTML Help pages. See the documentation for
88 # a list of builtin themes.
89 #
90 html_theme = 'sphinx_rtd_theme'
91
92 # Theme options are theme-specific and customize the look and feel of a theme
93 # further. For a list of options available for each theme, see the
94 # documentation.
95 #
96 html_theme_options = {
97 'logo_only': True
98 }
99
100 html_favicon = '../image/favicon.ico'
101
102 html_logo = '../image/optuna-logo.png'
103
104 # Add any paths that contain custom static files (such as style sheets) here,
105 # relative to this directory. They are copied after the builtin static files,
106 # so a file named "default.css" will overwrite the builtin "default.css".
107 html_static_path = ['_static', 'plotly_figures']
108 html_css_files = ["css/custom.css"]
109
110 # Custom sidebar templates, must be a dictionary that maps document names
111 # to template names.
112 #
113 # The default sidebars (for documents that don't match any pattern) are
114 # defined by theme itself. Builtin themes are using these templates by
115 # default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
116 # 'searchbox.html']``.
117 #
118 # html_sidebars = {}
119
120 # -- Options for HTMLHelp output ---------------------------------------------
121
122 # Output file base name for HTML help builder.
123 htmlhelp_basename = 'Optunadoc'
124
125 # -- Options for LaTeX output ------------------------------------------------
126
127 latex_elements = {
128 # The paper size ('letterpaper' or 'a4paper').
129 #
130 # 'papersize': 'letterpaper',
131
132 # The font size ('10pt', '11pt' or '12pt').
133 #
134 # 'pointsize': '10pt',
135
136 # Additional stuff for the LaTeX preamble.
137 #
138 # 'preamble': '',
139
140 # Latex figure (float) alignment
141 #
142 # 'figure_align': 'htbp',
143 }
144
145 # Grouping the document tree into LaTeX files. List of tuples
146 # (source start file, target name, title,
147 # author, documentclass [howto, manual, or own class]).
148 latex_documents = [
149 (master_doc, 'Optuna.tex', 'Optuna Documentation', 'Optuna Contributors.', 'manual'),
150 ]
151
152 # -- Options for manual page output ------------------------------------------
153
154 # One entry per manual page. List of tuples
155 # (source start file, name, description, authors, manual section).
156 man_pages = [(master_doc, 'optuna', 'Optuna Documentation', [author], 1)]
157
158 # -- Options for Texinfo output ----------------------------------------------
159
160 # Grouping the document tree into Texinfo files. List of tuples
161 # (source start file, target name, title, author,
162 # dir menu entry, description, category)
163 texinfo_documents = [
164 (master_doc, 'Optuna', 'Optuna Documentation', author, 'Optuna',
165 'One line description of project.', 'Miscellaneous'),
166 ]
167
168 intersphinx_mapping = {'python': ('https://docs.python.org/3', None)}
169
170 # -- Extension configuration -------------------------------------------------
171 autosummary_generate = True
172 autodoc_default_options = {
173 'members': True,
174 'inherited-members': True,
175 }
176
177 sphinx_gallery_conf = {
178 'examples_dirs': [
179 '../../tutorial',
180 ],
181 'gallery_dirs': [
182 'tutorial',
183 ],
184 'within_subsection_order': FileNameSortKey,
185 'filename_pattern': r'/*\.py',
186 'first_notebook_cell': None,
187 }
188
[end of docs/source/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/source/conf.py b/docs/source/conf.py
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -172,6 +172,7 @@
autodoc_default_options = {
'members': True,
'inherited-members': True,
+ 'exclude-members': 'with_traceback',
}
sphinx_gallery_conf = {
| {"golden_diff": "diff --git a/docs/source/conf.py b/docs/source/conf.py\n--- a/docs/source/conf.py\n+++ b/docs/source/conf.py\n@@ -172,6 +172,7 @@\n autodoc_default_options = {\n 'members': True,\n 'inherited-members': True,\n+ 'exclude-members': 'with_traceback',\n }\n \n sphinx_gallery_conf = {\n", "issue": "Remove the document for `with_traceback` method of Optuna's exception classes\nCurrently, Optuna's exception classes have the documentations of `with_traceback` method, which is inherited from `Exception`. I don't think it is informative for readers and it can be removed from the reference.\r\n\r\n\r\n\r\nThe following `Exception` has the `with_traceback` method.\r\n- [ ] `optuna.exceptions.CLIUsageError`\r\n- [ ] `optuna.exceptions.OptunaError`\r\n- [ ] `optuna.exceptions.TrialPruned`\r\n- [ ] `optuna.exceptions.CLIUsageError`\r\n- [ ] `optuna.exceptions.StorageInternalError`\r\n- [ ] `optuna.exceptions.DuplicatedStudyError`\r\n\r\nCC @keisuke-umezawa Please let me know if you have any comments.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\nimport pkg_resources\n\nfrom sphinx_gallery.sorting import FileNameSortKey\n\n__version__ = pkg_resources.get_distribution('optuna').version\n\n# -- Project information -----------------------------------------------------\n\nproject = 'Optuna'\ncopyright = '2018, Optuna Contributors.'\nauthor = 'Optuna Contributors.'\n\n# The short X.Y version\nversion = __version__\n# The full version, including alpha/beta/rc tags\nrelease = __version__\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.doctest',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.githubpages',\n 'cliff.sphinxext',\n 'sphinx_gallery.gen_gallery',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path .\nexclude_patterns = []\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_rtd_theme'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_theme_options = {\n 'logo_only': True\n}\n\nhtml_favicon = '../image/favicon.ico'\n\nhtml_logo = '../image/optuna-logo.png'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static', 'plotly_figures']\nhtml_css_files = [\"css/custom.css\"]\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\n# html_sidebars = {}\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Optunadoc'\n\n# -- Options for LaTeX output ------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'Optuna.tex', 'Optuna Documentation', 'Optuna Contributors.', 'manual'),\n]\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, 'optuna', 'Optuna Documentation', [author], 1)]\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'Optuna', 'Optuna Documentation', author, 'Optuna',\n 'One line description of project.', 'Miscellaneous'),\n]\n\nintersphinx_mapping = {'python': ('https://docs.python.org/3', None)}\n\n# -- Extension configuration -------------------------------------------------\nautosummary_generate = True\nautodoc_default_options = {\n 'members': True,\n 'inherited-members': True,\n}\n\nsphinx_gallery_conf = {\n 'examples_dirs': [\n '../../tutorial',\n ],\n 'gallery_dirs': [\n 'tutorial',\n ],\n 'within_subsection_order': FileNameSortKey,\n 'filename_pattern': r'/*\\.py',\n 'first_notebook_cell': None,\n}\n", "path": "docs/source/conf.py"}]} | 2,568 | 82 |
gh_patches_debug_27672 | rasdani/github-patches | git_diff | bids-standard__pybids-589 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
model: JSON to dict modified key values for transformation
In the `Replace` transformation, you specify which variables to transform as a dict.
e.g.:
```
{'LIKELY': "5"}
```
However, the JSON-to-dict parser that converts BIDS Stats Models lowercases the keys, so case-sensitive values like this one are altered and the transformation itself changes.
</issue>
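To make the failure mode concrete, here is a minimal sketch that reuses the same regex as the `camel_to_snake` helper in `bids/utils.py` below; the `'LIKELY'` key comes from the report and the `print` is purely illustrative:

```python
import re

def camel_to_snake(s):
    # Same pattern used by convert_JSON's helper in bids/utils.py.
    a = re.compile('((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))')
    return a.sub(r'_\1', s).lower()

# Keys of a Replace mapping are data values, not identifiers, so converting
# them silently changes the transformation.
print(camel_to_snake('LIKELY'))  # -> 'likely', which no longer matches 'LIKELY'
```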
<code>
[start of bids/utils.py]
1 """ Utility functions. """
2
3 import re
4 import os
5
6
7 def listify(obj):
8 ''' Wraps all non-list or tuple objects in a list; provides a simple way
9 to accept flexible arguments. '''
10 return obj if isinstance(obj, (list, tuple, type(None))) else [obj]
11
12
13 def matches_entities(obj, entities, strict=False):
14 ''' Checks whether an object's entities match the input. '''
15 if strict and set(obj.entities.keys()) != set(entities.keys()):
16 return False
17
18 comm_ents = list(set(obj.entities.keys()) & set(entities.keys()))
19 for k in comm_ents:
20 current = obj.entities[k]
21 target = entities[k]
22 if isinstance(target, (list, tuple)):
23 if current not in target:
24 return False
25 elif current != target:
26 return False
27 return True
28
29
30 def natural_sort(l, field=None):
31 '''
32 based on snippet found at http://stackoverflow.com/a/4836734/2445984
33 '''
34 convert = lambda text: int(text) if text.isdigit() else text.lower()
35
36 def alphanum_key(key):
37 if field is not None:
38 key = getattr(key, field)
39 if not isinstance(key, str):
40 key = str(key)
41 return [convert(c) for c in re.split('([0-9]+)', key)]
42 return sorted(l, key=alphanum_key)
43
44
45 def convert_JSON(j):
46 """ Recursively convert CamelCase keys to snake_case.
47 From: https://stackoverflow.com/questions/17156078/converting-identifier-naming-between-camelcase-and-underscores-during-json-seria
48 """
49
50 def camel_to_snake(s):
51 a = re.compile('((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))')
52 return a.sub(r'_\1', s).lower()
53
54 def convertArray(a):
55 newArr = []
56 for i in a:
57 if isinstance(i,list):
58 newArr.append(convertArray(i))
59 elif isinstance(i, dict):
60 newArr.append(convert_JSON(i))
61 else:
62 newArr.append(i)
63 return newArr
64
65 out = {}
66 for k, value in j.items():
67 newK = camel_to_snake(k)
68
69 if isinstance(value, dict):
70 out[newK] = convert_JSON(value)
71 elif isinstance(value, list):
72 out[newK] = convertArray(value)
73 else:
74 out[newK] = value
75
76 return out
77
78
79 def splitext(path):
80 """splitext for paths with directories that may contain dots.
81 From https://stackoverflow.com/questions/5930036/separating-file-extensions-using-python-os-path-module"""
82 li = []
83 path_without_extensions = os.path.join(os.path.dirname(path),
84 os.path.basename(path).split(os.extsep)[0])
85 extensions = os.path.basename(path).split(os.extsep)[1:]
86 li.append(path_without_extensions)
87 # li.append(extensions) if you want extensions in another list inside the list that is returned.
88 li.extend(extensions)
89 return li
90
91
92 def make_bidsfile(filename):
93 """Create a BIDSFile instance of the appropriate class. """
94 from .layout import models
95
96 patt = re.compile("[._]*[a-zA-Z0-9]*?\\.([^/\\\\]+)$")
97 m = re.search(patt, filename)
98
99 ext = None if not m else m.group(1)
100
101 if ext in ['nii', 'nii.gz']:
102 cls = 'BIDSImageFile'
103 elif ext in ['tsv', 'tsv.gz']:
104 cls = 'BIDSDataFile'
105 elif ext == 'json':
106 cls = 'BIDSJSONFile'
107 else:
108 cls = 'BIDSFile'
109
110 Cls = getattr(models, cls)
111 return Cls(filename)
112
[end of bids/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bids/utils.py b/bids/utils.py
--- a/bids/utils.py
+++ b/bids/utils.py
@@ -44,9 +44,10 @@
def convert_JSON(j):
""" Recursively convert CamelCase keys to snake_case.
- From: https://stackoverflow.com/questions/17156078/converting-identifier-naming-between-camelcase-and-underscores-during-json-seria
+ From: https://stackoverflow.com/questions/17156078/
+ converting-identifier-naming-between-camelcase-and-
+ underscores-during-json-seria
"""
-
def camel_to_snake(s):
a = re.compile('((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))')
return a.sub(r'_\1', s).lower()
@@ -54,7 +55,7 @@
def convertArray(a):
newArr = []
for i in a:
- if isinstance(i,list):
+ if isinstance(i, list):
newArr.append(convertArray(i))
elif isinstance(i, dict):
newArr.append(convert_JSON(i))
@@ -66,7 +67,8 @@
for k, value in j.items():
newK = camel_to_snake(k)
- if isinstance(value, dict):
+ # Replace transformation uses a dict, so skip lower-casing
+ if isinstance(value, dict) and k != 'Replace':
out[newK] = convert_JSON(value)
elif isinstance(value, list):
out[newK] = convertArray(value)
| {"golden_diff": "diff --git a/bids/utils.py b/bids/utils.py\n--- a/bids/utils.py\n+++ b/bids/utils.py\n@@ -44,9 +44,10 @@\n \n def convert_JSON(j):\n \"\"\" Recursively convert CamelCase keys to snake_case.\n- From: https://stackoverflow.com/questions/17156078/converting-identifier-naming-between-camelcase-and-underscores-during-json-seria\n+ From: https://stackoverflow.com/questions/17156078/\n+ converting-identifier-naming-between-camelcase-and-\n+ underscores-during-json-seria\n \"\"\"\n-\n def camel_to_snake(s):\n a = re.compile('((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))')\n return a.sub(r'_\\1', s).lower()\n@@ -54,7 +55,7 @@\n def convertArray(a):\n newArr = []\n for i in a:\n- if isinstance(i,list):\n+ if isinstance(i, list):\n newArr.append(convertArray(i))\n elif isinstance(i, dict):\n newArr.append(convert_JSON(i))\n@@ -66,7 +67,8 @@\n for k, value in j.items():\n newK = camel_to_snake(k)\n \n- if isinstance(value, dict):\n+ # Replace transformation uses a dict, so skip lower-casing\n+ if isinstance(value, dict) and k != 'Replace':\n out[newK] = convert_JSON(value)\n elif isinstance(value, list):\n out[newK] = convertArray(value)\n", "issue": "model: JSON to dict modified key values for transformation\nIn ` Replace` transformation, you specify as a dict which variables to transform.\r\n\r\ne.g.:\r\n\r\n```\r\n{'LIKELY': \"5\"}\r\n```\r\n\r\nHowever, the parser from JSON to dict to convert BIDS Stats Models modifies keys to lower case, which in the case of specific case sensitive values modifies the transformation itself.\n", "before_files": [{"content": "\"\"\" Utility functions. \"\"\"\n\nimport re\nimport os\n\n\ndef listify(obj):\n ''' Wraps all non-list or tuple objects in a list; provides a simple way\n to accept flexible arguments. '''\n return obj if isinstance(obj, (list, tuple, type(None))) else [obj]\n\n\ndef matches_entities(obj, entities, strict=False):\n ''' Checks whether an object's entities match the input. 
'''\n if strict and set(obj.entities.keys()) != set(entities.keys()):\n return False\n\n comm_ents = list(set(obj.entities.keys()) & set(entities.keys()))\n for k in comm_ents:\n current = obj.entities[k]\n target = entities[k]\n if isinstance(target, (list, tuple)):\n if current not in target:\n return False\n elif current != target:\n return False\n return True\n\n\ndef natural_sort(l, field=None):\n '''\n based on snippet found at http://stackoverflow.com/a/4836734/2445984\n '''\n convert = lambda text: int(text) if text.isdigit() else text.lower()\n\n def alphanum_key(key):\n if field is not None:\n key = getattr(key, field)\n if not isinstance(key, str):\n key = str(key)\n return [convert(c) for c in re.split('([0-9]+)', key)]\n return sorted(l, key=alphanum_key)\n\n\ndef convert_JSON(j):\n \"\"\" Recursively convert CamelCase keys to snake_case.\n From: https://stackoverflow.com/questions/17156078/converting-identifier-naming-between-camelcase-and-underscores-during-json-seria\n \"\"\"\n\n def camel_to_snake(s):\n a = re.compile('((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))')\n return a.sub(r'_\\1', s).lower()\n\n def convertArray(a):\n newArr = []\n for i in a:\n if isinstance(i,list):\n newArr.append(convertArray(i))\n elif isinstance(i, dict):\n newArr.append(convert_JSON(i))\n else:\n newArr.append(i)\n return newArr\n\n out = {}\n for k, value in j.items():\n newK = camel_to_snake(k)\n\n if isinstance(value, dict):\n out[newK] = convert_JSON(value)\n elif isinstance(value, list):\n out[newK] = convertArray(value)\n else:\n out[newK] = value\n\n return out\n\n\ndef splitext(path):\n \"\"\"splitext for paths with directories that may contain dots.\n From https://stackoverflow.com/questions/5930036/separating-file-extensions-using-python-os-path-module\"\"\"\n li = []\n path_without_extensions = os.path.join(os.path.dirname(path),\n os.path.basename(path).split(os.extsep)[0])\n extensions = os.path.basename(path).split(os.extsep)[1:]\n li.append(path_without_extensions)\n # li.append(extensions) if you want extensions in another list inside the list that is returned.\n li.extend(extensions)\n return li\n\n\ndef make_bidsfile(filename):\n \"\"\"Create a BIDSFile instance of the appropriate class. \"\"\"\n from .layout import models\n\n patt = re.compile(\"[._]*[a-zA-Z0-9]*?\\\\.([^/\\\\\\\\]+)$\")\n m = re.search(patt, filename)\n\n ext = None if not m else m.group(1)\n\n if ext in ['nii', 'nii.gz']:\n cls = 'BIDSImageFile'\n elif ext in ['tsv', 'tsv.gz']:\n cls = 'BIDSDataFile'\n elif ext == 'json':\n cls = 'BIDSJSONFile'\n else:\n cls = 'BIDSFile'\n\n Cls = getattr(models, cls)\n return Cls(filename)\n", "path": "bids/utils.py"}]} | 1,683 | 353 |
gh_patches_debug_24872 | rasdani/github-patches | git_diff | rotki__rotki-174 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
USD Value for IOTA is incorrect
## Problem Definition
The USD value reported on my exchange is inconsistent with the USD value that rotkehlchen shows.
I tried to find where the USD value is calculated for exchange assets and I found the following API call: [rotkehlchen.inquirer#L68](https://github.com/kascheri12/rotkehlchen/blob/master/rotkehlchen/inquirer.py#L68)
The asset "IOTA" uses symbol "IOT" at the api endpoint therefore the incorrect rate is returned when querying:
https://min-api.cryptocompare.com/data/price?fsym=IOTA&tsyms=USD
vs.
https://min-api.cryptocompare.com/data/price?fsym=IOT&tsyms=USD
</issue>
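A minimal sketch of the kind of symbol translation the report is asking for; the dict form is only an illustration (the project's `world_to_cryptocompare` below uses an if/elif chain), and the `IOTA -> IOT` entry is the assumption drawn from the issue:

```python
# Map asset symbols to the identifiers cryptocompare expects before
# building the price query.
WORLD_TO_CRYPTOCOMPARE = {
    'IOTA': 'IOT',        # cryptocompare serves IOTA prices under "IOT"
    'RDN': 'RDN*',
    'DATAcoin': 'DATA',
}

def to_cryptocompare_symbol(asset: str) -> str:
    return WORLD_TO_CRYPTOCOMPARE.get(asset, asset)

url = ('https://min-api.cryptocompare.com/data/price?'
       'fsym={}&tsyms=USD'.format(to_cryptocompare_symbol('IOTA')))
print(url)  # queries fsym=IOT, which is the rate the exchange reports
```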
<code>
[start of rotkehlchen/constants.py]
1 from typing import cast
2 from rotkehlchen import typing
3
4 ETH_DAO_FORK_TS = 1469020840 # 2016-07-20 13:20:40 UTC
5 BTC_BCH_FORK_TS = 1501593374 # 2017-08-01 13:16:14 UTC
6
7 SUPPORTED_EXCHANGES = ['kraken', 'poloniex', 'bittrex', 'bitmex', 'binance']
8 ROTKEHLCHEN_SERVER_TIMEOUT = 5
9 ALL_REMOTES_TIMEOUT = 20
10
11 YEAR_IN_SECONDS = 31536000 # 60 * 60 * 24 * 365
12
13 S_EMPTYSTR = typing.EmptyStr('')
14
15 S_BTC = cast(typing.NonEthTokenBlockchainAsset, 'BTC')
16 S_ETH = cast(typing.NonEthTokenBlockchainAsset, 'ETH')
17 S_DATACOIN = cast(typing.NonEthTokenBlockchainAsset, 'DATAcoin')
18
19 S_RDN = cast(typing.EthToken, 'RDN')
20
21
22 S_USD = typing.FiatAsset('USD')
23 S_EUR = typing.FiatAsset('EUR')
24 S_GBP = typing.FiatAsset('GBP')
25 S_JPY = typing.FiatAsset('JPY')
26 S_CNY = typing.FiatAsset('CNY')
27 FIAT_CURRENCIES = (S_USD, S_EUR, S_GBP, S_JPY, S_CNY)
28
29 EV_BUY = typing.EventType('buy')
30 EV_SELL = typing.EventType('sell')
31 EV_TX_GAS_COST = typing.EventType('tx_gas_cost')
32 EV_ASSET_MOVE = typing.EventType('asset_movement')
33 EV_LOAN_SETTLE = typing.EventType('loan_settlement')
34 EV_INTEREST_PAYMENT = typing.EventType('interest_rate_payment')
35 EV_MARGIN_CLOSE = typing.EventType('margin_position_close')
36
[end of rotkehlchen/constants.py]
[start of rotkehlchen/inquirer.py]
1 from __future__ import unicode_literals
2
3 import logging
4 from typing import Dict, Iterable, Optional, cast
5
6 import requests
7
8 from rotkehlchen import typing
9 from rotkehlchen.constants import FIAT_CURRENCIES, S_DATACOIN, S_RDN, S_USD
10 from rotkehlchen.errors import RemoteError
11 from rotkehlchen.fval import FVal
12 from rotkehlchen.utils import query_fiat_pair, retry_calls, rlk_jsonloads
13
14 logger = logging.getLogger(__name__)
15
16
17 def get_fiat_usd_exchange_rates(
18 currencies: Optional[Iterable[typing.FiatAsset]] = None,
19 ) -> Dict[typing.FiatAsset, FVal]:
20 rates = {S_USD: FVal(1)}
21 if not currencies:
22 currencies = FIAT_CURRENCIES[1:]
23 for currency in currencies:
24 rates[currency] = query_fiat_pair(S_USD, currency)
25
26 return rates
27
28
29 def world_to_cryptocompare(asset):
30 # Adjust some ETH tokens to how cryptocompare knows them
31 if asset == S_RDN:
32 # remove this if cryptocompare changes the symbol
33 asset = cast(typing.EthToken, 'RDN*')
34 elif asset == S_DATACOIN:
35 asset = cast(typing.NonEthTokenBlockchainAsset, 'DATA')
36
37 return asset
38
39
40 class Inquirer(object):
41 def __init__(self, kraken=None): # TODO: Add type after fixing cyclic dependency
42 self.kraken = kraken
43 self.session = requests.session()
44
45 def query_kraken_for_price(
46 self,
47 asset: typing.Asset,
48 asset_btc_price: FVal,
49 ) -> FVal:
50 if asset == 'BTC':
51 return self.kraken.usdprice['BTC']
52 return asset_btc_price * self.kraken.usdprice['BTC']
53
54 def find_usd_price(
55 self,
56 asset: typing.Asset,
57 asset_btc_price: Optional[FVal] = None,
58 ) -> FVal:
59 if self.kraken and self.kraken.first_connection_made and asset_btc_price is not None:
60 return self.query_kraken_for_price(asset, asset_btc_price)
61
62 asset = world_to_cryptocompare(asset)
63 resp = retry_calls(
64 5,
65 'find_usd_price',
66 'requests.get',
67 requests.get,
68 u'https://min-api.cryptocompare.com/data/price?'
69 'fsym={}&tsyms=USD'.format(asset)
70 )
71
72 if resp.status_code != 200:
73 raise RemoteError('Cant reach cryptocompare to get USD value of {}'.format(asset))
74
75 resp = rlk_jsonloads(resp.text)
76
77 # If there is an error in the response skip this token
78 if 'USD' not in resp:
79 if resp['Response'] == 'Error':
80 print('Could not query USD price for {}. Error: "{}"'.format(
81 asset,
82 resp['Message']),
83 )
84 else:
85 print('Could not query USD price for {}'.format(asset))
86 return FVal(0)
87
88 return FVal(resp['USD'])
89
[end of rotkehlchen/inquirer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/rotkehlchen/constants.py b/rotkehlchen/constants.py
--- a/rotkehlchen/constants.py
+++ b/rotkehlchen/constants.py
@@ -15,6 +15,7 @@
S_BTC = cast(typing.NonEthTokenBlockchainAsset, 'BTC')
S_ETH = cast(typing.NonEthTokenBlockchainAsset, 'ETH')
S_DATACOIN = cast(typing.NonEthTokenBlockchainAsset, 'DATAcoin')
+S_IOTA = cast(typing.NonEthTokenBlockchainAsset, 'IOTA')
S_RDN = cast(typing.EthToken, 'RDN')
diff --git a/rotkehlchen/inquirer.py b/rotkehlchen/inquirer.py
--- a/rotkehlchen/inquirer.py
+++ b/rotkehlchen/inquirer.py
@@ -6,7 +6,7 @@
import requests
from rotkehlchen import typing
-from rotkehlchen.constants import FIAT_CURRENCIES, S_DATACOIN, S_RDN, S_USD
+from rotkehlchen.constants import FIAT_CURRENCIES, S_DATACOIN, S_RDN, S_USD, S_IOTA
from rotkehlchen.errors import RemoteError
from rotkehlchen.fval import FVal
from rotkehlchen.utils import query_fiat_pair, retry_calls, rlk_jsonloads
@@ -33,6 +33,8 @@
asset = cast(typing.EthToken, 'RDN*')
elif asset == S_DATACOIN:
asset = cast(typing.NonEthTokenBlockchainAsset, 'DATA')
+ elif asset == S_IOTA:
+ asset = cast(typing.NonEthTokenBlockchainAsset, 'IOT')
return asset
| {"golden_diff": "diff --git a/rotkehlchen/constants.py b/rotkehlchen/constants.py\n--- a/rotkehlchen/constants.py\n+++ b/rotkehlchen/constants.py\n@@ -15,6 +15,7 @@\n S_BTC = cast(typing.NonEthTokenBlockchainAsset, 'BTC')\n S_ETH = cast(typing.NonEthTokenBlockchainAsset, 'ETH')\n S_DATACOIN = cast(typing.NonEthTokenBlockchainAsset, 'DATAcoin')\n+S_IOTA = cast(typing.NonEthTokenBlockchainAsset, 'IOTA')\n \n S_RDN = cast(typing.EthToken, 'RDN')\n \ndiff --git a/rotkehlchen/inquirer.py b/rotkehlchen/inquirer.py\n--- a/rotkehlchen/inquirer.py\n+++ b/rotkehlchen/inquirer.py\n@@ -6,7 +6,7 @@\n import requests\n \n from rotkehlchen import typing\n-from rotkehlchen.constants import FIAT_CURRENCIES, S_DATACOIN, S_RDN, S_USD\n+from rotkehlchen.constants import FIAT_CURRENCIES, S_DATACOIN, S_RDN, S_USD, S_IOTA\n from rotkehlchen.errors import RemoteError\n from rotkehlchen.fval import FVal\n from rotkehlchen.utils import query_fiat_pair, retry_calls, rlk_jsonloads\n@@ -33,6 +33,8 @@\n asset = cast(typing.EthToken, 'RDN*')\n elif asset == S_DATACOIN:\n asset = cast(typing.NonEthTokenBlockchainAsset, 'DATA')\n+ elif asset == S_IOTA:\n+ asset = cast(typing.NonEthTokenBlockchainAsset, 'IOT')\n \n return asset\n", "issue": "USD Value for IOTA is incorrect\n## Problem Definition\r\n\r\nThe usd value reported on my exchange is inconsistent with the usd value that rotkehlchen shows.\r\n\r\nI tried to find where the USD value is calculated for exchange assets and I found the following API call: [rotkehlchen.inquirer#L68](https://github.com/kascheri12/rotkehlchen/blob/master/rotkehlchen/inquirer.py#L68) \r\n\r\nThe asset \"IOTA\" uses symbol \"IOT\" at the api endpoint therefore the incorrect rate is returned when querying: \r\nhttps://min-api.cryptocompare.com/data/price?fsym=IOTA&tsyms=USD\r\nvs.\r\nhttps://min-api.cryptocompare.com/data/price?fsym=IOT&tsyms=USD\nUSD Value for IOTA is incorrect\n## Problem Definition\r\n\r\nThe usd value reported on my exchange is inconsistent with the usd value that rotkehlchen shows.\r\n\r\nI tried to find where the USD value is calculated for exchange assets and I found the following API call: [rotkehlchen.inquirer#L68](https://github.com/kascheri12/rotkehlchen/blob/master/rotkehlchen/inquirer.py#L68) \r\n\r\nThe asset \"IOTA\" uses symbol \"IOT\" at the api endpoint therefore the incorrect rate is returned when querying: \r\nhttps://min-api.cryptocompare.com/data/price?fsym=IOTA&tsyms=USD\r\nvs.\r\nhttps://min-api.cryptocompare.com/data/price?fsym=IOT&tsyms=USD\n", "before_files": [{"content": "from typing import cast\nfrom rotkehlchen import typing\n\nETH_DAO_FORK_TS = 1469020840 # 2016-07-20 13:20:40 UTC\nBTC_BCH_FORK_TS = 1501593374 # 2017-08-01 13:16:14 UTC\n\nSUPPORTED_EXCHANGES = ['kraken', 'poloniex', 'bittrex', 'bitmex', 'binance']\nROTKEHLCHEN_SERVER_TIMEOUT = 5\nALL_REMOTES_TIMEOUT = 20\n\nYEAR_IN_SECONDS = 31536000 # 60 * 60 * 24 * 365\n\nS_EMPTYSTR = typing.EmptyStr('')\n\nS_BTC = cast(typing.NonEthTokenBlockchainAsset, 'BTC')\nS_ETH = cast(typing.NonEthTokenBlockchainAsset, 'ETH')\nS_DATACOIN = cast(typing.NonEthTokenBlockchainAsset, 'DATAcoin')\n\nS_RDN = cast(typing.EthToken, 'RDN')\n\n\nS_USD = typing.FiatAsset('USD')\nS_EUR = typing.FiatAsset('EUR')\nS_GBP = typing.FiatAsset('GBP')\nS_JPY = typing.FiatAsset('JPY')\nS_CNY = typing.FiatAsset('CNY')\nFIAT_CURRENCIES = (S_USD, S_EUR, S_GBP, S_JPY, S_CNY)\n\nEV_BUY = typing.EventType('buy')\nEV_SELL = typing.EventType('sell')\nEV_TX_GAS_COST = typing.EventType('tx_gas_cost')\nEV_ASSET_MOVE = 
typing.EventType('asset_movement')\nEV_LOAN_SETTLE = typing.EventType('loan_settlement')\nEV_INTEREST_PAYMENT = typing.EventType('interest_rate_payment')\nEV_MARGIN_CLOSE = typing.EventType('margin_position_close')\n", "path": "rotkehlchen/constants.py"}, {"content": "from __future__ import unicode_literals\n\nimport logging\nfrom typing import Dict, Iterable, Optional, cast\n\nimport requests\n\nfrom rotkehlchen import typing\nfrom rotkehlchen.constants import FIAT_CURRENCIES, S_DATACOIN, S_RDN, S_USD\nfrom rotkehlchen.errors import RemoteError\nfrom rotkehlchen.fval import FVal\nfrom rotkehlchen.utils import query_fiat_pair, retry_calls, rlk_jsonloads\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_fiat_usd_exchange_rates(\n currencies: Optional[Iterable[typing.FiatAsset]] = None,\n) -> Dict[typing.FiatAsset, FVal]:\n rates = {S_USD: FVal(1)}\n if not currencies:\n currencies = FIAT_CURRENCIES[1:]\n for currency in currencies:\n rates[currency] = query_fiat_pair(S_USD, currency)\n\n return rates\n\n\ndef world_to_cryptocompare(asset):\n # Adjust some ETH tokens to how cryptocompare knows them\n if asset == S_RDN:\n # remove this if cryptocompare changes the symbol\n asset = cast(typing.EthToken, 'RDN*')\n elif asset == S_DATACOIN:\n asset = cast(typing.NonEthTokenBlockchainAsset, 'DATA')\n\n return asset\n\n\nclass Inquirer(object):\n def __init__(self, kraken=None): # TODO: Add type after fixing cyclic dependency\n self.kraken = kraken\n self.session = requests.session()\n\n def query_kraken_for_price(\n self,\n asset: typing.Asset,\n asset_btc_price: FVal,\n ) -> FVal:\n if asset == 'BTC':\n return self.kraken.usdprice['BTC']\n return asset_btc_price * self.kraken.usdprice['BTC']\n\n def find_usd_price(\n self,\n asset: typing.Asset,\n asset_btc_price: Optional[FVal] = None,\n ) -> FVal:\n if self.kraken and self.kraken.first_connection_made and asset_btc_price is not None:\n return self.query_kraken_for_price(asset, asset_btc_price)\n\n asset = world_to_cryptocompare(asset)\n resp = retry_calls(\n 5,\n 'find_usd_price',\n 'requests.get',\n requests.get,\n u'https://min-api.cryptocompare.com/data/price?'\n 'fsym={}&tsyms=USD'.format(asset)\n )\n\n if resp.status_code != 200:\n raise RemoteError('Cant reach cryptocompare to get USD value of {}'.format(asset))\n\n resp = rlk_jsonloads(resp.text)\n\n # If there is an error in the response skip this token\n if 'USD' not in resp:\n if resp['Response'] == 'Error':\n print('Could not query USD price for {}. Error: \"{}\"'.format(\n asset,\n resp['Message']),\n )\n else:\n print('Could not query USD price for {}'.format(asset))\n return FVal(0)\n\n return FVal(resp['USD'])\n", "path": "rotkehlchen/inquirer.py"}]} | 2,253 | 386 |
gh_patches_debug_42204 | rasdani/github-patches | git_diff | getsentry__sentry-python-295 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Celery integration not capturing error with max_tasks_per_child = 1
The Celery integration fails to capture the exception when I use a Celery factory pattern that patches the Celery task with Flask's application context.
This is `web/celery_factory.py`
```
# Source: https://stackoverflow.com/questions/12044776/how-to-use-flask-sqlalchemy-in-a-celery-task
from celery import Celery
import flask
class FlaskCelery(Celery):
def __init__(self, *args, **kwargs):
super(FlaskCelery, self).__init__(*args, **kwargs)
self.patch_task()
if 'app' in kwargs:
self.init_app(kwargs['app'])
def patch_task(self):
TaskBase = self.Task
_celery = self
class ContextTask(TaskBase):
abstract = True
def __call__(self, *args, **kwargs):
if flask.has_app_context():
return TaskBase.__call__(self, *args, **kwargs)
else:
with _celery.app.app_context():
return TaskBase.__call__(self, *args, **kwargs)
self.Task = ContextTask
def init_app(self, app):
self.app = app
self.config_from_object(app.config)
celery_app = FlaskCelery()
```
I am adding a random `raise` inside a simple task
```
from celery_factory import celery_app
@celery_app.task
def simple_task():
raise Exception("Testing Celery exception")
```
The error I get printed is:
```
[2019-03-08 21:24:21,117: ERROR/ForkPoolWorker-31] Task simple_task[d6e959b1-7253-4e55-861d-c1968ae14e1c] raised unexpected: RuntimeError('No active exception to reraise')
Traceback (most recent call last):
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/celery/app/trace.py", line 382, in trace_task
R = retval = fun(*args, **kwargs)
File "/Users/okomarov/Documents/repos/myproject/web/celery_factory.py", line 28, in __call__
return TaskBase.__call__(self, *args, **kwargs)
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/celery/app/trace.py", line 641, in __protected_call__
return self.run(*args, **kwargs)
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/integrations/celery.py", line 66, in _inner
reraise(*_capture_exception())
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/_compat.py", line 52, in reraise
raise value
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/integrations/celery.py", line 64, in _inner
return f(*args, **kwargs)
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/integrations/celery.py", line 66, in _inner
reraise(*_capture_exception())
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/_compat.py", line 52, in reraise
raise value
File "/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/integrations/celery.py", line 64, in _inner
return f(*args, **kwargs)
File "/Users/okomarov/Documents/repos/myproject/web/simple_task.py", line 4, in simple_task
raise Exception("Testing Celery exception")
RuntimeError: No active exception to reraise
```
Relevant pip packages:
```
Celery==4.2.1
Flask==1.0.2
sentry-sdk==0.7.4
```
The integration is initialized as follows (the Flask integration works as expected):
```
from flask import Flask
from celery_factory import celery_app
from config import config_to_use
def create_app():
app = Flask()
app.config.from_object(config_to_use)
init_logging(app)
register_extensions(app)
register_blueprints(app)
register_jinja_extras(app)
return app
def init_logging(app):
import sentry_sdk
from sentry_sdk.integrations.flask import FlaskIntegration
from sentry_sdk.integrations.celery import CeleryIntegration
sentry_sdk.init(
dsn=app.config.get('FLASK_SENTRY_DSN'),
integrations=[FlaskIntegration(), CeleryIntegration()]
)
...
```
</issue>
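A minimal sketch of one way to avoid the bare re-raise visible in the traceback: grab `sys.exc_info()` once, report it, then re-raise the saved exception object. The `capture` callback and the function signature are illustrative stand-ins rather than the SDK's real API; the change that actually shipped is in the patch further down this entry:

```python
import sys

def wrap_task_call(f, capture):
    # Even if the wrapper ends up applied twice (as with the Flask factory
    # pattern above), re-raising the saved exception object does not depend
    # on a "currently handled exception" still being active.
    def _inner(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except Exception:
            exc_type, exc_value, tb = sys.exc_info()
            capture((exc_type, exc_value, tb))  # report to Sentry (stubbed here)
            raise exc_value.with_traceback(tb)
    return _inner

# Stubbed usage: the lambda stands in for a task body that raises.
task = wrap_task_call(lambda: 1 / 0,
                      capture=lambda info: print('captured', info[0].__name__))
```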
<code>
[start of sentry_sdk/integrations/celery.py]
1 from __future__ import absolute_import
2
3 import sys
4
5 from celery.exceptions import SoftTimeLimitExceeded, Retry # type: ignore
6
7 from sentry_sdk.hub import Hub
8 from sentry_sdk.utils import capture_internal_exceptions, event_from_exception
9 from sentry_sdk._compat import reraise
10 from sentry_sdk.integrations import Integration
11 from sentry_sdk.integrations.logging import ignore_logger
12
13
14 class CeleryIntegration(Integration):
15 identifier = "celery"
16
17 @staticmethod
18 def setup_once():
19 import celery.app.trace as trace # type: ignore
20
21 old_build_tracer = trace.build_tracer
22
23 def sentry_build_tracer(name, task, *args, **kwargs):
24 # Need to patch both methods because older celery sometimes
25 # short-circuits to task.run if it thinks it's safe.
26 task.__call__ = _wrap_task_call(task.__call__)
27 task.run = _wrap_task_call(task.run)
28 return _wrap_tracer(task, old_build_tracer(name, task, *args, **kwargs))
29
30 trace.build_tracer = sentry_build_tracer
31
32 # This logger logs every status of every task that ran on the worker.
33 # Meaning that every task's breadcrumbs are full of stuff like "Task
34 # <foo> raised unexpected <bar>".
35 ignore_logger("celery.worker.job")
36
37
38 def _wrap_tracer(task, f):
39 # Need to wrap tracer for pushing the scope before prerun is sent, and
40 # popping it after postrun is sent.
41 #
42 # This is the reason we don't use signals for hooking in the first place.
43 # Also because in Celery 3, signal dispatch returns early if one handler
44 # crashes.
45 def _inner(*args, **kwargs):
46 hub = Hub.current
47 if hub.get_integration(CeleryIntegration) is None:
48 return f(*args, **kwargs)
49
50 with hub.push_scope() as scope:
51 scope._name = "celery"
52 scope.add_event_processor(_make_event_processor(task, *args, **kwargs))
53
54 return f(*args, **kwargs)
55
56 return _inner
57
58
59 def _wrap_task_call(f):
60 # Need to wrap task call because the exception is caught before we get to
61 # see it. Also celery's reported stacktrace is untrustworthy.
62 def _inner(*args, **kwargs):
63 try:
64 return f(*args, **kwargs)
65 except Exception:
66 reraise(*_capture_exception())
67
68 return _inner
69
70
71 def _make_event_processor(task, uuid, args, kwargs, request=None):
72 def event_processor(event, hint):
73 with capture_internal_exceptions():
74 event["transaction"] = task.name
75
76 with capture_internal_exceptions():
77 extra = event.setdefault("extra", {})
78 extra["celery-job"] = {
79 "task_name": task.name,
80 "args": args,
81 "kwargs": kwargs,
82 }
83
84 if "exc_info" in hint:
85 with capture_internal_exceptions():
86 if isinstance(hint["exc_info"][1], Retry):
87 return None
88
89 if hasattr(task, "throws") and isinstance(
90 hint["exc_info"][1], task.throws
91 ):
92 return None
93
94 with capture_internal_exceptions():
95 if issubclass(hint["exc_info"][0], SoftTimeLimitExceeded):
96 event["fingerprint"] = [
97 "celery",
98 "SoftTimeLimitExceeded",
99 getattr(task, "name", task),
100 ]
101
102 return event
103
104 return event_processor
105
106
107 def _capture_exception():
108 hub = Hub.current
109 exc_info = sys.exc_info()
110
111 if hub.get_integration(CeleryIntegration) is not None:
112 event, hint = event_from_exception(
113 exc_info,
114 client_options=hub.client.options,
115 mechanism={"type": "celery", "handled": False},
116 )
117 hub.capture_event(event, hint=hint)
118
119 return exc_info
120
[end of sentry_sdk/integrations/celery.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sentry_sdk/integrations/celery.py b/sentry_sdk/integrations/celery.py
--- a/sentry_sdk/integrations/celery.py
+++ b/sentry_sdk/integrations/celery.py
@@ -23,12 +23,14 @@
def sentry_build_tracer(name, task, *args, **kwargs):
# Need to patch both methods because older celery sometimes
# short-circuits to task.run if it thinks it's safe.
- task.__call__ = _wrap_task_call(task.__call__)
- task.run = _wrap_task_call(task.run)
+ task.__call__ = _wrap_task_call(task, task.__call__)
+ task.run = _wrap_task_call(task, task.run)
return _wrap_tracer(task, old_build_tracer(name, task, *args, **kwargs))
trace.build_tracer = sentry_build_tracer
+ _patch_worker_exit()
+
# This logger logs every status of every task that ran on the worker.
# Meaning that every task's breadcrumbs are full of stuff like "Task
# <foo> raised unexpected <bar>".
@@ -56,14 +58,17 @@
return _inner
-def _wrap_task_call(f):
+def _wrap_task_call(task, f):
# Need to wrap task call because the exception is caught before we get to
# see it. Also celery's reported stacktrace is untrustworthy.
def _inner(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception:
- reraise(*_capture_exception())
+ exc_info = sys.exc_info()
+ with capture_internal_exceptions():
+ _capture_exception(task, exc_info)
+ reraise(*exc_info)
return _inner
@@ -82,15 +87,6 @@
}
if "exc_info" in hint:
- with capture_internal_exceptions():
- if isinstance(hint["exc_info"][1], Retry):
- return None
-
- if hasattr(task, "throws") and isinstance(
- hint["exc_info"][1], task.throws
- ):
- return None
-
with capture_internal_exceptions():
if issubclass(hint["exc_info"][0], SoftTimeLimitExceeded):
event["fingerprint"] = [
@@ -104,16 +100,39 @@
return event_processor
-def _capture_exception():
+def _capture_exception(task, exc_info):
hub = Hub.current
- exc_info = sys.exc_info()
- if hub.get_integration(CeleryIntegration) is not None:
- event, hint = event_from_exception(
- exc_info,
- client_options=hub.client.options,
- mechanism={"type": "celery", "handled": False},
- )
- hub.capture_event(event, hint=hint)
+ if hub.get_integration(CeleryIntegration) is None:
+ return
+ if isinstance(exc_info[1], Retry):
+ return
+ if hasattr(task, "throws") and isinstance(exc_info[1], task.throws):
+ return
+
+ event, hint = event_from_exception(
+ exc_info,
+ client_options=hub.client.options,
+ mechanism={"type": "celery", "handled": False},
+ )
+
+ hub.capture_event(event, hint=hint)
+
+
+def _patch_worker_exit():
+ # Need to flush queue before worker shutdown because a crashing worker will
+ # call os._exit
+ from billiard.pool import Worker # type: ignore
+
+ old_workloop = Worker.workloop
+
+ def sentry_workloop(*args, **kwargs):
+ try:
+ return old_workloop(*args, **kwargs)
+ finally:
+ with capture_internal_exceptions():
+ hub = Hub.current
+ if hub.get_integration(CeleryIntegration) is not None:
+ hub.flush()
- return exc_info
+ Worker.workloop = sentry_workloop
| {"golden_diff": "diff --git a/sentry_sdk/integrations/celery.py b/sentry_sdk/integrations/celery.py\n--- a/sentry_sdk/integrations/celery.py\n+++ b/sentry_sdk/integrations/celery.py\n@@ -23,12 +23,14 @@\n def sentry_build_tracer(name, task, *args, **kwargs):\n # Need to patch both methods because older celery sometimes\n # short-circuits to task.run if it thinks it's safe.\n- task.__call__ = _wrap_task_call(task.__call__)\n- task.run = _wrap_task_call(task.run)\n+ task.__call__ = _wrap_task_call(task, task.__call__)\n+ task.run = _wrap_task_call(task, task.run)\n return _wrap_tracer(task, old_build_tracer(name, task, *args, **kwargs))\n \n trace.build_tracer = sentry_build_tracer\n \n+ _patch_worker_exit()\n+\n # This logger logs every status of every task that ran on the worker.\n # Meaning that every task's breadcrumbs are full of stuff like \"Task\n # <foo> raised unexpected <bar>\".\n@@ -56,14 +58,17 @@\n return _inner\n \n \n-def _wrap_task_call(f):\n+def _wrap_task_call(task, f):\n # Need to wrap task call because the exception is caught before we get to\n # see it. Also celery's reported stacktrace is untrustworthy.\n def _inner(*args, **kwargs):\n try:\n return f(*args, **kwargs)\n except Exception:\n- reraise(*_capture_exception())\n+ exc_info = sys.exc_info()\n+ with capture_internal_exceptions():\n+ _capture_exception(task, exc_info)\n+ reraise(*exc_info)\n \n return _inner\n \n@@ -82,15 +87,6 @@\n }\n \n if \"exc_info\" in hint:\n- with capture_internal_exceptions():\n- if isinstance(hint[\"exc_info\"][1], Retry):\n- return None\n-\n- if hasattr(task, \"throws\") and isinstance(\n- hint[\"exc_info\"][1], task.throws\n- ):\n- return None\n-\n with capture_internal_exceptions():\n if issubclass(hint[\"exc_info\"][0], SoftTimeLimitExceeded):\n event[\"fingerprint\"] = [\n@@ -104,16 +100,39 @@\n return event_processor\n \n \n-def _capture_exception():\n+def _capture_exception(task, exc_info):\n hub = Hub.current\n- exc_info = sys.exc_info()\n \n- if hub.get_integration(CeleryIntegration) is not None:\n- event, hint = event_from_exception(\n- exc_info,\n- client_options=hub.client.options,\n- mechanism={\"type\": \"celery\", \"handled\": False},\n- )\n- hub.capture_event(event, hint=hint)\n+ if hub.get_integration(CeleryIntegration) is None:\n+ return\n+ if isinstance(exc_info[1], Retry):\n+ return\n+ if hasattr(task, \"throws\") and isinstance(exc_info[1], task.throws):\n+ return\n+\n+ event, hint = event_from_exception(\n+ exc_info,\n+ client_options=hub.client.options,\n+ mechanism={\"type\": \"celery\", \"handled\": False},\n+ )\n+\n+ hub.capture_event(event, hint=hint)\n+\n+\n+def _patch_worker_exit():\n+ # Need to flush queue before worker shutdown because a crashing worker will\n+ # call os._exit\n+ from billiard.pool import Worker # type: ignore\n+\n+ old_workloop = Worker.workloop\n+\n+ def sentry_workloop(*args, **kwargs):\n+ try:\n+ return old_workloop(*args, **kwargs)\n+ finally:\n+ with capture_internal_exceptions():\n+ hub = Hub.current\n+ if hub.get_integration(CeleryIntegration) is not None:\n+ hub.flush()\n \n- return exc_info\n+ Worker.workloop = sentry_workloop\n", "issue": "Celery integration not capturing error with max_tasks_per_child = 1\nThe celery integration is failing to capture the exception when I use a celery factory pattern which patches the celery task with Flask's context.\r\n\r\nThis is `web/celery_factory.py`\r\n```\r\n# Source: https://stackoverflow.com/questions/12044776/how-to-use-flask-sqlalchemy-in-a-celery-task\r\n\r\nfrom celery import 
Celery\r\nimport flask\r\n\r\n\r\nclass FlaskCelery(Celery):\r\n\r\n def __init__(self, *args, **kwargs):\r\n super(FlaskCelery, self).__init__(*args, **kwargs)\r\n self.patch_task()\r\n\r\n if 'app' in kwargs:\r\n self.init_app(kwargs['app'])\r\n\r\n def patch_task(self):\r\n TaskBase = self.Task\r\n _celery = self\r\n\r\n class ContextTask(TaskBase):\r\n abstract = True\r\n\r\n def __call__(self, *args, **kwargs):\r\n if flask.has_app_context():\r\n return TaskBase.__call__(self, *args, **kwargs)\r\n else:\r\n with _celery.app.app_context():\r\n return TaskBase.__call__(self, *args, **kwargs)\r\n\r\n self.Task = ContextTask\r\n\r\n def init_app(self, app):\r\n self.app = app\r\n self.config_from_object(app.config)\r\n\r\n\r\ncelery_app = FlaskCelery()\r\n```\r\n\r\nI am adding a random `raise` inside a simple task\r\n\r\n```\r\nimport celery_app from celery_factory.py\r\n@celery_app.task\r\ndef simple_task():\r\n raise Exception(\"Testing Celery exception\")\r\n```\r\n\r\nThe error I get printed is:\r\n```\r\n[2019-03-08 21:24:21,117: ERROR/ForkPoolWorker-31] Task simple_task[d6e959b1-7253-4e55-861d-c1968ae14e1c] raised unexpected: RuntimeError('No active exception to reraise')\r\nTraceback (most recent call last):\r\n File \"/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/celery/app/trace.py\", line 382, in trace_task\r\n R = retval = fun(*args, **kwargs)\r\n File \"/Users/okomarov/Documents/repos/myproject/web/celery_factory.py\", line 28, in __call__\r\n return TaskBase.__call__(self, *args, **kwargs)\r\n File \"/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/celery/app/trace.py\", line 641, in __protected_call__\r\n return self.run(*args, **kwargs)\r\n File \"/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/integrations/celery.py\", line 66, in _inner\r\n reraise(*_capture_exception())\r\n File \"/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/_compat.py\", line 52, in reraise\r\n raise value\r\n File \"/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/integrations/celery.py\", line 64, in _inner\r\n return f(*args, **kwargs)\r\n File \"/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/integrations/celery.py\", line 66, in _inner\r\n reraise(*_capture_exception())\r\n File \"/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/_compat.py\", line 52, in reraise\r\n raise value\r\n File \"/Users/okomarov/.virtualenvs/myenv/lib/python3.7/site-packages/sentry_sdk/integrations/celery.py\", line 64, in _inner\r\n return f(*args, **kwargs)\r\n File \"/Users/okomarov/Documents/repos/myproject/web/simple_task.py\", line 4, in simple_task\r\n raise Exception(\"Testing Celery exception\")\r\nRuntimeError: No active exception to reraise\r\n```\r\n\r\nRelevant pip packages:\r\n```\r\nCelery==4.2.1\r\nFlask==1.0.2\r\nsentry-sdk==0.7.4\r\n```\r\n\r\nThe integration is called as following (flask integration works as expected):\r\n```\r\nfrom flask import Flask\r\nfrom celery_factory import celery_app\r\nfrom config import config_to_use\r\n\r\n\r\ndef create_app():\r\n app = Flask()\r\n app.config.from_object(config_to_use)\r\n\r\n init_logging(app)\r\n\r\n register_extensions(app)\r\n register_blueprints(app)\r\n register_jinja_extras(app)\r\n\r\n return app\r\n\r\n\r\ndef init_logging(app):\r\n import sentry_sdk\r\n from sentry_sdk.integrations.flask import FlaskIntegration\r\n from sentry_sdk.integrations.celery import CeleryIntegration\r\n\r\n 
sentry_sdk.init(\r\n dsn=app.config.get('FLASK_SENTRY_DSN'),\r\n integrations=[FlaskIntegration(), CeleryIntegration()]\r\n )\r\n\r\n...\r\n```\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport sys\n\nfrom celery.exceptions import SoftTimeLimitExceeded, Retry # type: ignore\n\nfrom sentry_sdk.hub import Hub\nfrom sentry_sdk.utils import capture_internal_exceptions, event_from_exception\nfrom sentry_sdk._compat import reraise\nfrom sentry_sdk.integrations import Integration\nfrom sentry_sdk.integrations.logging import ignore_logger\n\n\nclass CeleryIntegration(Integration):\n identifier = \"celery\"\n\n @staticmethod\n def setup_once():\n import celery.app.trace as trace # type: ignore\n\n old_build_tracer = trace.build_tracer\n\n def sentry_build_tracer(name, task, *args, **kwargs):\n # Need to patch both methods because older celery sometimes\n # short-circuits to task.run if it thinks it's safe.\n task.__call__ = _wrap_task_call(task.__call__)\n task.run = _wrap_task_call(task.run)\n return _wrap_tracer(task, old_build_tracer(name, task, *args, **kwargs))\n\n trace.build_tracer = sentry_build_tracer\n\n # This logger logs every status of every task that ran on the worker.\n # Meaning that every task's breadcrumbs are full of stuff like \"Task\n # <foo> raised unexpected <bar>\".\n ignore_logger(\"celery.worker.job\")\n\n\ndef _wrap_tracer(task, f):\n # Need to wrap tracer for pushing the scope before prerun is sent, and\n # popping it after postrun is sent.\n #\n # This is the reason we don't use signals for hooking in the first place.\n # Also because in Celery 3, signal dispatch returns early if one handler\n # crashes.\n def _inner(*args, **kwargs):\n hub = Hub.current\n if hub.get_integration(CeleryIntegration) is None:\n return f(*args, **kwargs)\n\n with hub.push_scope() as scope:\n scope._name = \"celery\"\n scope.add_event_processor(_make_event_processor(task, *args, **kwargs))\n\n return f(*args, **kwargs)\n\n return _inner\n\n\ndef _wrap_task_call(f):\n # Need to wrap task call because the exception is caught before we get to\n # see it. 
Also celery's reported stacktrace is untrustworthy.\n def _inner(*args, **kwargs):\n try:\n return f(*args, **kwargs)\n except Exception:\n reraise(*_capture_exception())\n\n return _inner\n\n\ndef _make_event_processor(task, uuid, args, kwargs, request=None):\n def event_processor(event, hint):\n with capture_internal_exceptions():\n event[\"transaction\"] = task.name\n\n with capture_internal_exceptions():\n extra = event.setdefault(\"extra\", {})\n extra[\"celery-job\"] = {\n \"task_name\": task.name,\n \"args\": args,\n \"kwargs\": kwargs,\n }\n\n if \"exc_info\" in hint:\n with capture_internal_exceptions():\n if isinstance(hint[\"exc_info\"][1], Retry):\n return None\n\n if hasattr(task, \"throws\") and isinstance(\n hint[\"exc_info\"][1], task.throws\n ):\n return None\n\n with capture_internal_exceptions():\n if issubclass(hint[\"exc_info\"][0], SoftTimeLimitExceeded):\n event[\"fingerprint\"] = [\n \"celery\",\n \"SoftTimeLimitExceeded\",\n getattr(task, \"name\", task),\n ]\n\n return event\n\n return event_processor\n\n\ndef _capture_exception():\n hub = Hub.current\n exc_info = sys.exc_info()\n\n if hub.get_integration(CeleryIntegration) is not None:\n event, hint = event_from_exception(\n exc_info,\n client_options=hub.client.options,\n mechanism={\"type\": \"celery\", \"handled\": False},\n )\n hub.capture_event(event, hint=hint)\n\n return exc_info\n", "path": "sentry_sdk/integrations/celery.py"}]} | 2,764 | 897 |
gh_patches_debug_20588 | rasdani/github-patches | git_diff | dotkom__onlineweb4-812 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Hide attendanceevent from django admin
https://online.ntnu.no/admin/events/attendanceevent/
This view should not be used by anyone and attendance info should be edited through the event directly.
It should be possible to hide this by removing
`admin.site.register(AttendanceEvent, AttendanceEventAdmin)`
in events/admin.py (untested)
</issue>
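A minimal, generic Django sketch of the idea (editor's illustration, not code from this repository; `Parent` and `Child` are placeholder models): a model that is only edited inline through its parent stays hidden from the admin index simply by never being registered.

```python
from django.contrib import admin

from myapp.models import Child, Parent  # hypothetical models, for illustration only


class ChildInline(admin.StackedInline):
    model = Child
    extra = 0


class ParentAdmin(admin.ModelAdmin):
    # Child rows are created and edited here, inside the Parent change form
    inlines = (ChildInline,)


admin.site.register(Parent, ParentAdmin)
# Deliberately no admin.site.register(Child, ...): /admin/ shows no standalone Child view
```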
<code>
[start of apps/events/admin.py]
1 # -*- coding: utf-8 -*-
2
3 from django import forms
4 from django.contrib import admin
5 from django.core import validators
6 from django.utils.translation import ugettext as _
7
8 from apps.events.models import Event
9 from apps.events.models import AttendanceEvent
10 from apps.events.models import Attendee
11 from apps.events.models import CompanyEvent
12 from apps.events.models import RuleBundle
13 from apps.events.models import FieldOfStudyRule
14 from apps.events.models import GradeRule
15 from apps.events.models import UserGroupRule
16 from apps.feedback.admin import FeedbackRelationInline
17
18
19
20 class AttendeeInline(admin.TabularInline):
21 model = Attendee
22 extra = 1
23 classes = ('grp-collapse grp-open',) # style
24 inline_classes = ('grp-collapse grp-open',) # style
25
26
27 class CompanyInline(admin.TabularInline):
28 model = CompanyEvent
29 max_num = 20
30 extra = 0
31 classes = ('grp-collapse grp-open',) # style
32 inline_classes = ('grp-collapse grp-open',) # style
33
34
35 class RuleBundleInline(admin.TabularInline):
36 model = RuleBundle
37 extra = 1
38 max_num = 20
39 classes = ('grp-collapse grp-open',) # style
40 inline_classes = ('grp-collapse grp-open',) # style
41
42
43 class AttendanceEventAdmin(admin.ModelAdmin):
44 model = AttendanceEvent
45 inlines = (AttendeeInline, RuleBundleInline)
46
47
48 class AttendeeAdmin(admin.ModelAdmin):
49 model = Attendee
50 list_display = ('user', 'event', 'paid')
51 actions = None
52
53 def delete_model(self, request, obj):
54 event = obj.event.event
55 event.notify_waiting_list(host=request.META['HTTP_HOST'], unattended_user=obj.user)
56 obj.delete()
57
58
59 class CompanyEventAdmin(admin.ModelAdmin):
60 model = CompanyEvent
61 inlines = (CompanyInline,)
62
63
64 class RuleBundleAdmin(admin.ModelAdmin):
65 model = RuleBundle
66
67
68 class FieldOfStudyRuleAdmin(admin.ModelAdmin):
69 model = FieldOfStudyRule
70
71
72 class GradeRuleAdmin(admin.ModelAdmin):
73 model = GradeRule
74
75
76 class UserGroupRuleAdmin(admin.ModelAdmin):
77 model = UserGroupRule
78
79
80 class AttendanceEventInline(admin.StackedInline):
81 model = AttendanceEvent
82 max_num = 1
83 extra = 0
84 filter_horizontal = ('rule_bundles',)
85 classes = ('grp-collapse grp-open',) # style
86 inline_classes = ('grp-collapse grp-open',) # style
87
88
89 class EventAdmin(admin.ModelAdmin):
90 inlines = (AttendanceEventInline, FeedbackRelationInline, CompanyInline)
91 exclude = ("author", )
92 search_fields = ('title',)
93
94 def save_model(self, request, obj, form, change):
95 if not change: # created
96 obj.author = request.user
97 else:
98 # If attendance max capacity changed we will notify users that they are now on the attend list
99 old_event = Event.objects.get(id=obj.id)
100 if old_event.is_attendance_event() and old_event.wait_list:
101 diff_capacity = obj.attendance_event.max_capacity - old_event.attendance_event.max_capacity
102 if diff_capacity > 0:
103 if diff_capacity > len(old_event.wait_list):
104 diff_capacity = len(old_event.wait_list)
105 # Using old_event because max_capacity has already been changed in obj
106 old_event.notify_waiting_list(host=request.META['HTTP_HOST'], extra_capacity=diff_capacity)
107 obj.save()
108
109 def save_formset(self, request, form, formset, change):
110 instances = formset.save(commit=False)
111 for instance in instances:
112 instance.save()
113 formset.save_m2m()
114
115 def get_form(self, request, obj=None, **kwargs):
116 form = super(EventAdmin, self).get_form(request, obj, **kwargs)
117 form.base_fields['ingress_short'].validators=[validators.MinLengthValidator(50)]
118 form.base_fields['ingress'].validators=[validators.MinLengthValidator(75)]
119 form.base_fields['description'].validators=[validators.MinLengthValidator(140)]
120 return form
121
122 admin.site.register(Event, EventAdmin)
123 admin.site.register(Attendee, AttendeeAdmin)
124 admin.site.register(AttendanceEvent, AttendanceEventAdmin)
125 admin.site.register(RuleBundle, RuleBundleAdmin)
126 admin.site.register(GradeRule, GradeRuleAdmin)
127 admin.site.register(UserGroupRule, UserGroupRuleAdmin)
128 admin.site.register(FieldOfStudyRule, FieldOfStudyRuleAdmin)
129
[end of apps/events/admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/apps/events/admin.py b/apps/events/admin.py
--- a/apps/events/admin.py
+++ b/apps/events/admin.py
@@ -40,11 +40,6 @@
inline_classes = ('grp-collapse grp-open',) # style
-class AttendanceEventAdmin(admin.ModelAdmin):
- model = AttendanceEvent
- inlines = (AttendeeInline, RuleBundleInline)
-
-
class AttendeeAdmin(admin.ModelAdmin):
model = Attendee
list_display = ('user', 'event', 'paid')
@@ -119,9 +114,9 @@
form.base_fields['description'].validators=[validators.MinLengthValidator(140)]
return form
+
admin.site.register(Event, EventAdmin)
admin.site.register(Attendee, AttendeeAdmin)
-admin.site.register(AttendanceEvent, AttendanceEventAdmin)
admin.site.register(RuleBundle, RuleBundleAdmin)
admin.site.register(GradeRule, GradeRuleAdmin)
admin.site.register(UserGroupRule, UserGroupRuleAdmin)
| {"golden_diff": "diff --git a/apps/events/admin.py b/apps/events/admin.py\n--- a/apps/events/admin.py\n+++ b/apps/events/admin.py\n@@ -40,11 +40,6 @@\n inline_classes = ('grp-collapse grp-open',) # style\n \n \n-class AttendanceEventAdmin(admin.ModelAdmin):\n- model = AttendanceEvent\n- inlines = (AttendeeInline, RuleBundleInline)\n-\n-\n class AttendeeAdmin(admin.ModelAdmin):\n model = Attendee\n list_display = ('user', 'event', 'paid')\n@@ -119,9 +114,9 @@\n form.base_fields['description'].validators=[validators.MinLengthValidator(140)]\n return form\n \n+\n admin.site.register(Event, EventAdmin)\n admin.site.register(Attendee, AttendeeAdmin)\n-admin.site.register(AttendanceEvent, AttendanceEventAdmin)\n admin.site.register(RuleBundle, RuleBundleAdmin)\n admin.site.register(GradeRule, GradeRuleAdmin)\n admin.site.register(UserGroupRule, UserGroupRuleAdmin)\n", "issue": "Hide attendanceevent from django admin\nhttps://online.ntnu.no/admin/events/attendanceevent/\n\nThis view should not be used by anyone and attendance info should be edited through the event directly. \n\nShould be possible to hide this by removing \n`admin.site.register(AttendanceEvent, AttendanceEventAdmin)`\n in events/admin.py (untested)\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom django import forms\nfrom django.contrib import admin\nfrom django.core import validators\nfrom django.utils.translation import ugettext as _\n\nfrom apps.events.models import Event\nfrom apps.events.models import AttendanceEvent\nfrom apps.events.models import Attendee\nfrom apps.events.models import CompanyEvent\nfrom apps.events.models import RuleBundle\nfrom apps.events.models import FieldOfStudyRule\nfrom apps.events.models import GradeRule\nfrom apps.events.models import UserGroupRule\nfrom apps.feedback.admin import FeedbackRelationInline\n\n\n\nclass AttendeeInline(admin.TabularInline):\n model = Attendee\n extra = 1\n classes = ('grp-collapse grp-open',) # style\n inline_classes = ('grp-collapse grp-open',) # style\n\n\nclass CompanyInline(admin.TabularInline):\n model = CompanyEvent\n max_num = 20\n extra = 0\n classes = ('grp-collapse grp-open',) # style\n inline_classes = ('grp-collapse grp-open',) # style\n\n\nclass RuleBundleInline(admin.TabularInline):\n model = RuleBundle\n extra = 1\n max_num = 20\n classes = ('grp-collapse grp-open',) # style\n inline_classes = ('grp-collapse grp-open',) # style\n\n\nclass AttendanceEventAdmin(admin.ModelAdmin):\n model = AttendanceEvent\n inlines = (AttendeeInline, RuleBundleInline)\n\n\nclass AttendeeAdmin(admin.ModelAdmin):\n model = Attendee\n list_display = ('user', 'event', 'paid')\n actions = None\n\n def delete_model(self, request, obj):\n event = obj.event.event\n event.notify_waiting_list(host=request.META['HTTP_HOST'], unattended_user=obj.user)\n obj.delete()\n\n\nclass CompanyEventAdmin(admin.ModelAdmin):\n model = CompanyEvent\n inlines = (CompanyInline,)\n\n\nclass RuleBundleAdmin(admin.ModelAdmin):\n model = RuleBundle\n\n\nclass FieldOfStudyRuleAdmin(admin.ModelAdmin):\n model = FieldOfStudyRule\n\n\nclass GradeRuleAdmin(admin.ModelAdmin):\n model = GradeRule\n\n\nclass UserGroupRuleAdmin(admin.ModelAdmin):\n model = UserGroupRule\n\n\nclass AttendanceEventInline(admin.StackedInline):\n model = AttendanceEvent\n max_num = 1\n extra = 0\n filter_horizontal = ('rule_bundles',)\n classes = ('grp-collapse grp-open',) # style\n inline_classes = ('grp-collapse grp-open',) # style\n\n\nclass EventAdmin(admin.ModelAdmin):\n inlines = (AttendanceEventInline, 
FeedbackRelationInline, CompanyInline)\n exclude = (\"author\", )\n search_fields = ('title',)\n\n def save_model(self, request, obj, form, change):\n if not change: # created\n obj.author = request.user\n else:\n # If attendance max capacity changed we will notify users that they are now on the attend list\n old_event = Event.objects.get(id=obj.id)\n if old_event.is_attendance_event() and old_event.wait_list:\n diff_capacity = obj.attendance_event.max_capacity - old_event.attendance_event.max_capacity\n if diff_capacity > 0:\n if diff_capacity > len(old_event.wait_list):\n diff_capacity = len(old_event.wait_list)\n # Using old_event because max_capacity has already been changed in obj\n old_event.notify_waiting_list(host=request.META['HTTP_HOST'], extra_capacity=diff_capacity)\n obj.save()\n\n def save_formset(self, request, form, formset, change):\n instances = formset.save(commit=False)\n for instance in instances:\n instance.save()\n formset.save_m2m()\n\n def get_form(self, request, obj=None, **kwargs):\n form = super(EventAdmin, self).get_form(request, obj, **kwargs)\n form.base_fields['ingress_short'].validators=[validators.MinLengthValidator(50)]\n form.base_fields['ingress'].validators=[validators.MinLengthValidator(75)]\n form.base_fields['description'].validators=[validators.MinLengthValidator(140)]\n return form\n\nadmin.site.register(Event, EventAdmin)\nadmin.site.register(Attendee, AttendeeAdmin)\nadmin.site.register(AttendanceEvent, AttendanceEventAdmin)\nadmin.site.register(RuleBundle, RuleBundleAdmin)\nadmin.site.register(GradeRule, GradeRuleAdmin)\nadmin.site.register(UserGroupRule, UserGroupRuleAdmin)\nadmin.site.register(FieldOfStudyRule, FieldOfStudyRuleAdmin)\n", "path": "apps/events/admin.py"}]} | 1,842 | 214 |
gh_patches_debug_31888 | rasdani/github-patches | git_diff | Flexget__Flexget-1608 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Crash when using convert_magnet with aria2.
There is a crash when using convert_magnet with aria2.
```
2017-01-02 19:51 CRITICAL task discover_movies_hd BUG: Unhandled error in plugin aria2: u'file'
2017-01-02 19:51 CRITICAL manager discover_movies_hd An unexpected crash has occurred. Writing crash report to /home/wyrm/.flexget/crash_report.2017.01.02.195150778857.log. Please verify you are running the latest version of flexget by using "flexget -V" from CLI or by using version_checker plugin at http://flexget.com/wiki/Plugins/version_checker. You are currently using version 2.8.17.dev
2017-01-02 19:51 WARNING task discover_movies_hd Aborting task (plugin: aria2)
```
I don't have a crashlog, sorry.
</issue>
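The traceback above comes down to the aria2 output plugin assuming every torrent entry still carries a `file` key. A small self-contained sketch of a defensive lookup (editor's illustration in plain Python, not FlexGet code):

```python
def torrent_path(entry):
    """Return a usable torrent file path from an entry-like dict, or None."""
    # 'file' is set when the torrent was just downloaded; 'location' is where a
    # download plugin may have moved it afterwards.
    for key in ('file', 'location'):
        path = entry.get(key)
        if path:
            return path
    return None


print(torrent_path({'torrent': 'meta', 'location': '/tmp/foo.torrent'}))  # /tmp/foo.torrent
print(torrent_path({'torrent': 'meta'}))  # None -> caller should fail the entry cleanly
```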
<code>
[start of flexget/plugins/modify/convert_magnet.py]
1 from __future__ import unicode_literals, division, absolute_import
2 from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
3 import os
4 import time
5 import logging
6
7 from flexget import plugin
8 from flexget.event import event
9 from flexget.utils.tools import parse_timedelta
10 from flexget.utils.pathscrub import pathscrub
11
12 log = logging.getLogger('convert_magnet')
13
14
15 class ConvertMagnet(object):
16 """Convert magnet only entries to a torrent file"""
17
18 schema = {
19 "oneOf": [
20 # Allow convert_magnet: no form to turn off plugin altogether
21 {"type": "boolean"},
22 {
23 "type": "object",
24 "properties": {
25 "timeout": {"type": "string", "format": "interval", "default": "30 seconds"},
26 },
27 "additionalProperties": False
28 }
29 ]
30 }
31
32 def magnet_to_torrent(self, magnet_uri, destination_folder, timeout):
33 import libtorrent
34 params = libtorrent.parse_magnet_uri(magnet_uri)
35 session = libtorrent.session()
36 # for some reason the info_hash needs to be bytes but it's a struct called sha1_hash
37 params['info_hash'] = bytes(params['info_hash'])
38 handle = libtorrent.add_magnet_uri(session, magnet_uri, params)
39 log.debug('Acquiring torrent metadata for magnet %s', magnet_uri)
40 timeout_value = timeout
41 while not handle.has_metadata():
42 time.sleep(0.1)
43 timeout_value -= 0.1
44 if timeout_value <= 0:
45 raise plugin.PluginError('Timed out after {} seconds trying to magnetize'.format(timeout))
46 log.debug('Metadata acquired')
47 torrent_info = handle.get_torrent_info()
48 torrent_file = libtorrent.create_torrent(torrent_info)
49 torrent_path = pathscrub(os.path.join(destination_folder, torrent_info.name() + ".torrent"))
50 with open(torrent_path, "wb") as f:
51 f.write(libtorrent.bencode(torrent_file.generate()))
52 log.debug('Torrent file wrote to %s', torrent_path)
53 return torrent_path
54
55 def prepare_config(self, config):
56 if not isinstance(config, dict):
57 config = {}
58 config.setdefault('timeout', '30 seconds')
59 return config
60
61 @plugin.priority(255)
62 def on_task_start(self, task, config):
63 if config is False:
64 return
65 try:
66 import libtorrent # noqa
67 except ImportError:
68 raise plugin.DependencyError('convert_magnet', 'libtorrent', 'libtorrent package required', log)
69
70 @plugin.priority(130)
71 def on_task_download(self, task, config):
72 if config is False:
73 return
74 config = self.prepare_config(config)
75 # Create the conversion target directory
76 converted_path = os.path.join(task.manager.config_base, 'converted')
77
78 timeout = parse_timedelta(config['timeout']).total_seconds()
79
80 if not os.path.isdir(converted_path):
81 os.mkdir(converted_path)
82
83 for entry in task.accepted:
84 if entry['url'].startswith('magnet:'):
85 entry.setdefault('urls', [entry['url']])
86 try:
87 log.info('Converting entry {} magnet URI to a torrent file'.format(entry['title']))
88 torrent_file = self.magnet_to_torrent(entry['url'], converted_path, timeout)
89 except (plugin.PluginError, TypeError) as e:
90 log.error('Unable to convert Magnet URI for entry %s: %s', entry['title'], e)
91 continue
92 # Windows paths need an extra / prepended to them for url
93 if not torrent_file.startswith('/'):
94 torrent_file = '/' + torrent_file
95 entry['url'] = torrent_file
96 entry['file'] = torrent_file
97 # make sure it's first in the list because of how download plugin works
98 entry['urls'].insert(0, 'file://{}'.format(torrent_file))
99
100
101 @event('plugin.register')
102 def register_plugin():
103 plugin.register(ConvertMagnet, 'convert_magnet', api_ver=2)
104
[end of flexget/plugins/modify/convert_magnet.py]
[start of flexget/plugins/clients/aria2.py]
1 from __future__ import unicode_literals, division, absolute_import
2 from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
3
4 import logging
5 import os
6 import xmlrpc.client
7 from socket import error as socket_error
8
9 from flexget import plugin
10 from flexget.event import event
11 from flexget.utils.template import RenderError
12
13 log = logging.getLogger('aria2')
14
15
16 class OutputAria2(object):
17 """
18 Simple Aria2 output
19
20 Example::
21
22 aria2:
23 path: ~/downloads/
24
25 """
26
27 schema = {
28 'type': 'object',
29 'properties': {
30 'server': {'type': 'string', 'default': 'localhost'},
31 'port': {'type': 'integer', 'default': 6800},
32 'secret': {'type': 'string', 'default': ''},
33 'username': {'type': 'string', 'default': ''}, # NOTE: To be deprecated by aria2
34 'password': {'type': 'string', 'default': ''},
35 'path': {'type': 'string'},
36 'options': {
37 'type': 'object',
38 'additionalProperties': {'oneOf': [{'type': 'string'}, {'type': 'integer'}]}
39 }
40
41 },
42 'required': ['path'],
43 'additionalProperties': False
44 }
45
46 def aria2_connection(self, server, port, username=None, password=None):
47 if username and password:
48 userpass = '%s:%s@' % (username, password)
49 else:
50 userpass = ''
51 url = 'http://%s%s:%s/rpc' % (userpass, server, port)
52 log.debug('aria2 url: %s' % url)
53 log.info('Connecting to daemon at %s', url)
54 try:
55 return xmlrpc.client.ServerProxy(url).aria2
56 except xmlrpc.client.ProtocolError as err:
57 raise plugin.PluginError('Could not connect to aria2 at %s. Protocol error %s: %s'
58 % (url, err.errcode, err.errmsg), log)
59 except xmlrpc.client.Fault as err:
60 raise plugin.PluginError('XML-RPC fault: Unable to connect to aria2 daemon at %s: %s'
61 % (url, err.faultString), log)
62 except socket_error as e:
63 raise plugin.PluginError('Socket connection issue with aria2 daemon at %s: %s' % (url, e), log)
64 except:
65 log.debug('Unexpected error during aria2 connection', exc_info=True)
66 raise plugin.PluginError('Unidentified error during connection to aria2 daemon', log)
67
68 def prepare_config(self, config):
69 config.setdefault('server', 'localhost')
70 config.setdefault('port', 6800)
71 config.setdefault('username', '')
72 config.setdefault('password', '')
73 config.setdefault('secret', '')
74 config.setdefault('options', {})
75 return config
76
77 def on_task_output(self, task, config):
78 # don't add when learning
79 if task.options.learn:
80 return
81 config = self.prepare_config(config)
82 aria2 = self.aria2_connection(config['server'], config['port'],
83 config['username'], config['password'])
84 for entry in task.accepted:
85 if task.options.test:
86 log.verbose('Would add `%s` to aria2.', entry['title'])
87 continue
88 try:
89 self.add_entry(aria2, entry, config)
90 except socket_error as se:
91 entry.fail('Unable to reach Aria2: %s', se)
92 except xmlrpc.client.Fault as err:
93 log.critical('Fault code %s message %s', err.faultCode, err.faultString)
94 entry.fail('Aria2 communication Fault')
95 except Exception as e:
96 log.debug('Exception type %s', type(e), exc_info=True)
97 raise
98
99 def add_entry(self, aria2, entry, config):
100 """
101 Add entry to Aria2
102 """
103 options = config['options']
104 try:
105 options['dir'] = os.path.expanduser(entry.render(config['path']).rstrip('/'))
106 except RenderError as e:
107 entry.fail('failed to render \'path\': %s' % e)
108 return
109 secret = None
110 if config['secret']:
111 secret = 'token:%s' % config['secret']
112 # handle torrent files
113 if 'torrent' in entry:
114 if secret:
115 return aria2.addTorrent(secret, xmlrpc.client.Binary(open(entry['file'], mode='rb').read()))
116 return aria2.addTorrent(xmlrpc.client.Binary(open(entry['file'], mode='rb').read()))
117 # handle everything else (except metalink -- which is unsupported)
118 # so magnets, https, http, ftp .. etc
119 if secret:
120 return aria2.addUri(secret, [entry['url']], options)
121 return aria2.addUri([entry['url']], options)
122
123
124 @event('plugin.register')
125 def register_plugin():
126 plugin.register(OutputAria2, 'aria2', api_ver=2)
127
[end of flexget/plugins/clients/aria2.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/flexget/plugins/clients/aria2.py b/flexget/plugins/clients/aria2.py
--- a/flexget/plugins/clients/aria2.py
+++ b/flexget/plugins/clients/aria2.py
@@ -111,9 +111,17 @@
secret = 'token:%s' % config['secret']
# handle torrent files
if 'torrent' in entry:
+ if 'file' in entry:
+ torrent_file = entry['file']
+ elif 'location' in entry:
+ # in case download plugin moved the file elsewhere
+ torrent_file = entry['location']
+ else:
+ entry.fail('Cannot find torrent file')
+ return
if secret:
- return aria2.addTorrent(secret, xmlrpc.client.Binary(open(entry['file'], mode='rb').read()))
- return aria2.addTorrent(xmlrpc.client.Binary(open(entry['file'], mode='rb').read()))
+ return aria2.addTorrent(secret, xmlrpc.client.Binary(open(torrent_file, mode='rb').read()), [], options)
+ return aria2.addTorrent(xmlrpc.client.Binary(open(torrent_file, mode='rb').read()), [], options)
# handle everything else (except metalink -- which is unsupported)
# so magnets, https, http, ftp .. etc
if secret:
diff --git a/flexget/plugins/modify/convert_magnet.py b/flexget/plugins/modify/convert_magnet.py
--- a/flexget/plugins/modify/convert_magnet.py
+++ b/flexget/plugins/modify/convert_magnet.py
@@ -33,8 +33,10 @@
import libtorrent
params = libtorrent.parse_magnet_uri(magnet_uri)
session = libtorrent.session()
- # for some reason the info_hash needs to be bytes but it's a struct called sha1_hash
- params['info_hash'] = bytes(params['info_hash'])
+ lt_version = [int(v) for v in libtorrent.version.split('.')]
+ if lt_version > [0,16,13,0]:
+ # for some reason the info_hash needs to be bytes but it's a struct called sha1_hash
+ params['info_hash'] = params['info_hash'].to_bytes()
handle = libtorrent.add_magnet_uri(session, magnet_uri, params)
log.debug('Acquiring torrent metadata for magnet %s', magnet_uri)
timeout_value = timeout
| {"golden_diff": "diff --git a/flexget/plugins/clients/aria2.py b/flexget/plugins/clients/aria2.py\n--- a/flexget/plugins/clients/aria2.py\n+++ b/flexget/plugins/clients/aria2.py\n@@ -111,9 +111,17 @@\n secret = 'token:%s' % config['secret']\n # handle torrent files\n if 'torrent' in entry:\n+ if 'file' in entry:\n+ torrent_file = entry['file']\n+ elif 'location' in entry:\n+ # in case download plugin moved the file elsewhere\n+ torrent_file = entry['location']\n+ else:\n+ entry.fail('Cannot find torrent file')\n+ return\n if secret:\n- return aria2.addTorrent(secret, xmlrpc.client.Binary(open(entry['file'], mode='rb').read()))\n- return aria2.addTorrent(xmlrpc.client.Binary(open(entry['file'], mode='rb').read()))\n+ return aria2.addTorrent(secret, xmlrpc.client.Binary(open(torrent_file, mode='rb').read()), [], options)\n+ return aria2.addTorrent(xmlrpc.client.Binary(open(torrent_file, mode='rb').read()), [], options)\n # handle everything else (except metalink -- which is unsupported)\n # so magnets, https, http, ftp .. etc\n if secret:\ndiff --git a/flexget/plugins/modify/convert_magnet.py b/flexget/plugins/modify/convert_magnet.py\n--- a/flexget/plugins/modify/convert_magnet.py\n+++ b/flexget/plugins/modify/convert_magnet.py\n@@ -33,8 +33,10 @@\n import libtorrent\n params = libtorrent.parse_magnet_uri(magnet_uri)\n session = libtorrent.session()\n- # for some reason the info_hash needs to be bytes but it's a struct called sha1_hash\n- params['info_hash'] = bytes(params['info_hash'])\n+ lt_version = [int(v) for v in libtorrent.version.split('.')]\n+ if lt_version > [0,16,13,0]:\n+ # for some reason the info_hash needs to be bytes but it's a struct called sha1_hash\n+ params['info_hash'] = params['info_hash'].to_bytes()\n handle = libtorrent.add_magnet_uri(session, magnet_uri, params)\n log.debug('Acquiring torrent metadata for magnet %s', magnet_uri)\n timeout_value = timeout\n", "issue": "Crash when using convert_magnet with aria2.\nThere is a crash when using convert_magnet with aria2.\r\n\r\n```\r\n2017-01-02 19:51 CRITICAL task discover_movies_hd BUG: Unhandled error in plugin aria2: u'file'\r\n2017-01-02 19:51 CRITICAL manager discover_movies_hd An unexpected crash has occurred. Writing crash report to /home/wyrm/.flexget/crash_report.2017.01.02.195150778857.log. Please verify you are running the latest version of flexget by using \"flexget -V\" from CLI or by using version_checker plugin at http://flexget.com/wiki/Plugins/version_checker. 
You are currently using version 2.8.17.dev\r\n2017-01-02 19:51 WARNING task discover_movies_hd Aborting task (plugin: aria2)\r\n\r\n```\r\nI don't have a crashlog, sorry.\n", "before_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nfrom builtins import * # noqa pylint: disable=unused-import, redefined-builtin\nimport os\nimport time\nimport logging\n\nfrom flexget import plugin\nfrom flexget.event import event\nfrom flexget.utils.tools import parse_timedelta\nfrom flexget.utils.pathscrub import pathscrub\n\nlog = logging.getLogger('convert_magnet')\n\n\nclass ConvertMagnet(object):\n \"\"\"Convert magnet only entries to a torrent file\"\"\"\n\n schema = {\n \"oneOf\": [\n # Allow convert_magnet: no form to turn off plugin altogether\n {\"type\": \"boolean\"},\n {\n \"type\": \"object\",\n \"properties\": {\n \"timeout\": {\"type\": \"string\", \"format\": \"interval\", \"default\": \"30 seconds\"},\n },\n \"additionalProperties\": False\n }\n ]\n }\n\n def magnet_to_torrent(self, magnet_uri, destination_folder, timeout):\n import libtorrent\n params = libtorrent.parse_magnet_uri(magnet_uri)\n session = libtorrent.session()\n # for some reason the info_hash needs to be bytes but it's a struct called sha1_hash\n params['info_hash'] = bytes(params['info_hash'])\n handle = libtorrent.add_magnet_uri(session, magnet_uri, params)\n log.debug('Acquiring torrent metadata for magnet %s', magnet_uri)\n timeout_value = timeout\n while not handle.has_metadata():\n time.sleep(0.1)\n timeout_value -= 0.1\n if timeout_value <= 0:\n raise plugin.PluginError('Timed out after {} seconds trying to magnetize'.format(timeout))\n log.debug('Metadata acquired')\n torrent_info = handle.get_torrent_info()\n torrent_file = libtorrent.create_torrent(torrent_info)\n torrent_path = pathscrub(os.path.join(destination_folder, torrent_info.name() + \".torrent\"))\n with open(torrent_path, \"wb\") as f:\n f.write(libtorrent.bencode(torrent_file.generate()))\n log.debug('Torrent file wrote to %s', torrent_path)\n return torrent_path\n\n def prepare_config(self, config):\n if not isinstance(config, dict):\n config = {}\n config.setdefault('timeout', '30 seconds')\n return config\n\n @plugin.priority(255)\n def on_task_start(self, task, config):\n if config is False:\n return\n try:\n import libtorrent # noqa\n except ImportError:\n raise plugin.DependencyError('convert_magnet', 'libtorrent', 'libtorrent package required', log)\n\n @plugin.priority(130)\n def on_task_download(self, task, config):\n if config is False:\n return\n config = self.prepare_config(config)\n # Create the conversion target directory\n converted_path = os.path.join(task.manager.config_base, 'converted')\n\n timeout = parse_timedelta(config['timeout']).total_seconds()\n\n if not os.path.isdir(converted_path):\n os.mkdir(converted_path)\n\n for entry in task.accepted:\n if entry['url'].startswith('magnet:'):\n entry.setdefault('urls', [entry['url']])\n try:\n log.info('Converting entry {} magnet URI to a torrent file'.format(entry['title']))\n torrent_file = self.magnet_to_torrent(entry['url'], converted_path, timeout)\n except (plugin.PluginError, TypeError) as e:\n log.error('Unable to convert Magnet URI for entry %s: %s', entry['title'], e)\n continue\n # Windows paths need an extra / prepended to them for url\n if not torrent_file.startswith('/'):\n torrent_file = '/' + torrent_file\n entry['url'] = torrent_file\n entry['file'] = torrent_file\n # make sure it's first in the list because of how download plugin 
works\n entry['urls'].insert(0, 'file://{}'.format(torrent_file))\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(ConvertMagnet, 'convert_magnet', api_ver=2)\n", "path": "flexget/plugins/modify/convert_magnet.py"}, {"content": "from __future__ import unicode_literals, division, absolute_import\nfrom builtins import * # noqa pylint: disable=unused-import, redefined-builtin\n\nimport logging\nimport os\nimport xmlrpc.client\nfrom socket import error as socket_error\n\nfrom flexget import plugin\nfrom flexget.event import event\nfrom flexget.utils.template import RenderError\n\nlog = logging.getLogger('aria2')\n\n\nclass OutputAria2(object):\n \"\"\"\n Simple Aria2 output\n\n Example::\n\n aria2:\n path: ~/downloads/\n\n \"\"\"\n\n schema = {\n 'type': 'object',\n 'properties': {\n 'server': {'type': 'string', 'default': 'localhost'},\n 'port': {'type': 'integer', 'default': 6800},\n 'secret': {'type': 'string', 'default': ''},\n 'username': {'type': 'string', 'default': ''}, # NOTE: To be deprecated by aria2\n 'password': {'type': 'string', 'default': ''},\n 'path': {'type': 'string'},\n 'options': {\n 'type': 'object',\n 'additionalProperties': {'oneOf': [{'type': 'string'}, {'type': 'integer'}]}\n }\n\n },\n 'required': ['path'],\n 'additionalProperties': False\n }\n\n def aria2_connection(self, server, port, username=None, password=None):\n if username and password:\n userpass = '%s:%s@' % (username, password)\n else:\n userpass = ''\n url = 'http://%s%s:%s/rpc' % (userpass, server, port)\n log.debug('aria2 url: %s' % url)\n log.info('Connecting to daemon at %s', url)\n try:\n return xmlrpc.client.ServerProxy(url).aria2\n except xmlrpc.client.ProtocolError as err:\n raise plugin.PluginError('Could not connect to aria2 at %s. 
Protocol error %s: %s'\n % (url, err.errcode, err.errmsg), log)\n except xmlrpc.client.Fault as err:\n raise plugin.PluginError('XML-RPC fault: Unable to connect to aria2 daemon at %s: %s'\n % (url, err.faultString), log)\n except socket_error as e:\n raise plugin.PluginError('Socket connection issue with aria2 daemon at %s: %s' % (url, e), log)\n except:\n log.debug('Unexpected error during aria2 connection', exc_info=True)\n raise plugin.PluginError('Unidentified error during connection to aria2 daemon', log)\n\n def prepare_config(self, config):\n config.setdefault('server', 'localhost')\n config.setdefault('port', 6800)\n config.setdefault('username', '')\n config.setdefault('password', '')\n config.setdefault('secret', '')\n config.setdefault('options', {})\n return config\n\n def on_task_output(self, task, config):\n # don't add when learning\n if task.options.learn:\n return\n config = self.prepare_config(config)\n aria2 = self.aria2_connection(config['server'], config['port'],\n config['username'], config['password'])\n for entry in task.accepted:\n if task.options.test:\n log.verbose('Would add `%s` to aria2.', entry['title'])\n continue\n try:\n self.add_entry(aria2, entry, config)\n except socket_error as se:\n entry.fail('Unable to reach Aria2: %s', se)\n except xmlrpc.client.Fault as err:\n log.critical('Fault code %s message %s', err.faultCode, err.faultString)\n entry.fail('Aria2 communication Fault')\n except Exception as e:\n log.debug('Exception type %s', type(e), exc_info=True)\n raise\n\n def add_entry(self, aria2, entry, config):\n \"\"\"\n Add entry to Aria2\n \"\"\"\n options = config['options']\n try:\n options['dir'] = os.path.expanduser(entry.render(config['path']).rstrip('/'))\n except RenderError as e:\n entry.fail('failed to render \\'path\\': %s' % e)\n return\n secret = None\n if config['secret']:\n secret = 'token:%s' % config['secret']\n # handle torrent files\n if 'torrent' in entry:\n if secret:\n return aria2.addTorrent(secret, xmlrpc.client.Binary(open(entry['file'], mode='rb').read()))\n return aria2.addTorrent(xmlrpc.client.Binary(open(entry['file'], mode='rb').read()))\n # handle everything else (except metalink -- which is unsupported)\n # so magnets, https, http, ftp .. etc\n if secret:\n return aria2.addUri(secret, [entry['url']], options)\n return aria2.addUri([entry['url']], options)\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(OutputAria2, 'aria2', api_ver=2)\n", "path": "flexget/plugins/clients/aria2.py"}]} | 3,248 | 536 |
gh_patches_debug_16878 | rasdani/github-patches | git_diff | ansible__ansible-modules-core-4747 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ini_file - Insert missing option line before blank lines at the end of the section
##### ISSUE TYPE
- Bug Report
##### COMPONENT NAME
ini_file
##### ANSIBLE VERSION
```
ansible 2.1.1.0
config file = /etc/ansible/ansible.cfg
configured module search path = Default w/o overrides
```
##### CONFIGURATION
N/A
##### OS / ENVIRONMENT
N/A
##### SUMMARY
<!--- Explain the problem briefly -->
New lines are added to an existing section after blank lines separating sections, but should instead be added before blank lines at the end of a section.
##### STEPS TO REPRODUCE
Use ini_file to add a new line to a file.
Given test.ini:
```
[sect1]
opt1 = val1

[sect2]
opt2 = val2
```
Run this test command:
```
ansible -c local -m ini_file -a 'dest=test.ini section=sect1 option=opt3 value=val3' localhost
```
##### EXPECTED RESULTS
test.ini should look like this:
```
[sect1]
opt1 = val1
opt3 = val3

[sect2]
opt2 = val2
```
##### ACTUAL RESULTS
This file is still technically correct but just looks a bit misleading as opt3 is grouped closer to [sect2].
```
[sect1]
opt1 = val1

opt3 = val3
[sect2]
opt2 = val2
```
</issue>
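The behaviour described above comes from inserting at the index of the next section header, i.e. after any trailing blank lines. A standalone sketch of the backwards scan that avoids this (editor's illustration, not the module itself):

```python
import re


def insertion_index(lines, next_header_index):
    """Index just past the last non-blank, non-comment line of the section."""
    i = next_header_index
    while i > 0 and re.match(r'^[ \t]*([#;].*)?$', lines[i - 1]):
        i -= 1
    return i


lines = ['[sect1]\n', 'opt1 = val1\n', '\n', '[sect2]\n', 'opt2 = val2\n']
lines.insert(insertion_index(lines, 3), 'opt3 = val3\n')  # 3 = index of '[sect2]'
print(''.join(lines), end='')  # opt3 lands before the blank line that ends [sect1]
```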
<code>
[start of files/ini_file.py]
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3
4 # (c) 2012, Jan-Piet Mens <jpmens () gmail.com>
5 # (c) 2015, Ales Nosek <anosek.nosek () gmail.com>
6 #
7 # This file is part of Ansible
8 #
9 # Ansible is free software: you can redistribute it and/or modify
10 # it under the terms of the GNU General Public License as published by
11 # the Free Software Foundation, either version 3 of the License, or
12 # (at your option) any later version.
13 #
14 # Ansible is distributed in the hope that it will be useful,
15 # but WITHOUT ANY WARRANTY; without even the implied warranty of
16 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 # GNU General Public License for more details.
18 #
19 # You should have received a copy of the GNU General Public License
20 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
21 #
22
23 DOCUMENTATION = '''
24 ---
25 module: ini_file
26 short_description: Tweak settings in INI files
27 extends_documentation_fragment: files
28 description:
29 - Manage (add, remove, change) individual settings in an INI-style file without having
30 to manage the file as a whole with, say, M(template) or M(assemble). Adds missing
31 sections if they don't exist.
32 - Before version 2.0, comments are discarded when the source file is read, and therefore will not show up in the destination file.
33 version_added: "0.9"
34 options:
35 dest:
36 description:
37 - Path to the INI-style file; this file is created if required
38 required: true
39 default: null
40 section:
41 description:
42 - Section name in INI file. This is added if C(state=present) automatically when
43 a single value is being set.
44 required: true
45 default: null
46 option:
47 description:
48 - if set (required for changing a I(value)), this is the name of the option.
49 - May be omitted if adding/removing a whole I(section).
50 required: false
51 default: null
52 value:
53 description:
54 - the string value to be associated with an I(option). May be omitted when removing an I(option).
55 required: false
56 default: null
57 backup:
58 description:
59 - Create a backup file including the timestamp information so you can get
60 the original file back if you somehow clobbered it incorrectly.
61 required: false
62 default: "no"
63 choices: [ "yes", "no" ]
64 others:
65 description:
66 - all arguments accepted by the M(file) module also work here
67 required: false
68 state:
69 description:
70 - If set to C(absent) the option or section will be removed if present instead of created.
71 required: false
72 default: "present"
73 choices: [ "present", "absent" ]
74 no_extra_spaces:
75 description:
76 - do not insert spaces before and after '=' symbol
77 required: false
78 default: false
79 version_added: "2.1"
80 notes:
81 - While it is possible to add an I(option) without specifying a I(value), this makes
82 no sense.
83 - A section named C(default) cannot be added by the module, but if it exists, individual
84 options within the section can be updated. (This is a limitation of Python's I(ConfigParser).)
85 Either use M(template) to create a base INI file with a C([default]) section, or use
86 M(lineinfile) to add the missing line.
87 requirements: [ ConfigParser ]
88 author:
89 - "Jan-Piet Mens (@jpmens)"
90 - "Ales Nosek (@noseka1)"
91 '''
92
93 EXAMPLES = '''
94 # Ensure "fav=lemonade is in section "[drinks]" in specified file
95 - ini_file: dest=/etc/conf section=drinks option=fav value=lemonade mode=0600 backup=yes
96
97 - ini_file: dest=/etc/anotherconf
98 section=drinks
99 option=temperature
100 value=cold
101 backup=yes
102 '''
103
104 import os
105 import re
106
107 # ==============================================================
108 # match_opt
109
110 def match_opt(option, line):
111 option = re.escape(option)
112 return re.match('%s( |\t)*=' % option, line) \
113 or re.match('# *%s( |\t)*=' % option, line) \
114 or re.match('; *%s( |\t)*=' % option, line)
115
116 # ==============================================================
117 # match_active_opt
118
119 def match_active_opt(option, line):
120 option = re.escape(option)
121 return re.match('%s( |\t)*=' % option, line)
122
123 # ==============================================================
124 # do_ini
125
126 def do_ini(module, filename, section=None, option=None, value=None, state='present', backup=False, no_extra_spaces=False):
127
128
129 if not os.path.exists(filename):
130 try:
131 open(filename,'w').close()
132 except:
133 module.fail_json(msg="Destination file %s not writable" % filename)
134 ini_file = open(filename, 'r')
135 try:
136 ini_lines = ini_file.readlines()
137 # append a fake section line to simplify the logic
138 ini_lines.append('[')
139 finally:
140 ini_file.close()
141
142 within_section = not section
143 section_start = 0
144 changed = False
145 if no_extra_spaces:
146 assignment_format = '%s=%s\n'
147 else:
148 assignment_format = '%s = %s\n'
149
150 for index, line in enumerate(ini_lines):
151 if line.startswith('[%s]' % section):
152 within_section = True
153 section_start = index
154 elif line.startswith('['):
155 if within_section:
156 if state == 'present':
157 # insert missing option line at the end of the section
158 ini_lines.insert(index, assignment_format % (option, value))
159 changed = True
160 elif state == 'absent' and not option:
161 # remove the entire section
162 del ini_lines[section_start:index]
163 changed = True
164 break
165 else:
166 if within_section and option:
167 if state == 'present':
168 # change the existing option line
169 if match_opt(option, line):
170 newline = assignment_format % (option, value)
171 changed = ini_lines[index] != newline
172 ini_lines[index] = newline
173 if changed:
174 # remove all possible option occurences from the rest of the section
175 index = index + 1
176 while index < len(ini_lines):
177 line = ini_lines[index]
178 if line.startswith('['):
179 break
180 if match_active_opt(option, line):
181 del ini_lines[index]
182 else:
183 index = index + 1
184 break
185 else:
186 # comment out the existing option line
187 if match_active_opt(option, line):
188 ini_lines[index] = '#%s' % ini_lines[index]
189 changed = True
190 break
191
192 # remove the fake section line
193 del ini_lines[-1:]
194
195 if not within_section and option and state == 'present':
196 ini_lines.append('[%s]\n' % section)
197 ini_lines.append(assignment_format % (option, value))
198 changed = True
199
200
201 backup_file = None
202 if changed and not module.check_mode:
203 if backup:
204 backup_file = module.backup_local(filename)
205 ini_file = open(filename, 'w')
206 try:
207 ini_file.writelines(ini_lines)
208 finally:
209 ini_file.close()
210
211 return (changed, backup_file)
212
213 # ==============================================================
214 # main
215
216 def main():
217
218 module = AnsibleModule(
219 argument_spec = dict(
220 dest = dict(required=True),
221 section = dict(required=True),
222 option = dict(required=False),
223 value = dict(required=False),
224 backup = dict(default='no', type='bool'),
225 state = dict(default='present', choices=['present', 'absent']),
226 no_extra_spaces = dict(required=False, default=False, type='bool')
227 ),
228 add_file_common_args = True,
229 supports_check_mode = True
230 )
231
232 dest = os.path.expanduser(module.params['dest'])
233 section = module.params['section']
234 option = module.params['option']
235 value = module.params['value']
236 state = module.params['state']
237 backup = module.params['backup']
238 no_extra_spaces = module.params['no_extra_spaces']
239
240 (changed,backup_file) = do_ini(module, dest, section, option, value, state, backup, no_extra_spaces)
241
242 file_args = module.load_file_common_arguments(module.params)
243 changed = module.set_fs_attributes_if_different(file_args, changed)
244
245 results = { 'changed': changed, 'msg': "OK", 'dest': dest }
246 if backup_file is not None:
247 results['backup_file'] = backup_file
248
249 # Mission complete
250 module.exit_json(**results)
251
252 # import module snippets
253 from ansible.module_utils.basic import *
254 if __name__ == '__main__':
255 main()
256
[end of files/ini_file.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/files/ini_file.py b/files/ini_file.py
--- a/files/ini_file.py
+++ b/files/ini_file.py
@@ -155,8 +155,12 @@
if within_section:
if state == 'present':
# insert missing option line at the end of the section
- ini_lines.insert(index, assignment_format % (option, value))
- changed = True
+ for i in range(index, 0, -1):
+ # search backwards for previous non-blank or non-comment line
+ if not re.match(r'^[ \t]*([#;].*)?$', ini_lines[i - 1]):
+ ini_lines.insert(i, assignment_format % (option, value))
+ changed = True
+ break
elif state == 'absent' and not option:
# remove the entire section
del ini_lines[section_start:index]
| {"golden_diff": "diff --git a/files/ini_file.py b/files/ini_file.py\n--- a/files/ini_file.py\n+++ b/files/ini_file.py\n@@ -155,8 +155,12 @@\n if within_section:\n if state == 'present':\n # insert missing option line at the end of the section\n- ini_lines.insert(index, assignment_format % (option, value))\n- changed = True\n+ for i in range(index, 0, -1):\n+ # search backwards for previous non-blank or non-comment line\n+ if not re.match(r'^[ \\t]*([#;].*)?$', ini_lines[i - 1]):\n+ ini_lines.insert(i, assignment_format % (option, value))\n+ changed = True\n+ break\n elif state == 'absent' and not option:\n # remove the entire section\n del ini_lines[section_start:index]\n", "issue": "ini_file - Insert missing option line before blank lines at the end of the section\n##### ISSUE TYPE\n- Bug Report\n##### COMPONENT NAME\n\nini_file\n##### ANSIBLE VERSION\n\n```\nansible 2.1.1.0\n config file = /etc/ansible/ansible.cfg\n configured module search path = Default w/o overrides\n```\n##### CONFIGURATION\n\nN/A\n##### OS / ENVIRONMENT\n\nN/A\n##### SUMMARY\n\n<!--- Explain the problem briefly -->\n\nNew lines are added to an existing section after blank lines separating sections, but should instead be added before blank lines at the end of a section.\n##### STEPS TO REPRODUCE\n\nUse ini_file to add a new line to a file.\n\nGiven test.ini:\n\n```\n[sect1]\nopt1 = val1\n\n[sect2]\nopt2 = val2\n```\n\nRun this test command:\n\n```\nansible -c local -m ini_file -a 'dest=test.ini section=sect1 option=opt13 value=val13' localhost\n```\n##### EXPECTED RESULTS\n\ntest.ini should look like this:\n\n```\n[sect1]\nopt1 = val1\nopt3 = val3\n\n[sect2]\nopt2 = val2\n```\n##### ACTUAL RESULTS\n\nThis file is still technically correct but just looks a bit misleading as opt3 is grouped closer to [sect2].\n\n```\n[sect1]\nopt1 = val1\n\nopt3 = val3\n[sect2]\nopt2 = val2\n```\n\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2012, Jan-Piet Mens <jpmens () gmail.com>\n# (c) 2015, Ales Nosek <anosek.nosek () gmail.com>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n#\n\nDOCUMENTATION = '''\n---\nmodule: ini_file\nshort_description: Tweak settings in INI files\nextends_documentation_fragment: files\ndescription:\n - Manage (add, remove, change) individual settings in an INI-style file without having\n to manage the file as a whole with, say, M(template) or M(assemble). Adds missing\n sections if they don't exist.\n - Before version 2.0, comments are discarded when the source file is read, and therefore will not show up in the destination file.\nversion_added: \"0.9\"\noptions:\n dest:\n description:\n - Path to the INI-style file; this file is created if required\n required: true\n default: null\n section:\n description:\n - Section name in INI file. 
This is added if C(state=present) automatically when\n a single value is being set.\n required: true\n default: null\n option:\n description:\n - if set (required for changing a I(value)), this is the name of the option.\n - May be omitted if adding/removing a whole I(section).\n required: false\n default: null\n value:\n description:\n - the string value to be associated with an I(option). May be omitted when removing an I(option).\n required: false\n default: null\n backup:\n description:\n - Create a backup file including the timestamp information so you can get\n the original file back if you somehow clobbered it incorrectly.\n required: false\n default: \"no\"\n choices: [ \"yes\", \"no\" ]\n others:\n description:\n - all arguments accepted by the M(file) module also work here\n required: false\n state:\n description:\n - If set to C(absent) the option or section will be removed if present instead of created.\n required: false\n default: \"present\"\n choices: [ \"present\", \"absent\" ]\n no_extra_spaces:\n description:\n - do not insert spaces before and after '=' symbol\n required: false\n default: false\n version_added: \"2.1\"\nnotes:\n - While it is possible to add an I(option) without specifying a I(value), this makes\n no sense.\n - A section named C(default) cannot be added by the module, but if it exists, individual\n options within the section can be updated. (This is a limitation of Python's I(ConfigParser).)\n Either use M(template) to create a base INI file with a C([default]) section, or use\n M(lineinfile) to add the missing line.\nrequirements: [ ConfigParser ]\nauthor:\n - \"Jan-Piet Mens (@jpmens)\"\n - \"Ales Nosek (@noseka1)\"\n'''\n\nEXAMPLES = '''\n# Ensure \"fav=lemonade is in section \"[drinks]\" in specified file\n- ini_file: dest=/etc/conf section=drinks option=fav value=lemonade mode=0600 backup=yes\n\n- ini_file: dest=/etc/anotherconf\n section=drinks\n option=temperature\n value=cold\n backup=yes\n'''\n\nimport os\nimport re\n\n# ==============================================================\n# match_opt\n\ndef match_opt(option, line):\n option = re.escape(option)\n return re.match('%s( |\\t)*=' % option, line) \\\n or re.match('# *%s( |\\t)*=' % option, line) \\\n or re.match('; *%s( |\\t)*=' % option, line)\n\n# ==============================================================\n# match_active_opt\n\ndef match_active_opt(option, line):\n option = re.escape(option)\n return re.match('%s( |\\t)*=' % option, line)\n\n# ==============================================================\n# do_ini\n\ndef do_ini(module, filename, section=None, option=None, value=None, state='present', backup=False, no_extra_spaces=False):\n\n\n if not os.path.exists(filename):\n try:\n open(filename,'w').close()\n except:\n module.fail_json(msg=\"Destination file %s not writable\" % filename)\n ini_file = open(filename, 'r')\n try:\n ini_lines = ini_file.readlines()\n # append a fake section line to simplify the logic\n ini_lines.append('[')\n finally:\n ini_file.close()\n\n within_section = not section\n section_start = 0\n changed = False\n if no_extra_spaces:\n assignment_format = '%s=%s\\n'\n else:\n assignment_format = '%s = %s\\n'\n\n for index, line in enumerate(ini_lines):\n if line.startswith('[%s]' % section):\n within_section = True\n section_start = index\n elif line.startswith('['):\n if within_section:\n if state == 'present':\n # insert missing option line at the end of the section\n ini_lines.insert(index, assignment_format % (option, value))\n changed = True\n elif 
state == 'absent' and not option:\n # remove the entire section\n del ini_lines[section_start:index]\n changed = True\n break\n else:\n if within_section and option:\n if state == 'present':\n # change the existing option line\n if match_opt(option, line):\n newline = assignment_format % (option, value)\n changed = ini_lines[index] != newline\n ini_lines[index] = newline\n if changed:\n # remove all possible option occurences from the rest of the section\n index = index + 1\n while index < len(ini_lines):\n line = ini_lines[index]\n if line.startswith('['):\n break\n if match_active_opt(option, line):\n del ini_lines[index]\n else:\n index = index + 1\n break\n else:\n # comment out the existing option line\n if match_active_opt(option, line):\n ini_lines[index] = '#%s' % ini_lines[index]\n changed = True\n break\n\n # remove the fake section line\n del ini_lines[-1:]\n\n if not within_section and option and state == 'present':\n ini_lines.append('[%s]\\n' % section)\n ini_lines.append(assignment_format % (option, value))\n changed = True\n\n\n backup_file = None\n if changed and not module.check_mode:\n if backup:\n backup_file = module.backup_local(filename)\n ini_file = open(filename, 'w')\n try:\n ini_file.writelines(ini_lines)\n finally:\n ini_file.close()\n\n return (changed, backup_file)\n\n# ==============================================================\n# main\n\ndef main():\n\n module = AnsibleModule(\n argument_spec = dict(\n dest = dict(required=True),\n section = dict(required=True),\n option = dict(required=False),\n value = dict(required=False),\n backup = dict(default='no', type='bool'),\n state = dict(default='present', choices=['present', 'absent']),\n no_extra_spaces = dict(required=False, default=False, type='bool')\n ),\n add_file_common_args = True,\n supports_check_mode = True\n )\n\n dest = os.path.expanduser(module.params['dest'])\n section = module.params['section']\n option = module.params['option']\n value = module.params['value']\n state = module.params['state']\n backup = module.params['backup']\n no_extra_spaces = module.params['no_extra_spaces']\n\n (changed,backup_file) = do_ini(module, dest, section, option, value, state, backup, no_extra_spaces)\n\n file_args = module.load_file_common_arguments(module.params)\n changed = module.set_fs_attributes_if_different(file_args, changed)\n\n results = { 'changed': changed, 'msg': \"OK\", 'dest': dest }\n if backup_file is not None:\n results['backup_file'] = backup_file\n\n # Mission complete\n module.exit_json(**results)\n\n# import module snippets\nfrom ansible.module_utils.basic import *\nif __name__ == '__main__':\n main()\n", "path": "files/ini_file.py"}]} | 3,468 | 200 |
gh_patches_debug_35792 | rasdani/github-patches | git_diff | joke2k__faker-677 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update user_agent chrome version
Right now the **user_agent** provider can return a Chrome version between 13 and 15, which is much too low (for example, the latest stable version is 63). I want to create a PR to fix this.
</issue>
<code>
[start of faker/providers/user_agent/__init__.py]
1 # coding=utf-8
2
3 from __future__ import unicode_literals
4
5 from datetime import datetime
6
7 from .. import BaseProvider
8
9
10 class Provider(BaseProvider):
11 user_agents = (
12 'chrome', 'firefox', 'internet_explorer', 'opera', 'safari',
13 )
14
15 windows_platform_tokens = (
16 'Windows 95', 'Windows 98', 'Windows 98; Win 9x 4.90', 'Windows CE',
17 'Windows NT 4.0', 'Windows NT 5.0', 'Windows NT 5.01',
18 'Windows NT 5.1', 'Windows NT 5.2', 'Windows NT 6.0', 'Windows NT 6.1',
19 'Windows NT 6.2',
20 )
21
22 linux_processors = ('i686', 'x86_64',)
23
24 mac_processors = ('Intel', 'PPC', 'U; Intel', 'U; PPC',)
25
26 def mac_processor(self):
27 return self.random_element(self.mac_processors)
28
29 def linux_processor(self):
30 return self.random_element(self.linux_processors)
31
32 def user_agent(self):
33 name = self.random_element(self.user_agents)
34 return getattr(self, name)()
35
36 def chrome(self):
37 saf = str(self.generator.random.randint(531, 536)) + \
38 str(self.generator.random.randint(0, 2))
39 tmplt = '({0}) AppleWebKit/{1} (KHTML, like Gecko)' \
40 ' Chrome/{2}.0.{3}.0 Safari/{4}'
41 platforms = (
42 tmplt.format(self.linux_platform_token(),
43 saf,
44 self.generator.random.randint(13, 15),
45 self.generator.random.randint(800, 899),
46 saf),
47 tmplt.format(self.windows_platform_token(),
48 saf,
49 self.generator.random.randint(13, 15),
50 self.generator.random.randint(800, 899),
51 saf),
52 tmplt.format(self.mac_platform_token(),
53 saf,
54 self.generator.random.randint(13, 15),
55 self.generator.random.randint(800, 899),
56 saf),
57 )
58
59 return 'Mozilla/5.0 ' + self.random_element(platforms)
60
61 def firefox(self):
62 ver = (
63 'Gecko/{0} Firefox/{1}.0'.format(
64 self.generator.date_time_between(
65 datetime(2011, 1, 1)
66 ),
67 self.generator.random.randint(4, 15)
68 ),
69 'Gecko/{0} Firefox/3.6.{1}'.format(
70 self.generator.date_time_between(
71 datetime(2010, 1, 1)
72 ),
73 self.generator.random.randint(1, 20)),
74 'Gecko/{0} Firefox/3.8'.format(
75 self.generator.date_time_between(datetime(2010, 1, 1)),
76 ),
77 )
78 tmplt_win = '({0}; {1}; rv:1.9.{2}.20) {3}'
79 tmplt_lin = '({0}; rv:1.9.{1}.20) {2}'
80 tmplt_mac = '({0}; rv:1.9.{1}.20) {2}'
81 platforms = (
82 tmplt_win.format(self.windows_platform_token(),
83 self.generator.locale().replace('_', '-'),
84 self.generator.random.randint(0, 2),
85 self.generator.random.choice(ver)),
86 tmplt_lin.format(self.linux_platform_token(),
87 self.generator.random.randint(5, 7),
88 self.generator.random.choice(ver)),
89 tmplt_mac.format(self.mac_platform_token(),
90 self.generator.random.randint(2, 6),
91 self.generator.random.choice(ver)),
92 )
93
94 return 'Mozilla/5.0 ' + self.random_element(platforms)
95
96 def safari(self):
97 saf = "{0}.{1}.{2}".format(self.generator.random.randint(531, 535),
98 self.generator.random.randint(1, 50),
99 self.generator.random.randint(1, 7))
100 if not self.generator.random.getrandbits(1):
101 ver = "{0}.{1}".format(self.generator.random.randint(4, 5),
102 self.generator.random.randint(0, 1))
103 else:
104 ver = "{0}.0.{1}".format(self.generator.random.randint(4, 5),
105 self.generator.random.randint(1, 5))
106 tmplt_win = '(Windows; U; {0}) AppleWebKit/{1} (KHTML, like Gecko)' \
107 ' Version/{2} Safari/{3}'
108 tmplt_mac = '({0} rv:{1}.0; {2}) AppleWebKit/{3} (KHTML, like Gecko)' \
109 ' Version/{4} Safari/{5}'
110 tmplt_ipod = '(iPod; U; CPU iPhone OS {0}_{1} like Mac OS X; {2})' \
111 ' AppleWebKit/{3} (KHTML, like Gecko) Version/{4}.0.5' \
112 ' Mobile/8B{5} Safari/6{6}'
113 locale = self.generator.locale().replace('_', '-')
114 platforms = (
115 tmplt_win.format(self.windows_platform_token(),
116 saf,
117 ver,
118 saf),
119 tmplt_mac.format(self.mac_platform_token(),
120 self.generator.random.randint(2, 6),
121 locale,
122 saf,
123 ver,
124 saf),
125 tmplt_ipod.format(self.generator.random.randint(3, 4),
126 self.generator.random.randint(0, 3),
127 locale,
128 saf,
129 self.generator.random.randint(3, 4),
130 self.generator.random.randint(111, 119),
131 saf),
132 )
133
134 return 'Mozilla/5.0 ' + self.random_element(platforms)
135
136 def opera(self):
137 platform = '({0}; {1}) Presto/2.9.{2} Version/{3}.00'.format(
138 (
139 self.linux_platform_token()
140 if self.generator.random.getrandbits(1)
141 else self.windows_platform_token()
142 ),
143 self.generator.locale().replace('_', '-'),
144 self.generator.random.randint(160, 190),
145 self.generator.random.randint(10, 12),
146 )
147 return 'Opera/{0}.{1}.{2}'.format(
148 self.generator.random.randint(8, 9),
149 self.generator.random.randint(10, 99),
150 platform,
151 )
152
153 def internet_explorer(self):
154 tmplt = 'Mozilla/5.0 (compatible; MSIE {0}.0; {1}; Trident/{2}.{3})'
155 return tmplt.format(self.generator.random.randint(5, 9),
156 self.windows_platform_token(),
157 self.generator.random.randint(3, 5),
158 self.generator.random.randint(0, 1))
159
160 def windows_platform_token(self):
161 return self.random_element(self.windows_platform_tokens)
162
163 def linux_platform_token(self):
164 return 'X11; Linux {0}'.format(
165 self.random_element(self.linux_processors))
166
167 def mac_platform_token(self):
168 return 'Macintosh; {0} Mac OS X 10_{1}_{2}'.format(
169 self.random_element(self.mac_processors),
170 self.generator.random.randint(5, 8),
171 self.generator.random.randint(0, 9),
172 )
173
[end of faker/providers/user_agent/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/faker/providers/user_agent/__init__.py b/faker/providers/user_agent/__init__.py
--- a/faker/providers/user_agent/__init__.py
+++ b/faker/providers/user_agent/__init__.py
@@ -33,7 +33,8 @@
name = self.random_element(self.user_agents)
return getattr(self, name)()
- def chrome(self):
+ def chrome(self, version_from=13, version_to=63,
+ build_from=800, build_to=899):
saf = str(self.generator.random.randint(531, 536)) + \
str(self.generator.random.randint(0, 2))
tmplt = '({0}) AppleWebKit/{1} (KHTML, like Gecko)' \
@@ -41,18 +42,18 @@
platforms = (
tmplt.format(self.linux_platform_token(),
saf,
- self.generator.random.randint(13, 15),
- self.generator.random.randint(800, 899),
+ self.generator.random.randint(version_from, version_to),
+ self.generator.random.randint(build_from, build_to),
saf),
tmplt.format(self.windows_platform_token(),
saf,
- self.generator.random.randint(13, 15),
- self.generator.random.randint(800, 899),
+ self.generator.random.randint(version_from, version_to),
+ self.generator.random.randint(build_from, build_to),
saf),
tmplt.format(self.mac_platform_token(),
saf,
- self.generator.random.randint(13, 15),
- self.generator.random.randint(800, 899),
+ self.generator.random.randint(version_from, version_to),
+ self.generator.random.randint(build_from, build_to),
saf),
)
@@ -167,6 +168,6 @@
def mac_platform_token(self):
return 'Macintosh; {0} Mac OS X 10_{1}_{2}'.format(
self.random_element(self.mac_processors),
- self.generator.random.randint(5, 8),
+ self.generator.random.randint(5, 12),
self.generator.random.randint(0, 9),
)
| {"golden_diff": "diff --git a/faker/providers/user_agent/__init__.py b/faker/providers/user_agent/__init__.py\n--- a/faker/providers/user_agent/__init__.py\n+++ b/faker/providers/user_agent/__init__.py\n@@ -33,7 +33,8 @@\n name = self.random_element(self.user_agents)\n return getattr(self, name)()\n \n- def chrome(self):\n+ def chrome(self, version_from=13, version_to=63,\n+ build_from=800, build_to=899):\n saf = str(self.generator.random.randint(531, 536)) + \\\n str(self.generator.random.randint(0, 2))\n tmplt = '({0}) AppleWebKit/{1} (KHTML, like Gecko)' \\\n@@ -41,18 +42,18 @@\n platforms = (\n tmplt.format(self.linux_platform_token(),\n saf,\n- self.generator.random.randint(13, 15),\n- self.generator.random.randint(800, 899),\n+ self.generator.random.randint(version_from, version_to),\n+ self.generator.random.randint(build_from, build_to),\n saf),\n tmplt.format(self.windows_platform_token(),\n saf,\n- self.generator.random.randint(13, 15),\n- self.generator.random.randint(800, 899),\n+ self.generator.random.randint(version_from, version_to),\n+ self.generator.random.randint(build_from, build_to),\n saf),\n tmplt.format(self.mac_platform_token(),\n saf,\n- self.generator.random.randint(13, 15),\n- self.generator.random.randint(800, 899),\n+ self.generator.random.randint(version_from, version_to),\n+ self.generator.random.randint(build_from, build_to),\n saf),\n )\n \n@@ -167,6 +168,6 @@\n def mac_platform_token(self):\n return 'Macintosh; {0} Mac OS X 10_{1}_{2}'.format(\n self.random_element(self.mac_processors),\n- self.generator.random.randint(5, 8),\n+ self.generator.random.randint(5, 12),\n self.generator.random.randint(0, 9),\n )\n", "issue": "Update user_agent chrome version\nRight now provider **user_agent** can return chrome version between 13-15 which is too small (for example latest stable version is 63). I want to create PR to fix this.\n", "before_files": [{"content": "# coding=utf-8\n\nfrom __future__ import unicode_literals\n\nfrom datetime import datetime\n\nfrom .. 
import BaseProvider\n\n\nclass Provider(BaseProvider):\n user_agents = (\n 'chrome', 'firefox', 'internet_explorer', 'opera', 'safari',\n )\n\n windows_platform_tokens = (\n 'Windows 95', 'Windows 98', 'Windows 98; Win 9x 4.90', 'Windows CE',\n 'Windows NT 4.0', 'Windows NT 5.0', 'Windows NT 5.01',\n 'Windows NT 5.1', 'Windows NT 5.2', 'Windows NT 6.0', 'Windows NT 6.1',\n 'Windows NT 6.2',\n )\n\n linux_processors = ('i686', 'x86_64',)\n\n mac_processors = ('Intel', 'PPC', 'U; Intel', 'U; PPC',)\n\n def mac_processor(self):\n return self.random_element(self.mac_processors)\n\n def linux_processor(self):\n return self.random_element(self.linux_processors)\n\n def user_agent(self):\n name = self.random_element(self.user_agents)\n return getattr(self, name)()\n\n def chrome(self):\n saf = str(self.generator.random.randint(531, 536)) + \\\n str(self.generator.random.randint(0, 2))\n tmplt = '({0}) AppleWebKit/{1} (KHTML, like Gecko)' \\\n ' Chrome/{2}.0.{3}.0 Safari/{4}'\n platforms = (\n tmplt.format(self.linux_platform_token(),\n saf,\n self.generator.random.randint(13, 15),\n self.generator.random.randint(800, 899),\n saf),\n tmplt.format(self.windows_platform_token(),\n saf,\n self.generator.random.randint(13, 15),\n self.generator.random.randint(800, 899),\n saf),\n tmplt.format(self.mac_platform_token(),\n saf,\n self.generator.random.randint(13, 15),\n self.generator.random.randint(800, 899),\n saf),\n )\n\n return 'Mozilla/5.0 ' + self.random_element(platforms)\n\n def firefox(self):\n ver = (\n 'Gecko/{0} Firefox/{1}.0'.format(\n self.generator.date_time_between(\n datetime(2011, 1, 1)\n ),\n self.generator.random.randint(4, 15)\n ),\n 'Gecko/{0} Firefox/3.6.{1}'.format(\n self.generator.date_time_between(\n datetime(2010, 1, 1)\n ),\n self.generator.random.randint(1, 20)),\n 'Gecko/{0} Firefox/3.8'.format(\n self.generator.date_time_between(datetime(2010, 1, 1)),\n ),\n )\n tmplt_win = '({0}; {1}; rv:1.9.{2}.20) {3}'\n tmplt_lin = '({0}; rv:1.9.{1}.20) {2}'\n tmplt_mac = '({0}; rv:1.9.{1}.20) {2}'\n platforms = (\n tmplt_win.format(self.windows_platform_token(),\n self.generator.locale().replace('_', '-'),\n self.generator.random.randint(0, 2),\n self.generator.random.choice(ver)),\n tmplt_lin.format(self.linux_platform_token(),\n self.generator.random.randint(5, 7),\n self.generator.random.choice(ver)),\n tmplt_mac.format(self.mac_platform_token(),\n self.generator.random.randint(2, 6),\n self.generator.random.choice(ver)),\n )\n\n return 'Mozilla/5.0 ' + self.random_element(platforms)\n\n def safari(self):\n saf = \"{0}.{1}.{2}\".format(self.generator.random.randint(531, 535),\n self.generator.random.randint(1, 50),\n self.generator.random.randint(1, 7))\n if not self.generator.random.getrandbits(1):\n ver = \"{0}.{1}\".format(self.generator.random.randint(4, 5),\n self.generator.random.randint(0, 1))\n else:\n ver = \"{0}.0.{1}\".format(self.generator.random.randint(4, 5),\n self.generator.random.randint(1, 5))\n tmplt_win = '(Windows; U; {0}) AppleWebKit/{1} (KHTML, like Gecko)' \\\n ' Version/{2} Safari/{3}'\n tmplt_mac = '({0} rv:{1}.0; {2}) AppleWebKit/{3} (KHTML, like Gecko)' \\\n ' Version/{4} Safari/{5}'\n tmplt_ipod = '(iPod; U; CPU iPhone OS {0}_{1} like Mac OS X; {2})' \\\n ' AppleWebKit/{3} (KHTML, like Gecko) Version/{4}.0.5' \\\n ' Mobile/8B{5} Safari/6{6}'\n locale = self.generator.locale().replace('_', '-')\n platforms = (\n tmplt_win.format(self.windows_platform_token(),\n saf,\n ver,\n saf),\n tmplt_mac.format(self.mac_platform_token(),\n 
self.generator.random.randint(2, 6),\n locale,\n saf,\n ver,\n saf),\n tmplt_ipod.format(self.generator.random.randint(3, 4),\n self.generator.random.randint(0, 3),\n locale,\n saf,\n self.generator.random.randint(3, 4),\n self.generator.random.randint(111, 119),\n saf),\n )\n\n return 'Mozilla/5.0 ' + self.random_element(platforms)\n\n def opera(self):\n platform = '({0}; {1}) Presto/2.9.{2} Version/{3}.00'.format(\n (\n self.linux_platform_token()\n if self.generator.random.getrandbits(1)\n else self.windows_platform_token()\n ),\n self.generator.locale().replace('_', '-'),\n self.generator.random.randint(160, 190),\n self.generator.random.randint(10, 12),\n )\n return 'Opera/{0}.{1}.{2}'.format(\n self.generator.random.randint(8, 9),\n self.generator.random.randint(10, 99),\n platform,\n )\n\n def internet_explorer(self):\n tmplt = 'Mozilla/5.0 (compatible; MSIE {0}.0; {1}; Trident/{2}.{3})'\n return tmplt.format(self.generator.random.randint(5, 9),\n self.windows_platform_token(),\n self.generator.random.randint(3, 5),\n self.generator.random.randint(0, 1))\n\n def windows_platform_token(self):\n return self.random_element(self.windows_platform_tokens)\n\n def linux_platform_token(self):\n return 'X11; Linux {0}'.format(\n self.random_element(self.linux_processors))\n\n def mac_platform_token(self):\n return 'Macintosh; {0} Mac OS X 10_{1}_{2}'.format(\n self.random_element(self.mac_processors),\n self.generator.random.randint(5, 8),\n self.generator.random.randint(0, 9),\n )\n", "path": "faker/providers/user_agent/__init__.py"}]} | 2,589 | 488 |
gh_patches_debug_30341 | rasdani/github-patches | git_diff | pyinstaller__pyinstaller-7411 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Multiprocessing "spawn" not thread-safe on Linux.
<!--
Welcome to the PyInstaller issue tracker! Before creating an issue, please heed the following:
1. This tracker should only be used to report bugs and request features / enhancements to PyInstaller
- For questions and general support, use the discussions forum.
2. Use the search function before creating a new issue. Duplicates will be closed and directed to
the original discussion.
3. When making a bug report, make sure you provide all required information. The easier it is for
maintainers to reproduce, the faster it'll be fixed.
-->
<!-- +++ ONLY TEXT +++ DO NOT POST IMAGES +++ -->
## Description of the issue
When using multiprocessing with the "spawn" method on Linux, processes sometimes fail to start with the message: `FileNotFoundError: [Errno 2] No such file or directory: '/tmp/_MEIOchafX/multiprocessing_bug.py'` This happens if different threads try to launch processes concurrently. It would appear that the "spawn" method is not thread-safe when used with freeze support.
As mentioned below, this bug does not manifest when built with `--onedir`.
[debug.log](https://github.com/pyinstaller/pyinstaller/files/10560051/debug.log) contains import and bootloader logging for a failure case.
Q: Does this happen with "fork"?
A: No
Q: Does this happen when running from source?
A: No, only when packaged as a pyinstaller executable with `--onefile`.
Q: Does this happen on Windows?
A: Unknown
### Context information (for bug reports)
* Output of `pyinstaller --version`: ```5.7.0```
* Version of Python: Python 3.10.6
* Platform: Ubuntu 22.04.1 LTS
* How you installed Python: apt
* Did you also try this on another platform?
* Ubuntu 18.04.6 LTS, pyinstaller 4.7, Python 3.7 - Bug is present
* WSL2 pyinstaller 4.7, Python 3.7 - Bug is present
* try the latest development version, using the following command:
```shell
pip install https://github.com/pyinstaller/pyinstaller/archive/develop.zip
```
* follow *all* the instructions in our "If Things Go Wrong" Guide
(https://github.com/pyinstaller/pyinstaller/wiki/If-Things-Go-Wrong) and
### Make sure [everything is packaged correctly](https://github.com/pyinstaller/pyinstaller/wiki/How-to-Report-Bugs#make-sure-everything-is-packaged-correctly)
* [x] start with clean installation
* [x] use the latest development version
* [x] Run your frozen program **from a command window (shell)** — instead of double-clicking on it
* [x] Package your program in **--onedir mode** - **BUG DOES NOT MANIFEST**
 * [x] Package **without UPX**, say: use the option `--noupx` or set `upx=False` in your .spec-file - **BUG DOES NOT MANIFEST**
* [x] Repackage you application in **verbose/debug mode**. For this, pass the option `--debug` to `pyi-makespec` or `pyinstaller` or use `EXE(..., debug=1, ...)` in your .spec file.
### A minimal example program which shows the error
```python
import multiprocessing
import sys
from threading import Thread
DEFAULT_N = 3
def main():
try:
n = int(sys.argv[1])
except IndexError:
n=DEFAULT_N
threads = []
for i in range(n):
threads.append(Thread(target=foo, args=(i, )))
for i in range(n):
threads[i].start()
for i in range(n):
threads[i].join()
def foo(i):
multiprocessing_context = multiprocessing.get_context(method="spawn")
q = multiprocessing_context.Queue()
p = multiprocessing_context.Process(target=bar, args=(q, i), daemon=True)
p.start()
p.join()
def bar(q, i):
q.put('hello')
print(f"{i} Added to queue")
if __name__ == "__main__":
multiprocessing.freeze_support()
main()
```
### Stacktrace / full error message
Note: If you can't reproduce the bug, try increasing the parameter from 2 to 5 (or higher).
```
$> dist/multiprocessing_bug 2
0 Added to queue
Traceback (most recent call last):
File "multiprocessing_bug.py", line 34, in <module>
multiprocessing.freeze_support()
File "PyInstaller/hooks/rthooks/pyi_rth_multiprocessing.py", line 49, in _freeze_support
File "multiprocessing/spawn.py", line 116, in spawn_main
File "multiprocessing/spawn.py", line 125, in _main
File "multiprocessing/spawn.py", line 236, in prepare
File "multiprocessing/spawn.py", line 287, in _fixup_main_from_path
File "runpy.py", line 288, in run_path
File "runpy.py", line 252, in _get_code_from_file
FileNotFoundError: [Errno 2] No such file or directory: '/tmp/_MEIOchafX/multiprocessing_bug.py'
[8216] Failed to execute script 'multiprocessing_bug' due to unhandled exception!
```
### Workaround
As shown below, adding a lock around the call to `process.start()` seems to resolve the issue.
```python
import multiprocessing
import sys
from threading import Thread, Lock
DEFAULT_N = 3
def main():
try:
n = int(sys.argv[1])
except IndexError:
n=DEFAULT_N
threads = []
for i in range(n):
threads.append(Thread(target=foo, args=(i, )))
for i in range(n):
threads[i].start()
for i in range(n):
threads[i].join()
lock = Lock()
def foo(i):
multiprocessing_context = multiprocessing.get_context(method="spawn")
q = multiprocessing_context.Queue()
p = multiprocessing_context.Process(target=bar, args=(q, i), daemon=True)
with lock:
p.start()
p.join()
def bar(q, i):
q.put('hello')
print(f"{i} Added to queue")
if __name__ == "__main__":
multiprocessing.freeze_support()
main()
```
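Given that the lock helps, the root cause may be the `_MEIPASS2` handling in PyInstaller's multiprocessing runtime hook, which calls `os.putenv` before each spawn and `os.unsetenv` afterwards; serializing that sequence inside the hook itself would avoid asking every application to lock around `p.start()`. A rough, simplified sketch (the real hook also checks `sys.frozen` and the availability of `os.unsetenv`):

```python
import os
import sys
import threading


class FrozenSupportMixIn:
    _lock = threading.Lock()

    def __init__(self, *args, **kw):
        # Serialize the putenv -> spawn -> unsetenv sequence so one thread
        # cannot unset _MEIPASS2 while another thread's child process is
        # still being launched.
        with self._lock:
            os.putenv('_MEIPASS2', sys._MEIPASS)
            try:
                super().__init__(*args, **kw)
            finally:
                os.unsetenv('_MEIPASS2')
```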
</issue>
<code>
[start of PyInstaller/hooks/rthooks/pyi_rth_multiprocessing.py]
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2017-2023, PyInstaller Development Team.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #
9 # SPDX-License-Identifier: Apache-2.0
10 #-----------------------------------------------------------------------------
11
12 import multiprocessing
13 import multiprocessing.spawn as spawn
14 # 'spawn' multiprocessing needs some adjustments on osx
15 import os
16 import sys
17 from subprocess import _args_from_interpreter_flags
18
19 # prevent spawn from trying to read __main__ in from the main script
20 multiprocessing.process.ORIGINAL_DIR = None
21
22
23 def _freeze_support():
24 # We want to catch the two processes that are spawned by the multiprocessing code:
25 # - the semaphore tracker, which cleans up named semaphores in the spawn multiprocessing mode
26 # - the fork server, which keeps track of worker processes in forkserver mode.
27 # both of these processes are started by spawning a new copy of the running executable, passing it the flags from
28 # _args_from_interpreter_flags and then "-c" and an import statement.
29 # Look for those flags and the import statement, then exec() the code ourselves.
30
31 if (
32 len(sys.argv) >= 2 and sys.argv[-2] == '-c' and sys.argv[-1].startswith((
33 'from multiprocessing.semaphore_tracker import main', # Py<3.8
34 'from multiprocessing.resource_tracker import main', # Py>=3.8
35 'from multiprocessing.forkserver import main'
36 )) and set(sys.argv[1:-2]) == set(_args_from_interpreter_flags())
37 ):
38 exec(sys.argv[-1])
39 sys.exit()
40
41 if spawn.is_forking(sys.argv):
42 kwds = {}
43 for arg in sys.argv[2:]:
44 name, value = arg.split('=')
45 if value == 'None':
46 kwds[name] = None
47 else:
48 kwds[name] = int(value)
49 spawn.spawn_main(**kwds)
50 sys.exit()
51
52
53 multiprocessing.freeze_support = spawn.freeze_support = _freeze_support
54
55 # Bootloader unsets _MEIPASS2 for child processes to allow running PyInstaller binaries inside pyinstaller binaries.
56 # This is ok for mac or unix with fork() system call. But on Windows we need to overcome missing fork() function.
57
58 if sys.platform.startswith('win'):
59 import multiprocessing.popen_spawn_win32 as forking
60 else:
61 import multiprocessing.popen_fork as forking
62 import multiprocessing.popen_spawn_posix as spawning
63
64
65 # Mix-in to re-set _MEIPASS2 from sys._MEIPASS.
66 class FrozenSupportMixIn:
67 def __init__(self, *args, **kw):
68 if hasattr(sys, 'frozen'):
69 # We have to set original _MEIPASS2 value from sys._MEIPASS to get --onefile mode working.
70 os.putenv('_MEIPASS2', sys._MEIPASS) # @UndefinedVariable
71 try:
72 super().__init__(*args, **kw)
73 finally:
74 if hasattr(sys, 'frozen'):
75 # On some platforms (e.g. AIX) 'os.unsetenv()' is not available. In those cases we cannot delete the
76 # variable but only set it to the empty string. The bootloader can handle this case.
77 if hasattr(os, 'unsetenv'):
78 os.unsetenv('_MEIPASS2')
79 else:
80 os.putenv('_MEIPASS2', '')
81
82
83 # Patch forking.Popen to re-set _MEIPASS2 from sys._MEIPASS.
84 class _Popen(FrozenSupportMixIn, forking.Popen):
85 pass
86
87
88 forking.Popen = _Popen
89
90 if not sys.platform.startswith('win'):
91 # Patch spawning.Popen to re-set _MEIPASS2 from sys._MEIPASS.
92 class _Spawning_Popen(FrozenSupportMixIn, spawning.Popen):
93 pass
94
95 spawning.Popen = _Spawning_Popen
96
[end of PyInstaller/hooks/rthooks/pyi_rth_multiprocessing.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/PyInstaller/hooks/rthooks/pyi_rth_multiprocessing.py b/PyInstaller/hooks/rthooks/pyi_rth_multiprocessing.py
--- a/PyInstaller/hooks/rthooks/pyi_rth_multiprocessing.py
+++ b/PyInstaller/hooks/rthooks/pyi_rth_multiprocessing.py
@@ -9,6 +9,7 @@
# SPDX-License-Identifier: Apache-2.0
#-----------------------------------------------------------------------------
+import threading
import multiprocessing
import multiprocessing.spawn as spawn
# 'spawn' multiprocessing needs some adjustments on osx
@@ -64,14 +65,17 @@
# Mix-in to re-set _MEIPASS2 from sys._MEIPASS.
class FrozenSupportMixIn:
+ _lock = threading.Lock()
+
def __init__(self, *args, **kw):
- if hasattr(sys, 'frozen'):
+ # The whole code block needs be executed under a lock to prevent race conditions between `os.putenv` and
+ # `os.unsetenv` calls when processes are spawned concurrently from multiple threads. See #7410.
+ with self._lock:
# We have to set original _MEIPASS2 value from sys._MEIPASS to get --onefile mode working.
os.putenv('_MEIPASS2', sys._MEIPASS) # @UndefinedVariable
- try:
- super().__init__(*args, **kw)
- finally:
- if hasattr(sys, 'frozen'):
+ try:
+ super().__init__(*args, **kw)
+ finally:
# On some platforms (e.g. AIX) 'os.unsetenv()' is not available. In those cases we cannot delete the
# variable but only set it to the empty string. The bootloader can handle this case.
if hasattr(os, 'unsetenv'):
| {"golden_diff": "diff --git a/PyInstaller/hooks/rthooks/pyi_rth_multiprocessing.py b/PyInstaller/hooks/rthooks/pyi_rth_multiprocessing.py\n--- a/PyInstaller/hooks/rthooks/pyi_rth_multiprocessing.py\n+++ b/PyInstaller/hooks/rthooks/pyi_rth_multiprocessing.py\n@@ -9,6 +9,7 @@\n # SPDX-License-Identifier: Apache-2.0\n #-----------------------------------------------------------------------------\n \n+import threading\n import multiprocessing\n import multiprocessing.spawn as spawn\n # 'spawn' multiprocessing needs some adjustments on osx\n@@ -64,14 +65,17 @@\n \n # Mix-in to re-set _MEIPASS2 from sys._MEIPASS.\n class FrozenSupportMixIn:\n+ _lock = threading.Lock()\n+\n def __init__(self, *args, **kw):\n- if hasattr(sys, 'frozen'):\n+ # The whole code block needs be executed under a lock to prevent race conditions between `os.putenv` and\n+ # `os.unsetenv` calls when processes are spawned concurrently from multiple threads. See #7410.\n+ with self._lock:\n # We have to set original _MEIPASS2 value from sys._MEIPASS to get --onefile mode working.\n os.putenv('_MEIPASS2', sys._MEIPASS) # @UndefinedVariable\n- try:\n- super().__init__(*args, **kw)\n- finally:\n- if hasattr(sys, 'frozen'):\n+ try:\n+ super().__init__(*args, **kw)\n+ finally:\n # On some platforms (e.g. AIX) 'os.unsetenv()' is not available. In those cases we cannot delete the\n # variable but only set it to the empty string. The bootloader can handle this case.\n if hasattr(os, 'unsetenv'):\n", "issue": "Multiprocessing \"spawn\" not thread-safe on Linux.\n<!--\r\nWelcome to the PyInstaller issue tracker! Before creating an issue, please heed the following:\r\n\r\n1. This tracker should only be used to report bugs and request features / enhancements to PyInstaller\r\n - For questions and general support, use the discussions forum.\r\n2. Use the search function before creating a new issue. Duplicates will be closed and directed to\r\n the original discussion.\r\n3. When making a bug report, make sure you provide all required information. The easier it is for\r\n maintainers to reproduce, the faster it'll be fixed.\r\n-->\r\n\r\n<!-- +++ ONLY TEXT +++ DO NOT POST IMAGES +++ -->\r\n\r\n## Description of the issue\r\n\r\nWhen using multiprocessing with the \"spawn\" method on Linux, processes sometimes fail to start with the message: `FileNotFoundError: [Errno 2] No such file or directory: '/tmp/_MEIOchafX/multiprocessing_bug.py'` This happens if different threads try to launch processes concurrently. It would appear that the \"spawn\" method is not thread-safe when used with freeze support.\r\n\r\nAs mentioned below, this bug does not manifest when built with `--onedir`.\r\n\r\n[debug.log](https://github.com/pyinstaller/pyinstaller/files/10560051/debug.log) contains import and bootloader logging for a failure case. \r\n\r\n\r\nQ: Does this happen with \"fork\"? 
\r\nA: No\r\n\r\nQ: Does this happen when running from source?\r\nA: No, only when packaged as a pyinstaller executable with `--onefile`.\r\n\r\nQ: Does this happen on Windows?\r\nA: Unknown\r\n\r\n### Context information (for bug reports)\r\n\r\n* Output of `pyinstaller --version`: ```5.7.0```\r\n* Version of Python: Python 3.10.6\r\n* Platform: Ubuntu 22.04.1 LTS\r\n* How you installed Python: apt\r\n* Did you also try this on another platform?\r\n * Ubuntu 18.04.6 LTS, pyinstaller 4.7, Python 3.7 - Bug is present\r\n * WSL2 pyinstaller 4.7, Python 3.7 - Bug is present\r\n\r\n\r\n* try the latest development version, using the following command:\r\n\r\n```shell\r\npip install https://github.com/pyinstaller/pyinstaller/archive/develop.zip\r\n```\r\n\r\n* follow *all* the instructions in our \"If Things Go Wrong\" Guide\r\n (https://github.com/pyinstaller/pyinstaller/wiki/If-Things-Go-Wrong) and\r\n\r\n### Make sure [everything is packaged correctly](https://github.com/pyinstaller/pyinstaller/wiki/How-to-Report-Bugs#make-sure-everything-is-packaged-correctly)\r\n\r\n * [x] start with clean installation\r\n * [x] use the latest development version\r\n * [x] Run your frozen program **from a command window (shell)** \u2014 instead of double-clicking on it\r\n * [x] Package your program in **--onedir mode** - **BUG DOES NOT MANIFEST**\r\n * [x] Package **without UPX**, say: use the option `--noupx` or set `upx=False` in your .spec-file - - **BUG DOES NOT MANIFEST**\r\n * [x] Repackage you application in **verbose/debug mode**. For this, pass the option `--debug` to `pyi-makespec` or `pyinstaller` or use `EXE(..., debug=1, ...)` in your .spec file.\r\n\r\n\r\n### A minimal example program which shows the error\r\n\r\n```python\r\nimport multiprocessing\r\nimport sys\r\nfrom threading import Thread\r\n\r\nDEFAULT_N = 3\r\n\r\ndef main():\r\n try:\r\n n = int(sys.argv[1])\r\n except IndexError:\r\n n=DEFAULT_N\r\n\r\n threads = []\r\n for i in range(n):\r\n threads.append(Thread(target=foo, args=(i, )))\r\n for i in range(n):\r\n threads[i].start()\r\n for i in range(n):\r\n threads[i].join()\r\n\r\ndef foo(i):\r\n multiprocessing_context = multiprocessing.get_context(method=\"spawn\")\r\n q = multiprocessing_context.Queue()\r\n p = multiprocessing_context.Process(target=bar, args=(q, i), daemon=True)\r\n p.start()\r\n p.join()\r\n\r\n\r\ndef bar(q, i):\r\n q.put('hello')\r\n print(f\"{i} Added to queue\")\r\n\r\nif __name__ == \"__main__\":\r\n multiprocessing.freeze_support()\r\n main()\r\n\r\n```\r\n\r\n### Stacktrace / full error message\r\n\r\nNote: If you can't reproduce the bug, try increasing the parameter from 2 to 5 (or higher).\r\n\r\n```\r\n$> dist/multiprocessing_bug 2\r\n0 Added to queue\r\nTraceback (most recent call last):\r\n File \"multiprocessing_bug.py\", line 34, in <module>\r\n multiprocessing.freeze_support()\r\n File \"PyInstaller/hooks/rthooks/pyi_rth_multiprocessing.py\", line 49, in _freeze_support\r\n File \"multiprocessing/spawn.py\", line 116, in spawn_main\r\n File \"multiprocessing/spawn.py\", line 125, in _main\r\n File \"multiprocessing/spawn.py\", line 236, in prepare\r\n File \"multiprocessing/spawn.py\", line 287, in _fixup_main_from_path\r\n File \"runpy.py\", line 288, in run_path\r\n File \"runpy.py\", line 252, in _get_code_from_file\r\nFileNotFoundError: [Errno 2] No such file or directory: '/tmp/_MEIOchafX/multiprocessing_bug.py'\r\n[8216] Failed to execute script 'multiprocessing_bug' due to unhandled exception!\r\n\r\n```\r\n\r\n### 
Workaround\r\n\r\nAs shown below, adding a lock around the call to `process.start()` seems to resolve the issue.\r\n\r\n```python\r\nimport multiprocessing\r\nimport sys\r\nfrom threading import Thread, Lock\r\n\r\nDEFAULT_N = 3\r\n\r\ndef main():\r\n try:\r\n n = int(sys.argv[1])\r\n except IndexError:\r\n n=DEFAULT_N\r\n\r\n threads = []\r\n for i in range(n):\r\n threads.append(Thread(target=foo, args=(i, )))\r\n for i in range(n):\r\n threads[i].start()\r\n for i in range(n):\r\n threads[i].join()\r\n\r\nlock = Lock()\r\ndef foo(i):\r\n multiprocessing_context = multiprocessing.get_context(method=\"spawn\")\r\n q = multiprocessing_context.Queue()\r\n p = multiprocessing_context.Process(target=bar, args=(q, i), daemon=True)\r\n\r\n with lock:\r\n p.start()\r\n\r\n p.join()\r\n\r\n\r\ndef bar(q, i):\r\n q.put('hello')\r\n print(f\"{i} Added to queue\")\r\n\r\nif __name__ == \"__main__\":\r\n multiprocessing.freeze_support()\r\n main()\r\n\r\n```\r\n\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2017-2023, PyInstaller Development Team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: Apache-2.0\n#-----------------------------------------------------------------------------\n\nimport multiprocessing\nimport multiprocessing.spawn as spawn\n# 'spawn' multiprocessing needs some adjustments on osx\nimport os\nimport sys\nfrom subprocess import _args_from_interpreter_flags\n\n# prevent spawn from trying to read __main__ in from the main script\nmultiprocessing.process.ORIGINAL_DIR = None\n\n\ndef _freeze_support():\n # We want to catch the two processes that are spawned by the multiprocessing code:\n # - the semaphore tracker, which cleans up named semaphores in the spawn multiprocessing mode\n # - the fork server, which keeps track of worker processes in forkserver mode.\n # both of these processes are started by spawning a new copy of the running executable, passing it the flags from\n # _args_from_interpreter_flags and then \"-c\" and an import statement.\n # Look for those flags and the import statement, then exec() the code ourselves.\n\n if (\n len(sys.argv) >= 2 and sys.argv[-2] == '-c' and sys.argv[-1].startswith((\n 'from multiprocessing.semaphore_tracker import main', # Py<3.8\n 'from multiprocessing.resource_tracker import main', # Py>=3.8\n 'from multiprocessing.forkserver import main'\n )) and set(sys.argv[1:-2]) == set(_args_from_interpreter_flags())\n ):\n exec(sys.argv[-1])\n sys.exit()\n\n if spawn.is_forking(sys.argv):\n kwds = {}\n for arg in sys.argv[2:]:\n name, value = arg.split('=')\n if value == 'None':\n kwds[name] = None\n else:\n kwds[name] = int(value)\n spawn.spawn_main(**kwds)\n sys.exit()\n\n\nmultiprocessing.freeze_support = spawn.freeze_support = _freeze_support\n\n# Bootloader unsets _MEIPASS2 for child processes to allow running PyInstaller binaries inside pyinstaller binaries.\n# This is ok for mac or unix with fork() system call. 
But on Windows we need to overcome missing fork() function.\n\nif sys.platform.startswith('win'):\n import multiprocessing.popen_spawn_win32 as forking\nelse:\n import multiprocessing.popen_fork as forking\n import multiprocessing.popen_spawn_posix as spawning\n\n\n# Mix-in to re-set _MEIPASS2 from sys._MEIPASS.\nclass FrozenSupportMixIn:\n def __init__(self, *args, **kw):\n if hasattr(sys, 'frozen'):\n # We have to set original _MEIPASS2 value from sys._MEIPASS to get --onefile mode working.\n os.putenv('_MEIPASS2', sys._MEIPASS) # @UndefinedVariable\n try:\n super().__init__(*args, **kw)\n finally:\n if hasattr(sys, 'frozen'):\n # On some platforms (e.g. AIX) 'os.unsetenv()' is not available. In those cases we cannot delete the\n # variable but only set it to the empty string. The bootloader can handle this case.\n if hasattr(os, 'unsetenv'):\n os.unsetenv('_MEIPASS2')\n else:\n os.putenv('_MEIPASS2', '')\n\n\n# Patch forking.Popen to re-set _MEIPASS2 from sys._MEIPASS.\nclass _Popen(FrozenSupportMixIn, forking.Popen):\n pass\n\n\nforking.Popen = _Popen\n\nif not sys.platform.startswith('win'):\n # Patch spawning.Popen to re-set _MEIPASS2 from sys._MEIPASS.\n class _Spawning_Popen(FrozenSupportMixIn, spawning.Popen):\n pass\n\n spawning.Popen = _Spawning_Popen\n", "path": "PyInstaller/hooks/rthooks/pyi_rth_multiprocessing.py"}]} | 3,047 | 402 |
gh_patches_debug_13193 | rasdani/github-patches | git_diff | opensearch-project__opensearch-build-499 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make plugin integtest.sh run against non-snapshot build
The plugin integtest.sh picks up the opensearch version provided in build.gradle, which is 1.1.0-SNAPSHOT. Since the release candidates are non-snapshot build artifacts, make this configurable in the integ test job.
</issue>
<code>
[start of bundle-workflow/src/paths/script_finder.py]
1 # SPDX-License-Identifier: Apache-2.0
2 #
3 # The OpenSearch Contributors require contributions made to
4 # this file be licensed under the Apache-2.0 license or a
5 # compatible open source license.
6
7 import os
8
9
10 class ScriptFinder:
11 class ScriptNotFoundError(Exception):
12 def __init__(self, kind, paths):
13 self.kind = kind
14 self.paths = paths
15 super().__init__(f"Could not find {kind} script. Looked in {paths}.")
16
17 component_scripts_path = os.path.realpath(
18 os.path.join(
19 os.path.dirname(os.path.abspath(__file__)), "../../scripts/components"
20 )
21 )
22
23 default_scripts_path = os.path.realpath(
24 os.path.join(
25 os.path.dirname(os.path.abspath(__file__)), "../../scripts/default"
26 )
27 )
28
29 """
30 ScriptFinder is a helper that abstracts away the details of where to look for build, test and install scripts.
31
32 For build.sh and integtest.sh scripts, given a component name and a checked-out Git repository,
33 it will look in the following locations, in order:
34 * Root of the Git repository
35 * /scripts/<script-name> in the Git repository
36 * <component_scripts_path>/<component_name>/<script-name>
37 * <default_scripts_path>/<script-name>
38
39 For install.sh scripts, given a component name, it will look in the following locations, in order:
40 * <component_scripts_path>/<component_name>/<script-name>
41 * <default_scripts_path>/<script-name>
42 """
43
44 @classmethod
45 def __find_script(cls, name, paths):
46 script = next(filter(lambda path: os.path.exists(path), paths), None)
47 if script is None:
48 raise ScriptFinder.ScriptNotFoundError(name, paths)
49 return script
50
51 @classmethod
52 def find_build_script(cls, component_name, git_dir):
53 paths = [
54 os.path.realpath(os.path.join(git_dir, "build.sh")),
55 os.path.realpath(os.path.join(git_dir, "scripts/build.sh")),
56 os.path.realpath(
57 os.path.join(cls.component_scripts_path, component_name, "build.sh")
58 ),
59 os.path.realpath(os.path.join(cls.default_scripts_path, "build.sh")),
60 ]
61
62 return cls.__find_script("build.sh", paths)
63
64 @classmethod
65 def find_integ_test_script(cls, component_name, git_dir):
66 paths = [
67 os.path.realpath(os.path.join(git_dir, "integtest.sh")),
68 os.path.realpath(os.path.join(git_dir, "scripts/integtest.sh")),
69 os.path.realpath(
70 os.path.join(cls.component_scripts_path, component_name, "integtest.sh")
71 ),
72 os.path.realpath(os.path.join(cls.default_scripts_path, "integtest.sh")),
73 ]
74
75 return cls.__find_script("integtest.sh", paths)
76
77 @classmethod
78 def find_install_script(cls, component_name):
79 paths = [
80 os.path.realpath(
81 os.path.join(cls.component_scripts_path, component_name, "install.sh")
82 ),
83 os.path.realpath(os.path.join(cls.default_scripts_path, "install.sh")),
84 ]
85
86 return cls.__find_script("install.sh", paths)
87
[end of bundle-workflow/src/paths/script_finder.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bundle-workflow/src/paths/script_finder.py b/bundle-workflow/src/paths/script_finder.py
--- a/bundle-workflow/src/paths/script_finder.py
+++ b/bundle-workflow/src/paths/script_finder.py
@@ -64,8 +64,9 @@
@classmethod
def find_integ_test_script(cls, component_name, git_dir):
paths = [
- os.path.realpath(os.path.join(git_dir, "integtest.sh")),
- os.path.realpath(os.path.join(git_dir, "scripts/integtest.sh")),
+ # TODO: Uncomment this after the integtest.sh tool is removed from plugin repos. See issue #497
+ # os.path.realpath(os.path.join(git_dir, "integtest.sh")),
+ # os.path.realpath(os.path.join(git_dir, "scripts/integtest.sh")),
os.path.realpath(
os.path.join(cls.component_scripts_path, component_name, "integtest.sh")
),
| {"golden_diff": "diff --git a/bundle-workflow/src/paths/script_finder.py b/bundle-workflow/src/paths/script_finder.py\n--- a/bundle-workflow/src/paths/script_finder.py\n+++ b/bundle-workflow/src/paths/script_finder.py\n@@ -64,8 +64,9 @@\n @classmethod\n def find_integ_test_script(cls, component_name, git_dir):\n paths = [\n- os.path.realpath(os.path.join(git_dir, \"integtest.sh\")),\n- os.path.realpath(os.path.join(git_dir, \"scripts/integtest.sh\")),\n+ # TODO: Uncomment this after the integtest.sh tool is removed from plugin repos. See issue #497\n+ # os.path.realpath(os.path.join(git_dir, \"integtest.sh\")),\n+ # os.path.realpath(os.path.join(git_dir, \"scripts/integtest.sh\")),\n os.path.realpath(\n os.path.join(cls.component_scripts_path, component_name, \"integtest.sh\")\n ),\n", "issue": "Make plugin integtest.sh run against non-snapshot build\nThe plugin integtest.sh picks up the opensearch version provided in build.gradle, which is 1.1.0-SNAPSHOT. Since the release candidates are non snapshot built artifacts, make this configurable in integ test job\n", "before_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n#\n# The OpenSearch Contributors require contributions made to\n# this file be licensed under the Apache-2.0 license or a\n# compatible open source license.\n\nimport os\n\n\nclass ScriptFinder:\n class ScriptNotFoundError(Exception):\n def __init__(self, kind, paths):\n self.kind = kind\n self.paths = paths\n super().__init__(f\"Could not find {kind} script. Looked in {paths}.\")\n\n component_scripts_path = os.path.realpath(\n os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"../../scripts/components\"\n )\n )\n\n default_scripts_path = os.path.realpath(\n os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"../../scripts/default\"\n )\n )\n\n \"\"\"\n ScriptFinder is a helper that abstracts away the details of where to look for build, test and install scripts.\n\n For build.sh and integtest.sh scripts, given a component name and a checked-out Git repository,\n it will look in the following locations, in order:\n * Root of the Git repository\n * /scripts/<script-name> in the Git repository\n * <component_scripts_path>/<component_name>/<script-name>\n * <default_scripts_path>/<script-name>\n\n For install.sh scripts, given a component name, it will look in the following locations, in order:\n * <component_scripts_path>/<component_name>/<script-name>\n * <default_scripts_path>/<script-name>\n \"\"\"\n\n @classmethod\n def __find_script(cls, name, paths):\n script = next(filter(lambda path: os.path.exists(path), paths), None)\n if script is None:\n raise ScriptFinder.ScriptNotFoundError(name, paths)\n return script\n\n @classmethod\n def find_build_script(cls, component_name, git_dir):\n paths = [\n os.path.realpath(os.path.join(git_dir, \"build.sh\")),\n os.path.realpath(os.path.join(git_dir, \"scripts/build.sh\")),\n os.path.realpath(\n os.path.join(cls.component_scripts_path, component_name, \"build.sh\")\n ),\n os.path.realpath(os.path.join(cls.default_scripts_path, \"build.sh\")),\n ]\n\n return cls.__find_script(\"build.sh\", paths)\n\n @classmethod\n def find_integ_test_script(cls, component_name, git_dir):\n paths = [\n os.path.realpath(os.path.join(git_dir, \"integtest.sh\")),\n os.path.realpath(os.path.join(git_dir, \"scripts/integtest.sh\")),\n os.path.realpath(\n os.path.join(cls.component_scripts_path, component_name, \"integtest.sh\")\n ),\n os.path.realpath(os.path.join(cls.default_scripts_path, \"integtest.sh\")),\n ]\n\n 
return cls.__find_script(\"integtest.sh\", paths)\n\n @classmethod\n def find_install_script(cls, component_name):\n paths = [\n os.path.realpath(\n os.path.join(cls.component_scripts_path, component_name, \"install.sh\")\n ),\n os.path.realpath(os.path.join(cls.default_scripts_path, \"install.sh\")),\n ]\n\n return cls.__find_script(\"install.sh\", paths)\n", "path": "bundle-workflow/src/paths/script_finder.py"}]} | 1,445 | 214 |
gh_patches_debug_1486 | rasdani/github-patches | git_diff | certbot__certbot-8776 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix lint and mypy with Python < 3.8
In https://github.com/certbot/certbot/pull/8748, we made a change that causes our lint and mypy tests to need to be run on Python 3.8+ to pass. See https://github.com/certbot/certbot/pull/8748#issuecomment-808790093 for the discussion of the problem here.
I don't think we should do this. Certbot supports Python 3.6+ and I think it could cause a particularly bad experience for new devs that don't happen to know they need Python 3.8+. This change also broke our development Dockerfile as can be seen at https://dev.azure.com/certbot/certbot/_build/results?buildId=3742&view=logs&j=bea2d267-f41e-5b33-7b51-a88065a8cbb0&t=0dc90756-6888-5ee6-5a6a-5855e6b9ae76&l=1873. Instead, I think we should change our approach here so the tests work on all versions of Python we support. I'm open to other ideas, but the two ideas I had for this are:
1. Just declare a runtime dependency on `typing-extensions`.
2. Add `typing-extensions` as a dev/test dependency and try to import it, but use similar fallback code to what we currently have if it's not available (a rough sketch of this is below).
What do you think @adferrand? Are you interested in working on this?
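For what it's worth, option 2 could be as small as a guarded import at each spot that needs `typing.Protocol` (which is only in the standard library on Python 3.8+); a rough sketch, with the final fallback only illustrative of "similar fallback code":

```python
try:  # Python 3.8+
    from typing import Protocol
except ImportError:
    try:
        from typing_extensions import Protocol  # dev/test environments
    except ImportError:
        # Neither is available at runtime; degrade to a plain base class so
        # annotations and class definitions still work.
        Protocol = object
```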
</issue>
<code>
[start of certbot/setup.py]
1 import codecs
2 from distutils.version import LooseVersion
3 import os
4 import re
5 import sys
6
7 from setuptools import __version__ as setuptools_version
8 from setuptools import find_packages
9 from setuptools import setup
10
11 min_setuptools_version='39.0.1'
12 # This conditional isn't necessary, but it provides better error messages to
13 # people who try to install this package with older versions of setuptools.
14 if LooseVersion(setuptools_version) < LooseVersion(min_setuptools_version):
15 raise RuntimeError(f'setuptools {min_setuptools_version}+ is required')
16
17 # Workaround for https://bugs.python.org/issue8876, see
18 # https://bugs.python.org/issue8876#msg208792
19 # This can be removed when using Python 2.7.9 or later:
20 # https://hg.python.org/cpython/raw-file/v2.7.9/Misc/NEWS
21 if os.path.abspath(__file__).split(os.path.sep)[1] == 'vagrant':
22 del os.link
23
24
25 def read_file(filename, encoding='utf8'):
26 """Read unicode from given file."""
27 with codecs.open(filename, encoding=encoding) as fd:
28 return fd.read()
29
30
31 here = os.path.abspath(os.path.dirname(__file__))
32
33 # read version number (and other metadata) from package init
34 init_fn = os.path.join(here, 'certbot', '__init__.py')
35 meta = dict(re.findall(r"""__([a-z]+)__ = '([^']+)""", read_file(init_fn)))
36
37 readme = read_file(os.path.join(here, 'README.rst'))
38 version = meta['version']
39
40 # This package relies on PyOpenSSL and requests, however, it isn't specified
41 # here to avoid masking the more specific request requirements in acme. See
42 # https://github.com/pypa/pip/issues/988 for more info.
43 install_requires = [
44 'acme>=1.8.0',
45 # We technically need ConfigArgParse 0.10.0 for Python 2.6 support, but
46 # saying so here causes a runtime error against our temporary fork of 0.9.3
47 # in which we added 2.6 support (see #2243), so we relax the requirement.
48 'ConfigArgParse>=0.9.3',
49 'configobj>=5.0.6',
50 'cryptography>=2.1.4',
51 'distro>=1.0.1',
52 # 1.1.0+ is required to avoid the warnings described at
53 # https://github.com/certbot/josepy/issues/13.
54 'josepy>=1.1.0',
55 'parsedatetime>=2.4',
56 'pyrfc3339',
57 'pytz',
58 # This dependency needs to be added using environment markers to avoid its
59 # installation on Linux.
60 'pywin32>=300 ; sys_platform == "win32"',
61 f'setuptools>={min_setuptools_version}',
62 'zope.component',
63 'zope.interface',
64 ]
65
66 dev_extras = [
67 'astroid',
68 'azure-devops',
69 'coverage',
70 'ipdb',
71 'mypy',
72 'PyGithub',
73 # 1.1.0+ is required for poetry to use the poetry-core library for the
74 # build system declared in tools/pinning/pyproject.toml.
75 'poetry>=1.1.0',
76 'pylint',
77 'pytest',
78 'pytest-cov',
79 'pytest-xdist',
80 'tox',
81 'twine',
82 'wheel',
83 ]
84
85 docs_extras = [
86 # If you have Sphinx<1.5.1, you need docutils<0.13.1
87 # https://github.com/sphinx-doc/sphinx/issues/3212
88 'repoze.sphinx.autointerface',
89 'Sphinx>=1.2', # Annotation support
90 'sphinx_rtd_theme',
91 ]
92
93 setup(
94 name='certbot',
95 version=version,
96 description="ACME client",
97 long_description=readme,
98 url='https://github.com/letsencrypt/letsencrypt',
99 author="Certbot Project",
100 author_email='[email protected]',
101 license='Apache License 2.0',
102 python_requires='>=3.6',
103 classifiers=[
104 'Development Status :: 5 - Production/Stable',
105 'Environment :: Console',
106 'Environment :: Console :: Curses',
107 'Intended Audience :: System Administrators',
108 'License :: OSI Approved :: Apache Software License',
109 'Operating System :: POSIX :: Linux',
110 'Programming Language :: Python',
111 'Programming Language :: Python :: 3',
112 'Programming Language :: Python :: 3.6',
113 'Programming Language :: Python :: 3.7',
114 'Programming Language :: Python :: 3.8',
115 'Programming Language :: Python :: 3.9',
116 'Topic :: Internet :: WWW/HTTP',
117 'Topic :: Security',
118 'Topic :: System :: Installation/Setup',
119 'Topic :: System :: Networking',
120 'Topic :: System :: Systems Administration',
121 'Topic :: Utilities',
122 ],
123
124 packages=find_packages(exclude=['docs', 'examples', 'tests', 'venv']),
125 include_package_data=True,
126
127 install_requires=install_requires,
128 extras_require={
129 'dev': dev_extras,
130 'docs': docs_extras,
131 },
132
133 entry_points={
134 'console_scripts': [
135 'certbot = certbot.main:main',
136 ],
137 'certbot.plugins': [
138 'manual = certbot._internal.plugins.manual:Authenticator',
139 'null = certbot._internal.plugins.null:Installer',
140 'standalone = certbot._internal.plugins.standalone:Authenticator',
141 'webroot = certbot._internal.plugins.webroot:Authenticator',
142 ],
143 },
144 )
145
[end of certbot/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/certbot/setup.py b/certbot/setup.py
--- a/certbot/setup.py
+++ b/certbot/setup.py
@@ -77,6 +77,9 @@
'pytest',
'pytest-cov',
'pytest-xdist',
+ # typing-extensions is required to import typing.Protocol and make the mypy checks
+ # pass (along with pylint about non-existent objects) on Python 3.6 & 3.7
+ 'typing-extensions',
'tox',
'twine',
'wheel',
| {"golden_diff": "diff --git a/certbot/setup.py b/certbot/setup.py\n--- a/certbot/setup.py\n+++ b/certbot/setup.py\n@@ -77,6 +77,9 @@\n 'pytest',\n 'pytest-cov',\n 'pytest-xdist',\n+ # typing-extensions is required to import typing.Protocol and make the mypy checks\n+ # pass (along with pylint about non-existent objects) on Python 3.6 & 3.7\n+ 'typing-extensions',\n 'tox',\n 'twine',\n 'wheel',\n", "issue": "Fix lint and mypy with Python < 3.8\nIn https://github.com/certbot/certbot/pull/8748, we made a change that causes our lint and mypy tests to need to be run on Python 3.8+ to pass. See https://github.com/certbot/certbot/pull/8748#issuecomment-808790093 for the discussion of the problem here.\r\n\r\nI don't think we should do this. Certbot supports Python 3.6+ and I think it could cause a particularly bad experience for new devs that don't happen to know they need Python 3.8+. This change also broke our development Dockerfile as can be seen at https://dev.azure.com/certbot/certbot/_build/results?buildId=3742&view=logs&j=bea2d267-f41e-5b33-7b51-a88065a8cbb0&t=0dc90756-6888-5ee6-5a6a-5855e6b9ae76&l=1873. Instead, I think we should change our approach here so the tests work on all versions of Python we support. I'm open to other ideas, but the two ideas I had for this are:\r\n\r\n1. Just declare a runtime dependency on `typing-extensions`.\r\n2. Add `typing-extensions` as a dev/test dependency and try to import it, but use similar fallback code to what we current have if it's not available.\r\n\r\nWhat do you think @adferrand? Are you interested in working on this?\n", "before_files": [{"content": "import codecs\nfrom distutils.version import LooseVersion\nimport os\nimport re\nimport sys\n\nfrom setuptools import __version__ as setuptools_version\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nmin_setuptools_version='39.0.1'\n# This conditional isn't necessary, but it provides better error messages to\n# people who try to install this package with older versions of setuptools.\nif LooseVersion(setuptools_version) < LooseVersion(min_setuptools_version):\n raise RuntimeError(f'setuptools {min_setuptools_version}+ is required')\n\n# Workaround for https://bugs.python.org/issue8876, see\n# https://bugs.python.org/issue8876#msg208792\n# This can be removed when using Python 2.7.9 or later:\n# https://hg.python.org/cpython/raw-file/v2.7.9/Misc/NEWS\nif os.path.abspath(__file__).split(os.path.sep)[1] == 'vagrant':\n del os.link\n\n\ndef read_file(filename, encoding='utf8'):\n \"\"\"Read unicode from given file.\"\"\"\n with codecs.open(filename, encoding=encoding) as fd:\n return fd.read()\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n# read version number (and other metadata) from package init\ninit_fn = os.path.join(here, 'certbot', '__init__.py')\nmeta = dict(re.findall(r\"\"\"__([a-z]+)__ = '([^']+)\"\"\", read_file(init_fn)))\n\nreadme = read_file(os.path.join(here, 'README.rst'))\nversion = meta['version']\n\n# This package relies on PyOpenSSL and requests, however, it isn't specified\n# here to avoid masking the more specific request requirements in acme. 
See\n# https://github.com/pypa/pip/issues/988 for more info.\ninstall_requires = [\n 'acme>=1.8.0',\n # We technically need ConfigArgParse 0.10.0 for Python 2.6 support, but\n # saying so here causes a runtime error against our temporary fork of 0.9.3\n # in which we added 2.6 support (see #2243), so we relax the requirement.\n 'ConfigArgParse>=0.9.3',\n 'configobj>=5.0.6',\n 'cryptography>=2.1.4',\n 'distro>=1.0.1',\n # 1.1.0+ is required to avoid the warnings described at\n # https://github.com/certbot/josepy/issues/13.\n 'josepy>=1.1.0',\n 'parsedatetime>=2.4',\n 'pyrfc3339',\n 'pytz',\n # This dependency needs to be added using environment markers to avoid its\n # installation on Linux.\n 'pywin32>=300 ; sys_platform == \"win32\"',\n f'setuptools>={min_setuptools_version}',\n 'zope.component',\n 'zope.interface',\n]\n\ndev_extras = [\n 'astroid',\n 'azure-devops',\n 'coverage',\n 'ipdb',\n 'mypy',\n 'PyGithub',\n # 1.1.0+ is required for poetry to use the poetry-core library for the\n # build system declared in tools/pinning/pyproject.toml.\n 'poetry>=1.1.0',\n 'pylint',\n 'pytest',\n 'pytest-cov',\n 'pytest-xdist',\n 'tox',\n 'twine',\n 'wheel',\n]\n\ndocs_extras = [\n # If you have Sphinx<1.5.1, you need docutils<0.13.1\n # https://github.com/sphinx-doc/sphinx/issues/3212\n 'repoze.sphinx.autointerface',\n 'Sphinx>=1.2', # Annotation support\n 'sphinx_rtd_theme',\n]\n\nsetup(\n name='certbot',\n version=version,\n description=\"ACME client\",\n long_description=readme,\n url='https://github.com/letsencrypt/letsencrypt',\n author=\"Certbot Project\",\n author_email='[email protected]',\n license='Apache License 2.0',\n python_requires='>=3.6',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Environment :: Console :: Curses',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Security',\n 'Topic :: System :: Installation/Setup',\n 'Topic :: System :: Networking',\n 'Topic :: System :: Systems Administration',\n 'Topic :: Utilities',\n ],\n\n packages=find_packages(exclude=['docs', 'examples', 'tests', 'venv']),\n include_package_data=True,\n\n install_requires=install_requires,\n extras_require={\n 'dev': dev_extras,\n 'docs': docs_extras,\n },\n\n entry_points={\n 'console_scripts': [\n 'certbot = certbot.main:main',\n ],\n 'certbot.plugins': [\n 'manual = certbot._internal.plugins.manual:Authenticator',\n 'null = certbot._internal.plugins.null:Installer',\n 'standalone = certbot._internal.plugins.standalone:Authenticator',\n 'webroot = certbot._internal.plugins.webroot:Authenticator',\n ],\n },\n)\n", "path": "certbot/setup.py"}]} | 2,499 | 125 |
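The patch in the row above adds `typing-extensions` to certbot's dev extras so that `typing.Protocol` can be used while Python 3.6 and 3.7 are still supported. A minimal sketch of the conditional-import pattern this enables is shown below; the `Installer` protocol is an illustrative stand-in, not an interface taken from the certbot code base.

```python
# Illustrative sketch only: the Installer protocol is a made-up example,
# not certbot's actual plugin interface.
import sys

if sys.version_info >= (3, 8):
    from typing import Protocol
else:
    # Backport that provides typing.Protocol on Python 3.6 and 3.7.
    from typing_extensions import Protocol


class Installer(Protocol):
    """Structural type that any installer-like object must satisfy."""

    def prepare(self) -> None:
        ...

    def restart(self) -> None:
        ...
```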
gh_patches_debug_32163 | rasdani/github-patches | git_diff | opsdroid__opsdroid-1183 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add Google Style Docstrings
We should implement Google Style Docstrings to every function, method, class in opsdroid. This style will support existing documentation and will help in the future by generating documentation automatically.
This will take a bit of effort, so this issue can be worked on by more than one contributor; just make sure that everyone knows what you are working on in order to avoid other contributors spending time on something that you are working on.
If you are unfamiliar with the Google Style Docstrings I'd recommend that you check these resources:
- [Sphinx 1.8.0+ - Google Style Docstrings](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html)
Docstrings that need to be updated:
- main.py
- [x] configure_lang
- [ ] configure_log
- [ ] get_logging_level
- [ ] check_dependencies
- [ ] print_version
- [ ] print_example_config
- [ ] edit_files
- [x] welcome_message
- ~~helper.py~~
- [x] get_opsdroid
- [x] del_rw
- [x] move_config_to_appdir
- memory.py
- [x] Memory
- [x] get
- [x] put
- [x] _get_from_database
- [x] _put_to_database
- message.py
- [x] Message
- [x] __init__
- [x] _thinking_delay
- [x] _typing delay
- [x] respond
- [x] react
- web.py
- [ ] Web
- [x] get_port
- [x] get_host
- [x] get_ssl_context
- [ ] start
- [ ] build_response
- [ ] web_index_handler
- [ ] web_stats_handler
- matchers.py
- [ ] match_regex
- [ ] match_apiai_action
- [ ] match_apiai_intent
- [ ] match_dialogflow_action
- [ ] match_dialogflow_intent
- [ ] match_luisai_intent
- [ ] match_rasanlu
- [ ] match_recastai
- [ ] match_witai
- [ ] match_crontab
- [ ] match_webhook
- [ ] match_always
- core.py
- [ ] OpsDroid
- [ ] default_connector
- [ ] exit
- [ ] critical
- [ ] call_stop
- [ ] disconnect
- [ ] stop
- [ ] load
- [ ] start_loop
- [x] setup_skills
- [ ] train_parsers
- [ ] start_connector_tasks
- [ ] start_database
- [ ] run_skill
- [ ] get_ranked_skills
- [ ] parse
- loader.py
- [ ] Loader
- [x] import_module_from_spec
- [x] import_module
- [x] check_cache
- [x] build_module_import_path
- [x] build_module_install_path
- [x] git_clone
- [x] git_pull
- [x] pip_install_deps
- [x] create_default_config
- [x] load_config_file
- [ ] envvar_constructor
- [ ] include_constructor
- [x] setup_modules_directory
- [x] load_modules_from_config
- [x] _load_modules
- [x] _install_module
- [x] _update_module
- [ ] _install_git_module
- [x] _install_local_module
---- ORIGINAL POST ----
I've been wondering about this for a while now and I would like to know if we should replace/update all the docstrings in opsdroid with the Google Style doc strings.
I think this could help new and old contributors to contribute and commit to opsdroid since the Google Style docstrings give more information about every method/function and specifies clearly what sort of input the function/method expects, what will it return and what will be raised (if applicable).
The downside of this style is that the length of every .py file will increase due to the docstrings, but since most IDEs allow you to hide those fields it shouldn't be too bad.
Here is a good example of Google Style docstrings: [Sphinx 1.8.0+ - Google Style Docstrings](http://www.sphinx-doc.org/en/master/ext/example_google.html)
I would like to know what you all think about this idea and if its worth spending time on it.
</issue>
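For readers unfamiliar with the layout requested above, here is a small, self-contained illustration of the Google docstring sections (Args/Returns/Raises). The function below is a made-up sketch for illustration only and is not opsdroid's implementation.

```python
import logging


def get_logging_level(level_name):
    """Map a textual logging level to its numeric value.

    This is only an illustration of the Google docstring layout; the body
    is a minimal sketch and not the opsdroid implementation.

    Args:
        level_name (str): Name of the level, e.g. ``"info"`` or ``"debug"``.

    Returns:
        int: The numeric level understood by the ``logging`` module.

    Raises:
        ValueError: If ``level_name`` is not a recognised level name.

    """
    levels = {
        "critical": logging.CRITICAL,
        "error": logging.ERROR,
        "warning": logging.WARNING,
        "info": logging.INFO,
        "debug": logging.DEBUG,
    }
    try:
        return levels[level_name.lower()]
    except KeyError:
        raise ValueError("Unknown logging level: {}".format(level_name))
```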
<code>
[start of opsdroid/cli/utils.py]
1 """Utilities for the opsdroid CLI commands."""
2
3 import click
4 import gettext
5 import os
6 import logging
7 import subprocess
8 import sys
9 import time
10 import warnings
11
12 from opsdroid.const import (
13 DEFAULT_LOG_FILENAME,
14 LOCALE_DIR,
15 DEFAULT_LANGUAGE,
16 DEFAULT_CONFIG_PATH,
17 )
18
19 _LOGGER = logging.getLogger("opsdroid")
20
21
22 def edit_files(ctx, param, value):
23 """Open config/log file with favourite editor."""
24 if value == "config":
25 file = DEFAULT_CONFIG_PATH
26 if ctx.command.name == "cli":
27 warn_deprecated_cli_option(
28 "The flag -e/--edit-files has been deprecated. "
29 "Please run `opsdroid config edit` instead."
30 )
31 elif value == "log":
32 file = DEFAULT_LOG_FILENAME
33 if ctx.command.name == "cli":
34 warn_deprecated_cli_option(
35 "The flag -l/--view-log has been deprecated. "
36 "Please run `opsdroid logs` instead."
37 )
38 else:
39 return
40
41 editor = os.environ.get("EDITOR", "vi")
42 if editor == "vi":
43 click.echo(
44 "You are about to edit a file in vim. \n"
45 "Read the tutorial on vim at: https://bit.ly/2HRvvrB"
46 )
47 time.sleep(3)
48
49 subprocess.run([editor, file])
50 ctx.exit(0)
51
52
53 def warn_deprecated_cli_option(text):
54 """Warn users that the cli option they have used is deprecated."""
55 print(f"Warning: {text}")
56 warnings.warn(text, DeprecationWarning)
57
58
59 def configure_lang(config):
60 """Configure app language based on user config.
61
62 Args:
63 config: Language Configuration and it uses ISO 639-1 code.
64 for more info https://en.m.wikipedia.org/wiki/List_of_ISO_639-1_codes
65
66
67 """
68 lang_code = config.get("lang", DEFAULT_LANGUAGE)
69 if lang_code != DEFAULT_LANGUAGE:
70 lang = gettext.translation("opsdroid", LOCALE_DIR, (lang_code,), fallback=True)
71 lang.install()
72
73
74 def check_dependencies():
75 """Check for system dependencies required by opsdroid."""
76 if sys.version_info.major < 3 or sys.version_info.minor < 6:
77 logging.critical(_("Whoops! opsdroid requires python 3.6 or above."))
78 sys.exit(1)
79
80
81 def welcome_message(config):
82 """Add welcome message if set to true in configuration.
83
84 Args:
85 config: config loaded by Loader
86
87 Raises:
88 KeyError: If 'welcome-message' key is not found in configuration file
89
90 """
91 try:
92 if config["welcome-message"]:
93 _LOGGER.info("=" * 40)
94 _LOGGER.info(
95 _(
96 "You can customise your opsdroid by modifying "
97 "your configuration.yaml"
98 )
99 )
100 _LOGGER.info(
101 _("Read more at: " "http://opsdroid.readthedocs.io/#configuration")
102 )
103 _LOGGER.info(_("Watch the Get Started Videos at: " "http://bit.ly/2fnC0Fh"))
104 _LOGGER.info(
105 _(
106 "Install Opsdroid Desktop at: \n"
107 "https://github.com/opsdroid/opsdroid-desktop/"
108 "releases"
109 )
110 )
111 _LOGGER.info("=" * 40)
112 except KeyError:
113 _LOGGER.warning(
114 _("'welcome-message: true/false' is missing in configuration.yaml")
115 )
116
[end of opsdroid/cli/utils.py]
[start of opsdroid/cli/config.py]
1 """The config subcommand for opsdroid cli."""
2
3 import click
4
5 from opsdroid.cli.utils import edit_files, warn_deprecated_cli_option
6 from opsdroid.const import EXAMPLE_CONFIG_FILE
7
8
9 def print_example_config(ctx, param, value):
10 """[Deprecated] Print out the example config."""
11 if not value or ctx.resilient_parsing:
12 return
13 if ctx.command.name == "cli":
14 warn_deprecated_cli_option(
15 "The flag --gen-config has been deprecated. "
16 "Please run `opsdroid config gen` instead."
17 )
18 with open(EXAMPLE_CONFIG_FILE, "r") as conf:
19 click.echo(conf.read())
20 ctx.exit(0)
21
22
23 @click.group()
24 def config():
25 """Subcommands related to opsdroid configuration."""
26
27
28 @config.command()
29 @click.pass_context
30 def gen(ctx):
31 """Print out the example config."""
32 print_example_config(ctx, None, True)
33
34
35 @config.command()
36 @click.pass_context
37 def edit(ctx):
38 """Print out the example config."""
39 edit_files(ctx, None, "config")
40
[end of opsdroid/cli/config.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/opsdroid/cli/config.py b/opsdroid/cli/config.py
--- a/opsdroid/cli/config.py
+++ b/opsdroid/cli/config.py
@@ -7,7 +7,19 @@
def print_example_config(ctx, param, value):
- """[Deprecated] Print out the example config."""
+ """[Deprecated] Print out the example config.
+
+ Args:
+ ctx (:obj:`click.Context`): The current click cli context.
+ param (dict): a dictionary of all parameters pass to the click
+ context when invoking this function as a callback.
+ value (bool): the value of this parameter after invocation.
+ Defaults to False, set to True when this flag is called.
+
+ Returns:
+ int: the exit code. Always returns 0 in this case.
+
+ """
if not value or ctx.resilient_parsing:
return
if ctx.command.name == "cli":
diff --git a/opsdroid/cli/utils.py b/opsdroid/cli/utils.py
--- a/opsdroid/cli/utils.py
+++ b/opsdroid/cli/utils.py
@@ -20,7 +20,21 @@
def edit_files(ctx, param, value):
- """Open config/log file with favourite editor."""
+ """Open config/log file with favourite editor.
+
+ Args:
+ ctx (:obj:`click.Context`): The current click cli context.
+ param (dict): a dictionary of all parameters pass to the click
+ context when invoking this function as a callback.
+ value (string): the value of this parameter after invocation.
+ It is either "config" or "log" depending on the program
+ calling this function.
+
+ Returns:
+ int: the exit code. Always returns 0 in this case.
+
+ """
+
if value == "config":
file = DEFAULT_CONFIG_PATH
if ctx.command.name == "cli":
@@ -72,7 +86,13 @@
def check_dependencies():
- """Check for system dependencies required by opsdroid."""
+ """Check for system dependencies required by opsdroid.
+
+ Returns:
+ int: the exit code. Returns 1 if the Python version installed is
+ below 3.6.
+
+ """
if sys.version_info.major < 3 or sys.version_info.minor < 6:
logging.critical(_("Whoops! opsdroid requires python 3.6 or above."))
sys.exit(1)
| {"golden_diff": "diff --git a/opsdroid/cli/config.py b/opsdroid/cli/config.py\n--- a/opsdroid/cli/config.py\n+++ b/opsdroid/cli/config.py\n@@ -7,7 +7,19 @@\n \n \n def print_example_config(ctx, param, value):\n- \"\"\"[Deprecated] Print out the example config.\"\"\"\n+ \"\"\"[Deprecated] Print out the example config.\n+\n+ Args:\n+ ctx (:obj:`click.Context`): The current click cli context.\n+ param (dict): a dictionary of all parameters pass to the click\n+ context when invoking this function as a callback.\n+ value (bool): the value of this parameter after invocation.\n+ Defaults to False, set to True when this flag is called.\n+\n+ Returns:\n+ int: the exit code. Always returns 0 in this case.\n+\n+ \"\"\"\n if not value or ctx.resilient_parsing:\n return\n if ctx.command.name == \"cli\":\ndiff --git a/opsdroid/cli/utils.py b/opsdroid/cli/utils.py\n--- a/opsdroid/cli/utils.py\n+++ b/opsdroid/cli/utils.py\n@@ -20,7 +20,21 @@\n \n \n def edit_files(ctx, param, value):\n- \"\"\"Open config/log file with favourite editor.\"\"\"\n+ \"\"\"Open config/log file with favourite editor.\n+\n+ Args:\n+ ctx (:obj:`click.Context`): The current click cli context.\n+ param (dict): a dictionary of all parameters pass to the click\n+ context when invoking this function as a callback.\n+ value (string): the value of this parameter after invocation.\n+ It is either \"config\" or \"log\" depending on the program\n+ calling this function.\n+\n+ Returns:\n+ int: the exit code. Always returns 0 in this case.\n+\n+ \"\"\"\n+\n if value == \"config\":\n file = DEFAULT_CONFIG_PATH\n if ctx.command.name == \"cli\":\n@@ -72,7 +86,13 @@\n \n \n def check_dependencies():\n- \"\"\"Check for system dependencies required by opsdroid.\"\"\"\n+ \"\"\"Check for system dependencies required by opsdroid.\n+\n+ Returns:\n+ int: the exit code. Returns 1 if the Python version installed is\n+ below 3.6.\n+\n+ \"\"\"\n if sys.version_info.major < 3 or sys.version_info.minor < 6:\n logging.critical(_(\"Whoops! opsdroid requires python 3.6 or above.\"))\n sys.exit(1)\n", "issue": "Add Google Style Docstrings\nWe should implement Google Style Docstrings to every function, method, class in opsdroid. 
This style will support existing documentation and will help in the future by generating documentation automatically.\r\n\r\nThis consists in a bit of effort so this issue can be worked by more than one contributor, just make sure that everyone knows what you are working on in order to avoid other contributors spending time on something that you are working on.\r\n\r\nIf you are unfamiliar with the Google Style Docstrings I'd recommend that you check these resources:\r\n\r\n - [Sphix 1.8.0+ - Google Style Docstrings](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html)\r\n\r\n\r\n\r\nDocstrings that need to be updated:\r\n\r\n- main.py\r\n - [x] configure_lang\r\n - [ ] configure_log\r\n - [ ] get_logging_level\r\n - [ ] check_dependencies\r\n - [ ] print_version\r\n - [ ] print_example_config\r\n - [ ] edit_files\r\n - [x] welcome_message\r\n- ~~helper.py~~\r\n - [x] get_opsdroid\r\n - [x] del_rw\r\n - [x] move_config_to_appdir\r\n- memory.py\r\n - [x] Memory\r\n - [x] get\r\n - [x] put\r\n - [x] _get_from_database\r\n - [x] _put_to_database\r\n- message.py\r\n - [x] Message\r\n - [x] __init__\r\n - [x] _thinking_delay\r\n - [x] _typing delay\r\n - [x] respond\r\n - [x] react\r\n- web.py\r\n - [ ] Web\r\n - [x] get_port\r\n - [x] get_host\r\n - [x] get_ssl_context\r\n - [ ] start\r\n - [ ] build_response\r\n - [ ] web_index_handler\r\n - [ ] web_stats_handler\r\n- matchers.py\r\n - [ ] match_regex\r\n - [ ] match_apiai_action\r\n - [ ] match_apiai_intent\r\n - [ ] match_dialogflow_action\r\n - [ ] match_dialogflow_intent\r\n - [ ] match_luisai_intent\r\n - [ ] match_rasanlu\r\n - [ ] match_recastai\r\n - [ ] match_witai\r\n - [ ] match_crontab\r\n - [ ] match_webhook\r\n - [ ] match_always\r\n- core.py\r\n - [ ] OpsDroid\r\n - [ ] default_connector\r\n - [ ] exit\r\n - [ ] critical\r\n - [ ] call_stop\r\n - [ ] disconnect\r\n - [ ] stop\r\n - [ ] load\r\n - [ ] start_loop\r\n - [x] setup_skills\r\n - [ ] train_parsers\r\n - [ ] start_connector_tasks\r\n - [ ] start_database\r\n - [ ] run_skill\r\n - [ ] get_ranked_skills\r\n - [ ] parse\r\n- loader.py\r\n - [ ] Loader\r\n - [x] import_module_from_spec\r\n - [x] import_module\r\n - [x] check_cache\r\n - [x] build_module_import_path\r\n - [x] build_module_install_path\r\n - [x] git_clone\r\n - [x] git_pull\r\n - [x] pip_install_deps\r\n - [x] create_default_config\r\n - [x] load_config_file\r\n - [ ] envvar_constructor\r\n - [ ] include_constructor\r\n - [x] setup_modules_directory\r\n - [x] load_modules_from_config\r\n - [x] _load_modules\r\n - [x] _install_module\r\n - [x] _update_module\r\n - [ ] _install_git_module\r\n - [x] _install_local_module\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n---- ORIGINAL POST ---- \r\nI've been wondering about this for a while now and I would like to know if we should replace/update all the docstrings in opsdroid with the Google Style doc strings. 
\r\n\r\nI think this could help new and old contributors to contribute and commit to opsdroid since the Google Style docstrings give more information about every method/function and specifies clearly what sort of input the function/method expects, what will it return and what will be raised (if applicable).\r\n\r\nThe downsize of this style is that the length of every .py file will increase due to the doc strings, but since most IDE's allow you to hide those fields it shouldn't be too bad.\r\n\r\nHere is a good example of Google Style Doc strings: [Sphix 1.8.0+ - Google Style Docstrings](http://www.sphinx-doc.org/en/master/ext/example_google.html)\r\n\r\nI would like to know what you all think about this idea and if its worth spending time on it.\n", "before_files": [{"content": "\"\"\"Utilities for the opsdroid CLI commands.\"\"\"\n\nimport click\nimport gettext\nimport os\nimport logging\nimport subprocess\nimport sys\nimport time\nimport warnings\n\nfrom opsdroid.const import (\n DEFAULT_LOG_FILENAME,\n LOCALE_DIR,\n DEFAULT_LANGUAGE,\n DEFAULT_CONFIG_PATH,\n)\n\n_LOGGER = logging.getLogger(\"opsdroid\")\n\n\ndef edit_files(ctx, param, value):\n \"\"\"Open config/log file with favourite editor.\"\"\"\n if value == \"config\":\n file = DEFAULT_CONFIG_PATH\n if ctx.command.name == \"cli\":\n warn_deprecated_cli_option(\n \"The flag -e/--edit-files has been deprecated. \"\n \"Please run `opsdroid config edit` instead.\"\n )\n elif value == \"log\":\n file = DEFAULT_LOG_FILENAME\n if ctx.command.name == \"cli\":\n warn_deprecated_cli_option(\n \"The flag -l/--view-log has been deprecated. \"\n \"Please run `opsdroid logs` instead.\"\n )\n else:\n return\n\n editor = os.environ.get(\"EDITOR\", \"vi\")\n if editor == \"vi\":\n click.echo(\n \"You are about to edit a file in vim. \\n\"\n \"Read the tutorial on vim at: https://bit.ly/2HRvvrB\"\n )\n time.sleep(3)\n\n subprocess.run([editor, file])\n ctx.exit(0)\n\n\ndef warn_deprecated_cli_option(text):\n \"\"\"Warn users that the cli option they have used is deprecated.\"\"\"\n print(f\"Warning: {text}\")\n warnings.warn(text, DeprecationWarning)\n\n\ndef configure_lang(config):\n \"\"\"Configure app language based on user config.\n\n Args:\n config: Language Configuration and it uses ISO 639-1 code.\n for more info https://en.m.wikipedia.org/wiki/List_of_ISO_639-1_codes\n\n\n \"\"\"\n lang_code = config.get(\"lang\", DEFAULT_LANGUAGE)\n if lang_code != DEFAULT_LANGUAGE:\n lang = gettext.translation(\"opsdroid\", LOCALE_DIR, (lang_code,), fallback=True)\n lang.install()\n\n\ndef check_dependencies():\n \"\"\"Check for system dependencies required by opsdroid.\"\"\"\n if sys.version_info.major < 3 or sys.version_info.minor < 6:\n logging.critical(_(\"Whoops! 
opsdroid requires python 3.6 or above.\"))\n sys.exit(1)\n\n\ndef welcome_message(config):\n \"\"\"Add welcome message if set to true in configuration.\n\n Args:\n config: config loaded by Loader\n\n Raises:\n KeyError: If 'welcome-message' key is not found in configuration file\n\n \"\"\"\n try:\n if config[\"welcome-message\"]:\n _LOGGER.info(\"=\" * 40)\n _LOGGER.info(\n _(\n \"You can customise your opsdroid by modifying \"\n \"your configuration.yaml\"\n )\n )\n _LOGGER.info(\n _(\"Read more at: \" \"http://opsdroid.readthedocs.io/#configuration\")\n )\n _LOGGER.info(_(\"Watch the Get Started Videos at: \" \"http://bit.ly/2fnC0Fh\"))\n _LOGGER.info(\n _(\n \"Install Opsdroid Desktop at: \\n\"\n \"https://github.com/opsdroid/opsdroid-desktop/\"\n \"releases\"\n )\n )\n _LOGGER.info(\"=\" * 40)\n except KeyError:\n _LOGGER.warning(\n _(\"'welcome-message: true/false' is missing in configuration.yaml\")\n )\n", "path": "opsdroid/cli/utils.py"}, {"content": "\"\"\"The config subcommand for opsdroid cli.\"\"\"\n\nimport click\n\nfrom opsdroid.cli.utils import edit_files, warn_deprecated_cli_option\nfrom opsdroid.const import EXAMPLE_CONFIG_FILE\n\n\ndef print_example_config(ctx, param, value):\n \"\"\"[Deprecated] Print out the example config.\"\"\"\n if not value or ctx.resilient_parsing:\n return\n if ctx.command.name == \"cli\":\n warn_deprecated_cli_option(\n \"The flag --gen-config has been deprecated. \"\n \"Please run `opsdroid config gen` instead.\"\n )\n with open(EXAMPLE_CONFIG_FILE, \"r\") as conf:\n click.echo(conf.read())\n ctx.exit(0)\n\n\[email protected]()\ndef config():\n \"\"\"Subcommands related to opsdroid configuration.\"\"\"\n\n\[email protected]()\[email protected]_context\ndef gen(ctx):\n \"\"\"Print out the example config.\"\"\"\n print_example_config(ctx, None, True)\n\n\[email protected]()\[email protected]_context\ndef edit(ctx):\n \"\"\"Print out the example config.\"\"\"\n edit_files(ctx, None, \"config\")\n", "path": "opsdroid/cli/config.py"}]} | 2,893 | 556 |
gh_patches_debug_19840 | rasdani/github-patches | git_diff | hydroshare__hydroshare-5100 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
hsapi /scimeta/elements endpoint errors with no authentication
**Description of the bug**
/scimeta/elements hsapi endpoint responds with a system error when no authentication credentials are provided.
Steps to reproduce the bug:
1. Go to https://www.hydroshare.org/hsapi/resource/16b27156f2b1483099f09d3b688e43fb/scimeta/elements/
1. See error
**Expected behavior**
API endpoints should handle requests that do not provide authentication. Unauthenticated requests for publicly accessible resources should receive a response. Unauthenticated requests for privately accessible resources should receive a 403.
**Additional information**
Environment:
Request Method: GET
Request URL: http://localhost:8000/hsapi/resource/068d77bb3f4c442194e9f08056aca1cf/scimeta/elements/
Django Version: 1.11.29
Python Version: 3.6.9
Installed Applications:
('mezzanine.boot',
'test_without_migrations',
'autocomplete_light',
'django.contrib.auth',
'oauth2_provider',
'corsheaders',
'django.contrib.contenttypes',
'django.contrib.redirects',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.sitemaps',
'django.contrib.gis',
'django.contrib.postgres',
'inplaceeditform',
'django_nose',
'django_irods',
'drf_yasg',
'theme',
'theme.blog_mods',
'heartbeat',
'mezzanine.conf',
'mezzanine.core',
'mezzanine.generic',
'mezzanine.blog',
'mezzanine.forms',
'mezzanine.pages',
'mezzanine.galleries',
'crispy_forms',
'mezzanine.accounts',
'haystack',
'rest_framework',
'robots',
'hs_core',
'hs_access_control',
'hs_labels',
'hs_metrics',
'irods_browser_app',
'hs_geo_raster_resource',
'ref_ts',
'hs_app_timeseries',
'widget_tweaks',
'hs_app_netCDF',
'hs_model_program',
'hs_modelinstance',
'hs_tools_resource',
'hs_swat_modelinstance',
'hs_geographic_feature_resource',
'hs_script_resource',
'hs_sitemap',
'hs_collection_resource',
'hs_modflow_modelinstance',
'hs_tracking',
'hs_file_types',
'hs_composite_resource',
'hs_rest_api',
'hs_dictionary',
'hs_odm2',
'security',
'markdown',
'hs_communities',
'hs_discover',
'debug_toolbar',
'filebrowser_safe',
'grappelli_safe',
'django.contrib.admin',
'django.contrib.staticfiles',
'django_comments')
Installed Middleware:
('django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'mezzanine.core.request.CurrentRequestMiddleware',
'mezzanine.core.middleware.RedirectFallbackMiddleware',
'mezzanine.core.middleware.AdminLoginInterfaceSelectorMiddleware',
'mezzanine.core.middleware.SitePermissionMiddleware',
'mezzanine.pages.middleware.PageMiddleware',
'hs_core.robots.RobotFilter',
'hs_tracking.middleware.Tracking',
'debug_toolbar.middleware.DebugToolbarMiddleware')
Traceback:
File "/usr/local/lib/python3.6/site-packages/django/core/handlers/exception.py" in inner
41. response = get_response(request)
File "/usr/local/lib/python3.6/site-packages/django/core/handlers/base.py" in _get_response
217. response = self.process_exception_by_middleware(e, request)
File "/usr/local/lib/python3.6/site-packages/django/core/handlers/base.py" in _get_response
215. response = response.render()
File "/usr/local/lib/python3.6/site-packages/django/template/response.py" in render
107. self.content = self.rendered_content
File "/usr/local/lib/python3.6/site-packages/rest_framework/response.py" in rendered_content
70. ret = renderer.render(self.data, accepted_media_type, context)
File "/usr/local/lib/python3.6/site-packages/rest_framework/renderers.py" in render
724. context = self.get_context(data, accepted_media_type, renderer_context)
File "/usr/local/lib/python3.6/site-packages/rest_framework/renderers.py" in get_context
656. raw_data_put_form = self.get_raw_data_form(data, view, 'PUT', request)
File "/usr/local/lib/python3.6/site-packages/rest_framework/renderers.py" in get_raw_data_form
552. serializer = view.get_serializer(instance=instance)
File "/usr/local/lib/python3.6/site-packages/rest_framework/generics.py" in get_serializer
110. return serializer_class(*args, **kwargs)
Exception Type: TypeError at /hsapi/resource/068d77bb3f4c442194e9f08056aca1cf/scimeta/elements/
Exception Value: 'CoreMetaDataSerializer' object is not callable
</issue>
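The last frame of the traceback is DRF's `GenericAPIView.get_serializer()`, which calls `serializer_class(*args, **kwargs)` when the browsable-API renderer builds its raw-data form; the error therefore indicates that the view ends up holding a serializer *instance*, rather than a serializer *class*, in `serializer_class`. A framework-free sketch of that failure mode follows; the class and function names are stand-ins, not HydroShare code.

```python
# Framework-free sketch of the failure in the traceback; names are stand-ins.
class CoreMetaDataSerializerSketch:
    """Stand-in for the real DRF serializer class."""


def get_serializer(serializer_class):
    # Mirrors what rest_framework's GenericAPIView.get_serializer() does:
    # it expects a class and instantiates it per request.
    return serializer_class()


get_serializer(CoreMetaDataSerializerSketch)  # fine: a class is callable

try:
    get_serializer(CoreMetaDataSerializerSketch())  # an instance is not callable
except TypeError as exc:
    print(exc)  # "'CoreMetaDataSerializerSketch' object is not callable"
```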
<code>
[start of hs_core/views/resource_metadata_rest_api.py]
1 import logging
2
3 from django.http import QueryDict
4
5 from rest_framework.response import Response
6 from rest_framework.exceptions import ValidationError
7 from rest_framework import status
8 from rest_framework import generics
9 from rest_framework import serializers
10
11 from hs_core import hydroshare
12 from hs_core.models import Contributor, CoreMetaData, Coverage, Creator, Date, \
13 Format, FundingAgency, Identifier, Subject, Relation, GeospatialRelation
14 from hs_core.views import utils as view_utils
15 from hs_core.views.utils import ACTION_TO_AUTHORIZE
16
17 logger = logging.getLogger(__name__)
18
19
20 class Identifiers(serializers.DictField):
21 child = serializers.CharField()
22
23
24 class PartySerializer(serializers.Serializer):
25 name = serializers.CharField()
26 hydroshare_user_id = serializers.IntegerField(required=False)
27 organization = serializers.CharField(required=False)
28 email = serializers.EmailField(required=False)
29 address = serializers.CharField(required=False)
30 phone = serializers.CharField(required=False)
31 homepage = serializers.URLField(required=False)
32 identifiers = Identifiers(required=False)
33
34 class Meta:
35 model = Creator
36 fields = {'name', 'hydroshare_user_id', 'organization', 'email',
37 'address', 'phone', 'homepage', 'identifiers'}
38
39
40 class CreatorSerializer(PartySerializer):
41 order = serializers.IntegerField(required=False)
42
43 class Meta:
44 model = Contributor
45
46
47 class DateSerializer(serializers.Serializer):
48 # term = 'Date'
49 type = serializers.CharField(required=False)
50 start_date = serializers.DateTimeField(required=False)
51 end_date = serializers.DateTimeField(required=False)
52
53 class Meta:
54 model = Date
55
56
57 class CoverageSerializer(serializers.Serializer):
58 type = serializers.CharField(required=False)
59 value = serializers.SerializerMethodField(required=False)
60
61 class Meta:
62 model = Coverage
63
64 def get_value(self, obj):
65 return obj.value
66
67
68 class FormatSerializer(serializers.Serializer):
69 value = serializers.CharField(required=False)
70
71 class Meta:
72 model = Format
73
74
75 class FundingAgencySerializer(serializers.Serializer):
76 agency_name = serializers.CharField()
77 award_title = serializers.CharField(required=False)
78 award_number = serializers.CharField(required=False)
79 agency_url = serializers.URLField(required=False)
80
81 class Meta:
82 model = FundingAgency
83
84
85 class IdentifierSerializer(serializers.Serializer):
86 name = serializers.CharField(required=False)
87 url = serializers.URLField(required=False)
88
89 class Meta:
90 model = Identifier
91
92
93 class SubjectSerializer(serializers.Serializer):
94 value = serializers.CharField(required=False)
95
96 class Meta:
97 model = Subject
98
99
100 class RelationSerializer(serializers.Serializer):
101 type = serializers.CharField(required=False)
102 value = serializers.CharField(required=False)
103
104 class Meta:
105 model = Relation
106
107
108 class GeospatialRelationSerializer(RelationSerializer):
109 type = serializers.CharField(required=False)
110 value = serializers.CharField(required=False)
111 text = serializers.CharField(required=False)
112
113 class Meta:
114 model = GeospatialRelation
115
116
117 class CoreMetaDataSerializer(serializers.Serializer):
118 title = serializers.CharField(required=False)
119 creators = CreatorSerializer(required=False, many=True)
120 contributors = PartySerializer(required=False, many=True)
121 coverages = CoverageSerializer(required=False, many=True)
122 dates = DateSerializer(required=False, many=True)
123 description = serializers.CharField(required=False)
124 formats = FormatSerializer(required=False, many=True)
125 funding_agencies = FundingAgencySerializer(required=False, many=True)
126 identifiers = IdentifierSerializer(required=False, many=True)
127 language = serializers.CharField(required=False)
128 rights = serializers.CharField(required=False)
129 type = serializers.CharField(required=False)
130 publisher = serializers.CharField(required=False)
131 subjects = SubjectSerializer(required=False, many=True)
132 relations = RelationSerializer(required=False, many=True)
133 geospatialrelations = GeospatialRelationSerializer(required=False, many=True)
134
135 class Meta:
136 model = CoreMetaData
137
138
139 class MetadataElementsRetrieveUpdate(generics.RetrieveUpdateDestroyAPIView):
140 """
141 Retrieve resource science (Dublin Core) metadata
142
143 REST URL: /hsapi/resource/{pk}/scimeta/elements/
144 HTTP method: GET
145
146 :type pk: str
147 :param pk: id of the resource
148 :return: resource science metadata as JSON document
149 :rtype: str
150 :raises:
151 NotFound: return json format: {'detail': 'No resource was found for resource id:pk'}
152 PermissionDenied: return json format: {'detail': 'You do not have permission to perform
153 this action.'}
154
155 REST URL: /hsapi/resource/{pk}/scimeta/elements/
156 HTTP method: PUT
157
158 :type pk: str
159 :param pk: id of the resource
160 :type request: JSON formatted string
161 :param request: resource metadata
162 :return: updated resource science metadata as JSON document
163 :rtype: str
164 :raises:
165 NotFound: return json format: {'detail': 'No resource was found for resource id':pk}
166 PermissionDenied: return json format: {'detail': 'You do not have permission to perform
167 this action.'}
168 ValidationError: return json format: {parameter-1': ['error message-1'],
169 'parameter-2': ['error message-2'], .. }
170 """
171 ACCEPT_FORMATS = ('application/json',)
172
173 allowed_methods = ('GET', 'PUT')
174
175 # Overwritten by resource types with extended metadata
176 serializer_class = CoreMetaDataSerializer
177
178 def get(self, request, pk):
179 view_utils.authorize(request, pk, needed_permission=ACTION_TO_AUTHORIZE.VIEW_METADATA)
180 resource = hydroshare.get_resource_by_shortkey(shortkey=pk)
181 serializer = resource.metadata.serializer
182 self.serializer_class = resource.metadata.serializer
183 return Response(data=serializer.data, status=status.HTTP_200_OK)
184
185 def put(self, request, pk):
186 # Update science metadata
187 resource, _, _ = view_utils.authorize(
188 request, pk,
189 needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE)
190
191 metadata = []
192 put_data = request.data.copy()
193
194 # convert the QueryDict to dict
195 if isinstance(put_data, QueryDict):
196 put_data = put_data.dict()
197 try:
198 resource.metadata.parse_for_bulk_update(put_data, metadata)
199 hydroshare.update_science_metadata(pk=pk, metadata=metadata, user=request.user)
200 except Exception as ex:
201 error_msg = {
202 'resource': "Resource metadata update failed: %s, %s"
203 % (ex.__class__, str(ex))
204 }
205 raise ValidationError(detail=error_msg)
206
207 resource = hydroshare.get_resource_by_shortkey(shortkey=pk)
208 serializer = resource.metadata.serializer
209 self.serializer_class = serializer
210 return Response(data=serializer.data, status=status.HTTP_202_ACCEPTED)
211
[end of hs_core/views/resource_metadata_rest_api.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/hs_core/views/resource_metadata_rest_api.py b/hs_core/views/resource_metadata_rest_api.py
--- a/hs_core/views/resource_metadata_rest_api.py
+++ b/hs_core/views/resource_metadata_rest_api.py
@@ -179,7 +179,6 @@
view_utils.authorize(request, pk, needed_permission=ACTION_TO_AUTHORIZE.VIEW_METADATA)
resource = hydroshare.get_resource_by_shortkey(shortkey=pk)
serializer = resource.metadata.serializer
- self.serializer_class = resource.metadata.serializer
return Response(data=serializer.data, status=status.HTTP_200_OK)
def put(self, request, pk):
@@ -206,5 +205,4 @@
resource = hydroshare.get_resource_by_shortkey(shortkey=pk)
serializer = resource.metadata.serializer
- self.serializer_class = serializer
return Response(data=serializer.data, status=status.HTTP_202_ACCEPTED)
| {"golden_diff": "diff --git a/hs_core/views/resource_metadata_rest_api.py b/hs_core/views/resource_metadata_rest_api.py\n--- a/hs_core/views/resource_metadata_rest_api.py\n+++ b/hs_core/views/resource_metadata_rest_api.py\n@@ -179,7 +179,6 @@\n view_utils.authorize(request, pk, needed_permission=ACTION_TO_AUTHORIZE.VIEW_METADATA)\n resource = hydroshare.get_resource_by_shortkey(shortkey=pk)\n serializer = resource.metadata.serializer\n- self.serializer_class = resource.metadata.serializer\n return Response(data=serializer.data, status=status.HTTP_200_OK)\n \n def put(self, request, pk):\n@@ -206,5 +205,4 @@\n \n resource = hydroshare.get_resource_by_shortkey(shortkey=pk)\n serializer = resource.metadata.serializer\n- self.serializer_class = serializer\n return Response(data=serializer.data, status=status.HTTP_202_ACCEPTED)\n", "issue": "hsapi /scimeta/elements endpoint errors with no authentication\n**Description of the bug**\r\n/scimeta/elements hsapi endpoint responds with a system error when no authentication credentials are provided.\r\n\r\nSteps to reproduce the bug:\r\n1. Go to https://www.hydroshare.org/hsapi/resource/16b27156f2b1483099f09d3b688e43fb/scimeta/elements/\r\n1. See error\r\n\r\n**Expected behavior**\r\nAPI endpoints should handle requests that do not provide authentication. Unauthenticated requests that make publicly accessible requests should provide a response. Unauthenticated requests that make privately accessible requests should respond with a 403.\r\n\r\n**Additional information**\r\nEnvironment:\r\n\r\n\r\nRequest Method: GET\r\nRequest URL: http://localhost:8000/hsapi/resource/068d77bb3f4c442194e9f08056aca1cf/scimeta/elements/\r\n\r\nDjango Version: 1.11.29\r\nPython Version: 3.6.9\r\nInstalled Applications:\r\n('mezzanine.boot',\r\n 'test_without_migrations',\r\n 'autocomplete_light',\r\n 'django.contrib.auth',\r\n 'oauth2_provider',\r\n 'corsheaders',\r\n 'django.contrib.contenttypes',\r\n 'django.contrib.redirects',\r\n 'django.contrib.sessions',\r\n 'django.contrib.sites',\r\n 'django.contrib.sitemaps',\r\n 'django.contrib.gis',\r\n 'django.contrib.postgres',\r\n 'inplaceeditform',\r\n 'django_nose',\r\n 'django_irods',\r\n 'drf_yasg',\r\n 'theme',\r\n 'theme.blog_mods',\r\n 'heartbeat',\r\n 'mezzanine.conf',\r\n 'mezzanine.core',\r\n 'mezzanine.generic',\r\n 'mezzanine.blog',\r\n 'mezzanine.forms',\r\n 'mezzanine.pages',\r\n 'mezzanine.galleries',\r\n 'crispy_forms',\r\n 'mezzanine.accounts',\r\n 'haystack',\r\n 'rest_framework',\r\n 'robots',\r\n 'hs_core',\r\n 'hs_access_control',\r\n 'hs_labels',\r\n 'hs_metrics',\r\n 'irods_browser_app',\r\n 'hs_geo_raster_resource',\r\n 'ref_ts',\r\n 'hs_app_timeseries',\r\n 'widget_tweaks',\r\n 'hs_app_netCDF',\r\n 'hs_model_program',\r\n 'hs_modelinstance',\r\n 'hs_tools_resource',\r\n 'hs_swat_modelinstance',\r\n 'hs_geographic_feature_resource',\r\n 'hs_script_resource',\r\n 'hs_sitemap',\r\n 'hs_collection_resource',\r\n 'hs_modflow_modelinstance',\r\n 'hs_tracking',\r\n 'hs_file_types',\r\n 'hs_composite_resource',\r\n 'hs_rest_api',\r\n 'hs_dictionary',\r\n 'hs_odm2',\r\n 'security',\r\n 'markdown',\r\n 'hs_communities',\r\n 'hs_discover',\r\n 'debug_toolbar',\r\n 'filebrowser_safe',\r\n 'grappelli_safe',\r\n 'django.contrib.admin',\r\n 'django.contrib.staticfiles',\r\n 'django_comments')\r\nInstalled Middleware:\r\n('django.contrib.sessions.middleware.SessionMiddleware',\r\n 'django.middleware.locale.LocaleMiddleware',\r\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\r\n 
'corsheaders.middleware.CorsMiddleware',\r\n 'django.middleware.common.CommonMiddleware',\r\n 'django.middleware.csrf.CsrfViewMiddleware',\r\n 'django.contrib.messages.middleware.MessageMiddleware',\r\n 'mezzanine.core.request.CurrentRequestMiddleware',\r\n 'mezzanine.core.middleware.RedirectFallbackMiddleware',\r\n 'mezzanine.core.middleware.AdminLoginInterfaceSelectorMiddleware',\r\n 'mezzanine.core.middleware.SitePermissionMiddleware',\r\n 'mezzanine.pages.middleware.PageMiddleware',\r\n 'hs_core.robots.RobotFilter',\r\n 'hs_tracking.middleware.Tracking',\r\n 'debug_toolbar.middleware.DebugToolbarMiddleware')\r\n\r\n\r\n\r\nTraceback:\r\n\r\nFile \"/usr/local/lib/python3.6/site-packages/django/core/handlers/exception.py\" in inner\r\n 41. response = get_response(request)\r\n\r\nFile \"/usr/local/lib/python3.6/site-packages/django/core/handlers/base.py\" in _get_response\r\n 217. response = self.process_exception_by_middleware(e, request)\r\n\r\nFile \"/usr/local/lib/python3.6/site-packages/django/core/handlers/base.py\" in _get_response\r\n 215. response = response.render()\r\n\r\nFile \"/usr/local/lib/python3.6/site-packages/django/template/response.py\" in render\r\n 107. self.content = self.rendered_content\r\n\r\nFile \"/usr/local/lib/python3.6/site-packages/rest_framework/response.py\" in rendered_content\r\n 70. ret = renderer.render(self.data, accepted_media_type, context)\r\n\r\nFile \"/usr/local/lib/python3.6/site-packages/rest_framework/renderers.py\" in render\r\n 724. context = self.get_context(data, accepted_media_type, renderer_context)\r\n\r\nFile \"/usr/local/lib/python3.6/site-packages/rest_framework/renderers.py\" in get_context\r\n 656. raw_data_put_form = self.get_raw_data_form(data, view, 'PUT', request)\r\n\r\nFile \"/usr/local/lib/python3.6/site-packages/rest_framework/renderers.py\" in get_raw_data_form\r\n 552. serializer = view.get_serializer(instance=instance)\r\n\r\nFile \"/usr/local/lib/python3.6/site-packages/rest_framework/generics.py\" in get_serializer\r\n 110. 
return serializer_class(*args, **kwargs)\r\n\r\nException Type: TypeError at /hsapi/resource/068d77bb3f4c442194e9f08056aca1cf/scimeta/elements/\r\nException Value: 'CoreMetaDataSerializer' object is not callable\r\n\r\n\n", "before_files": [{"content": "import logging\n\nfrom django.http import QueryDict\n\nfrom rest_framework.response import Response\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework import status\nfrom rest_framework import generics\nfrom rest_framework import serializers\n\nfrom hs_core import hydroshare\nfrom hs_core.models import Contributor, CoreMetaData, Coverage, Creator, Date, \\\n Format, FundingAgency, Identifier, Subject, Relation, GeospatialRelation\nfrom hs_core.views import utils as view_utils\nfrom hs_core.views.utils import ACTION_TO_AUTHORIZE\n\nlogger = logging.getLogger(__name__)\n\n\nclass Identifiers(serializers.DictField):\n child = serializers.CharField()\n\n\nclass PartySerializer(serializers.Serializer):\n name = serializers.CharField()\n hydroshare_user_id = serializers.IntegerField(required=False)\n organization = serializers.CharField(required=False)\n email = serializers.EmailField(required=False)\n address = serializers.CharField(required=False)\n phone = serializers.CharField(required=False)\n homepage = serializers.URLField(required=False)\n identifiers = Identifiers(required=False)\n\n class Meta:\n model = Creator\n fields = {'name', 'hydroshare_user_id', 'organization', 'email',\n 'address', 'phone', 'homepage', 'identifiers'}\n\n\nclass CreatorSerializer(PartySerializer):\n order = serializers.IntegerField(required=False)\n\n class Meta:\n model = Contributor\n\n\nclass DateSerializer(serializers.Serializer):\n # term = 'Date'\n type = serializers.CharField(required=False)\n start_date = serializers.DateTimeField(required=False)\n end_date = serializers.DateTimeField(required=False)\n\n class Meta:\n model = Date\n\n\nclass CoverageSerializer(serializers.Serializer):\n type = serializers.CharField(required=False)\n value = serializers.SerializerMethodField(required=False)\n\n class Meta:\n model = Coverage\n\n def get_value(self, obj):\n return obj.value\n\n\nclass FormatSerializer(serializers.Serializer):\n value = serializers.CharField(required=False)\n\n class Meta:\n model = Format\n\n\nclass FundingAgencySerializer(serializers.Serializer):\n agency_name = serializers.CharField()\n award_title = serializers.CharField(required=False)\n award_number = serializers.CharField(required=False)\n agency_url = serializers.URLField(required=False)\n\n class Meta:\n model = FundingAgency\n\n\nclass IdentifierSerializer(serializers.Serializer):\n name = serializers.CharField(required=False)\n url = serializers.URLField(required=False)\n\n class Meta:\n model = Identifier\n\n\nclass SubjectSerializer(serializers.Serializer):\n value = serializers.CharField(required=False)\n\n class Meta:\n model = Subject\n\n\nclass RelationSerializer(serializers.Serializer):\n type = serializers.CharField(required=False)\n value = serializers.CharField(required=False)\n\n class Meta:\n model = Relation\n\n\nclass GeospatialRelationSerializer(RelationSerializer):\n type = serializers.CharField(required=False)\n value = serializers.CharField(required=False)\n text = serializers.CharField(required=False)\n\n class Meta:\n model = GeospatialRelation\n\n\nclass CoreMetaDataSerializer(serializers.Serializer):\n title = serializers.CharField(required=False)\n creators = CreatorSerializer(required=False, many=True)\n contributors = 
PartySerializer(required=False, many=True)\n coverages = CoverageSerializer(required=False, many=True)\n dates = DateSerializer(required=False, many=True)\n description = serializers.CharField(required=False)\n formats = FormatSerializer(required=False, many=True)\n funding_agencies = FundingAgencySerializer(required=False, many=True)\n identifiers = IdentifierSerializer(required=False, many=True)\n language = serializers.CharField(required=False)\n rights = serializers.CharField(required=False)\n type = serializers.CharField(required=False)\n publisher = serializers.CharField(required=False)\n subjects = SubjectSerializer(required=False, many=True)\n relations = RelationSerializer(required=False, many=True)\n geospatialrelations = GeospatialRelationSerializer(required=False, many=True)\n\n class Meta:\n model = CoreMetaData\n\n\nclass MetadataElementsRetrieveUpdate(generics.RetrieveUpdateDestroyAPIView):\n \"\"\"\n Retrieve resource science (Dublin Core) metadata\n\n REST URL: /hsapi/resource/{pk}/scimeta/elements/\n HTTP method: GET\n\n :type pk: str\n :param pk: id of the resource\n :return: resource science metadata as JSON document\n :rtype: str\n :raises:\n NotFound: return json format: {'detail': 'No resource was found for resource id:pk'}\n PermissionDenied: return json format: {'detail': 'You do not have permission to perform\n this action.'}\n\n REST URL: /hsapi/resource/{pk}/scimeta/elements/\n HTTP method: PUT\n\n :type pk: str\n :param pk: id of the resource\n :type request: JSON formatted string\n :param request: resource metadata\n :return: updated resource science metadata as JSON document\n :rtype: str\n :raises:\n NotFound: return json format: {'detail': 'No resource was found for resource id':pk}\n PermissionDenied: return json format: {'detail': 'You do not have permission to perform\n this action.'}\n ValidationError: return json format: {parameter-1': ['error message-1'],\n 'parameter-2': ['error message-2'], .. }\n \"\"\"\n ACCEPT_FORMATS = ('application/json',)\n\n allowed_methods = ('GET', 'PUT')\n\n # Overwritten by resource types with extended metadata\n serializer_class = CoreMetaDataSerializer\n\n def get(self, request, pk):\n view_utils.authorize(request, pk, needed_permission=ACTION_TO_AUTHORIZE.VIEW_METADATA)\n resource = hydroshare.get_resource_by_shortkey(shortkey=pk)\n serializer = resource.metadata.serializer\n self.serializer_class = resource.metadata.serializer\n return Response(data=serializer.data, status=status.HTTP_200_OK)\n\n def put(self, request, pk):\n # Update science metadata\n resource, _, _ = view_utils.authorize(\n request, pk,\n needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE)\n\n metadata = []\n put_data = request.data.copy()\n\n # convert the QueryDict to dict\n if isinstance(put_data, QueryDict):\n put_data = put_data.dict()\n try:\n resource.metadata.parse_for_bulk_update(put_data, metadata)\n hydroshare.update_science_metadata(pk=pk, metadata=metadata, user=request.user)\n except Exception as ex:\n error_msg = {\n 'resource': \"Resource metadata update failed: %s, %s\"\n % (ex.__class__, str(ex))\n }\n raise ValidationError(detail=error_msg)\n\n resource = hydroshare.get_resource_by_shortkey(shortkey=pk)\n serializer = resource.metadata.serializer\n self.serializer_class = serializer\n return Response(data=serializer.data, status=status.HTTP_202_ACCEPTED)\n", "path": "hs_core/views/resource_metadata_rest_api.py"}]} | 3,628 | 202 |
gh_patches_debug_16787 | rasdani/github-patches | git_diff | tensorflow__addons-219 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
WeightNormalization fails after update
Yesterday I cut a 0.3 release on pypi test, but as a sanity check on the weightnorm changes #208 I ran a quick regression check:
https://colab.research.google.com/drive/1RQlVR9X7vj8q3W3sJC6YA3Cq45XTXrau
The new layer is failing with a loss of NaN. Looking at the test coverage for this layer, I'm struggling to see how best to catch this without running a full model. The losses for our toy test cases do not return NaN, so perhaps we need a true regression test like MNIST?
EDIT -- Fix colab link
</issue>
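A tiny, self-contained regression check along the lines the issue suggests could look like the sketch below: fit a toy model that uses the wrapper for a couple of epochs on random data and assert that the recorded losses stay finite. The data, model shape, and hyperparameters are arbitrary stand-ins, not the colab notebook linked above.

```python
# Hedged sketch of a finite-loss regression check; data and model are toy
# stand-ins, not the colab notebook referenced in the issue.
import numpy as np
import tensorflow as tf
import tensorflow_addons as tfa

x = np.random.random((128, 32, 32, 3)).astype("float32")
y = np.random.randint(0, 10, size=(128,))

model = tf.keras.Sequential([
    tfa.layers.WeightNormalization(
        tf.keras.layers.Conv2D(8, 3, activation="relu"),
        data_init=True,
        input_shape=(32, 32, 3)),
    tf.keras.layers.Flatten(),
    tfa.layers.WeightNormalization(tf.keras.layers.Dense(10), data_init=True),
])
model.compile(
    optimizer="adam",
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True))

history = model.fit(x, y, epochs=2, batch_size=32, verbose=0)
assert np.all(np.isfinite(history.history["loss"])), "loss went NaN/inf"
```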
<code>
[start of tensorflow_addons/layers/wrappers.py]
1 # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # =============================================================================
15 from __future__ import absolute_import
16 from __future__ import division
17 from __future__ import print_function
18
19 import tensorflow as tf
20 from tensorflow_addons.utils import keras_utils
21
22
23 @keras_utils.register_keras_custom_object
24 class WeightNormalization(tf.keras.layers.Wrapper):
25 """This wrapper reparameterizes a layer by decoupling the weight's
26 magnitude and direction.
27
28 This speeds up convergence by improving the
29 conditioning of the optimization problem.
30 Weight Normalization: A Simple Reparameterization to Accelerate
31 Training of Deep Neural Networks: https://arxiv.org/abs/1602.07868
32 Tim Salimans, Diederik P. Kingma (2016)
33 WeightNormalization wrapper works for keras and tf layers.
34 ```python
35 net = WeightNormalization(
36 tf.keras.layers.Conv2D(2, 2, activation='relu'),
37 input_shape=(32, 32, 3),
38 data_init=True)(x)
39 net = WeightNormalization(
40 tf.keras.layers.Conv2D(16, 5, activation='relu'),
41 data_init=True)(net)
42 net = WeightNormalization(
43 tf.keras.layers.Dense(120, activation='relu'),
44 data_init=True)(net)
45 net = WeightNormalization(
46 tf.keras.layers.Dense(n_classes),
47 data_init=True)(net)
48 ```
49 Arguments:
50 layer: a layer instance.
51 data_init: If `True` use data dependent variable initialization
52 Raises:
53 ValueError: If not initialized with a `Layer` instance.
54 ValueError: If `Layer` does not contain a `kernel` of weights
55 NotImplementedError: If `data_init` is True and running graph execution
56 """
57
58 def __init__(self, layer, data_init=True, **kwargs):
59 super(WeightNormalization, self).__init__(layer, **kwargs)
60 self.data_init = data_init
61 self._initialized = False
62 self._track_trackable(layer, name='layer')
63
64 def build(self, input_shape):
65 """Build `Layer`"""
66 input_shape = tf.TensorShape(input_shape).as_list()
67 self.input_spec = tf.keras.layers.InputSpec(shape=input_shape)
68
69 if not self.layer.built:
70 self.layer.build(input_shape)
71
72 if not hasattr(self.layer, 'kernel'):
73 raise ValueError('`WeightNormalization` must wrap a layer that'
74 ' contains a `kernel` for weights')
75
76 # The kernel's filter or unit dimension is -1
77 self.layer_depth = int(self.layer.kernel.shape[-1])
78 self.kernel_norm_axes = list(
79 range(self.layer.kernel.shape.rank - 1))
80
81 self.v = self.layer.kernel
82 self.g = self.add_variable(
83 name="g",
84 shape=(self.layer_depth,),
85 initializer=tf.keras.initializers.get('ones'),
86 dtype=self.layer.kernel.dtype,
87 trainable=True)
88
89 super(WeightNormalization, self).build()
90
91 def call(self, inputs):
92 """Call `Layer`"""
93 if not self._initialized:
94 self._initialize_weights(inputs)
95
96 self._compute_weights() # Recompute weights for each forward pass
97 output = self.layer(inputs)
98 return output
99
100 def compute_output_shape(self, input_shape):
101 return tf.TensorShape(
102 self.layer.compute_output_shape(input_shape).as_list())
103
104 def _compute_weights(self):
105 """Generate normalized weights.
106
107 This method will update the value of self.layer.kernel with the
108 normalized value, so that the layer is ready for call().
109 """
110 with tf.name_scope('compute_weights'):
111 self.layer.kernel = tf.nn.l2_normalize(
112 self.v, axis=self.kernel_norm_axes) * self.g
113
114 def _initialize_weights(self, inputs):
115 """Initialize weight g.
116
117 The initial value of g could either from the initial value in v,
118 or by the input value if self.data_init is True.
119 """
120 if self.data_init:
121 self._data_dep_init(inputs)
122 else:
123 self._init_norm()
124 self._initialized = True
125
126 def _init_norm(self):
127 """Set the weight g with the norm of the weight vector."""
128 with tf.name_scope('init_norm'):
129 flat = tf.reshape(self.v, [-1, self.layer_depth])
130 self.g.assign(
131 tf.reshape(tf.linalg.norm(flat, axis=0), (self.layer_depth,)))
132
133 def _data_dep_init(self, inputs):
134 """Data dependent initialization."""
135
136 with tf.name_scope('data_dep_init'):
137 # Generate data dependent init values
138 existing_activation = self.layer.activation
139 self.layer.activation = None
140 x_init = self.layer(inputs)
141 data_norm_axes = list(range(x_init.shape.rank - 1))
142 m_init, v_init = tf.nn.moments(x_init, data_norm_axes)
143 scale_init = 1. / tf.math.sqrt(v_init + 1e-10)
144
145 # Assign data dependent init values
146 self.g.assign(self.g * scale_init)
147 if hasattr(self.layer, 'bias'):
148 self.layer.bias.assign(-m_init * scale_init)
149 self.layer.activation = existing_activation
150
151 def get_config(self):
152 config = {'data_init': self.data_init}
153 base_config = super(WeightNormalization, self).get_config()
154 return dict(list(base_config.items()) + list(config.items()))
155
[end of tensorflow_addons/layers/wrappers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/tensorflow_addons/layers/wrappers.py b/tensorflow_addons/layers/wrappers.py
--- a/tensorflow_addons/layers/wrappers.py
+++ b/tensorflow_addons/layers/wrappers.py
@@ -88,6 +88,7 @@
super(WeightNormalization, self).build()
+ @tf.function
def call(self, inputs):
"""Call `Layer`"""
if not self._initialized:
@@ -143,9 +144,9 @@
scale_init = 1. / tf.math.sqrt(v_init + 1e-10)
# Assign data dependent init values
- self.g.assign(self.g * scale_init)
+ self.g = self.g * scale_init
if hasattr(self.layer, 'bias'):
- self.layer.bias.assign(-m_init * scale_init)
+ self.layer.bias = -m_init * scale_init
self.layer.activation = existing_activation
def get_config(self):
| {"golden_diff": "diff --git a/tensorflow_addons/layers/wrappers.py b/tensorflow_addons/layers/wrappers.py\n--- a/tensorflow_addons/layers/wrappers.py\n+++ b/tensorflow_addons/layers/wrappers.py\n@@ -88,6 +88,7 @@\n \n super(WeightNormalization, self).build()\n \n+ @tf.function\n def call(self, inputs):\n \"\"\"Call `Layer`\"\"\"\n if not self._initialized:\n@@ -143,9 +144,9 @@\n scale_init = 1. / tf.math.sqrt(v_init + 1e-10)\n \n # Assign data dependent init values\n- self.g.assign(self.g * scale_init)\n+ self.g = self.g * scale_init\n if hasattr(self.layer, 'bias'):\n- self.layer.bias.assign(-m_init * scale_init)\n+ self.layer.bias = -m_init * scale_init\n self.layer.activation = existing_activation\n \n def get_config(self):\n", "issue": "WeightNormalization fails after update\nYesterday I cut a 0.3 release on pypi test, but as a sanity check on the weightnorm changes #208 I ran a quick regression check:\r\n\r\nhttps://colab.research.google.com/drive/1RQlVR9X7vj8q3W3sJC6YA3Cq45XTXrau\r\n\r\nThe new layer is failing with loss of nan. Looking at the test coverage for this layer I'm struggling how to best catch this without running a full model. The losses for our toy test cases do not return nan so perhaps we need a true regression test like mnist?\r\n\r\nEDIT -- Fix colab link\n", "before_files": [{"content": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom tensorflow_addons.utils import keras_utils\n\n\n@keras_utils.register_keras_custom_object\nclass WeightNormalization(tf.keras.layers.Wrapper):\n \"\"\"This wrapper reparameterizes a layer by decoupling the weight's\n magnitude and direction.\n\n This speeds up convergence by improving the\n conditioning of the optimization problem.\n Weight Normalization: A Simple Reparameterization to Accelerate\n Training of Deep Neural Networks: https://arxiv.org/abs/1602.07868\n Tim Salimans, Diederik P. 
Kingma (2016)\n WeightNormalization wrapper works for keras and tf layers.\n ```python\n net = WeightNormalization(\n tf.keras.layers.Conv2D(2, 2, activation='relu'),\n input_shape=(32, 32, 3),\n data_init=True)(x)\n net = WeightNormalization(\n tf.keras.layers.Conv2D(16, 5, activation='relu'),\n data_init=True)(net)\n net = WeightNormalization(\n tf.keras.layers.Dense(120, activation='relu'),\n data_init=True)(net)\n net = WeightNormalization(\n tf.keras.layers.Dense(n_classes),\n data_init=True)(net)\n ```\n Arguments:\n layer: a layer instance.\n data_init: If `True` use data dependent variable initialization\n Raises:\n ValueError: If not initialized with a `Layer` instance.\n ValueError: If `Layer` does not contain a `kernel` of weights\n NotImplementedError: If `data_init` is True and running graph execution\n \"\"\"\n\n def __init__(self, layer, data_init=True, **kwargs):\n super(WeightNormalization, self).__init__(layer, **kwargs)\n self.data_init = data_init\n self._initialized = False\n self._track_trackable(layer, name='layer')\n\n def build(self, input_shape):\n \"\"\"Build `Layer`\"\"\"\n input_shape = tf.TensorShape(input_shape).as_list()\n self.input_spec = tf.keras.layers.InputSpec(shape=input_shape)\n\n if not self.layer.built:\n self.layer.build(input_shape)\n\n if not hasattr(self.layer, 'kernel'):\n raise ValueError('`WeightNormalization` must wrap a layer that'\n ' contains a `kernel` for weights')\n\n # The kernel's filter or unit dimension is -1\n self.layer_depth = int(self.layer.kernel.shape[-1])\n self.kernel_norm_axes = list(\n range(self.layer.kernel.shape.rank - 1))\n\n self.v = self.layer.kernel\n self.g = self.add_variable(\n name=\"g\",\n shape=(self.layer_depth,),\n initializer=tf.keras.initializers.get('ones'),\n dtype=self.layer.kernel.dtype,\n trainable=True)\n\n super(WeightNormalization, self).build()\n\n def call(self, inputs):\n \"\"\"Call `Layer`\"\"\"\n if not self._initialized:\n self._initialize_weights(inputs)\n\n self._compute_weights() # Recompute weights for each forward pass\n output = self.layer(inputs)\n return output\n\n def compute_output_shape(self, input_shape):\n return tf.TensorShape(\n self.layer.compute_output_shape(input_shape).as_list())\n\n def _compute_weights(self):\n \"\"\"Generate normalized weights.\n\n This method will update the value of self.layer.kernel with the\n normalized value, so that the layer is ready for call().\n \"\"\"\n with tf.name_scope('compute_weights'):\n self.layer.kernel = tf.nn.l2_normalize(\n self.v, axis=self.kernel_norm_axes) * self.g\n\n def _initialize_weights(self, inputs):\n \"\"\"Initialize weight g.\n\n The initial value of g could either from the initial value in v,\n or by the input value if self.data_init is True.\n \"\"\"\n if self.data_init:\n self._data_dep_init(inputs)\n else:\n self._init_norm()\n self._initialized = True\n\n def _init_norm(self):\n \"\"\"Set the weight g with the norm of the weight vector.\"\"\"\n with tf.name_scope('init_norm'):\n flat = tf.reshape(self.v, [-1, self.layer_depth])\n self.g.assign(\n tf.reshape(tf.linalg.norm(flat, axis=0), (self.layer_depth,)))\n\n def _data_dep_init(self, inputs):\n \"\"\"Data dependent initialization.\"\"\"\n\n with tf.name_scope('data_dep_init'):\n # Generate data dependent init values\n existing_activation = self.layer.activation\n self.layer.activation = None\n x_init = self.layer(inputs)\n data_norm_axes = list(range(x_init.shape.rank - 1))\n m_init, v_init = tf.nn.moments(x_init, data_norm_axes)\n scale_init = 1. 
/ tf.math.sqrt(v_init + 1e-10)\n\n # Assign data dependent init values\n self.g.assign(self.g * scale_init)\n if hasattr(self.layer, 'bias'):\n self.layer.bias.assign(-m_init * scale_init)\n self.layer.activation = existing_activation\n\n def get_config(self):\n config = {'data_init': self.data_init}\n base_config = super(WeightNormalization, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n", "path": "tensorflow_addons/layers/wrappers.py"}]} | 2,330 | 222 |
gh_patches_debug_12300 | rasdani/github-patches | git_diff | mlcommons__GaNDLF-729 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Model saving is calling on `git` which causes an issue when the `subprocess` library is from the system
**Describe the bug**
When the `subprocess` library is called from the system installation of python, `git` invocation does not work, since there is no git repo for the system python installation.
**To Reproduce**
Steps to reproduce the behavior:
1. Start GaNDLF training
2. See the following error:
```bash
$ > cat ${error_log}
[SNIP!]
File "/geode2/home/u070/patis/BigRed200/projects/gandlf_mine/GANDLF/compute/training_loop.py", line 290, in training_loop
save_model(
File "/geode2/home/u070/patis/BigRed200/projects/gandlf_mine/GANDLF/utils/modelio.py", line 159, in save_model
subprocess.check_output(["git", "rev-parse", "HEAD"])
File "/N/soft/sles15/deeplearning/Python-3.10.10/Lib/subprocess.py", line 421, in check_output
return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,
File "/N/soft/sles15/deeplearning/Python-3.10.10/Lib/subprocess.py", line 503, in run
with Popen(*popenargs, **kwargs) as process:
File "/N/soft/sles15/deeplearning/Python-3.10.10/Lib/subprocess.py", line 971, in __init__
self._execute_child(args, executable, preexec_fn, close_fds,
File "/N/soft/sles15/deeplearning/Python-3.10.10/Lib/subprocess.py", line 1847, in _execute_child
raise child_exception_type(errno_num, err_msg, err_filename)
```
**Expected behavior**
The model saving should work.
**Screenshots**
N.A.
**GaNDLF Version**
<!-- Put the output of the following command:
python -c 'import GANDLF as g;print(g.__version__)'
-->
0.0.17-dev
**Desktop (please complete the following information):**
N.A.
**Additional context**
This is seen for the IU BigRed cluster.
</issue>
<code>
[start of GANDLF/utils/modelio.py]
1 import hashlib
2 import os
3 import subprocess
4 from typing import Any, Dict
5
6 import torch
7
8 from ..version import __version__
9 from .generic import get_unique_timestamp
10
11 # these are the base keys for the model dictionary to save
12 model_dict_full = {
13 "epoch": 0,
14 "model_state_dict": None,
15 "optimizer_state_dict": None,
16 "loss": None,
17 "timestamp": None,
18 "timestamp_hash": None,
19 "git_hash": None,
20 "version": None,
21 }
22
23 model_dict_required = {
24 "model_state_dict": None,
25 "optimizer_state_dict": None,
26 }
27
28 best_model_path_end = "_best.pth.tar"
29 latest_model_path_end = "_latest.pth.tar"
30 initial_model_path_end = "_initial.pth.tar"
31
32
33 def optimize_and_save_model(model, params, path, onnx_export=True):
34 """
35 Perform post-training optimization and save it to a file.
36
37 Args:
38 model (torch.nn.Module): Trained torch model.
39 params (dict): The parameter dictionary.
40 path (str): The path to save the model dictionary to.
41 onnx_export (bool): Whether to export to ONNX and OpenVINO.
42 """
43 # Check if ONNX export is enabled in the parameter dictionary
44 onnx_export = params["model"].get("onnx_export", onnx_export)
45
46 # Check for incompatible topologies and disable ONNX export
47 # Customized imagenet_vgg no longer supported for ONNX export
48 if onnx_export:
49 architecture = params["model"]["architecture"]
50 if architecture in ["sdnet", "brain_age"] or "imagenet_vgg" in architecture:
51 onnx_export = False
52
53 if not onnx_export:
54 # Print a warning if ONNX export is disabled and not already warned
55 if "onnx_print" not in params:
56 print("WARNING: Current model is not supported by ONNX/OpenVINO!")
57 params["onnx_print"] = True
58 return
59 else:
60 try:
61 print("Optimizing the best model.")
62 num_channel = params["model"]["num_channels"]
63 model_dimension = params["model"]["dimension"]
64 input_shape = params["patch_size"]
65 onnx_path = path
66 if not onnx_path.endswith(".onnx"):
67 onnx_path = onnx_path.replace("pth.tar", "onnx")
68
69 if model_dimension == 2:
70 dummy_input = torch.randn(
71 (1, num_channel, input_shape[0], input_shape[1])
72 )
73 else:
74 dummy_input = torch.randn(
75 (1, num_channel, input_shape[0], input_shape[1], input_shape[2])
76 )
77
78 # Export the model to ONNX format
79 with torch.no_grad():
80 torch.onnx.export(
81 model.to("cpu"),
82 dummy_input.to("cpu"),
83 onnx_path,
84 opset_version=11,
85 export_params=True,
86 verbose=True,
87 input_names=["input"],
88 output_names=["output"],
89 )
90 except RuntimeWarning:
91 print("WARNING: Cannot export to ONNX model.")
92 return
93
94 # Check if OpenVINO is present and try to convert the ONNX model
95 openvino_present = False
96 try:
97 import openvino as ov
98 from openvino.tools.mo import convert_model
99 from openvino.runtime import get_version
100
101 openvino_present = False
102 # check for the correct openvino version to prevent inadvertent api breaks
103 if "2023.0.1" in get_version():
104 openvino_present = True
105 except ImportError:
106 print("WARNING: OpenVINO is not present.")
107
108 if openvino_present:
109 xml_path = onnx_path.replace("onnx", "xml")
110 bin_path = onnx_path.replace("onnx", "bin")
111 try:
112 if model_dimension == 2:
113 ov_model = convert_model(
114 onnx_path,
115 input_shape=(1, num_channel, input_shape[0], input_shape[1]),
116 )
117 else:
118 ov_model = convert_model(
119 onnx_path,
120 input_shape=(
121 1,
122 num_channel,
123 input_shape[0],
124 input_shape[1],
125 input_shape[2],
126 ),
127 )
128 ov.runtime.serialize(ov_model, xml_path=xml_path, bin_path=bin_path)
129 except Exception as e:
130 print("WARNING: OpenVINO Model Optimizer IR conversion failed: " + e)
131
132
133 def save_model(
134 model_dict: Dict[str, Any],
135 model: torch.nn.Module,
136 params: Dict[str, Any],
137 path: str,
138 onnx_export: bool = True,
139 ):
140 """
141 Save the model dictionary to a file.
142
143 Args:
144 model_dict (dict): Model dictionary to save.
145 model (torch.nn.Module): Trained torch model.
146 params (dict): The parameter dictionary.
147 path (str): The path to save the model dictionary to.
148 onnx_export (bool): Whether to export to ONNX and OpenVINO.
149 """
150 model_dict["timestamp"] = get_unique_timestamp()
151 model_dict["timestamp_hash"] = hashlib.sha256(
152 str(model_dict["timestamp"]).encode("utf-8")
153 ).hexdigest()
154 model_dict["version"] = __version__
155 model_dict["parameters"] = params
156
157 try:
158 model_dict["git_hash"] = (
159 subprocess.check_output(["git", "rev-parse", "HEAD"])
160 .decode("ascii")
161 .strip()
162 )
163 except subprocess.CalledProcessError:
164 model_dict["git_hash"] = None
165
166 torch.save(model_dict, path)
167
168 # post-training optimization
169 optimize_and_save_model(model, params, path, onnx_export=onnx_export)
170
171
172 def load_model(
173 path: str, device: torch.device, full_sanity_check: bool = True
174 ) -> Dict[str, Any]:
175 """
176 Load a model dictionary from a file.
177
178 Args:
179 path (str): The path to save the model dictionary to.
180 device (torch.device): The device to run the model on.
181 full_sanity_check (bool): Whether to run full sanity checking on the model.
182
183 Returns:
184 dict: Model dictionary containing model parameters and metadata.
185 """
186 model_dict = torch.load(path, map_location=device)
187
188 # check if the model dictionary is complete
189 if full_sanity_check:
190 incomplete_keys = [
191 key for key in model_dict_full.keys() if key not in model_dict.keys()
192 ]
193 if len(incomplete_keys) > 0:
194 raise RuntimeWarning(
195 "Model dictionary is incomplete; the following keys are missing:",
196 incomplete_keys,
197 )
198
199 # check if required keys are absent, and if so raise an error
200 incomplete_required_keys = [
201 key for key in model_dict_required.keys() if key not in model_dict.keys()
202 ]
203 if len(incomplete_required_keys) > 0:
204 raise KeyError(
205 "Model dictionary is incomplete; the following keys are missing:",
206 incomplete_required_keys,
207 )
208
209 return model_dict
210
211
212 def load_ov_model(path: str, device: str = "CPU"):
213 """
214 Load an OpenVINO IR model from an .xml file.
215
216 Args:
217 path (str): The path to the OpenVINO .xml file.
218 device (str): The device to run inference, can be "CPU", "GPU" or "MULTI:CPU,GPU". Default to be "CPU".
219
220 Returns:
221 exec_net (OpenVINO executable net): executable OpenVINO model.
222 input_blob (str): Input name.
223 output_blob (str): Output name.
224 """
225
226 try:
227 from openvino import runtime as ov
228 except ImportError:
229 raise ImportError("OpenVINO inference engine is not configured correctly.")
230
231 core = ov.Core()
232 if device.lower() == "cuda":
233 device = "GPU"
234
235 if device == "GPU":
236 core.set_property({"CACHE_DIR": os.path.dirname(os.path.abspath(path))})
237
238 model = core.read_model(model=path, weights=path.replace("xml", "bin"))
239 compiled_model = core.compile_model(model=model, device_name=device.upper())
240 input_layer = compiled_model.inputs
241 output_layer = compiled_model.outputs
242
243 return compiled_model, input_layer, output_layer
244
[end of GANDLF/utils/modelio.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/GANDLF/utils/modelio.py b/GANDLF/utils/modelio.py
--- a/GANDLF/utils/modelio.py
+++ b/GANDLF/utils/modelio.py
@@ -155,13 +155,14 @@
model_dict["parameters"] = params
try:
+ # this will try to encode the git hash of the current GaNDLF codebase, and reverts to "None" if not found
model_dict["git_hash"] = (
- subprocess.check_output(["git", "rev-parse", "HEAD"])
+ subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=os.getcwd())
.decode("ascii")
.strip()
)
- except subprocess.CalledProcessError:
- model_dict["git_hash"] = None
+ except (subprocess.CalledProcessError, FileNotFoundError):
+ model_dict["git_hash"] = "None"
torch.save(model_dict, path)
| {"golden_diff": "diff --git a/GANDLF/utils/modelio.py b/GANDLF/utils/modelio.py\n--- a/GANDLF/utils/modelio.py\n+++ b/GANDLF/utils/modelio.py\n@@ -155,13 +155,14 @@\n model_dict[\"parameters\"] = params\n \n try:\n+ # this will try to encode the git hash of the current GaNDLF codebase, and reverts to \"None\" if not found\n model_dict[\"git_hash\"] = (\n- subprocess.check_output([\"git\", \"rev-parse\", \"HEAD\"])\n+ subprocess.check_output([\"git\", \"rev-parse\", \"HEAD\"], cwd=os.getcwd())\n .decode(\"ascii\")\n .strip()\n )\n- except subprocess.CalledProcessError:\n- model_dict[\"git_hash\"] = None\n+ except (subprocess.CalledProcessError, FileNotFoundError):\n+ model_dict[\"git_hash\"] = \"None\"\n \n torch.save(model_dict, path)\n", "issue": "Model saving is calling on `git` which causes an issue when the `subprocess` library is from the system\n**Describe the bug**\r\nWhen the `subprocess` library is called from the system installation of python, `git` invocation does not work, since there is no git repo for the system python installation. \r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Start GaNDLF training\r\n2. See the following error:\r\n```bash\r\n$ > cat ${error_log}\r\n[SNIP!]\r\n File \"/geode2/home/u070/patis/BigRed200/projects/gandlf_mine/GANDLF/compute/training_loop.py\", line 290, in training_loop\r\n save_model(\r\n File \"/geode2/home/u070/patis/BigRed200/projects/gandlf_mine/GANDLF/utils/modelio.py\", line 159, in save_model\r\n subprocess.check_output([\"git\", \"rev-parse\", \"HEAD\"])\r\n File \"/N/soft/sles15/deeplearning/Python-3.10.10/Lib/subprocess.py\", line 421, in check_output\r\n return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,\r\n File \"/N/soft/sles15/deeplearning/Python-3.10.10/Lib/subprocess.py\", line 503, in run\r\n with Popen(*popenargs, **kwargs) as process:\r\n File \"/N/soft/sles15/deeplearning/Python-3.10.10/Lib/subprocess.py\", line 971, in __init__\r\n self._execute_child(args, executable, preexec_fn, close_fds,\r\n File \"/N/soft/sles15/deeplearning/Python-3.10.10/Lib/subprocess.py\", line 1847, in _execute_child\r\n raise child_exception_type(errno_num, err_msg, err_filename)\r\n```\r\n\r\n**Expected behavior**\r\nThe model saving should work.\r\n\r\n**Screenshots**\r\nN.A.\r\n\r\n**GaNDLF Version**\r\n<!-- Put the output of the following command:\r\npython -c 'import GANDLF as g;print(g.__version__)'\r\n-->\r\n0.0.17-dev\r\n\r\n**Desktop (please complete the following information):**\r\nN.A.\r\n\r\n**Additional context**\r\nThis is seen for the IU BigRed cluster. 
\n", "before_files": [{"content": "import hashlib\nimport os\nimport subprocess\nfrom typing import Any, Dict\n\nimport torch\n\nfrom ..version import __version__\nfrom .generic import get_unique_timestamp\n\n# these are the base keys for the model dictionary to save\nmodel_dict_full = {\n \"epoch\": 0,\n \"model_state_dict\": None,\n \"optimizer_state_dict\": None,\n \"loss\": None,\n \"timestamp\": None,\n \"timestamp_hash\": None,\n \"git_hash\": None,\n \"version\": None,\n}\n\nmodel_dict_required = {\n \"model_state_dict\": None,\n \"optimizer_state_dict\": None,\n}\n\nbest_model_path_end = \"_best.pth.tar\"\nlatest_model_path_end = \"_latest.pth.tar\"\ninitial_model_path_end = \"_initial.pth.tar\"\n\n\ndef optimize_and_save_model(model, params, path, onnx_export=True):\n \"\"\"\n Perform post-training optimization and save it to a file.\n\n Args:\n model (torch.nn.Module): Trained torch model.\n params (dict): The parameter dictionary.\n path (str): The path to save the model dictionary to.\n onnx_export (bool): Whether to export to ONNX and OpenVINO.\n \"\"\"\n # Check if ONNX export is enabled in the parameter dictionary\n onnx_export = params[\"model\"].get(\"onnx_export\", onnx_export)\n\n # Check for incompatible topologies and disable ONNX export\n # Customized imagenet_vgg no longer supported for ONNX export\n if onnx_export:\n architecture = params[\"model\"][\"architecture\"]\n if architecture in [\"sdnet\", \"brain_age\"] or \"imagenet_vgg\" in architecture:\n onnx_export = False\n\n if not onnx_export:\n # Print a warning if ONNX export is disabled and not already warned\n if \"onnx_print\" not in params:\n print(\"WARNING: Current model is not supported by ONNX/OpenVINO!\")\n params[\"onnx_print\"] = True\n return\n else:\n try:\n print(\"Optimizing the best model.\")\n num_channel = params[\"model\"][\"num_channels\"]\n model_dimension = params[\"model\"][\"dimension\"]\n input_shape = params[\"patch_size\"]\n onnx_path = path\n if not onnx_path.endswith(\".onnx\"):\n onnx_path = onnx_path.replace(\"pth.tar\", \"onnx\")\n\n if model_dimension == 2:\n dummy_input = torch.randn(\n (1, num_channel, input_shape[0], input_shape[1])\n )\n else:\n dummy_input = torch.randn(\n (1, num_channel, input_shape[0], input_shape[1], input_shape[2])\n )\n\n # Export the model to ONNX format\n with torch.no_grad():\n torch.onnx.export(\n model.to(\"cpu\"),\n dummy_input.to(\"cpu\"),\n onnx_path,\n opset_version=11,\n export_params=True,\n verbose=True,\n input_names=[\"input\"],\n output_names=[\"output\"],\n )\n except RuntimeWarning:\n print(\"WARNING: Cannot export to ONNX model.\")\n return\n\n # Check if OpenVINO is present and try to convert the ONNX model\n openvino_present = False\n try:\n import openvino as ov\n from openvino.tools.mo import convert_model\n from openvino.runtime import get_version\n\n openvino_present = False\n # check for the correct openvino version to prevent inadvertent api breaks\n if \"2023.0.1\" in get_version():\n openvino_present = True\n except ImportError:\n print(\"WARNING: OpenVINO is not present.\")\n\n if openvino_present:\n xml_path = onnx_path.replace(\"onnx\", \"xml\")\n bin_path = onnx_path.replace(\"onnx\", \"bin\")\n try:\n if model_dimension == 2:\n ov_model = convert_model(\n onnx_path,\n input_shape=(1, num_channel, input_shape[0], input_shape[1]),\n )\n else:\n ov_model = convert_model(\n onnx_path,\n input_shape=(\n 1,\n num_channel,\n input_shape[0],\n input_shape[1],\n input_shape[2],\n ),\n )\n ov.runtime.serialize(ov_model, 
xml_path=xml_path, bin_path=bin_path)\n except Exception as e:\n print(\"WARNING: OpenVINO Model Optimizer IR conversion failed: \" + e)\n\n\ndef save_model(\n model_dict: Dict[str, Any],\n model: torch.nn.Module,\n params: Dict[str, Any],\n path: str,\n onnx_export: bool = True,\n):\n \"\"\"\n Save the model dictionary to a file.\n\n Args:\n model_dict (dict): Model dictionary to save.\n model (torch.nn.Module): Trained torch model.\n params (dict): The parameter dictionary.\n path (str): The path to save the model dictionary to.\n onnx_export (bool): Whether to export to ONNX and OpenVINO.\n \"\"\"\n model_dict[\"timestamp\"] = get_unique_timestamp()\n model_dict[\"timestamp_hash\"] = hashlib.sha256(\n str(model_dict[\"timestamp\"]).encode(\"utf-8\")\n ).hexdigest()\n model_dict[\"version\"] = __version__\n model_dict[\"parameters\"] = params\n\n try:\n model_dict[\"git_hash\"] = (\n subprocess.check_output([\"git\", \"rev-parse\", \"HEAD\"])\n .decode(\"ascii\")\n .strip()\n )\n except subprocess.CalledProcessError:\n model_dict[\"git_hash\"] = None\n\n torch.save(model_dict, path)\n\n # post-training optimization\n optimize_and_save_model(model, params, path, onnx_export=onnx_export)\n\n\ndef load_model(\n path: str, device: torch.device, full_sanity_check: bool = True\n) -> Dict[str, Any]:\n \"\"\"\n Load a model dictionary from a file.\n\n Args:\n path (str): The path to save the model dictionary to.\n device (torch.device): The device to run the model on.\n full_sanity_check (bool): Whether to run full sanity checking on the model.\n\n Returns:\n dict: Model dictionary containing model parameters and metadata.\n \"\"\"\n model_dict = torch.load(path, map_location=device)\n\n # check if the model dictionary is complete\n if full_sanity_check:\n incomplete_keys = [\n key for key in model_dict_full.keys() if key not in model_dict.keys()\n ]\n if len(incomplete_keys) > 0:\n raise RuntimeWarning(\n \"Model dictionary is incomplete; the following keys are missing:\",\n incomplete_keys,\n )\n\n # check if required keys are absent, and if so raise an error\n incomplete_required_keys = [\n key for key in model_dict_required.keys() if key not in model_dict.keys()\n ]\n if len(incomplete_required_keys) > 0:\n raise KeyError(\n \"Model dictionary is incomplete; the following keys are missing:\",\n incomplete_required_keys,\n )\n\n return model_dict\n\n\ndef load_ov_model(path: str, device: str = \"CPU\"):\n \"\"\"\n Load an OpenVINO IR model from an .xml file.\n\n Args:\n path (str): The path to the OpenVINO .xml file.\n device (str): The device to run inference, can be \"CPU\", \"GPU\" or \"MULTI:CPU,GPU\". Default to be \"CPU\".\n\n Returns:\n exec_net (OpenVINO executable net): executable OpenVINO model.\n input_blob (str): Input name.\n output_blob (str): Output name.\n \"\"\"\n\n try:\n from openvino import runtime as ov\n except ImportError:\n raise ImportError(\"OpenVINO inference engine is not configured correctly.\")\n\n core = ov.Core()\n if device.lower() == \"cuda\":\n device = \"GPU\"\n\n if device == \"GPU\":\n core.set_property({\"CACHE_DIR\": os.path.dirname(os.path.abspath(path))})\n\n model = core.read_model(model=path, weights=path.replace(\"xml\", \"bin\"))\n compiled_model = core.compile_model(model=model, device_name=device.upper())\n input_layer = compiled_model.inputs\n output_layer = compiled_model.outputs\n\n return compiled_model, input_layer, output_layer\n", "path": "GANDLF/utils/modelio.py"}]} | 3,492 | 208 |
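The patch in the record above pins the `git` invocation to an explicit working directory and treats a missing `git` binary (`FileNotFoundError`, as on the system Python described in the issue) the same as a non-repository directory (`CalledProcessError`). A standalone sketch of that pattern — illustrative only; the helper name and the `stderr` redirection are additions for the example and are not part of GaNDLF:

```python
import os
import subprocess


def get_git_hash(repo_dir=None):
    """Best-effort commit hash; returns the string "None" instead of raising
    when git or the repository is unavailable."""
    try:
        return (
            subprocess.check_output(
                ["git", "rev-parse", "HEAD"],
                cwd=repo_dir or os.getcwd(),
                stderr=subprocess.DEVNULL,
            )
            .decode("ascii")
            .strip()
        )
    except (subprocess.CalledProcessError, FileNotFoundError):
        return "None"
```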
gh_patches_debug_22600 | rasdani/github-patches | git_diff | pyca__cryptography-1349 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
_truncate_digest_for_ecdsa in the OpenSSL document should be commented
Right now there are no comments explaining why it exists, or what it's attempting to do.
</issue>
<code>
[start of cryptography/hazmat/backends/openssl/ec.py]
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
10 # implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 from __future__ import absolute_import, division, print_function
15
16 import six
17
18 from cryptography import utils
19 from cryptography.exceptions import (
20 InvalidSignature, UnsupportedAlgorithm, _Reasons
21 )
22 from cryptography.hazmat.primitives import hashes, interfaces
23 from cryptography.hazmat.primitives.asymmetric import ec
24
25
26 def _truncate_digest_for_ecdsa(ec_key_cdata, digest, backend):
27 _lib = backend._lib
28 _ffi = backend._ffi
29
30 digest_len = len(digest)
31
32 group = _lib.EC_KEY_get0_group(ec_key_cdata)
33
34 bn_ctx = _lib.BN_CTX_new()
35 assert bn_ctx != _ffi.NULL
36 bn_ctx = _ffi.gc(bn_ctx, _lib.BN_CTX_free)
37
38 order = _lib.BN_CTX_get(bn_ctx)
39 assert order != _ffi.NULL
40
41 res = _lib.EC_GROUP_get_order(group, order, bn_ctx)
42 assert res == 1
43
44 order_bits = _lib.BN_num_bits(order)
45
46 if 8 * digest_len > order_bits:
47 digest_len = (order_bits + 7) // 8
48 digest = digest[:digest_len]
49
50 if 8 * digest_len > order_bits:
51 rshift = 8 - (order_bits & 0x7)
52 assert rshift > 0 and rshift < 8
53
54 mask = 0xFF >> rshift << rshift
55
56 # Set the bottom rshift bits to 0
57 digest = digest[:-1] + six.int2byte(six.indexbytes(digest, -1) & mask)
58
59 return digest
60
61
62 @utils.register_interface(interfaces.AsymmetricSignatureContext)
63 class _ECDSASignatureContext(object):
64 def __init__(self, backend, private_key, algorithm):
65 self._backend = backend
66 self._private_key = private_key
67 self._digest = hashes.Hash(algorithm, backend)
68
69 def update(self, data):
70 self._digest.update(data)
71
72 def finalize(self):
73 ec_key = self._private_key._ec_key
74
75 digest = self._digest.finalize()
76
77 digest = _truncate_digest_for_ecdsa(ec_key, digest, self._backend)
78
79 max_size = self._backend._lib.ECDSA_size(ec_key)
80 assert max_size > 0
81
82 sigbuf = self._backend._ffi.new("char[]", max_size)
83 siglen_ptr = self._backend._ffi.new("unsigned int[]", 1)
84 res = self._backend._lib.ECDSA_sign(
85 0,
86 digest,
87 len(digest),
88 sigbuf,
89 siglen_ptr,
90 ec_key
91 )
92 assert res == 1
93 return self._backend._ffi.buffer(sigbuf)[:siglen_ptr[0]]
94
95
96 @utils.register_interface(interfaces.AsymmetricVerificationContext)
97 class _ECDSAVerificationContext(object):
98 def __init__(self, backend, public_key, signature, algorithm):
99 self._backend = backend
100 self._public_key = public_key
101 self._signature = signature
102 self._digest = hashes.Hash(algorithm, backend)
103
104 def update(self, data):
105 self._digest.update(data)
106
107 def verify(self):
108 ec_key = self._public_key._ec_key
109
110 digest = self._digest.finalize()
111
112 digest = _truncate_digest_for_ecdsa(ec_key, digest, self._backend)
113
114 res = self._backend._lib.ECDSA_verify(
115 0,
116 digest,
117 len(digest),
118 self._signature,
119 len(self._signature),
120 ec_key
121 )
122 if res != 1:
123 self._backend._consume_errors()
124 raise InvalidSignature
125 return True
126
127
128 @utils.register_interface(interfaces.EllipticCurvePrivateKey)
129 class _EllipticCurvePrivateKey(object):
130 def __init__(self, backend, ec_key_cdata, curve):
131 self._backend = backend
132 self._ec_key = ec_key_cdata
133 self._curve = curve
134
135 @property
136 def curve(self):
137 return self._curve
138
139 def signer(self, signature_algorithm):
140 if isinstance(signature_algorithm, ec.ECDSA):
141 return _ECDSASignatureContext(
142 self._backend, self, signature_algorithm.algorithm
143 )
144 else:
145 raise UnsupportedAlgorithm(
146 "Unsupported elliptic curve signature algorithm.",
147 _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)
148
149 def public_key(self):
150 group = self._backend._lib.EC_KEY_get0_group(self._ec_key)
151 assert group != self._backend._ffi.NULL
152
153 curve_nid = self._backend._lib.EC_GROUP_get_curve_name(group)
154
155 public_ec_key = self._backend._lib.EC_KEY_new_by_curve_name(curve_nid)
156 assert public_ec_key != self._backend._ffi.NULL
157 public_ec_key = self._backend._ffi.gc(
158 public_ec_key, self._backend._lib.EC_KEY_free
159 )
160
161 point = self._backend._lib.EC_KEY_get0_public_key(self._ec_key)
162 assert point != self._backend._ffi.NULL
163
164 res = self._backend._lib.EC_KEY_set_public_key(public_ec_key, point)
165 assert res == 1
166
167 return _EllipticCurvePublicKey(
168 self._backend, public_ec_key, self._curve
169 )
170
171
172 @utils.register_interface(interfaces.EllipticCurvePublicKey)
173 class _EllipticCurvePublicKey(object):
174 def __init__(self, backend, ec_key_cdata, curve):
175 self._backend = backend
176 self._ec_key = ec_key_cdata
177 self._curve = curve
178
179 @property
180 def curve(self):
181 return self._curve
182
183 def verifier(self, signature, signature_algorithm):
184 if isinstance(signature_algorithm, ec.ECDSA):
185 return _ECDSAVerificationContext(
186 self._backend, self, signature, signature_algorithm.algorithm
187 )
188 else:
189 raise UnsupportedAlgorithm(
190 "Unsupported elliptic curve signature algorithm.",
191 _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)
192
[end of cryptography/hazmat/backends/openssl/ec.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cryptography/hazmat/backends/openssl/ec.py b/cryptography/hazmat/backends/openssl/ec.py
--- a/cryptography/hazmat/backends/openssl/ec.py
+++ b/cryptography/hazmat/backends/openssl/ec.py
@@ -24,6 +24,13 @@
def _truncate_digest_for_ecdsa(ec_key_cdata, digest, backend):
+ """
+ This function truncates digests that are longer than a given elliptic
+ curve key's length so they can be signed. Since elliptic curve keys are
+ much shorter than RSA keys many digests (e.g. SHA-512) may require
+ truncation.
+ """
+
_lib = backend._lib
_ffi = backend._ffi
@@ -31,17 +38,14 @@
group = _lib.EC_KEY_get0_group(ec_key_cdata)
- bn_ctx = _lib.BN_CTX_new()
- assert bn_ctx != _ffi.NULL
- bn_ctx = _ffi.gc(bn_ctx, _lib.BN_CTX_free)
-
- order = _lib.BN_CTX_get(bn_ctx)
- assert order != _ffi.NULL
+ with backend._bn_ctx_manager() as bn_ctx:
+ order = _lib.BN_CTX_get(bn_ctx)
+ assert order != _ffi.NULL
- res = _lib.EC_GROUP_get_order(group, order, bn_ctx)
- assert res == 1
+ res = _lib.EC_GROUP_get_order(group, order, bn_ctx)
+ assert res == 1
- order_bits = _lib.BN_num_bits(order)
+ order_bits = _lib.BN_num_bits(order)
if 8 * digest_len > order_bits:
digest_len = (order_bits + 7) // 8
| {"golden_diff": "diff --git a/cryptography/hazmat/backends/openssl/ec.py b/cryptography/hazmat/backends/openssl/ec.py\n--- a/cryptography/hazmat/backends/openssl/ec.py\n+++ b/cryptography/hazmat/backends/openssl/ec.py\n@@ -24,6 +24,13 @@\n \n \n def _truncate_digest_for_ecdsa(ec_key_cdata, digest, backend):\n+ \"\"\"\n+ This function truncates digests that are longer than a given elliptic\n+ curve key's length so they can be signed. Since elliptic curve keys are\n+ much shorter than RSA keys many digests (e.g. SHA-512) may require\n+ truncation.\n+ \"\"\"\n+\n _lib = backend._lib\n _ffi = backend._ffi\n \n@@ -31,17 +38,14 @@\n \n group = _lib.EC_KEY_get0_group(ec_key_cdata)\n \n- bn_ctx = _lib.BN_CTX_new()\n- assert bn_ctx != _ffi.NULL\n- bn_ctx = _ffi.gc(bn_ctx, _lib.BN_CTX_free)\n-\n- order = _lib.BN_CTX_get(bn_ctx)\n- assert order != _ffi.NULL\n+ with backend._bn_ctx_manager() as bn_ctx:\n+ order = _lib.BN_CTX_get(bn_ctx)\n+ assert order != _ffi.NULL\n \n- res = _lib.EC_GROUP_get_order(group, order, bn_ctx)\n- assert res == 1\n+ res = _lib.EC_GROUP_get_order(group, order, bn_ctx)\n+ assert res == 1\n \n- order_bits = _lib.BN_num_bits(order)\n+ order_bits = _lib.BN_num_bits(order)\n \n if 8 * digest_len > order_bits:\n digest_len = (order_bits + 7) // 8\n", "issue": "_truncate_digest_for_ecdsa in the OpenSSL document should be commented\nRight now there are no comments explaining why it exists, or what it's attempting to do.\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport six\n\nfrom cryptography import utils\nfrom cryptography.exceptions import (\n InvalidSignature, UnsupportedAlgorithm, _Reasons\n)\nfrom cryptography.hazmat.primitives import hashes, interfaces\nfrom cryptography.hazmat.primitives.asymmetric import ec\n\n\ndef _truncate_digest_for_ecdsa(ec_key_cdata, digest, backend):\n _lib = backend._lib\n _ffi = backend._ffi\n\n digest_len = len(digest)\n\n group = _lib.EC_KEY_get0_group(ec_key_cdata)\n\n bn_ctx = _lib.BN_CTX_new()\n assert bn_ctx != _ffi.NULL\n bn_ctx = _ffi.gc(bn_ctx, _lib.BN_CTX_free)\n\n order = _lib.BN_CTX_get(bn_ctx)\n assert order != _ffi.NULL\n\n res = _lib.EC_GROUP_get_order(group, order, bn_ctx)\n assert res == 1\n\n order_bits = _lib.BN_num_bits(order)\n\n if 8 * digest_len > order_bits:\n digest_len = (order_bits + 7) // 8\n digest = digest[:digest_len]\n\n if 8 * digest_len > order_bits:\n rshift = 8 - (order_bits & 0x7)\n assert rshift > 0 and rshift < 8\n\n mask = 0xFF >> rshift << rshift\n\n # Set the bottom rshift bits to 0\n digest = digest[:-1] + six.int2byte(six.indexbytes(digest, -1) & mask)\n\n return digest\n\n\[email protected]_interface(interfaces.AsymmetricSignatureContext)\nclass _ECDSASignatureContext(object):\n def __init__(self, backend, private_key, algorithm):\n self._backend = backend\n self._private_key = private_key\n self._digest = hashes.Hash(algorithm, backend)\n\n def update(self, data):\n 
self._digest.update(data)\n\n def finalize(self):\n ec_key = self._private_key._ec_key\n\n digest = self._digest.finalize()\n\n digest = _truncate_digest_for_ecdsa(ec_key, digest, self._backend)\n\n max_size = self._backend._lib.ECDSA_size(ec_key)\n assert max_size > 0\n\n sigbuf = self._backend._ffi.new(\"char[]\", max_size)\n siglen_ptr = self._backend._ffi.new(\"unsigned int[]\", 1)\n res = self._backend._lib.ECDSA_sign(\n 0,\n digest,\n len(digest),\n sigbuf,\n siglen_ptr,\n ec_key\n )\n assert res == 1\n return self._backend._ffi.buffer(sigbuf)[:siglen_ptr[0]]\n\n\[email protected]_interface(interfaces.AsymmetricVerificationContext)\nclass _ECDSAVerificationContext(object):\n def __init__(self, backend, public_key, signature, algorithm):\n self._backend = backend\n self._public_key = public_key\n self._signature = signature\n self._digest = hashes.Hash(algorithm, backend)\n\n def update(self, data):\n self._digest.update(data)\n\n def verify(self):\n ec_key = self._public_key._ec_key\n\n digest = self._digest.finalize()\n\n digest = _truncate_digest_for_ecdsa(ec_key, digest, self._backend)\n\n res = self._backend._lib.ECDSA_verify(\n 0,\n digest,\n len(digest),\n self._signature,\n len(self._signature),\n ec_key\n )\n if res != 1:\n self._backend._consume_errors()\n raise InvalidSignature\n return True\n\n\[email protected]_interface(interfaces.EllipticCurvePrivateKey)\nclass _EllipticCurvePrivateKey(object):\n def __init__(self, backend, ec_key_cdata, curve):\n self._backend = backend\n self._ec_key = ec_key_cdata\n self._curve = curve\n\n @property\n def curve(self):\n return self._curve\n\n def signer(self, signature_algorithm):\n if isinstance(signature_algorithm, ec.ECDSA):\n return _ECDSASignatureContext(\n self._backend, self, signature_algorithm.algorithm\n )\n else:\n raise UnsupportedAlgorithm(\n \"Unsupported elliptic curve signature algorithm.\",\n _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)\n\n def public_key(self):\n group = self._backend._lib.EC_KEY_get0_group(self._ec_key)\n assert group != self._backend._ffi.NULL\n\n curve_nid = self._backend._lib.EC_GROUP_get_curve_name(group)\n\n public_ec_key = self._backend._lib.EC_KEY_new_by_curve_name(curve_nid)\n assert public_ec_key != self._backend._ffi.NULL\n public_ec_key = self._backend._ffi.gc(\n public_ec_key, self._backend._lib.EC_KEY_free\n )\n\n point = self._backend._lib.EC_KEY_get0_public_key(self._ec_key)\n assert point != self._backend._ffi.NULL\n\n res = self._backend._lib.EC_KEY_set_public_key(public_ec_key, point)\n assert res == 1\n\n return _EllipticCurvePublicKey(\n self._backend, public_ec_key, self._curve\n )\n\n\[email protected]_interface(interfaces.EllipticCurvePublicKey)\nclass _EllipticCurvePublicKey(object):\n def __init__(self, backend, ec_key_cdata, curve):\n self._backend = backend\n self._ec_key = ec_key_cdata\n self._curve = curve\n\n @property\n def curve(self):\n return self._curve\n\n def verifier(self, signature, signature_algorithm):\n if isinstance(signature_algorithm, ec.ECDSA):\n return _ECDSAVerificationContext(\n self._backend, self, signature, signature_algorithm.algorithm\n )\n else:\n raise UnsupportedAlgorithm(\n \"Unsupported elliptic curve signature algorithm.\",\n _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)\n", "path": "cryptography/hazmat/backends/openssl/ec.py"}]} | 2,497 | 411 |
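Beyond the explanatory docstring added by the patch in the record above, the truncation itself can be expressed without OpenSSL: ECDSA uses only the leftmost bits of the digest, up to the bit length of the curve order. A pure-Python rendering of the same logic as `_truncate_digest_for_ecdsa` — illustrative only, with the order bit length passed in directly rather than read from an `EC_GROUP`:

```python
def truncate_digest(digest: bytes, order_bits: int) -> bytes:
    digest_len = len(digest)

    # Drop whole trailing bytes first.
    if 8 * digest_len > order_bits:
        digest_len = (order_bits + 7) // 8
        digest = digest[:digest_len]

    # If the order is not a whole number of bytes, zero the low bits of the
    # last remaining byte so only order_bits bits of information survive.
    if 8 * digest_len > order_bits:
        rshift = 8 - (order_bits & 0x7)
        mask = 0xFF >> rshift << rshift
        digest = digest[:-1] + bytes([digest[-1] & mask])

    return digest
```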
gh_patches_debug_26665 | rasdani/github-patches | git_diff | netbox-community__netbox-11076 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Beta: CachedValue search values may (and do) exceed maximum PostgreSQL B-Tree index row size
### NetBox version
v3.4-beta1
### Python version
3.10
### Steps to Reproduce
1. Have a NetBox instance with a lot of real-life data
2. Try to migrate data from NetBox 3.3.8 to 3.4-beta1
### Expected Behavior
Migrations succeed
### Observed Behavior
Migration `extras.0083_search` fails.
Log:
```bash
$ NETBOX_DELETE_LEGACY_DATA=1 netbox/manage.py migrate
netbox.denormalized: DEBUG Registering denormalized field <class 'dcim.models.cables.CableTermination'>._device
netbox.denormalized: DEBUG Registering denormalized field <class 'dcim.models.cables.CableTermination'>._rack
netbox.denormalized: DEBUG Registering denormalized field <class 'dcim.models.cables.CableTermination'>._location
Operations to perform:
Apply all migrations: admin, auth, circuits, contenttypes, dcim, django_rq, extras, ipam, sessions, social_django, taggit, tenancy, users, virtualization, wireless
Running migrations:
Applying extras.0083_search...Reindexing 63 models.
Clearing cached values... 0 entries deleted.
Indexing models
circuits.circuit... 153 entries cached.
circuits.circuittermination... 234 entries cached.
circuits.circuittype... 20 entries cached.
circuits.provider... 17 entries cached.
circuits.providernetwork... 2 entries cached.
dcim.cable... 13284 entries cached.
dcim.consoleport... 6478 entries cached.
dcim.consoleserverport... 1378 entries cached.
dcim.device... 39290 entries cached.
dcim.devicebay... 422 entries cached.
dcim.devicerole... 68 entries cached.
dcim.devicetype... 1022 entries cached.
dcim.frontport... 21296 entries cached.
dcim.interface... 805260 entries cached.
dcim.inventoryitem... None found.
dcim.location... 469 entries cached.
dcim.manufacturer... 144 entries cached.
dcim.module... 40 entries cached.
dcim.modulebay... 136 entries cached.
dcim.moduletype... 24 entries cached.
dcim.platform... 212 entries cached.
dcim.powerfeed... 48 entries cached.
dcim.poweroutlet... 464 entries cached.
dcim.powerpanel... 3 entries cached.
dcim.powerport... 33112 entries cached.
dcim.rack... 7741 entries cached.
dcim.rackreservation... 223 entries cached.
dcim.rackrole... 33 entries cached.
dcim.rearport... 17705 entries cached.
dcim.region... 39 entries cached.
dcim.site... 270 entries cached.
dcim.sitegroup... 18 entries cached.
dcim.virtualchassis... 347 entries cached.
dcim.virtualdevicecontext... None found.
ipam.aggregate... 647 entries cached.
ipam.asn... 18 entries cached.
ipam.fhrpgroup... None found.
ipam.ipaddress... 159974 entries cached.
ipam.iprange... None found.
ipam.l2vpn... None found.
ipam.prefix... 43320 entries cached.
ipam.rir... 12 entries cached.
ipam.role... 9 entries cached.
ipam.routetarget... 129 entries cached.
ipam.service... None found.
ipam.servicetemplate... None found.
ipam.vlan... 30219 entries cached.
ipam.vlangroup... 298 entries cached.
ipam.vrf... 7301 entries cached.
extras.journalentry... 23594 entries cached.
tenancy.contact... 24 entries cached.
tenancy.contactgroup... 2 entries cached.
tenancy.contactrole... 6 entries cached.
tenancy.tenant... 14755 entries cached.
tenancy.tenantgroup... 9 entries cached.
virtualization.cluster... 257 entries cached.
virtualization.clustergroup... 161 entries cached.
virtualization.clustertype... 14 entries cached.
virtualization.virtualmachine... 25878 entries cached.
virtualization.vminterface... 13559 entries cached.
wireless.wirelesslan... None found.
wireless.wirelesslangroup... None found.
wireless.wirelesslink... None found.
Completed. Total entries: 1270138
Traceback (most recent call last):
File "/var/dev/netbox/venv/lib64/python3.10/site-packages/django/db/backends/utils.py", line 89, in _execute
return self.cursor.execute(sql, params)
psycopg2.errors.ProgramLimitExceeded: FEHLER: Indexzeile benötigt 122744 Bytes, Maximalgröße ist 8191
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/var/dev/netbox/netbox/manage.py", line 10, in <module>
execute_from_command_line(sys.argv)
File "/var/dev/netbox/venv/lib64/python3.10/site-packages/django/core/management/__init__.py", line 446, in execute_from_command_line
utility.execute()
File "/var/dev/netbox/venv/lib64/python3.10/site-packages/django/core/management/__init__.py", line 440, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/var/dev/netbox/venv/lib64/python3.10/site-packages/django/core/management/base.py", line 402, in run_from_argv
self.execute(*args, **cmd_options)
File "/var/dev/netbox/venv/lib64/python3.10/site-packages/django/core/management/base.py", line 448, in execute
output = self.handle(*args, **options)
File "/var/dev/netbox/venv/lib64/python3.10/site-packages/django/core/management/base.py", line 96, in wrapped
res = handle_func(*args, **kwargs)
File "/var/dev/netbox/venv/lib64/python3.10/site-packages/django/core/management/commands/migrate.py", line 349, in handle
post_migrate_state = executor.migrate(
File "/var/dev/netbox/venv/lib64/python3.10/site-packages/django/db/migrations/executor.py", line 135, in migrate
state = self._migrate_all_forwards(
File "/var/dev/netbox/venv/lib64/python3.10/site-packages/django/db/migrations/executor.py", line 167, in _migrate_all_forwards
state = self.apply_migration(
File "/var/dev/netbox/venv/lib64/python3.10/site-packages/django/db/migrations/executor.py", line 249, in apply_migration
with self.connection.schema_editor(
File "/var/dev/netbox/venv/lib64/python3.10/site-packages/django/db/backends/base/schema.py", line 164, in __exit__
self.execute(sql)
File "/var/dev/netbox/venv/lib64/python3.10/site-packages/django/db/backends/base/schema.py", line 199, in execute
cursor.execute(sql, params)
File "/var/dev/netbox/venv/lib64/python3.10/site-packages/django/db/backends/utils.py", line 103, in execute
return super().execute(sql, params)
File "/var/dev/netbox/venv/lib64/python3.10/site-packages/django/db/backends/utils.py", line 67, in execute
return self._execute_with_wrappers(
File "/var/dev/netbox/venv/lib64/python3.10/site-packages/django/db/backends/utils.py", line 80, in _execute_with_wrappers
return executor(sql, params, many, context)
File "/var/dev/netbox/venv/lib64/python3.10/site-packages/django/db/backends/utils.py", line 84, in _execute
with self.db.wrap_database_errors:
File "/var/dev/netbox/venv/lib64/python3.10/site-packages/django/db/utils.py", line 91, in __exit__
raise dj_exc_value.with_traceback(traceback) from exc_value
File "/var/dev/netbox/venv/lib64/python3.10/site-packages/django/db/backends/utils.py", line 89, in _execute
return self.cursor.execute(sql, params)
django.db.utils.OperationalError: FEHLER: Indexzeile benötigt 122744 Bytes, Maximalgröße ist 8191
```
which translates to: `ERROR: index row requires 122744 bytes, maximum size is 8191`
This seems to be the issue from the perspective of the migration:
* https://github.com/netbox-community/netbox/blob/v3.4-beta1/netbox/extras/migrations/0083_search.py#L12
* causes run of: https://github.com/netbox-community/netbox/blob/v3.4-beta1/netbox/extras/management/commands/reindex.py
* causes run of: https://github.com/netbox-community/netbox/blob/v3.4-beta1/netbox/netbox/search/backends.py#L138
As I am not a database engineer, I'm somewhat guessing my way through here, so in the case, I've missed an obvious configuration value, please have that in mind.
Django seems to use B-Tree indexes in PostgreSQL by default: https://docs.djangoproject.com/en/4.1/ref/models/indexes/#index-options
B-Tree indexes are described in PostgreSQL-docs with a size limit:
> ["The only limitation is that an index entry cannot exceed approximately one-third of a page (after TOAST compression, if applicable)."](https://www.postgresql.org/docs/current/btree-intro.html)
From my understanding of the matter, the size is exceeded as a [CachedValue.value](https://github.com/netbox-community/netbox/blob/v3.4-beta1/netbox/extras/models/search.py#L39) may be too large for a B-Tree index row.
</issue>
<code>
[start of netbox/extras/migrations/0083_search.py]
1 import sys
2 import uuid
3
4 import django.db.models.deletion
5 from django.core import management
6 from django.db import migrations, models
7
8
9 def reindex(apps, schema_editor):
10 # Build the search index (except during tests)
11 if 'test' not in sys.argv:
12 management.call_command('reindex')
13
14
15 class Migration(migrations.Migration):
16
17 dependencies = [
18 ('circuits', '0041_standardize_description_comments'),
19 ('contenttypes', '0002_remove_content_type_name'),
20 ('dcim', '0166_virtualdevicecontext'),
21 ('extras', '0082_savedfilter'),
22 ('ipam', '0063_standardize_description_comments'),
23 ('tenancy', '0009_standardize_description_comments'),
24 ('virtualization', '0034_standardize_description_comments'),
25 ('wireless', '0008_wirelesslan_status'),
26 ]
27
28 operations = [
29 migrations.AddField(
30 model_name='customfield',
31 name='search_weight',
32 field=models.PositiveSmallIntegerField(default=1000),
33 ),
34 migrations.CreateModel(
35 name='CachedValue',
36 fields=[
37 ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
38 ('timestamp', models.DateTimeField(auto_now_add=True)),
39 ('object_id', models.PositiveBigIntegerField()),
40 ('field', models.CharField(max_length=200)),
41 ('type', models.CharField(max_length=30)),
42 ('value', models.TextField(db_index=True)),
43 ('weight', models.PositiveSmallIntegerField(default=1000)),
44 ('object_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='contenttypes.contenttype')),
45 ],
46 options={
47 'ordering': ('weight', 'object_type', 'object_id'),
48 },
49 ),
50 migrations.RunPython(
51 code=reindex,
52 reverse_code=migrations.RunPython.noop
53 ),
54 ]
55
[end of netbox/extras/migrations/0083_search.py]
[start of netbox/extras/models/search.py]
1 import uuid
2
3 from django.contrib.contenttypes.models import ContentType
4 from django.db import models
5
6 from utilities.fields import RestrictedGenericForeignKey
7
8 __all__ = (
9 'CachedValue',
10 )
11
12
13 class CachedValue(models.Model):
14 id = models.UUIDField(
15 primary_key=True,
16 default=uuid.uuid4,
17 editable=False
18 )
19 timestamp = models.DateTimeField(
20 auto_now_add=True,
21 editable=False
22 )
23 object_type = models.ForeignKey(
24 to=ContentType,
25 on_delete=models.CASCADE,
26 related_name='+'
27 )
28 object_id = models.PositiveBigIntegerField()
29 object = RestrictedGenericForeignKey(
30 ct_field='object_type',
31 fk_field='object_id'
32 )
33 field = models.CharField(
34 max_length=200
35 )
36 type = models.CharField(
37 max_length=30
38 )
39 value = models.TextField(
40 db_index=True
41 )
42 weight = models.PositiveSmallIntegerField(
43 default=1000
44 )
45
46 class Meta:
47 ordering = ('weight', 'object_type', 'object_id')
48
49 def __str__(self):
50 return f'{self.object_type} {self.object_id}: {self.field}={self.value}'
51
[end of netbox/extras/models/search.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/netbox/extras/migrations/0083_search.py b/netbox/extras/migrations/0083_search.py
--- a/netbox/extras/migrations/0083_search.py
+++ b/netbox/extras/migrations/0083_search.py
@@ -2,6 +2,7 @@
import uuid
import django.db.models.deletion
+import django.db.models.lookups
from django.core import management
from django.db import migrations, models
@@ -39,7 +40,7 @@
('object_id', models.PositiveBigIntegerField()),
('field', models.CharField(max_length=200)),
('type', models.CharField(max_length=30)),
- ('value', models.TextField(db_index=True)),
+ ('value', models.TextField()),
('weight', models.PositiveSmallIntegerField(default=1000)),
('object_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='contenttypes.contenttype')),
],
diff --git a/netbox/extras/models/search.py b/netbox/extras/models/search.py
--- a/netbox/extras/models/search.py
+++ b/netbox/extras/models/search.py
@@ -36,9 +36,7 @@
type = models.CharField(
max_length=30
)
- value = models.TextField(
- db_index=True
- )
+ value = models.TextField()
weight = models.PositiveSmallIntegerField(
default=1000
)
| {"golden_diff": "diff --git a/netbox/extras/migrations/0083_search.py b/netbox/extras/migrations/0083_search.py\n--- a/netbox/extras/migrations/0083_search.py\n+++ b/netbox/extras/migrations/0083_search.py\n@@ -2,6 +2,7 @@\n import uuid\n \n import django.db.models.deletion\n+import django.db.models.lookups\n from django.core import management\n from django.db import migrations, models\n \n@@ -39,7 +40,7 @@\n ('object_id', models.PositiveBigIntegerField()),\n ('field', models.CharField(max_length=200)),\n ('type', models.CharField(max_length=30)),\n- ('value', models.TextField(db_index=True)),\n+ ('value', models.TextField()),\n ('weight', models.PositiveSmallIntegerField(default=1000)),\n ('object_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='contenttypes.contenttype')),\n ],\ndiff --git a/netbox/extras/models/search.py b/netbox/extras/models/search.py\n--- a/netbox/extras/models/search.py\n+++ b/netbox/extras/models/search.py\n@@ -36,9 +36,7 @@\n type = models.CharField(\n max_length=30\n )\n- value = models.TextField(\n- db_index=True\n- )\n+ value = models.TextField()\n weight = models.PositiveSmallIntegerField(\n default=1000\n )\n", "issue": "Beta: CachedValue search values may (and do) exceed maximum PostgreSQL B-Tree index row size\n### NetBox version\n\nv3.4-beta1\n\n### Python version\n\n3.10\n\n### Steps to Reproduce\n\n1. Have a NetBox instance with a lot of real-life data\r\n2. Try to migrate data from NetBox 3.3.8 to 3.4-beta1\n\n### Expected Behavior\n\nMigrations succeed\n\n### Observed Behavior\n\nMigration `extras.0083_search` fails.\r\n\r\nLog:\r\n```bash\r\n$ NETBOX_DELETE_LEGACY_DATA=1 netbox/manage.py migrate\r\nnetbox.denormalized: DEBUG Registering denormalized field <class 'dcim.models.cables.CableTermination'>._device\r\nnetbox.denormalized: DEBUG Registering denormalized field <class 'dcim.models.cables.CableTermination'>._rack\r\nnetbox.denormalized: DEBUG Registering denormalized field <class 'dcim.models.cables.CableTermination'>._location\r\nOperations to perform:\r\n Apply all migrations: admin, auth, circuits, contenttypes, dcim, django_rq, extras, ipam, sessions, social_django, taggit, tenancy, users, virtualization, wireless\r\nRunning migrations:\r\n Applying extras.0083_search...Reindexing 63 models.\r\nClearing cached values... 0 entries deleted.\r\nIndexing models\r\n circuits.circuit... 153 entries cached.\r\n circuits.circuittermination... 234 entries cached.\r\n circuits.circuittype... 20 entries cached.\r\n circuits.provider... 17 entries cached.\r\n circuits.providernetwork... 2 entries cached.\r\n dcim.cable... 13284 entries cached.\r\n dcim.consoleport... 6478 entries cached.\r\n dcim.consoleserverport... 1378 entries cached.\r\n dcim.device... 39290 entries cached.\r\n dcim.devicebay... 422 entries cached.\r\n dcim.devicerole... 68 entries cached.\r\n dcim.devicetype... 1022 entries cached.\r\n dcim.frontport... 21296 entries cached.\r\n dcim.interface... 805260 entries cached.\r\n dcim.inventoryitem... None found.\r\n dcim.location... 469 entries cached.\r\n dcim.manufacturer... 144 entries cached.\r\n dcim.module... 40 entries cached.\r\n dcim.modulebay... 136 entries cached.\r\n dcim.moduletype... 24 entries cached.\r\n dcim.platform... 212 entries cached.\r\n dcim.powerfeed... 48 entries cached.\r\n dcim.poweroutlet... 464 entries cached.\r\n dcim.powerpanel... 3 entries cached.\r\n dcim.powerport... 33112 entries cached.\r\n dcim.rack... 7741 entries cached.\r\n dcim.rackreservation... 
223 entries cached.\r\n dcim.rackrole... 33 entries cached.\r\n dcim.rearport... 17705 entries cached.\r\n dcim.region... 39 entries cached.\r\n dcim.site... 270 entries cached.\r\n dcim.sitegroup... 18 entries cached.\r\n dcim.virtualchassis... 347 entries cached.\r\n dcim.virtualdevicecontext... None found.\r\n ipam.aggregate... 647 entries cached.\r\n ipam.asn... 18 entries cached.\r\n ipam.fhrpgroup... None found.\r\n ipam.ipaddress... 159974 entries cached.\r\n ipam.iprange... None found.\r\n ipam.l2vpn... None found.\r\n ipam.prefix... 43320 entries cached.\r\n ipam.rir... 12 entries cached.\r\n ipam.role... 9 entries cached.\r\n ipam.routetarget... 129 entries cached.\r\n ipam.service... None found.\r\n ipam.servicetemplate... None found.\r\n ipam.vlan... 30219 entries cached.\r\n ipam.vlangroup... 298 entries cached.\r\n ipam.vrf... 7301 entries cached.\r\n extras.journalentry... 23594 entries cached.\r\n tenancy.contact... 24 entries cached.\r\n tenancy.contactgroup... 2 entries cached.\r\n tenancy.contactrole... 6 entries cached.\r\n tenancy.tenant... 14755 entries cached.\r\n tenancy.tenantgroup... 9 entries cached.\r\n virtualization.cluster... 257 entries cached.\r\n virtualization.clustergroup... 161 entries cached.\r\n virtualization.clustertype... 14 entries cached.\r\n virtualization.virtualmachine... 25878 entries cached.\r\n virtualization.vminterface... 13559 entries cached.\r\n wireless.wirelesslan... None found.\r\n wireless.wirelesslangroup... None found.\r\n wireless.wirelesslink... None found.\r\nCompleted. Total entries: 1270138\r\nTraceback (most recent call last):\r\n File \"/var/dev/netbox/venv/lib64/python3.10/site-packages/django/db/backends/utils.py\", line 89, in _execute\r\n return self.cursor.execute(sql, params)\r\npsycopg2.errors.ProgramLimitExceeded: FEHLER: Indexzeile ben\u00f6tigt 122744 Bytes, Maximalgr\u00f6\u00dfe ist 8191\r\n\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File \"/var/dev/netbox/netbox/manage.py\", line 10, in <module>\r\n execute_from_command_line(sys.argv)\r\n File \"/var/dev/netbox/venv/lib64/python3.10/site-packages/django/core/management/__init__.py\", line 446, in execute_from_command_line\r\n utility.execute()\r\n File \"/var/dev/netbox/venv/lib64/python3.10/site-packages/django/core/management/__init__.py\", line 440, in execute\r\n self.fetch_command(subcommand).run_from_argv(self.argv)\r\n File \"/var/dev/netbox/venv/lib64/python3.10/site-packages/django/core/management/base.py\", line 402, in run_from_argv\r\n self.execute(*args, **cmd_options)\r\n File \"/var/dev/netbox/venv/lib64/python3.10/site-packages/django/core/management/base.py\", line 448, in execute\r\n output = self.handle(*args, **options)\r\n File \"/var/dev/netbox/venv/lib64/python3.10/site-packages/django/core/management/base.py\", line 96, in wrapped\r\n res = handle_func(*args, **kwargs)\r\n File \"/var/dev/netbox/venv/lib64/python3.10/site-packages/django/core/management/commands/migrate.py\", line 349, in handle\r\n post_migrate_state = executor.migrate(\r\n File \"/var/dev/netbox/venv/lib64/python3.10/site-packages/django/db/migrations/executor.py\", line 135, in migrate\r\n state = self._migrate_all_forwards(\r\n File \"/var/dev/netbox/venv/lib64/python3.10/site-packages/django/db/migrations/executor.py\", line 167, in _migrate_all_forwards\r\n state = self.apply_migration(\r\n File \"/var/dev/netbox/venv/lib64/python3.10/site-packages/django/db/migrations/executor.py\", 
line 249, in apply_migration\r\n with self.connection.schema_editor(\r\n File \"/var/dev/netbox/venv/lib64/python3.10/site-packages/django/db/backends/base/schema.py\", line 164, in __exit__\r\n self.execute(sql)\r\n File \"/var/dev/netbox/venv/lib64/python3.10/site-packages/django/db/backends/base/schema.py\", line 199, in execute\r\n cursor.execute(sql, params)\r\n File \"/var/dev/netbox/venv/lib64/python3.10/site-packages/django/db/backends/utils.py\", line 103, in execute\r\n return super().execute(sql, params)\r\n File \"/var/dev/netbox/venv/lib64/python3.10/site-packages/django/db/backends/utils.py\", line 67, in execute\r\n return self._execute_with_wrappers(\r\n File \"/var/dev/netbox/venv/lib64/python3.10/site-packages/django/db/backends/utils.py\", line 80, in _execute_with_wrappers\r\n return executor(sql, params, many, context)\r\n File \"/var/dev/netbox/venv/lib64/python3.10/site-packages/django/db/backends/utils.py\", line 84, in _execute\r\n with self.db.wrap_database_errors:\r\n File \"/var/dev/netbox/venv/lib64/python3.10/site-packages/django/db/utils.py\", line 91, in __exit__\r\n raise dj_exc_value.with_traceback(traceback) from exc_value\r\n File \"/var/dev/netbox/venv/lib64/python3.10/site-packages/django/db/backends/utils.py\", line 89, in _execute\r\n return self.cursor.execute(sql, params)\r\ndjango.db.utils.OperationalError: FEHLER: Indexzeile ben\u00f6tigt 122744 Bytes, Maximalgr\u00f6\u00dfe ist 8191\r\n```\r\nwhich translates to: `ERROR: index row requires 122744 bytes, maximum size is 8191`\r\n\r\nThis seems to be the issue from the perspective of the migration:\r\n* https://github.com/netbox-community/netbox/blob/v3.4-beta1/netbox/extras/migrations/0083_search.py#L12\r\n* causes run of: https://github.com/netbox-community/netbox/blob/v3.4-beta1/netbox/extras/management/commands/reindex.py\r\n* causes run of: https://github.com/netbox-community/netbox/blob/v3.4-beta1/netbox/netbox/search/backends.py#L138\r\n\r\nAs I am not a database engineer, I'm somewhat guessing my way through here, so in the case, I've missed an obvious configuration value, please have that in mind.\r\n\r\nDjango seems to use B-Tree indexes in PostgreSQL by default: https://docs.djangoproject.com/en/4.1/ref/models/indexes/#index-options\r\n\r\nB-Tree indexes are described in PostgreSQL-docs with a size limit:\r\n> [\"The only limitation is that an index entry cannot exceed approximately one-third of a page (after TOAST compression, if applicable).\"](https://www.postgresql.org/docs/current/btree-intro.html)\r\n\r\nFrom my understanding of the matter, the size is exceeded as a [CachedValue.value](https://github.com/netbox-community/netbox/blob/v3.4-beta1/netbox/extras/models/search.py#L39) may be too large for a B-Tree index row.\n", "before_files": [{"content": "import sys\nimport uuid\n\nimport django.db.models.deletion\nfrom django.core import management\nfrom django.db import migrations, models\n\n\ndef reindex(apps, schema_editor):\n # Build the search index (except during tests)\n if 'test' not in sys.argv:\n management.call_command('reindex')\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('circuits', '0041_standardize_description_comments'),\n ('contenttypes', '0002_remove_content_type_name'),\n ('dcim', '0166_virtualdevicecontext'),\n ('extras', '0082_savedfilter'),\n ('ipam', '0063_standardize_description_comments'),\n ('tenancy', '0009_standardize_description_comments'),\n ('virtualization', '0034_standardize_description_comments'),\n ('wireless', 
'0008_wirelesslan_status'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='customfield',\n name='search_weight',\n field=models.PositiveSmallIntegerField(default=1000),\n ),\n migrations.CreateModel(\n name='CachedValue',\n fields=[\n ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),\n ('timestamp', models.DateTimeField(auto_now_add=True)),\n ('object_id', models.PositiveBigIntegerField()),\n ('field', models.CharField(max_length=200)),\n ('type', models.CharField(max_length=30)),\n ('value', models.TextField(db_index=True)),\n ('weight', models.PositiveSmallIntegerField(default=1000)),\n ('object_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='contenttypes.contenttype')),\n ],\n options={\n 'ordering': ('weight', 'object_type', 'object_id'),\n },\n ),\n migrations.RunPython(\n code=reindex,\n reverse_code=migrations.RunPython.noop\n ),\n ]\n", "path": "netbox/extras/migrations/0083_search.py"}, {"content": "import uuid\n\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.db import models\n\nfrom utilities.fields import RestrictedGenericForeignKey\n\n__all__ = (\n 'CachedValue',\n)\n\n\nclass CachedValue(models.Model):\n id = models.UUIDField(\n primary_key=True,\n default=uuid.uuid4,\n editable=False\n )\n timestamp = models.DateTimeField(\n auto_now_add=True,\n editable=False\n )\n object_type = models.ForeignKey(\n to=ContentType,\n on_delete=models.CASCADE,\n related_name='+'\n )\n object_id = models.PositiveBigIntegerField()\n object = RestrictedGenericForeignKey(\n ct_field='object_type',\n fk_field='object_id'\n )\n field = models.CharField(\n max_length=200\n )\n type = models.CharField(\n max_length=30\n )\n value = models.TextField(\n db_index=True\n )\n weight = models.PositiveSmallIntegerField(\n default=1000\n )\n\n class Meta:\n ordering = ('weight', 'object_type', 'object_id')\n\n def __str__(self):\n return f'{self.object_type} {self.object_id}: {self.field}={self.value}'\n", "path": "netbox/extras/models/search.py"}]} | 3,878 | 325 |
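The whole fix in the row above is the removal of `db_index=True` from the `value` text field, in both the `0083_search` migration and the `CachedValue` model: PostgreSQL rejects B-Tree index entries larger than roughly one-third of an 8 KB page, which is exactly the `index row requires 122744 bytes, maximum size is 8191` failure in the report. A minimal sketch of the field change follows; it only instantiates Django field objects for comparison and is illustrative, not part of NetBox.

```python
from django.db import models

# Field as first written in the migration: db_index=True makes Django emit
# CREATE INDEX on an unbounded text column, which fails once a cached search
# value exceeds PostgreSQL's per-entry B-Tree limit.
indexed_value = models.TextField(db_index=True)

# Field after the fix: the column is stored without a B-Tree index, so
# oversized cached values no longer trip the size check.
plain_value = models.TextField()

print(indexed_value.db_index, plain_value.db_index)  # True False
```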
gh_patches_debug_15180 | rasdani/github-patches | git_diff | pre-commit__pre-commit-38 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Consider using --no-checkout for cloning
I'd assume it is faster...
</issue>
<code>
[start of pre_commit/repository.py]
1
2 import contextlib
3 from plumbum import local
4
5 import pre_commit.constants as C
6 from pre_commit.clientlib.validate_manifest import validate_manifest
7 from pre_commit.hooks_workspace import in_hooks_workspace
8 from pre_commit.languages.all import languages
9 from pre_commit.util import cached_property
10
11
12 class Repository(object):
13 def __init__(self, repo_config):
14 self.repo_config = repo_config
15
16 @cached_property
17 def repo_url(self):
18 return self.repo_config['repo']
19
20 @cached_property
21 def sha(self):
22 return self.repo_config['sha']
23
24 @cached_property
25 def languages(self):
26 return set(filter(None, (
27 hook.get('language') for hook in self.hooks.values()
28 )))
29
30 @cached_property
31 def hooks(self):
32 return dict(
33 (hook['id'], dict(hook, **self.manifest[hook['id']]))
34 for hook in self.repo_config['hooks']
35 )
36
37 @cached_property
38 def manifest(self):
39 with self.in_checkout():
40 return dict(
41 (hook['id'], hook)
42 for hook in validate_manifest(C.MANIFEST_FILE)
43 )
44
45 @contextlib.contextmanager
46 def in_checkout(self):
47 with in_hooks_workspace():
48 # SMELL:
49 self.create()
50 with local.cwd(self.sha):
51 yield
52
53 def create(self):
54 with in_hooks_workspace():
55 if local.path(self.sha).exists():
56 # Project already exists, no reason to re-create it
57 return
58
59 local['git']['clone', self.repo_url, self.sha]()
60 with self.in_checkout():
61 local['git']['checkout', self.sha]()
62
63 def install(self):
64 with self.in_checkout():
65 for language in C.SUPPORTED_LANGUAGES:
66 if language in self.languages:
67 languages[language].install_environment()
68
69 def run_hook(self, hook_id, file_args):
70 with self.in_checkout():
71 hook = self.hooks[hook_id]
72 return languages[hook['language']].run_hook(hook, file_args)
[end of pre_commit/repository.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pre_commit/repository.py b/pre_commit/repository.py
--- a/pre_commit/repository.py
+++ b/pre_commit/repository.py
@@ -56,7 +56,7 @@
# Project already exists, no reason to re-create it
return
- local['git']['clone', self.repo_url, self.sha]()
+ local['git']['clone', '--no-checkout', self.repo_url, self.sha]()
with self.in_checkout():
local['git']['checkout', self.sha]()
@@ -69,4 +69,4 @@
def run_hook(self, hook_id, file_args):
with self.in_checkout():
hook = self.hooks[hook_id]
- return languages[hook['language']].run_hook(hook, file_args)
\ No newline at end of file
+ return languages[hook['language']].run_hook(hook, file_args)
| {"golden_diff": "diff --git a/pre_commit/repository.py b/pre_commit/repository.py\n--- a/pre_commit/repository.py\n+++ b/pre_commit/repository.py\n@@ -56,7 +56,7 @@\n # Project already exists, no reason to re-create it\n return\n \n- local['git']['clone', self.repo_url, self.sha]()\n+ local['git']['clone', '--no-checkout', self.repo_url, self.sha]()\n with self.in_checkout():\n local['git']['checkout', self.sha]()\n \n@@ -69,4 +69,4 @@\n def run_hook(self, hook_id, file_args):\n with self.in_checkout():\n hook = self.hooks[hook_id]\n- return languages[hook['language']].run_hook(hook, file_args)\n\\ No newline at end of file\n+ return languages[hook['language']].run_hook(hook, file_args)\n", "issue": "Consider using --no-checkout for cloning\nI'd assume it is faster...\n\n", "before_files": [{"content": "\nimport contextlib\nfrom plumbum import local\n\nimport pre_commit.constants as C\nfrom pre_commit.clientlib.validate_manifest import validate_manifest\nfrom pre_commit.hooks_workspace import in_hooks_workspace\nfrom pre_commit.languages.all import languages\nfrom pre_commit.util import cached_property\n\n\nclass Repository(object):\n def __init__(self, repo_config):\n self.repo_config = repo_config\n\n @cached_property\n def repo_url(self):\n return self.repo_config['repo']\n\n @cached_property\n def sha(self):\n return self.repo_config['sha']\n\n @cached_property\n def languages(self):\n return set(filter(None, (\n hook.get('language') for hook in self.hooks.values()\n )))\n\n @cached_property\n def hooks(self):\n return dict(\n (hook['id'], dict(hook, **self.manifest[hook['id']]))\n for hook in self.repo_config['hooks']\n )\n\n @cached_property\n def manifest(self):\n with self.in_checkout():\n return dict(\n (hook['id'], hook)\n for hook in validate_manifest(C.MANIFEST_FILE)\n )\n\n @contextlib.contextmanager\n def in_checkout(self):\n with in_hooks_workspace():\n # SMELL:\n self.create()\n with local.cwd(self.sha):\n yield\n\n def create(self):\n with in_hooks_workspace():\n if local.path(self.sha).exists():\n # Project already exists, no reason to re-create it\n return\n\n local['git']['clone', self.repo_url, self.sha]()\n with self.in_checkout():\n local['git']['checkout', self.sha]()\n\n def install(self):\n with self.in_checkout():\n for language in C.SUPPORTED_LANGUAGES:\n if language in self.languages:\n languages[language].install_environment()\n\n def run_hook(self, hook_id, file_args):\n with self.in_checkout():\n hook = self.hooks[hook_id]\n return languages[hook['language']].run_hook(hook, file_args)", "path": "pre_commit/repository.py"}]} | 1,119 | 190 |
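The accepted patch above amounts to one extra flag: `git clone --no-checkout` skips materialising the default branch's working tree, which the very next `git checkout <sha>` would replace anyway, so the clone gets cheaper. Below is a rough equivalent of the patched `Repository.create()` flow, written with `subprocess` instead of the project's `plumbum` wrapper purely for illustration.

```python
import subprocess


def clone_at_sha(repo_url: str, sha: str) -> None:
    # Clone without populating a working tree; only the object database and
    # refs are fetched, mirroring the patched create() method.
    subprocess.run(["git", "clone", "--no-checkout", repo_url, sha], check=True)
    # Now materialise exactly the revision the hook repository is pinned to.
    subprocess.run(["git", "checkout", sha], cwd=sha, check=True)
```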
gh_patches_debug_12470 | rasdani/github-patches | git_diff | joke2k__faker-759 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Generating invalid CPF (Brazilian SSN)
Faker is generating invalid checksum digits for CPF (Brazilian SSN).
### Steps to reproduce
1. Create fake instance using localization "pt_BR"
1. Call fake.cpf()
### Expected behavior
It should generate a valid CPF.
### Actual behavior
It is generating a CPF with invalid checksum digits, in some cases.
</issue>
<code>
[start of faker/providers/ssn/pt_BR/__init__.py]
1 # -*- coding: utf-8 -*-
2
3 from __future__ import unicode_literals
4 from .. import Provider as SsnProvider
5
6
7 def checksum(digits):
8 s = 0
9 p = len(digits) + 1
10 for i in range(0, len(digits)):
11 s += digits[i] * p
12 p -= 1
13
14 reminder = s % 11
15 if reminder == 0 or reminder == 1:
16 return 1
17 else:
18 return 11 - reminder
19
20
21 class Provider(SsnProvider):
22 """
23 Provider for Brazilian SSN also known in Brazil as CPF.
24 There are two methods Provider.ssn and Provider.cpf
25 The snn returns a valid number with numbers only
26 The cpf return a valid number formatted with brazilian mask. eg nnn.nnn.nnn-nn
27 """
28
29 def ssn(self):
30 digits = self.generator.random.sample(range(10), 9)
31
32 dv = checksum(digits)
33 digits.append(dv)
34 digits.append(checksum(digits))
35
36 return ''.join(map(str, digits))
37
38 def cpf(self):
39 c = self.ssn()
40 return c[:3] + '.' + c[3:6] + '.' + c[6:9] + '-' + c[9:]
41
[end of faker/providers/ssn/pt_BR/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/faker/providers/ssn/pt_BR/__init__.py b/faker/providers/ssn/pt_BR/__init__.py
--- a/faker/providers/ssn/pt_BR/__init__.py
+++ b/faker/providers/ssn/pt_BR/__init__.py
@@ -5,6 +5,12 @@
def checksum(digits):
+ """
+ Returns the checksum of CPF digits.
+ References to the algorithm:
+ https://pt.wikipedia.org/wiki/Cadastro_de_pessoas_f%C3%ADsicas#Algoritmo
+ https://metacpan.org/source/MAMAWE/Algorithm-CheckDigits-v1.3.0/lib/Algorithm/CheckDigits/M11_004.pm
+ """
s = 0
p = len(digits) + 1
for i in range(0, len(digits)):
@@ -13,7 +19,7 @@
reminder = s % 11
if reminder == 0 or reminder == 1:
- return 1
+ return 0
else:
return 11 - reminder
| {"golden_diff": "diff --git a/faker/providers/ssn/pt_BR/__init__.py b/faker/providers/ssn/pt_BR/__init__.py\n--- a/faker/providers/ssn/pt_BR/__init__.py\n+++ b/faker/providers/ssn/pt_BR/__init__.py\n@@ -5,6 +5,12 @@\n \n \n def checksum(digits):\n+ \"\"\"\n+ Returns the checksum of CPF digits.\n+ References to the algorithm:\n+ https://pt.wikipedia.org/wiki/Cadastro_de_pessoas_f%C3%ADsicas#Algoritmo\n+ https://metacpan.org/source/MAMAWE/Algorithm-CheckDigits-v1.3.0/lib/Algorithm/CheckDigits/M11_004.pm\n+ \"\"\"\n s = 0\n p = len(digits) + 1\n for i in range(0, len(digits)):\n@@ -13,7 +19,7 @@\n \n reminder = s % 11\n if reminder == 0 or reminder == 1:\n- return 1\n+ return 0\n else:\n return 11 - reminder\n", "issue": "Generating invalid cpf (brazillian ssn)\nFaker is generating invalid checksum digits for cpf (brazillian ssn).\r\n\r\n### Steps to reproduce\r\n\r\n1. Create fake instance using localization \"pt_BR\"\r\n1. Call fake.cpf()\r\n\r\n### Expected behavior\r\n\r\nIt should generate a valid CPF.\r\n\r\n### Actual behavior\r\n\r\nIt is generating a CPF with invalid checksum digits, in some cases.\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\nfrom .. import Provider as SsnProvider\n\n\ndef checksum(digits):\n s = 0\n p = len(digits) + 1\n for i in range(0, len(digits)):\n s += digits[i] * p\n p -= 1\n\n reminder = s % 11\n if reminder == 0 or reminder == 1:\n return 1\n else:\n return 11 - reminder\n\n\nclass Provider(SsnProvider):\n \"\"\"\n Provider for Brazilian SSN also known in Brazil as CPF.\n There are two methods Provider.ssn and Provider.cpf\n The snn returns a valid number with numbers only\n The cpf return a valid number formatted with brazilian mask. eg nnn.nnn.nnn-nn\n \"\"\"\n\n def ssn(self):\n digits = self.generator.random.sample(range(10), 9)\n\n dv = checksum(digits)\n digits.append(dv)\n digits.append(checksum(digits))\n\n return ''.join(map(str, digits))\n\n def cpf(self):\n c = self.ssn()\n return c[:3] + '.' + c[3:6] + '.' + c[6:9] + '-' + c[9:]\n", "path": "faker/providers/ssn/pt_BR/__init__.py"}]} | 983 | 246 |
gh_patches_debug_7971 | rasdani/github-patches | git_diff | ipython__ipython-4563 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
nbconvert: Default encoding problem on OS X
Greetings.
I am using IPython 1.1.0 via MacPorts on OSX 10.7.5. The following problem is reproducible on the master git branch (IPython 2.0.0-dev).
On any call to nbconvert, I get the following failure:
```
[NbConvertApp] Using existing profile dir: u'/Users/USERNAME_REDACTED/.ipython/profile_default'
[NbConvertApp] Converting notebook ticks.ipynb to html
[NbConvertApp] Support files will be in ticks_files/
Traceback (most recent call last):
File "/opt/local/bin/ipython", line 6, in <module>
start_ipython()
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/IPython/__init__.py", line 118, in start_ipython
return launch_new_instance(argv=argv, **kwargs)
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/IPython/config/application.py", line 545, in launch_instance
app.start()
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/IPython/terminal/ipapp.py", line 358, in start
return self.subapp.start()
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/IPython/nbconvert/nbconvertapp.py", line 267, in start
self.convert_notebooks()
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/IPython/nbconvert/nbconvertapp.py", line 300, in convert_notebooks
output, resources = exporter.from_filename(notebook_filename, resources=resources)
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/IPython/nbconvert/exporters/exporter.py", line 288, in from_filename
with io.open(filename) as f:
LookupError: unknown encoding:
If you suspect this is an IPython bug, please report it at:
https://github.com/ipython/ipython/issues
or send an email to the mailing list at [email protected]
You can print a more detailed traceback right now with "%tb", or use "%debug"
to interactively debug it.
Extra-detailed tracebacks for bug-reporting purposes can be enabled via:
c.Application.verbose_crash=True
```
This is an easy fix: I change the troublesome line such that it reads,
```
with io.open(filename, encoding='ascii') as f:
```
However, this is ad hoc and likely a suboptimal solution. I wanted to bring this to the developers' attention and inquire about a proper solution. Thanks!
System info:
```
python -c "import IPython; print(IPython.sys_info())"
{'codename': 'An Afternoon Hack',
'commit_hash': '7c2ea3a',
'commit_source': 'installation',
'default_encoding': 'US-ASCII',
'ipython_path': '/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/IPython',
'ipython_version': '1.1.0',
'os_name': 'posix',
'platform': 'Darwin-11.4.2-x86_64-i386-64bit',
'sys_executable': '/opt/local/Library/Frameworks/Python.framework/Versions/2.7/Resources/Python.app/Contents/MacOS/Python',
'sys_platform': 'darwin',
'sys_version': '2.7.6 (default, Nov 19 2013, 16:37:14) \n[GCC 4.2.1 (Based on Apple Inc. build 5658) (LLVM build 2336.11.00)]'}
```
</issue>
<code>
[start of IPython/nbconvert/exporters/exporter.py]
1 """This module defines Exporter, a highly configurable converter
2 that uses Jinja2 to export notebook files into different formats.
3 """
4
5 #-----------------------------------------------------------------------------
6 # Copyright (c) 2013, the IPython Development Team.
7 #
8 # Distributed under the terms of the Modified BSD License.
9 #
10 # The full license is in the file COPYING.txt, distributed with this software.
11 #-----------------------------------------------------------------------------
12
13 #-----------------------------------------------------------------------------
14 # Imports
15 #-----------------------------------------------------------------------------
16
17 from __future__ import print_function, absolute_import
18
19 # Stdlib imports
20 import io
21 import os
22 import copy
23 import collections
24 import datetime
25
26
27 # IPython imports
28 from IPython.config.configurable import LoggingConfigurable
29 from IPython.config import Config
30 from IPython.nbformat import current as nbformat
31 from IPython.utils.traitlets import MetaHasTraits, Unicode, List
32 from IPython.utils.importstring import import_item
33 from IPython.utils import text, py3compat
34
35 #-----------------------------------------------------------------------------
36 # Class
37 #-----------------------------------------------------------------------------
38
39 class ResourcesDict(collections.defaultdict):
40 def __missing__(self, key):
41 return ''
42
43
44 class Exporter(LoggingConfigurable):
45 """
46 Class containing methods that sequentially run a list of preprocessors on a
47 NotebookNode object and then return the modified NotebookNode object and
48 accompanying resources dict.
49 """
50
51 file_extension = Unicode(
52 'txt', config=True,
53 help="Extension of the file that should be written to disk"
54 )
55
56 #Configurability, allows the user to easily add filters and preprocessors.
57 preprocessors = List(config=True,
58 help="""List of preprocessors, by name or namespace, to enable.""")
59
60 _preprocessors = None
61
62 default_preprocessors = List(['IPython.nbconvert.preprocessors.coalesce_streams',
63 'IPython.nbconvert.preprocessors.SVG2PDFPreprocessor',
64 'IPython.nbconvert.preprocessors.ExtractOutputPreprocessor',
65 'IPython.nbconvert.preprocessors.CSSHTMLHeaderPreprocessor',
66 'IPython.nbconvert.preprocessors.RevealHelpPreprocessor',
67 'IPython.nbconvert.preprocessors.LatexPreprocessor',
68 'IPython.nbconvert.preprocessors.HighlightMagicsPreprocessor'],
69 config=True,
70 help="""List of preprocessors available by default, by name, namespace,
71 instance, or type.""")
72
73
74 def __init__(self, config=None, **kw):
75 """
76 Public constructor
77
78 Parameters
79 ----------
80 config : config
81 User configuration instance.
82 """
83 with_default_config = self.default_config
84 if config:
85 with_default_config.merge(config)
86
87 super(Exporter, self).__init__(config=with_default_config, **kw)
88
89 self._init_preprocessors()
90
91
92 @property
93 def default_config(self):
94 return Config()
95
96 @nbformat.docstring_nbformat_mod
97 def from_notebook_node(self, nb, resources=None, **kw):
98 """
99 Convert a notebook from a notebook node instance.
100
101 Parameters
102 ----------
103 nb : :class:`~{nbformat_mod}.nbbase.NotebookNode`
104 Notebook node
105 resources : dict
106 Additional resources that can be accessed read/write by
107 preprocessors and filters.
108 **kw
109 Ignored (?)
110 """
111 nb_copy = copy.deepcopy(nb)
112 resources = self._init_resources(resources)
113
114 # Preprocess
115 nb_copy, resources = self._preprocess(nb_copy, resources)
116
117 return nb_copy, resources
118
119
120 def from_filename(self, filename, resources=None, **kw):
121 """
122 Convert a notebook from a notebook file.
123
124 Parameters
125 ----------
126 filename : str
127 Full filename of the notebook file to open and convert.
128 """
129
130 # Pull the metadata from the filesystem.
131 if resources is None:
132 resources = ResourcesDict()
133 if not 'metadata' in resources or resources['metadata'] == '':
134 resources['metadata'] = ResourcesDict()
135 basename = os.path.basename(filename)
136 notebook_name = basename[:basename.rfind('.')]
137 resources['metadata']['name'] = notebook_name
138
139 modified_date = datetime.datetime.fromtimestamp(os.path.getmtime(filename))
140 resources['metadata']['modified_date'] = modified_date.strftime(text.date_format)
141
142 with io.open(filename) as f:
143 return self.from_notebook_node(nbformat.read(f, 'json'), resources=resources, **kw)
144
145
146 def from_file(self, file_stream, resources=None, **kw):
147 """
148 Convert a notebook from a notebook file.
149
150 Parameters
151 ----------
152 file_stream : file-like object
153 Notebook file-like object to convert.
154 """
155 return self.from_notebook_node(nbformat.read(file_stream, 'json'), resources=resources, **kw)
156
157
158 def register_preprocessor(self, preprocessor, enabled=False):
159 """
160 Register a preprocessor.
161 Preprocessors are classes that act upon the notebook before it is
162 passed into the Jinja templating engine. preprocessors are also
163 capable of passing additional information to the Jinja
164 templating engine.
165
166 Parameters
167 ----------
168 preprocessor : preprocessor
169 """
170 if preprocessor is None:
171 raise TypeError('preprocessor')
172 isclass = isinstance(preprocessor, type)
173 constructed = not isclass
174
175 # Handle preprocessor's registration based on it's type
176 if constructed and isinstance(preprocessor, py3compat.string_types):
177 # Preprocessor is a string, import the namespace and recursively call
178 # this register_preprocessor method
179 preprocessor_cls = import_item(preprocessor)
180 return self.register_preprocessor(preprocessor_cls, enabled)
181
182 if constructed and hasattr(preprocessor, '__call__'):
183 # Preprocessor is a function, no need to construct it.
184 # Register and return the preprocessor.
185 if enabled:
186 preprocessor.enabled = True
187 self._preprocessors.append(preprocessor)
188 return preprocessor
189
190 elif isclass and isinstance(preprocessor, MetaHasTraits):
191 # Preprocessor is configurable. Make sure to pass in new default for
192 # the enabled flag if one was specified.
193 self.register_preprocessor(preprocessor(parent=self), enabled)
194
195 elif isclass:
196 # Preprocessor is not configurable, construct it
197 self.register_preprocessor(preprocessor(), enabled)
198
199 else:
200 # Preprocessor is an instance of something without a __call__
201 # attribute.
202 raise TypeError('preprocessor')
203
204
205 def _init_preprocessors(self):
206 """
207 Register all of the preprocessors needed for this exporter, disabled
208 unless specified explicitly.
209 """
210 if self._preprocessors is None:
211 self._preprocessors = []
212
213 #Load default preprocessors (not necessarly enabled by default).
214 if self.default_preprocessors:
215 for preprocessor in self.default_preprocessors:
216 self.register_preprocessor(preprocessor)
217
218 #Load user preprocessors. Enable by default.
219 if self.preprocessors:
220 for preprocessor in self.preprocessors:
221 self.register_preprocessor(preprocessor, enabled=True)
222
223
224 def _init_resources(self, resources):
225
226 #Make sure the resources dict is of ResourcesDict type.
227 if resources is None:
228 resources = ResourcesDict()
229 if not isinstance(resources, ResourcesDict):
230 new_resources = ResourcesDict()
231 new_resources.update(resources)
232 resources = new_resources
233
234 #Make sure the metadata extension exists in resources
235 if 'metadata' in resources:
236 if not isinstance(resources['metadata'], ResourcesDict):
237 resources['metadata'] = ResourcesDict(resources['metadata'])
238 else:
239 resources['metadata'] = ResourcesDict()
240 if not resources['metadata']['name']:
241 resources['metadata']['name'] = 'Notebook'
242
243 #Set the output extension
244 resources['output_extension'] = self.file_extension
245 return resources
246
247
248 def _preprocess(self, nb, resources):
249 """
250 Preprocess the notebook before passing it into the Jinja engine.
251 To preprocess the notebook is to apply all of the
252
253 Parameters
254 ----------
255 nb : notebook node
256 notebook that is being exported.
257 resources : a dict of additional resources that
258 can be accessed read/write by preprocessors
259 """
260
261 # Do a copy.deepcopy first,
262 # we are never safe enough with what the preprocessors could do.
263 nbc = copy.deepcopy(nb)
264 resc = copy.deepcopy(resources)
265
266 #Run each preprocessor on the notebook. Carry the output along
267 #to each preprocessor
268 for preprocessor in self._preprocessors:
269 nbc, resc = preprocessor(nbc, resc)
270 return nbc, resc
271
[end of IPython/nbconvert/exporters/exporter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/IPython/nbconvert/exporters/exporter.py b/IPython/nbconvert/exporters/exporter.py
--- a/IPython/nbconvert/exporters/exporter.py
+++ b/IPython/nbconvert/exporters/exporter.py
@@ -139,7 +139,7 @@
modified_date = datetime.datetime.fromtimestamp(os.path.getmtime(filename))
resources['metadata']['modified_date'] = modified_date.strftime(text.date_format)
- with io.open(filename) as f:
+ with io.open(filename, encoding='utf-8') as f:
return self.from_notebook_node(nbformat.read(f, 'json'), resources=resources, **kw)
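The one-line patch above replaces the platform-dependent default encoding (`US-ASCII` under the reporter's MacPorts locale) with an explicit `utf-8`, which matches what `.ipynb` files are on disk, since they are JSON documents; the reporter's `encoding='ascii'` workaround would break again on notebooks containing non-ASCII characters. A minimal sketch of the corrected read path, simplified and not the real nbconvert API:

```python
import io
import json


def read_notebook(path):
    # Always decode notebook files as UTF-8 instead of trusting the locale;
    # this is the behaviour the patched Exporter.from_filename adopts.
    with io.open(path, encoding="utf-8") as f:
        return json.load(f)
```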
| {"golden_diff": "diff --git a/IPython/nbconvert/exporters/exporter.py b/IPython/nbconvert/exporters/exporter.py\n--- a/IPython/nbconvert/exporters/exporter.py\n+++ b/IPython/nbconvert/exporters/exporter.py\n@@ -139,7 +139,7 @@\n modified_date = datetime.datetime.fromtimestamp(os.path.getmtime(filename))\n resources['metadata']['modified_date'] = modified_date.strftime(text.date_format)\n \n- with io.open(filename) as f:\n+ with io.open(filename, encoding='utf-8') as f:\n return self.from_notebook_node(nbformat.read(f, 'json'), resources=resources, **kw)\n", "issue": "nbconvert: Default encoding problem on OS X\nGreetings.\n\nI am using IPython 1.1.0 via MacPorts on OSX 10.7.5. The following problem is reproducible on the master git branch (IPython 2.0.0-dev).\n\nOn any call to nbconvert, I get the following failure:\n\n```\n[NbConvertApp] Using existing profile dir: u'/Users/USERNAME_REDACTED/.ipython/profile_default'\n[NbConvertApp] Converting notebook ticks.ipynb to html\n[NbConvertApp] Support files will be in ticks_files/\nTraceback (most recent call last):\n File \"/opt/local/bin/ipython\", line 6, in <module>\n start_ipython()\n File \"/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/IPython/__init__.py\", line 118, in start_ipython\n return launch_new_instance(argv=argv, **kwargs)\n File \"/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/IPython/config/application.py\", line 545, in launch_instance\n app.start()\n File \"/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/IPython/terminal/ipapp.py\", line 358, in start\n return self.subapp.start()\n File \"/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/IPython/nbconvert/nbconvertapp.py\", line 267, in start\n self.convert_notebooks()\n File \"/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/IPython/nbconvert/nbconvertapp.py\", line 300, in convert_notebooks\n output, resources = exporter.from_filename(notebook_filename, resources=resources)\n File \"/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/IPython/nbconvert/exporters/exporter.py\", line 288, in from_filename\n with io.open(filename) as f:\nLookupError: unknown encoding: \n\nIf you suspect this is an IPython bug, please report it at:\n https://github.com/ipython/ipython/issues\nor send an email to the mailing list at [email protected]\n\nYou can print a more detailed traceback right now with \"%tb\", or use \"%debug\"\nto interactively debug it.\n\nExtra-detailed tracebacks for bug-reporting purposes can be enabled via:\n c.Application.verbose_crash=True\n```\n\nThis is an easy fix: I change the troublesome line such that it reads,\n\n```\nwith io.open(filename, encoding='ascii') as f:\n```\n\nHowever, this ad hoc and likely a suboptimal solution. I wanted to bring this to the developers' attention and inquire about a proper solution. 
Thanks!\n\nSystem info:\n\n```\npython -c \"import IPython; print(IPython.sys_info())\"\n{'codename': 'An Afternoon Hack',\n 'commit_hash': '7c2ea3a',\n 'commit_source': 'installation',\n 'default_encoding': 'US-ASCII',\n 'ipython_path': '/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/IPython',\n 'ipython_version': '1.1.0',\n 'os_name': 'posix',\n 'platform': 'Darwin-11.4.2-x86_64-i386-64bit',\n 'sys_executable': '/opt/local/Library/Frameworks/Python.framework/Versions/2.7/Resources/Python.app/Contents/MacOS/Python',\n 'sys_platform': 'darwin',\n 'sys_version': '2.7.6 (default, Nov 19 2013, 16:37:14) \\n[GCC 4.2.1 (Based on Apple Inc. build 5658) (LLVM build 2336.11.00)]'}\n```\n\n", "before_files": [{"content": "\"\"\"This module defines Exporter, a highly configurable converter\nthat uses Jinja2 to export notebook files into different formats.\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (c) 2013, the IPython Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\nfrom __future__ import print_function, absolute_import\n\n# Stdlib imports\nimport io\nimport os\nimport copy\nimport collections\nimport datetime\n\n\n# IPython imports\nfrom IPython.config.configurable import LoggingConfigurable\nfrom IPython.config import Config\nfrom IPython.nbformat import current as nbformat\nfrom IPython.utils.traitlets import MetaHasTraits, Unicode, List\nfrom IPython.utils.importstring import import_item\nfrom IPython.utils import text, py3compat\n\n#-----------------------------------------------------------------------------\n# Class\n#-----------------------------------------------------------------------------\n\nclass ResourcesDict(collections.defaultdict):\n def __missing__(self, key):\n return ''\n\n\nclass Exporter(LoggingConfigurable):\n \"\"\"\n Class containing methods that sequentially run a list of preprocessors on a \n NotebookNode object and then return the modified NotebookNode object and \n accompanying resources dict.\n \"\"\"\n\n file_extension = Unicode(\n 'txt', config=True,\n help=\"Extension of the file that should be written to disk\"\n )\n\n #Configurability, allows the user to easily add filters and preprocessors.\n preprocessors = List(config=True,\n help=\"\"\"List of preprocessors, by name or namespace, to enable.\"\"\")\n\n _preprocessors = None\n\n default_preprocessors = List(['IPython.nbconvert.preprocessors.coalesce_streams',\n 'IPython.nbconvert.preprocessors.SVG2PDFPreprocessor',\n 'IPython.nbconvert.preprocessors.ExtractOutputPreprocessor',\n 'IPython.nbconvert.preprocessors.CSSHTMLHeaderPreprocessor',\n 'IPython.nbconvert.preprocessors.RevealHelpPreprocessor',\n 'IPython.nbconvert.preprocessors.LatexPreprocessor',\n 'IPython.nbconvert.preprocessors.HighlightMagicsPreprocessor'],\n config=True,\n help=\"\"\"List of preprocessors available by default, by name, namespace, \n instance, or type.\"\"\")\n\n\n def __init__(self, config=None, **kw):\n \"\"\"\n Public constructor\n\n Parameters\n ----------\n config : config\n User configuration instance.\n \"\"\"\n with_default_config = self.default_config\n if 
config:\n with_default_config.merge(config)\n \n super(Exporter, self).__init__(config=with_default_config, **kw)\n\n self._init_preprocessors()\n\n\n @property\n def default_config(self):\n return Config()\n\n @nbformat.docstring_nbformat_mod\n def from_notebook_node(self, nb, resources=None, **kw):\n \"\"\"\n Convert a notebook from a notebook node instance.\n\n Parameters\n ----------\n nb : :class:`~{nbformat_mod}.nbbase.NotebookNode`\n Notebook node\n resources : dict\n Additional resources that can be accessed read/write by\n preprocessors and filters.\n **kw\n Ignored (?)\n \"\"\"\n nb_copy = copy.deepcopy(nb)\n resources = self._init_resources(resources)\n\n # Preprocess\n nb_copy, resources = self._preprocess(nb_copy, resources)\n\n return nb_copy, resources\n\n\n def from_filename(self, filename, resources=None, **kw):\n \"\"\"\n Convert a notebook from a notebook file.\n\n Parameters\n ----------\n filename : str\n Full filename of the notebook file to open and convert.\n \"\"\"\n\n # Pull the metadata from the filesystem.\n if resources is None:\n resources = ResourcesDict()\n if not 'metadata' in resources or resources['metadata'] == '':\n resources['metadata'] = ResourcesDict()\n basename = os.path.basename(filename)\n notebook_name = basename[:basename.rfind('.')]\n resources['metadata']['name'] = notebook_name\n\n modified_date = datetime.datetime.fromtimestamp(os.path.getmtime(filename))\n resources['metadata']['modified_date'] = modified_date.strftime(text.date_format)\n\n with io.open(filename) as f:\n return self.from_notebook_node(nbformat.read(f, 'json'), resources=resources, **kw)\n\n\n def from_file(self, file_stream, resources=None, **kw):\n \"\"\"\n Convert a notebook from a notebook file.\n\n Parameters\n ----------\n file_stream : file-like object\n Notebook file-like object to convert.\n \"\"\"\n return self.from_notebook_node(nbformat.read(file_stream, 'json'), resources=resources, **kw)\n\n\n def register_preprocessor(self, preprocessor, enabled=False):\n \"\"\"\n Register a preprocessor.\n Preprocessors are classes that act upon the notebook before it is\n passed into the Jinja templating engine. preprocessors are also\n capable of passing additional information to the Jinja\n templating engine.\n\n Parameters\n ----------\n preprocessor : preprocessor\n \"\"\"\n if preprocessor is None:\n raise TypeError('preprocessor')\n isclass = isinstance(preprocessor, type)\n constructed = not isclass\n\n # Handle preprocessor's registration based on it's type\n if constructed and isinstance(preprocessor, py3compat.string_types):\n # Preprocessor is a string, import the namespace and recursively call\n # this register_preprocessor method\n preprocessor_cls = import_item(preprocessor)\n return self.register_preprocessor(preprocessor_cls, enabled)\n\n if constructed and hasattr(preprocessor, '__call__'):\n # Preprocessor is a function, no need to construct it.\n # Register and return the preprocessor.\n if enabled:\n preprocessor.enabled = True\n self._preprocessors.append(preprocessor)\n return preprocessor\n\n elif isclass and isinstance(preprocessor, MetaHasTraits):\n # Preprocessor is configurable. Make sure to pass in new default for \n # the enabled flag if one was specified.\n self.register_preprocessor(preprocessor(parent=self), enabled)\n\n elif isclass:\n # Preprocessor is not configurable, construct it\n self.register_preprocessor(preprocessor(), enabled)\n\n else:\n # Preprocessor is an instance of something without a __call__ \n # attribute. 
\n raise TypeError('preprocessor')\n\n\n def _init_preprocessors(self):\n \"\"\"\n Register all of the preprocessors needed for this exporter, disabled\n unless specified explicitly.\n \"\"\"\n if self._preprocessors is None:\n self._preprocessors = []\n\n #Load default preprocessors (not necessarly enabled by default).\n if self.default_preprocessors:\n for preprocessor in self.default_preprocessors:\n self.register_preprocessor(preprocessor)\n\n #Load user preprocessors. Enable by default.\n if self.preprocessors:\n for preprocessor in self.preprocessors:\n self.register_preprocessor(preprocessor, enabled=True)\n\n\n def _init_resources(self, resources):\n\n #Make sure the resources dict is of ResourcesDict type.\n if resources is None:\n resources = ResourcesDict()\n if not isinstance(resources, ResourcesDict):\n new_resources = ResourcesDict()\n new_resources.update(resources)\n resources = new_resources\n\n #Make sure the metadata extension exists in resources\n if 'metadata' in resources:\n if not isinstance(resources['metadata'], ResourcesDict):\n resources['metadata'] = ResourcesDict(resources['metadata'])\n else:\n resources['metadata'] = ResourcesDict()\n if not resources['metadata']['name']:\n resources['metadata']['name'] = 'Notebook'\n\n #Set the output extension\n resources['output_extension'] = self.file_extension\n return resources\n\n\n def _preprocess(self, nb, resources):\n \"\"\"\n Preprocess the notebook before passing it into the Jinja engine.\n To preprocess the notebook is to apply all of the\n\n Parameters\n ----------\n nb : notebook node\n notebook that is being exported.\n resources : a dict of additional resources that\n can be accessed read/write by preprocessors\n \"\"\"\n\n # Do a copy.deepcopy first,\n # we are never safe enough with what the preprocessors could do.\n nbc = copy.deepcopy(nb)\n resc = copy.deepcopy(resources)\n\n #Run each preprocessor on the notebook. Carry the output along\n #to each preprocessor\n for preprocessor in self._preprocessors:\n nbc, resc = preprocessor(nbc, resc)\n return nbc, resc\n", "path": "IPython/nbconvert/exporters/exporter.py"}]} | 3,965 | 145 |
gh_patches_debug_4256 | rasdani/github-patches | git_diff | ivy-llc__ivy-17092 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
solve
</issue>
<code>
[start of ivy/functional/frontends/paddle/tensor/linalg.py]
1 # global
2 import ivy
3 from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes
4 from ivy.functional.frontends.paddle import promote_types_of_paddle_inputs
5 from ivy.functional.frontends.paddle.func_wrapper import (
6 to_ivy_arrays_and_back,
7 )
8
9
10 @with_supported_dtypes(
11 {"2.4.2 and below": ("float32", "float64", "int32", "int64")}, "paddle"
12 )
13 @to_ivy_arrays_and_back
14 def cross(x, y, /, *, axis=9, name=None):
15 x, y = promote_types_of_paddle_inputs(x, y)
16 return ivy.cross(x, y, axis=axis)
17
18
19 # matmul
20 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
21 @to_ivy_arrays_and_back
22 def matmul(x, y, transpose_x=False, transpose_y=False, name=None):
23 x, y = promote_types_of_paddle_inputs(x, y)
24 return ivy.matmul(x, y, transpose_a=transpose_x, transpose_b=transpose_y)
25
26
27 # norm
28 @with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle")
29 @to_ivy_arrays_and_back
30 def norm(x, p="fro", axis=None, keepdim=False, name=None):
31 if axis is None and p is not None:
32 if p == "fro":
33 p = 2
34 ret = ivy.vector_norm(x.flatten(), ord=p, axis=-1)
35 if keepdim:
36 ret = ret.reshape([1] * len(x.shape))
37 if len(ret.shape) == 0:
38 return ivy.array([ret])
39 return ret
40
41 if isinstance(axis, tuple):
42 axis = list(axis)
43 if isinstance(axis, list) and len(axis) == 1:
44 axis = axis[0]
45
46 if isinstance(axis, int):
47 if p == "fro":
48 p = 2
49 if p in [0, 1, 2, ivy.inf, -ivy.inf]:
50 ret = ivy.vector_norm(x, ord=p, axis=axis, keepdims=keepdim)
51 elif isinstance(p, (int, float)):
52 ret = ivy.pow(
53 ivy.sum(ivy.pow(ivy.abs(x), p), axis=axis, keepdims=keepdim),
54 float(1.0 / p),
55 )
56
57 elif isinstance(axis, list) and len(axis) == 2:
58 if p == 0:
59 raise ValueError
60 elif p == 1:
61 ret = ivy.sum(ivy.abs(x), axis=axis, keepdims=keepdim)
62 elif p == 2 or p == "fro":
63 ret = ivy.matrix_norm(x, ord="fro", axis=axis, keepdims=keepdim)
64 elif p == ivy.inf:
65 ret = ivy.max(ivy.abs(x), axis=axis, keepdims=keepdim)
66 elif p == -ivy.inf:
67 ret = ivy.min(ivy.abs(x), axis=axis, keepdims=keepdim)
68 elif isinstance(p, (int, float)) and p > 0:
69 ret = ivy.pow(
70 ivy.sum(ivy.pow(ivy.abs(x), p), axis=axis, keepdims=keepdim),
71 float(1.0 / p),
72 )
73 else:
74 raise ValueError
75
76 else:
77 raise ValueError
78
79 if len(ret.shape) == 0:
80 ret = ivy.array(
81 [ret]
82 ) # this is done so as to match shape of output from paddle
83 return ret
84
85
86 # eig
87 @to_ivy_arrays_and_back
88 def eig(x, name=None):
89 return ivy.eig(x)
90
91
92 # eigvals
93 @to_ivy_arrays_and_back
94 def eigvals(x, name=None):
95 return ivy.eigvals(x)
96
97
98 # eigvalsh
99 @to_ivy_arrays_and_back
100 def eigvalsh(x, UPLO="L", name=None):
101 return ivy.eigvalsh(x, UPLO=UPLO)
102
103
104 # eigh
105 @to_ivy_arrays_and_back
106 def eigh(x, UPLO="L", name=None):
107 return ivy.eigh(x, UPLO=UPLO)
108
109
110 # pinv
111 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
112 @to_ivy_arrays_and_back
113 def pinv(x, rcond=1e-15, hermitian=False, name=None):
114 # TODO: Add hermitian functionality
115 return ivy.pinv(x, rtol=rcond)
116
117
118 # cholesky
119 @with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle")
120 @to_ivy_arrays_and_back
121 def cholesky(x, /, *, upper=False, name=None):
122 return ivy.cholesky(x, upper=upper)
123
124
125 # bmm
126 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
127 @to_ivy_arrays_and_back
128 def bmm(x, y, transpose_x=False, transpose_y=False, name=None):
129 if len(ivy.shape(x)) != 3 or len(ivy.shape(y)) != 3:
130 raise RuntimeError("input must be 3D matrices")
131 x, y = promote_types_of_paddle_inputs(x, y)
132 return ivy.matmul(x, y, transpose_a=transpose_x, transpose_b=transpose_y)
133
[end of ivy/functional/frontends/paddle/tensor/linalg.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ivy/functional/frontends/paddle/tensor/linalg.py b/ivy/functional/frontends/paddle/tensor/linalg.py
--- a/ivy/functional/frontends/paddle/tensor/linalg.py
+++ b/ivy/functional/frontends/paddle/tensor/linalg.py
@@ -115,6 +115,13 @@
return ivy.pinv(x, rtol=rcond)
+# solve
+@with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
+@to_ivy_arrays_and_back
+def solve(x1, x2, name=None):
+ return ivy.solve(x1, x2)
+
+
# cholesky
@with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
| {"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/linalg.py b/ivy/functional/frontends/paddle/tensor/linalg.py\n--- a/ivy/functional/frontends/paddle/tensor/linalg.py\n+++ b/ivy/functional/frontends/paddle/tensor/linalg.py\n@@ -115,6 +115,13 @@\n return ivy.pinv(x, rtol=rcond)\n \n \n+# solve\n+@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n+@to_ivy_arrays_and_back\n+def solve(x1, x2, name=None):\n+ return ivy.solve(x1, x2)\n+\n+\n # cholesky\n @with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n @to_ivy_arrays_and_back\n", "issue": "solve\n\n", "before_files": [{"content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\nfrom ivy.functional.frontends.paddle import promote_types_of_paddle_inputs\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@with_supported_dtypes(\n {\"2.4.2 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef cross(x, y, /, *, axis=9, name=None):\n x, y = promote_types_of_paddle_inputs(x, y)\n return ivy.cross(x, y, axis=axis)\n\n\n# matmul\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef matmul(x, y, transpose_x=False, transpose_y=False, name=None):\n x, y = promote_types_of_paddle_inputs(x, y)\n return ivy.matmul(x, y, transpose_a=transpose_x, transpose_b=transpose_y)\n\n\n# norm\n@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef norm(x, p=\"fro\", axis=None, keepdim=False, name=None):\n if axis is None and p is not None:\n if p == \"fro\":\n p = 2\n ret = ivy.vector_norm(x.flatten(), ord=p, axis=-1)\n if keepdim:\n ret = ret.reshape([1] * len(x.shape))\n if len(ret.shape) == 0:\n return ivy.array([ret])\n return ret\n\n if isinstance(axis, tuple):\n axis = list(axis)\n if isinstance(axis, list) and len(axis) == 1:\n axis = axis[0]\n\n if isinstance(axis, int):\n if p == \"fro\":\n p = 2\n if p in [0, 1, 2, ivy.inf, -ivy.inf]:\n ret = ivy.vector_norm(x, ord=p, axis=axis, keepdims=keepdim)\n elif isinstance(p, (int, float)):\n ret = ivy.pow(\n ivy.sum(ivy.pow(ivy.abs(x), p), axis=axis, keepdims=keepdim),\n float(1.0 / p),\n )\n\n elif isinstance(axis, list) and len(axis) == 2:\n if p == 0:\n raise ValueError\n elif p == 1:\n ret = ivy.sum(ivy.abs(x), axis=axis, keepdims=keepdim)\n elif p == 2 or p == \"fro\":\n ret = ivy.matrix_norm(x, ord=\"fro\", axis=axis, keepdims=keepdim)\n elif p == ivy.inf:\n ret = ivy.max(ivy.abs(x), axis=axis, keepdims=keepdim)\n elif p == -ivy.inf:\n ret = ivy.min(ivy.abs(x), axis=axis, keepdims=keepdim)\n elif isinstance(p, (int, float)) and p > 0:\n ret = ivy.pow(\n ivy.sum(ivy.pow(ivy.abs(x), p), axis=axis, keepdims=keepdim),\n float(1.0 / p),\n )\n else:\n raise ValueError\n\n else:\n raise ValueError\n\n if len(ret.shape) == 0:\n ret = ivy.array(\n [ret]\n ) # this is done so as to match shape of output from paddle\n return ret\n\n\n# eig\n@to_ivy_arrays_and_back\ndef eig(x, name=None):\n return ivy.eig(x)\n\n\n# eigvals\n@to_ivy_arrays_and_back\ndef eigvals(x, name=None):\n return ivy.eigvals(x)\n\n\n# eigvalsh\n@to_ivy_arrays_and_back\ndef eigvalsh(x, UPLO=\"L\", name=None):\n return ivy.eigvalsh(x, UPLO=UPLO)\n\n\n# eigh\n@to_ivy_arrays_and_back\ndef eigh(x, UPLO=\"L\", name=None):\n return ivy.eigh(x, UPLO=UPLO)\n\n\n# 
pinv\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef pinv(x, rcond=1e-15, hermitian=False, name=None):\n # TODO: Add hermitian functionality\n return ivy.pinv(x, rtol=rcond)\n\n\n# cholesky\n@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cholesky(x, /, *, upper=False, name=None):\n return ivy.cholesky(x, upper=upper)\n\n\n# bmm\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef bmm(x, y, transpose_x=False, transpose_y=False, name=None):\n if len(ivy.shape(x)) != 3 or len(ivy.shape(y)) != 3:\n raise RuntimeError(\"input must be 3D matrices\")\n x, y = promote_types_of_paddle_inputs(x, y)\n return ivy.matmul(x, y, transpose_a=transpose_x, transpose_b=transpose_y)\n", "path": "ivy/functional/frontends/paddle/tensor/linalg.py"}]} | 2,103 | 205 |
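The added frontend function is a thin wrapper: Paddle's `solve(x1, x2)` simply forwards to `ivy.solve`, with float16/bfloat16 marked unsupported to mirror the other wrappers in the file. A short usage sketch of the underlying operation, assuming an Ivy installation with the NumPy backend available:

```python
import ivy

ivy.set_backend("numpy")  # any installed backend would do for this sketch

# Solve A @ x = b, which is what the new paddle-frontend `solve` delegates to.
A = ivy.array([[3.0, 1.0], [1.0, 2.0]])
b = ivy.array([[9.0], [8.0]])

x = ivy.solve(A, b)
print(x)  # approximately [[2.], [3.]], since 3*2 + 1*3 = 9 and 1*2 + 2*3 = 8
```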
gh_patches_debug_63106 | rasdani/github-patches | git_diff | kornia__kornia-1263 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug] save pointcloud does not update num_points when inf
## 🐛 Bug
The function `K.utils.save_pointcloud_ply` doesn't update the final number of points to be serialized when one of the values contains an infinite value.
How to fix:
Update this line: https://github.com/kornia/kornia/blob/master/kornia/utils/pointcloud_io.py#L34
```python
if not bool(torch.isfinite(xyz).any()):
continue
```
by
```python
if not bool(torch.isfinite(xyz).any()):
num_points -= 1
continue
```
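
For reference, a minimal script that shows the mismatch (a sketch assuming the current behaviour; the file name and tensor values are invented):

```python
import torch
import kornia as K

# A 2x2 grid of points; one point is entirely non-finite, so it is skipped
# while writing the data rows, but the header still declares 4 vertices.
cloud = torch.tensor([
    [[1.0, 2.0, 3.0], [float('inf'), float('inf'), float('inf')]],
    [[4.0, 5.0, 6.0], [7.0, 8.0, 9.0]],
])
K.utils.save_pointcloud_ply('cloud.ply', cloud)

with open('cloud.ply') as f:
    lines = f.read().splitlines()
print([line for line in lines if line.startswith('element vertex')])  # ['element vertex 4']
print(len(lines) - 8)  # only 3 data rows follow the 8 header lines
```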
</issue>
<code>
[start of kornia/utils/pointcloud_io.py]
1 import os
2 from typing import Optional
3
4 import torch
5
6
7 def save_pointcloud_ply(filename: str, pointcloud: torch.Tensor) -> None:
8 r"""Utility function to save to disk a pointcloud in PLY format.
9
10 Args:
11 filename: the path to save the pointcloud.
12 pointcloud: tensor containing the pointcloud to save.
13 The tensor must be in the shape of :math:`(*, 3)` where the last
14 component is assumed to be a 3d point coordinate :math:`(X, Y, Z)`.
15 """
16 if not isinstance(filename, str) and filename[-3:] == '.ply':
17 raise TypeError("Input filename must be a string in with the .ply " "extension. Got {}".format(filename))
18
19 if not torch.is_tensor(pointcloud):
20 raise TypeError(f"Input pointcloud type is not a torch.Tensor. Got {type(pointcloud)}")
21
22 if not len(pointcloud.shape) == 3 and pointcloud.shape[-1] == 3:
23 raise TypeError("Input pointcloud must be in the following shape " "HxWx3. Got {}.".format(pointcloud.shape))
24
25 # flatten the input pointcloud in a vector to iterate points
26 xyz_vec: torch.Tensor = pointcloud.reshape(-1, 3)
27
28 with open(filename, 'w') as f:
29 data_str: str = ''
30 num_points: int = xyz_vec.shape[0]
31 for idx in range(num_points):
32 xyz = xyz_vec[idx]
33 if not bool(torch.isfinite(xyz).any()):
34 continue
35 x: float = xyz[0].item()
36 y: float = xyz[1].item()
37 z: float = xyz[2].item()
38 data_str += f'{x} {y} {z}\n'
39
40 f.write("ply\n")
41 f.write("format ascii 1.0\n")
42 f.write("comment arraiy generated\n")
43 f.write("element vertex %d\n" % num_points)
44 f.write("property double x\n")
45 f.write("property double y\n")
46 f.write("property double z\n")
47 f.write("end_header\n")
48 f.write(data_str)
49
50
51 def load_pointcloud_ply(filename: str, header_size: int = 8) -> torch.Tensor:
52 r"""Utility function to load from disk a pointcloud in PLY format.
53
54 Args:
55 filename: the path to the pointcloud.
56 header_size: the size of the ply file header that will
57 be skipped during loading.
58
59 Return:
60 tensor containing the loaded point with shape :math:`(*, 3)` where
61 :math:`*` represents the number of points.
62 """
63 if not isinstance(filename, str) and filename[-3:] == '.ply':
64 raise TypeError("Input filename must be a string in with the .ply " "extension. Got {}".format(filename))
65 if not os.path.isfile(filename):
66 raise ValueError("Input filename is not an existing file.")
67 if not (isinstance(header_size, int) and header_size > 0):
68 raise TypeError(f"Input header_size must be a positive integer. Got {header_size}.")
69 # open the file and populate tensor
70 with open(filename) as f:
71 points = []
72
73 # skip header
74 lines = f.readlines()[header_size:]
75
76 # iterate over the points
77 for line in lines:
78 x_str, y_str, z_str = line.split()
79 points.append((torch.tensor(float(x_str)), torch.tensor(float(y_str)), torch.tensor(float(z_str))))
80
81 # create tensor from list
82 pointcloud: torch.Tensor = torch.tensor(points)
83 return pointcloud
84
[end of kornia/utils/pointcloud_io.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kornia/utils/pointcloud_io.py b/kornia/utils/pointcloud_io.py
--- a/kornia/utils/pointcloud_io.py
+++ b/kornia/utils/pointcloud_io.py
@@ -31,6 +31,7 @@
for idx in range(num_points):
xyz = xyz_vec[idx]
if not bool(torch.isfinite(xyz).any()):
+ num_points -= 1
continue
x: float = xyz[0].item()
y: float = xyz[1].item()
| {"golden_diff": "diff --git a/kornia/utils/pointcloud_io.py b/kornia/utils/pointcloud_io.py\n--- a/kornia/utils/pointcloud_io.py\n+++ b/kornia/utils/pointcloud_io.py\n@@ -31,6 +31,7 @@\n for idx in range(num_points):\n xyz = xyz_vec[idx]\n if not bool(torch.isfinite(xyz).any()):\n+ num_points -= 1\n continue\n x: float = xyz[0].item()\n y: float = xyz[1].item()\n", "issue": "[Bug] save pointcloud not updates num_points when inf\n## \ud83d\udc1b Bug\r\n\r\nThe function `K.utils.save_pointcloud_ply` doesn't update the final number of points to be serialized when one of the values contain an infinite value.\r\n\r\nHow to fix:\r\n\r\nupdate this line https://github.com/kornia/kornia/blob/master/kornia/utils/pointcloud_io.py#L34\r\n\r\n```python\r\n if not bool(torch.isfinite(xyz).any()):\r\n continue\r\n```\r\nby\r\n\r\n```python\r\n if not bool(torch.isfinite(xyz).any()):\r\n num_points -= 1\r\n continue\r\n```\n", "before_files": [{"content": "import os\nfrom typing import Optional\n\nimport torch\n\n\ndef save_pointcloud_ply(filename: str, pointcloud: torch.Tensor) -> None:\n r\"\"\"Utility function to save to disk a pointcloud in PLY format.\n\n Args:\n filename: the path to save the pointcloud.\n pointcloud: tensor containing the pointcloud to save.\n The tensor must be in the shape of :math:`(*, 3)` where the last\n component is assumed to be a 3d point coordinate :math:`(X, Y, Z)`.\n \"\"\"\n if not isinstance(filename, str) and filename[-3:] == '.ply':\n raise TypeError(\"Input filename must be a string in with the .ply \" \"extension. Got {}\".format(filename))\n\n if not torch.is_tensor(pointcloud):\n raise TypeError(f\"Input pointcloud type is not a torch.Tensor. Got {type(pointcloud)}\")\n\n if not len(pointcloud.shape) == 3 and pointcloud.shape[-1] == 3:\n raise TypeError(\"Input pointcloud must be in the following shape \" \"HxWx3. Got {}.\".format(pointcloud.shape))\n\n # flatten the input pointcloud in a vector to iterate points\n xyz_vec: torch.Tensor = pointcloud.reshape(-1, 3)\n\n with open(filename, 'w') as f:\n data_str: str = ''\n num_points: int = xyz_vec.shape[0]\n for idx in range(num_points):\n xyz = xyz_vec[idx]\n if not bool(torch.isfinite(xyz).any()):\n continue\n x: float = xyz[0].item()\n y: float = xyz[1].item()\n z: float = xyz[2].item()\n data_str += f'{x} {y} {z}\\n'\n\n f.write(\"ply\\n\")\n f.write(\"format ascii 1.0\\n\")\n f.write(\"comment arraiy generated\\n\")\n f.write(\"element vertex %d\\n\" % num_points)\n f.write(\"property double x\\n\")\n f.write(\"property double y\\n\")\n f.write(\"property double z\\n\")\n f.write(\"end_header\\n\")\n f.write(data_str)\n\n\ndef load_pointcloud_ply(filename: str, header_size: int = 8) -> torch.Tensor:\n r\"\"\"Utility function to load from disk a pointcloud in PLY format.\n\n Args:\n filename: the path to the pointcloud.\n header_size: the size of the ply file header that will\n be skipped during loading.\n\n Return:\n tensor containing the loaded point with shape :math:`(*, 3)` where\n :math:`*` represents the number of points.\n \"\"\"\n if not isinstance(filename, str) and filename[-3:] == '.ply':\n raise TypeError(\"Input filename must be a string in with the .ply \" \"extension. Got {}\".format(filename))\n if not os.path.isfile(filename):\n raise ValueError(\"Input filename is not an existing file.\")\n if not (isinstance(header_size, int) and header_size > 0):\n raise TypeError(f\"Input header_size must be a positive integer. 
Got {header_size}.\")\n # open the file and populate tensor\n with open(filename) as f:\n points = []\n\n # skip header\n lines = f.readlines()[header_size:]\n\n # iterate over the points\n for line in lines:\n x_str, y_str, z_str = line.split()\n points.append((torch.tensor(float(x_str)), torch.tensor(float(y_str)), torch.tensor(float(z_str))))\n\n # create tensor from list\n pointcloud: torch.Tensor = torch.tensor(points)\n return pointcloud\n", "path": "kornia/utils/pointcloud_io.py"}]} | 1,634 | 120 |
gh_patches_debug_39139 | rasdani/github-patches | git_diff | digitalfabrik__integreat-cms-304 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add currently_in_translation to translation coverage
The translation coverage lacks the `currently_in_translation` field, which should also be added.
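
A sketch of what the extra counter could look like, following the pattern of the existing fields in the coverage view (hypothetical helper; the attribute and key names mirror the code below):

```python
def translation_coverage_entry(language, page_translations, num_pages):
    """Build one coverage row, including pages currently in translation."""
    return {
        'translated_name': language.translated_name,
        'num_page_translations_up_to_date': len(
            [t for t in page_translations
             if not t.is_outdated and not t.currently_in_translation]),
        'num_page_translations_currently_in_translation': len(
            [t for t in page_translations if t.currently_in_translation]),
        'num_page_translations_outdated': len(
            [t for t in page_translations if t.is_outdated]),
        'num_page_translations_missing': num_pages - page_translations.count(),
    }
```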
</issue>
<code>
[start of backend/cms/views/analytics/translation_coverage_view.py]
1 """Views related to the statistics module"""
2 from django.contrib.auth.decorators import login_required
3 from django.utils.decorators import method_decorator
4 from django.views.generic import TemplateView
5 from django.shortcuts import render
6
7 from ...models import PageTranslation, Region
8 from ...decorators import region_permission_required
9
10
11 @method_decorator(login_required, name='dispatch')
12 @method_decorator(region_permission_required, name='dispatch')
13 class TranslationCoverageView(TemplateView):
14 """
15 Class to create the translation coverage statistic
16 """
17 template_name = 'analytics/translation_coverage.html'
18 base_context = {'current_menu_item': 'translation_coverage'}
19
20 def get(self, request, *args, **kwargs):
21
22 region = Region.get_current_region(request)
23 num_pages = region.pages.count()
24 languages = []
25
26 for language in region.languages:
27 page_translations = PageTranslation.get_translations(region, language)
28 languages.append({
29 'translated_name': language.translated_name,
30 'num_page_translations_up_to_date': len([t for t in page_translations if not t.is_outdated]),
31 'num_page_translations_outdated': len([t for t in page_translations if t.is_outdated]),
32 'num_page_translations_missing': num_pages - page_translations.count()
33 })
34
35 return render(
36 request,
37 self.template_name,
38 {
39 **self.base_context,
40 'languages': languages
41 }
42 )
43
[end of backend/cms/views/analytics/translation_coverage_view.py]
[start of backend/cms/models/pages/page_translation.py]
1 """Models representing a page and page translation with content
2 """
3 import logging
4
5 from django.conf import settings
6 from django.db import models
7 from django.utils import timezone
8
9 from .page import Page
10 from ..languages.language import Language
11 from ...constants import status
12
13
14 logger = logging.getLogger(__name__)
15
16
17 class PageTranslation(models.Model):
18 """Class defining a Translation of a Page
19
20 Args:
21 models : Class inherit of django-Models
22 """
23
24 page = models.ForeignKey(Page, related_name='translations', on_delete=models.CASCADE)
25 language = models.ForeignKey(
26 Language,
27 related_name='page_translations',
28 on_delete=models.CASCADE
29 )
30 slug = models.SlugField(max_length=200, blank=True, allow_unicode=True)
31 title = models.CharField(max_length=250)
32 text = models.TextField(blank=True)
33 status = models.CharField(max_length=6, choices=status.CHOICES, default=status.DRAFT)
34 currently_in_translation = models.BooleanField(default=False)
35 version = models.PositiveIntegerField(default=0)
36 minor_edit = models.BooleanField(default=False)
37 creator = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, on_delete=models.SET_NULL)
38 created_date = models.DateTimeField(default=timezone.now)
39 last_updated = models.DateTimeField(auto_now=True)
40
41 @property
42 def foreign_object(self):
43 return self.page
44
45 @property
46 def ancestor_path(self):
47 return '/'.join([
48 ancestor.get_first_translation([self.language.code]).slug
49 for ancestor in self.page.get_ancestors()
50 ])
51
52 @property
53 def permalink(self):
54 return '/'.join(filter(None, [
55 self.page.region.slug,
56 self.language.code,
57 self.ancestor_path,
58 self.slug
59 ]))
60
61 @property
62 def available_languages(self):
63 languages = self.page.languages
64 languages.remove(self.language)
65 available_languages = {}
66 for language in languages:
67 other_translation = self.page.get_public_translation(language.code)
68 if other_translation:
69 available_languages[language.code] = {
70 'id': other_translation.id,
71 'url': other_translation.permalink
72 }
73 return available_languages
74
75 @property
76 def source_translation(self):
77 source_language_tree_node = self.page.region.language_tree_nodes.get(language=self.language).parent
78 if source_language_tree_node:
79 return self.page.get_translation(source_language_tree_node.code)
80 return None
81
82 @property
83 def latest_public_revision(self):
84 return self.page.translations.filter(
85 language=self.language,
86 status=status.PUBLIC,
87 ).first()
88
89 @property
90 def latest_major_revision(self):
91 return self.page.translations.filter(
92 language=self.language,
93 minor_edit=False,
94 ).first()
95
96 @property
97 def latest_major_public_revision(self):
98 return self.page.translations.filter(
99 language=self.language,
100 status=status.PUBLIC,
101 minor_edit=False,
102 ).first()
103
104 @property
105 def previous_revision(self):
106 version = self.version - 1
107 return self.page.translations.filter(
108 language=self.language,
109 version=version,
110 ).first()
111
112 @property
113 def is_outdated(self):
114 source_translation = self.source_translation
115 # If self.language is the root language, this translation can never be outdated
116 if not source_translation:
117 return False
118 # If the source translation is outdated, this translation can not be up to date
119 if source_translation.is_outdated:
120 return True
121 self_revision = self.latest_major_public_revision
122 source_revision = source_translation.latest_major_public_revision
123 # If one of the translations has no major public revision, it cannot be outdated
124 if not self_revision or not source_revision:
125 return False
126 return self_revision.last_updated < source_revision.last_updated
127
128 @property
129 def combined_text(self):
130 """
131 Combines the text from the PageTranslation with the text from the mirrored page.
132 """
133 if self.page.mirrored_page_first:
134 return self.page.get_mirrored_text(self.language.code) + self.text
135 return self.text + self.page.get_mirrored_text(self.language.code)
136
137 @classmethod
138 def get_translations(cls, region, language):
139 return cls.objects.filter(page__region=region, language=language).distinct('page')
140
141 @classmethod
142 def get_outdated_translations(cls, region, language):
143 return [t for t in cls.objects.filter(page__region=region, language=language).distinct('page') if t.is_outdated]
144
145 @classmethod
146 def get_up_to_date_translations(cls, region, language):
147 return [t for t in cls.objects.filter(page__region=region, language=language).distinct('page') if not t.is_outdated]
148
149 def __str__(self):
150 if self.id:
151 return '(id: {}, page_id: {}, lang: {}, version: {}, slug: {})'.format(self.id, self.page.id, self.language.code, self.version, self.slug)
152 return super(PageTranslation, self).__str__()
153
154 class Meta:
155 ordering = ['page', '-version']
156 default_permissions = ()
157
[end of backend/cms/models/pages/page_translation.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/backend/cms/models/pages/page_translation.py b/backend/cms/models/pages/page_translation.py
--- a/backend/cms/models/pages/page_translation.py
+++ b/backend/cms/models/pages/page_translation.py
@@ -111,6 +111,9 @@
@property
def is_outdated(self):
+ # If the page translation is currently in translation, it is defined as not outdated
+ if self.currently_in_translation:
+ return False
source_translation = self.source_translation
# If self.language is the root language, this translation can never be outdated
if not source_translation:
@@ -125,6 +128,10 @@
return False
return self_revision.last_updated < source_revision.last_updated
+ @property
+ def is_up_to_date(self):
+ return not self.currently_in_translation and not self.is_outdated
+
@property
def combined_text(self):
"""
@@ -139,12 +146,16 @@
return cls.objects.filter(page__region=region, language=language).distinct('page')
@classmethod
- def get_outdated_translations(cls, region, language):
- return [t for t in cls.objects.filter(page__region=region, language=language).distinct('page') if t.is_outdated]
+ def get_up_to_date_translations(cls, region, language):
+ return [t for t in cls.objects.filter(page__region=region, language=language).distinct('page') if t.is_up_to_date]
@classmethod
- def get_up_to_date_translations(cls, region, language):
- return [t for t in cls.objects.filter(page__region=region, language=language).distinct('page') if not t.is_outdated]
+ def get_current_translations(cls, region, language):
+ return [t for t in cls.objects.filter(page__region=region, language=language).distinct('page') if t.currently_in_translation]
+
+ @classmethod
+ def get_outdated_translations(cls, region, language):
+ return [t for t in cls.objects.filter(page__region=region, language=language).distinct('page') if t.is_outdated]
def __str__(self):
if self.id:
diff --git a/backend/cms/views/analytics/translation_coverage_view.py b/backend/cms/views/analytics/translation_coverage_view.py
--- a/backend/cms/views/analytics/translation_coverage_view.py
+++ b/backend/cms/views/analytics/translation_coverage_view.py
@@ -27,7 +27,8 @@
page_translations = PageTranslation.get_translations(region, language)
languages.append({
'translated_name': language.translated_name,
- 'num_page_translations_up_to_date': len([t for t in page_translations if not t.is_outdated]),
+ 'num_page_translations_up_to_date': len([t for t in page_translations if t.is_up_to_date]),
+ 'num_page_translations_currently_in_translation': len([t for t in page_translations if t.currently_in_translation]),
'num_page_translations_outdated': len([t for t in page_translations if t.is_outdated]),
'num_page_translations_missing': num_pages - page_translations.count()
})
| {"golden_diff": "diff --git a/backend/cms/models/pages/page_translation.py b/backend/cms/models/pages/page_translation.py\n--- a/backend/cms/models/pages/page_translation.py\n+++ b/backend/cms/models/pages/page_translation.py\n@@ -111,6 +111,9 @@\n \n @property\n def is_outdated(self):\n+ # If the page translation is currently in translation, it is defined as not outdated\n+ if self.currently_in_translation:\n+ return False\n source_translation = self.source_translation\n # If self.language is the root language, this translation can never be outdated\n if not source_translation:\n@@ -125,6 +128,10 @@\n return False\n return self_revision.last_updated < source_revision.last_updated\n \n+ @property\n+ def is_up_to_date(self):\n+ return not self.currently_in_translation and not self.is_outdated\n+\n @property\n def combined_text(self):\n \"\"\"\n@@ -139,12 +146,16 @@\n return cls.objects.filter(page__region=region, language=language).distinct('page')\n \n @classmethod\n- def get_outdated_translations(cls, region, language):\n- return [t for t in cls.objects.filter(page__region=region, language=language).distinct('page') if t.is_outdated]\n+ def get_up_to_date_translations(cls, region, language):\n+ return [t for t in cls.objects.filter(page__region=region, language=language).distinct('page') if t.is_up_to_date]\n \n @classmethod\n- def get_up_to_date_translations(cls, region, language):\n- return [t for t in cls.objects.filter(page__region=region, language=language).distinct('page') if not t.is_outdated]\n+ def get_current_translations(cls, region, language):\n+ return [t for t in cls.objects.filter(page__region=region, language=language).distinct('page') if t.currently_in_translation]\n+\n+ @classmethod\n+ def get_outdated_translations(cls, region, language):\n+ return [t for t in cls.objects.filter(page__region=region, language=language).distinct('page') if t.is_outdated]\n \n def __str__(self):\n if self.id:\ndiff --git a/backend/cms/views/analytics/translation_coverage_view.py b/backend/cms/views/analytics/translation_coverage_view.py\n--- a/backend/cms/views/analytics/translation_coverage_view.py\n+++ b/backend/cms/views/analytics/translation_coverage_view.py\n@@ -27,7 +27,8 @@\n page_translations = PageTranslation.get_translations(region, language)\n languages.append({\n 'translated_name': language.translated_name,\n- 'num_page_translations_up_to_date': len([t for t in page_translations if not t.is_outdated]),\n+ 'num_page_translations_up_to_date': len([t for t in page_translations if t.is_up_to_date]),\n+ 'num_page_translations_currently_in_translation': len([t for t in page_translations if t.currently_in_translation]),\n 'num_page_translations_outdated': len([t for t in page_translations if t.is_outdated]),\n 'num_page_translations_missing': num_pages - page_translations.count()\n })\n", "issue": "Add currently_in_translation to translation coverage\nThe translation coverage lacks the `currently_in_translation` field, which should also be added.\nAdd currently_in_translation to translation coverage\nThe translation coverage lacks the `currently_in_translation` field, which should also be added.\n", "before_files": [{"content": "\"\"\"Views related to the statistics module\"\"\"\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.decorators import method_decorator\nfrom django.views.generic import TemplateView\nfrom django.shortcuts import render\n\nfrom ...models import PageTranslation, Region\nfrom ...decorators import 
region_permission_required\n\n\n@method_decorator(login_required, name='dispatch')\n@method_decorator(region_permission_required, name='dispatch')\nclass TranslationCoverageView(TemplateView):\n \"\"\"\n Class to create the translation coverage statistic\n \"\"\"\n template_name = 'analytics/translation_coverage.html'\n base_context = {'current_menu_item': 'translation_coverage'}\n\n def get(self, request, *args, **kwargs):\n\n region = Region.get_current_region(request)\n num_pages = region.pages.count()\n languages = []\n\n for language in region.languages:\n page_translations = PageTranslation.get_translations(region, language)\n languages.append({\n 'translated_name': language.translated_name,\n 'num_page_translations_up_to_date': len([t for t in page_translations if not t.is_outdated]),\n 'num_page_translations_outdated': len([t for t in page_translations if t.is_outdated]),\n 'num_page_translations_missing': num_pages - page_translations.count()\n })\n\n return render(\n request,\n self.template_name,\n {\n **self.base_context,\n 'languages': languages\n }\n )\n", "path": "backend/cms/views/analytics/translation_coverage_view.py"}, {"content": "\"\"\"Models representing a page and page translation with content\n\"\"\"\nimport logging\n\nfrom django.conf import settings\nfrom django.db import models\nfrom django.utils import timezone\n\nfrom .page import Page\nfrom ..languages.language import Language\nfrom ...constants import status\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass PageTranslation(models.Model):\n \"\"\"Class defining a Translation of a Page\n\n Args:\n models : Class inherit of django-Models\n \"\"\"\n\n page = models.ForeignKey(Page, related_name='translations', on_delete=models.CASCADE)\n language = models.ForeignKey(\n Language,\n related_name='page_translations',\n on_delete=models.CASCADE\n )\n slug = models.SlugField(max_length=200, blank=True, allow_unicode=True)\n title = models.CharField(max_length=250)\n text = models.TextField(blank=True)\n status = models.CharField(max_length=6, choices=status.CHOICES, default=status.DRAFT)\n currently_in_translation = models.BooleanField(default=False)\n version = models.PositiveIntegerField(default=0)\n minor_edit = models.BooleanField(default=False)\n creator = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, on_delete=models.SET_NULL)\n created_date = models.DateTimeField(default=timezone.now)\n last_updated = models.DateTimeField(auto_now=True)\n\n @property\n def foreign_object(self):\n return self.page\n\n @property\n def ancestor_path(self):\n return '/'.join([\n ancestor.get_first_translation([self.language.code]).slug\n for ancestor in self.page.get_ancestors()\n ])\n\n @property\n def permalink(self):\n return '/'.join(filter(None, [\n self.page.region.slug,\n self.language.code,\n self.ancestor_path,\n self.slug\n ]))\n\n @property\n def available_languages(self):\n languages = self.page.languages\n languages.remove(self.language)\n available_languages = {}\n for language in languages:\n other_translation = self.page.get_public_translation(language.code)\n if other_translation:\n available_languages[language.code] = {\n 'id': other_translation.id,\n 'url': other_translation.permalink\n }\n return available_languages\n\n @property\n def source_translation(self):\n source_language_tree_node = self.page.region.language_tree_nodes.get(language=self.language).parent\n if source_language_tree_node:\n return self.page.get_translation(source_language_tree_node.code)\n return None\n\n @property\n def 
latest_public_revision(self):\n return self.page.translations.filter(\n language=self.language,\n status=status.PUBLIC,\n ).first()\n\n @property\n def latest_major_revision(self):\n return self.page.translations.filter(\n language=self.language,\n minor_edit=False,\n ).first()\n\n @property\n def latest_major_public_revision(self):\n return self.page.translations.filter(\n language=self.language,\n status=status.PUBLIC,\n minor_edit=False,\n ).first()\n\n @property\n def previous_revision(self):\n version = self.version - 1\n return self.page.translations.filter(\n language=self.language,\n version=version,\n ).first()\n\n @property\n def is_outdated(self):\n source_translation = self.source_translation\n # If self.language is the root language, this translation can never be outdated\n if not source_translation:\n return False\n # If the source translation is outdated, this translation can not be up to date\n if source_translation.is_outdated:\n return True\n self_revision = self.latest_major_public_revision\n source_revision = source_translation.latest_major_public_revision\n # If one of the translations has no major public revision, it cannot be outdated\n if not self_revision or not source_revision:\n return False\n return self_revision.last_updated < source_revision.last_updated\n\n @property\n def combined_text(self):\n \"\"\"\n Combines the text from the PageTranslation with the text from the mirrored page.\n \"\"\"\n if self.page.mirrored_page_first:\n return self.page.get_mirrored_text(self.language.code) + self.text\n return self.text + self.page.get_mirrored_text(self.language.code)\n\n @classmethod\n def get_translations(cls, region, language):\n return cls.objects.filter(page__region=region, language=language).distinct('page')\n\n @classmethod\n def get_outdated_translations(cls, region, language):\n return [t for t in cls.objects.filter(page__region=region, language=language).distinct('page') if t.is_outdated]\n\n @classmethod\n def get_up_to_date_translations(cls, region, language):\n return [t for t in cls.objects.filter(page__region=region, language=language).distinct('page') if not t.is_outdated]\n\n def __str__(self):\n if self.id:\n return '(id: {}, page_id: {}, lang: {}, version: {}, slug: {})'.format(self.id, self.page.id, self.language.code, self.version, self.slug)\n return super(PageTranslation, self).__str__()\n\n class Meta:\n ordering = ['page', '-version']\n default_permissions = ()\n", "path": "backend/cms/models/pages/page_translation.py"}]} | 2,447 | 713 |
gh_patches_debug_42864 | rasdani/github-patches | git_diff | sunpy__sunpy-4129 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Maintain coherence between keycomments and the metadict
See #2748
This is probably best implemented by adding the functionality to our `MetaDict` object or something, so that we don't have to do it manually everywhere.
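
For example, the behaviour this asks for (values invented; today the stale comment would stick around):

```python
from sunpy.util.metadata import MetaDict

meta = MetaDict([('telescop', 'SDO/AIA'),
                 ('exptime', 2.9),
                 ('keycomments', {'TELESCOP': 'instrument name',
                                  'EXPTIME': 'exposure time [s]'})])

meta.pop('exptime')
# Desired: the orphaned 'EXPTIME' comment is pruned together with its key.
print(meta['keycomments'])  # {'TELESCOP': 'instrument name'}
```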
</issue>
<code>
[start of sunpy/util/metadata.py]
1 """
2 This module provides a generalized dictionary class that deals with header
3 parsing and normalization.
4 """
5 from collections import OrderedDict
6
7 __all__ = ['MetaDict']
8
9
10 class MetaDict(OrderedDict):
11 """
12 A class to hold metadata associated with a `sunpy.map.Map
13 <sunpy.map.map_factory.MapFactory.__call__>` derivative.
14
15 This class handles everything in lower case. This allows case
16 insensitive indexing.
17 """
18
19 def __init__(self, *args):
20 """
21 Creates a new MapHeader instance.
22 """
23 # Store all keys as upper-case to allow for case-insensitive indexing
24 # OrderedDict can be instantiated from a list of lists or a tuple of tuples
25 tags = dict()
26 if args:
27 args = list(args)
28 adict = args[0]
29 if isinstance(adict, list) or isinstance(adict, tuple):
30 tags = OrderedDict((k.upper(), v) for k, v in adict)
31 elif isinstance(adict, dict):
32 tags = OrderedDict((k.upper(), v) for k, v in adict.items())
33 else:
34 raise TypeError("Can not create a MetaDict from this type input")
35 args[0] = tags
36
37 super().__init__(*args)
38
39 def __contains__(self, key):
40 """
41 Override ``__contains__``.
42 """
43 return OrderedDict.__contains__(self, key.lower())
44
45 def __getitem__(self, key):
46 """
47 Override ``[]`` indexing.
48 """
49 return OrderedDict.__getitem__(self, key.lower())
50
51 def __setitem__(self, key, value):
52 """
53 Override ``[]`` indexing.
54 """
55 return OrderedDict.__setitem__(self, key.lower(), value)
56
57 def get(self, key, default=None):
58 """
59 Override ``.get()`` indexing.
60 """
61 return OrderedDict.get(self, key.lower(), default)
62
63 def has_key(self, key):
64 """
65 Override ``.has_key()`` to perform case-insensitively.
66 """
67 return key.lower() in self
68
69 def pop(self, key, default=None):
70 """
71 Override ``.pop()`` to perform case-insensitively.
72 """
73 return OrderedDict.pop(self, key.lower(), default)
74
75 def update(self, d2):
76 """
77 Override ``.update()`` to perform case-insensitively.
78 """
79 return OrderedDict.update(self, OrderedDict((k.lower(), v) for k, v in d2.items()))
80
81 def setdefault(self, key, default=None):
82 """
83 Override ``.setdefault()`` to perform case-insensitively.
84 """
85 return OrderedDict.setdefault(self, key.lower(), default)
86
[end of sunpy/util/metadata.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sunpy/util/metadata.py b/sunpy/util/metadata.py
--- a/sunpy/util/metadata.py
+++ b/sunpy/util/metadata.py
@@ -1,6 +1,6 @@
"""
This module provides a generalized dictionary class that deals with header
-parsing and normalization.
+parsing, normalization, and maintaining coherence between keys and keycomments.
"""
from collections import OrderedDict
@@ -14,28 +14,67 @@
This class handles everything in lower case. This allows case
insensitive indexing.
+
+ If the key 'keycomments' exists, its value must be a dictionary mapping
+ keys in the `MetaDict` to their comments. The casing of keys in the
+ keycomments dictionary is not significant. If a key is removed from the
+ `MetaDict`, it will also be removed from the keycomments dictionary.
+ Additionally, any extraneous keycomments will be removed when the
+ `MetaDict` is instantiated.
"""
def __init__(self, *args):
"""
- Creates a new MapHeader instance.
+ Creates a new MetaDict instance.
"""
- # Store all keys as upper-case to allow for case-insensitive indexing
+ # Store all keys as lower-case to allow for case-insensitive indexing
# OrderedDict can be instantiated from a list of lists or a tuple of tuples
tags = dict()
if args:
args = list(args)
adict = args[0]
if isinstance(adict, list) or isinstance(adict, tuple):
- tags = OrderedDict((k.upper(), v) for k, v in adict)
+ tags = OrderedDict((k.lower(), v) for k, v in adict)
elif isinstance(adict, dict):
- tags = OrderedDict((k.upper(), v) for k, v in adict.items())
+ tags = OrderedDict((k.lower(), v) for k, v in adict.items())
else:
raise TypeError("Can not create a MetaDict from this type input")
args[0] = tags
super().__init__(*args)
+ # Use `copy=True` to avoid mutating the caller's keycomments
+ # dictionary (if they provided one).
+ self._prune_keycomments(copy=True)
+
+ def _prune_keycomments(self, copy=False):
+ """
+ Remove keycomments for keys that are not contained in the MetaDict.
+
+ Parameters
+ ----------
+ copy : `bool`, optional
+ Make a copy of the current keycomments dict before removing keys.
+ """
+ if 'keycomments' not in self:
+ return
+
+ keycomments = self['keycomments']
+
+ if not isinstance(keycomments, dict):
+ raise TypeError(
+ "'keycomments' key must have a value of type `dict`. Found "
+ "the following type: %r" % type(keycomments))
+
+ if copy:
+ keycomments = keycomments.copy()
+
+ for key in list(keycomments.keys()):
+ if key not in self:
+ del keycomments[key]
+
+ self['keycomments'] = keycomments
+
def __contains__(self, key):
"""
Override ``__contains__``.
@@ -54,6 +93,15 @@
"""
return OrderedDict.__setitem__(self, key.lower(), value)
+ # Note: `OrderedDict.popitem()` does not need to be overridden to prune
+ # keycomments because it calls `__delitem__` internally.
+ def __delitem__(self, key):
+ """
+ Override ``del dict[key]`` key deletion.
+ """
+ OrderedDict.__delitem__(self, key.lower())
+ self._prune_keycomments()
+
def get(self, key, default=None):
"""
Override ``.get()`` indexing.
@@ -70,7 +118,11 @@
"""
Override ``.pop()`` to perform case-insensitively.
"""
- return OrderedDict.pop(self, key.lower(), default)
+ has_key = key in self
+ result = OrderedDict.pop(self, key.lower(), default)
+ if has_key:
+ self._prune_keycomments()
+ return result
def update(self, d2):
"""
| {"golden_diff": "diff --git a/sunpy/util/metadata.py b/sunpy/util/metadata.py\n--- a/sunpy/util/metadata.py\n+++ b/sunpy/util/metadata.py\n@@ -1,6 +1,6 @@\n \"\"\"\n This module provides a generalized dictionary class that deals with header\n-parsing and normalization.\n+parsing, normalization, and maintaining coherence between keys and keycomments.\n \"\"\"\n from collections import OrderedDict\n \n@@ -14,28 +14,67 @@\n \n This class handles everything in lower case. This allows case\n insensitive indexing.\n+\n+ If the key 'keycomments' exists, its value must be a dictionary mapping\n+ keys in the `MetaDict` to their comments. The casing of keys in the\n+ keycomments dictionary is not significant. If a key is removed from the\n+ `MetaDict`, it will also be removed from the keycomments dictionary.\n+ Additionally, any extraneous keycomments will be removed when the\n+ `MetaDict` is instantiated.\n \"\"\"\n \n def __init__(self, *args):\n \"\"\"\n- Creates a new MapHeader instance.\n+ Creates a new MetaDict instance.\n \"\"\"\n- # Store all keys as upper-case to allow for case-insensitive indexing\n+ # Store all keys as lower-case to allow for case-insensitive indexing\n # OrderedDict can be instantiated from a list of lists or a tuple of tuples\n tags = dict()\n if args:\n args = list(args)\n adict = args[0]\n if isinstance(adict, list) or isinstance(adict, tuple):\n- tags = OrderedDict((k.upper(), v) for k, v in adict)\n+ tags = OrderedDict((k.lower(), v) for k, v in adict)\n elif isinstance(adict, dict):\n- tags = OrderedDict((k.upper(), v) for k, v in adict.items())\n+ tags = OrderedDict((k.lower(), v) for k, v in adict.items())\n else:\n raise TypeError(\"Can not create a MetaDict from this type input\")\n args[0] = tags\n \n super().__init__(*args)\n \n+ # Use `copy=True` to avoid mutating the caller's keycomments\n+ # dictionary (if they provided one).\n+ self._prune_keycomments(copy=True)\n+\n+ def _prune_keycomments(self, copy=False):\n+ \"\"\"\n+ Remove keycomments for keys that are not contained in the MetaDict.\n+\n+ Parameters\n+ ----------\n+ copy : `bool`, optional\n+ Make a copy of the current keycomments dict before removing keys.\n+ \"\"\"\n+ if 'keycomments' not in self:\n+ return\n+\n+ keycomments = self['keycomments']\n+\n+ if not isinstance(keycomments, dict):\n+ raise TypeError(\n+ \"'keycomments' key must have a value of type `dict`. 
Found \"\n+ \"the following type: %r\" % type(keycomments))\n+\n+ if copy:\n+ keycomments = keycomments.copy()\n+\n+ for key in list(keycomments.keys()):\n+ if key not in self:\n+ del keycomments[key]\n+\n+ self['keycomments'] = keycomments\n+\n def __contains__(self, key):\n \"\"\"\n Override ``__contains__``.\n@@ -54,6 +93,15 @@\n \"\"\"\n return OrderedDict.__setitem__(self, key.lower(), value)\n \n+ # Note: `OrderedDict.popitem()` does not need to be overridden to prune\n+ # keycomments because it calls `__delitem__` internally.\n+ def __delitem__(self, key):\n+ \"\"\"\n+ Override ``del dict[key]`` key deletion.\n+ \"\"\"\n+ OrderedDict.__delitem__(self, key.lower())\n+ self._prune_keycomments()\n+\n def get(self, key, default=None):\n \"\"\"\n Override ``.get()`` indexing.\n@@ -70,7 +118,11 @@\n \"\"\"\n Override ``.pop()`` to perform case-insensitively.\n \"\"\"\n- return OrderedDict.pop(self, key.lower(), default)\n+ has_key = key in self\n+ result = OrderedDict.pop(self, key.lower(), default)\n+ if has_key:\n+ self._prune_keycomments()\n+ return result\n \n def update(self, d2):\n \"\"\"\n", "issue": "Maintain coherence between keycomments and the metadict\nSee #2748 \r\n\r\nThis is probably best implemented by adding the functionality to our `MetaDict` object or something, so that we don't have to do it manually everywhere.\n", "before_files": [{"content": "\"\"\"\nThis module provides a generalized dictionary class that deals with header\nparsing and normalization.\n\"\"\"\nfrom collections import OrderedDict\n\n__all__ = ['MetaDict']\n\n\nclass MetaDict(OrderedDict):\n \"\"\"\n A class to hold metadata associated with a `sunpy.map.Map\n <sunpy.map.map_factory.MapFactory.__call__>` derivative.\n\n This class handles everything in lower case. 
This allows case\n insensitive indexing.\n \"\"\"\n\n def __init__(self, *args):\n \"\"\"\n Creates a new MapHeader instance.\n \"\"\"\n # Store all keys as upper-case to allow for case-insensitive indexing\n # OrderedDict can be instantiated from a list of lists or a tuple of tuples\n tags = dict()\n if args:\n args = list(args)\n adict = args[0]\n if isinstance(adict, list) or isinstance(adict, tuple):\n tags = OrderedDict((k.upper(), v) for k, v in adict)\n elif isinstance(adict, dict):\n tags = OrderedDict((k.upper(), v) for k, v in adict.items())\n else:\n raise TypeError(\"Can not create a MetaDict from this type input\")\n args[0] = tags\n\n super().__init__(*args)\n\n def __contains__(self, key):\n \"\"\"\n Override ``__contains__``.\n \"\"\"\n return OrderedDict.__contains__(self, key.lower())\n\n def __getitem__(self, key):\n \"\"\"\n Override ``[]`` indexing.\n \"\"\"\n return OrderedDict.__getitem__(self, key.lower())\n\n def __setitem__(self, key, value):\n \"\"\"\n Override ``[]`` indexing.\n \"\"\"\n return OrderedDict.__setitem__(self, key.lower(), value)\n\n def get(self, key, default=None):\n \"\"\"\n Override ``.get()`` indexing.\n \"\"\"\n return OrderedDict.get(self, key.lower(), default)\n\n def has_key(self, key):\n \"\"\"\n Override ``.has_key()`` to perform case-insensitively.\n \"\"\"\n return key.lower() in self\n\n def pop(self, key, default=None):\n \"\"\"\n Override ``.pop()`` to perform case-insensitively.\n \"\"\"\n return OrderedDict.pop(self, key.lower(), default)\n\n def update(self, d2):\n \"\"\"\n Override ``.update()`` to perform case-insensitively.\n \"\"\"\n return OrderedDict.update(self, OrderedDict((k.lower(), v) for k, v in d2.items()))\n\n def setdefault(self, key, default=None):\n \"\"\"\n Override ``.setdefault()`` to perform case-insensitively.\n \"\"\"\n return OrderedDict.setdefault(self, key.lower(), default)\n", "path": "sunpy/util/metadata.py"}]} | 1,325 | 951 |
gh_patches_debug_14578 | rasdani/github-patches | git_diff | vllm-project__vllm-2151 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
torch.distributed.all_reduce does not free memory
I've visualized the memory usage:
* llama 7B, TP=1
<img width="3346" alt="Screenshot 2023-12-16 at 11 14 03 PM" src="https://github.com/vllm-project/vllm/assets/46394894/e6ed7069-2190-4823-8f25-8e27bd94fe35">
The activation memory is reused after every layer.
* llama-70B, TP=8
<img width="3247" alt="Screenshot 2023-12-16 at 11 20 10 PM" src="https://github.com/vllm-project/vllm/assets/46394894/b5f492bb-7262-4c06-a040-7796e0f7fc06">
**However, when using TP, the activation memory for all reduce is not reused**
_Originally posted by @WoosukKwon in https://github.com/vllm-project/vllm/pull/2031#discussion_r1429046645_
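
A minimal workaround sketch (assumes a PyTorch build that honours this NCCL allocator knob, and that it is set before the process group is created):

```python
import os

# Tell NCCL collectives not to record their input tensors on the
# communication stream; otherwise the caching allocator keeps every
# all_reduce input alive until the next synchronization point, so the
# activation memory is never reused under tensor parallelism.
os.environ["TORCH_NCCL_AVOID_RECORD_STREAMS"] = "1"
```

This is the same switch the fix below flips at the top of `Worker.init_model`.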
</issue>
<code>
[start of vllm/worker/worker.py]
1 """A GPU worker class."""
2 import os
3 from typing import Dict, List, Optional, Tuple
4
5 import torch
6 import torch.distributed
7
8 from vllm.config import (CacheConfig, ModelConfig, ParallelConfig,
9 SchedulerConfig)
10 from vllm.model_executor import set_random_seed
11 from vllm.model_executor.parallel_utils import cupy_utils
12 from vllm.model_executor.parallel_utils.parallel_state import (
13 initialize_model_parallel)
14 from vllm.sequence import SamplerOutput, SequenceGroupMetadata
15 from vllm.worker.cache_engine import CacheEngine
16 from vllm.worker.model_runner import ModelRunner
17
18
19 class Worker:
20 """A worker class that executes (a partition of) the model on a GPU.
21
22 Each worker is associated with a single GPU. The worker is responsible for
23 maintaining the KV cache and executing the model on the GPU. In case of
24 distributed inference, each worker is assigned a partition of the model.
25 """
26
27 def __init__(
28 self,
29 model_config: ModelConfig,
30 parallel_config: ParallelConfig,
31 scheduler_config: SchedulerConfig,
32 rank: Optional[int] = None,
33 distributed_init_method: Optional[str] = None,
34 ) -> None:
35 self.model_config = model_config
36 self.parallel_config = parallel_config
37 self.scheduler_config = scheduler_config
38 self.rank = rank
39 self.distributed_init_method = distributed_init_method
40
41 self.model_runner = ModelRunner(model_config, parallel_config,
42 scheduler_config)
43 # Uninitialized cache engine. Will be initialized by
44 # self.init_cache_engine().
45 self.cache_config = None
46 self.cache_engine = None
47 self.cache_events = None
48 self.gpu_cache = None
49
50 def init_model(self, cupy_port: Optional[int] = None):
51 # This env var set by Ray causes exceptions with graph building.
52 os.environ.pop("NCCL_ASYNC_ERROR_HANDLING", None)
53 # Env vars will be set by Ray.
54 self.rank = self.rank if self.rank is not None else int(
55 os.getenv("RANK", "-1"))
56 local_rank = int(os.getenv("LOCAL_RANK", "0"))
57 self.device = torch.device(f"cuda:{local_rank}")
58 if self.rank < 0:
59 raise ValueError("Invalid or unspecified rank.")
60 torch.cuda.set_device(self.device)
61
62 _check_if_gpu_supports_dtype(self.model_config.dtype)
63
64 # Initialize the distributed environment.
65 _init_distributed_environment(self.parallel_config, self.rank,
66 cupy_port, self.distributed_init_method)
67
68 # Initialize the model.
69 set_random_seed(self.model_config.seed)
70
71 def load_model(self):
72 self.model_runner.load_model()
73
74 @torch.inference_mode()
75 def profile_num_available_blocks(
76 self,
77 block_size: int,
78 gpu_memory_utilization: float,
79 cpu_swap_space: int,
80 ) -> Tuple[int, int]:
81 # Profile the memory usage of the model and get the maximum number of
82 # cache blocks that can be allocated with the remaining free memory.
83 torch.cuda.empty_cache()
84
85 # Execute a forward pass with dummy inputs to profile the memory usage
86 # of the model.
87 self.model_runner.profile_run()
88
89 # Calculate the number of blocks that can be allocated with the
90 # profiled peak memory.
91 torch.cuda.synchronize()
92 free_gpu_memory, total_gpu_memory = torch.cuda.mem_get_info()
93 peak_memory = total_gpu_memory - free_gpu_memory
94
95 cache_block_size = CacheEngine.get_cache_block_size(
96 block_size, self.model_config, self.parallel_config)
97 num_gpu_blocks = int(
98 (total_gpu_memory * gpu_memory_utilization - peak_memory) //
99 cache_block_size)
100 num_cpu_blocks = int(cpu_swap_space // cache_block_size)
101 num_gpu_blocks = max(num_gpu_blocks, 0)
102 num_cpu_blocks = max(num_cpu_blocks, 0)
103 torch.cuda.empty_cache()
104 return num_gpu_blocks, num_cpu_blocks
105
106 def init_cache_engine(self, cache_config: CacheConfig) -> None:
107 self.cache_config = cache_config
108 self.cache_engine = CacheEngine(self.cache_config, self.model_config,
109 self.parallel_config)
110 self.cache_events = self.cache_engine.events
111 self.gpu_cache = self.cache_engine.gpu_cache
112 self.model_runner.set_block_size(self.cache_engine.block_size)
113
114 def warm_up_model(self) -> None:
115 if not self.model_config.enforce_eager:
116 self.model_runner.capture_model(self.gpu_cache)
117 # Reset the seed to ensure that the random state is not affected by
118 # the model initialization and profiling.
119 set_random_seed(self.model_config.seed)
120
121 @torch.inference_mode()
122 def execute_model(
123 self,
124 seq_group_metadata_list: List[SequenceGroupMetadata],
125 blocks_to_swap_in: Dict[int, int],
126 blocks_to_swap_out: Dict[int, int],
127 blocks_to_copy: Dict[int, List[int]],
128 ) -> SamplerOutput:
129 # Issue cache operations.
130 issued_cache_op = False
131 if blocks_to_swap_in:
132 self.cache_engine.swap_in(blocks_to_swap_in)
133 issued_cache_op = True
134 if blocks_to_swap_out:
135 self.cache_engine.swap_out(blocks_to_swap_out)
136 issued_cache_op = True
137 if blocks_to_copy:
138 self.cache_engine.copy(blocks_to_copy)
139 issued_cache_op = True
140
141 cache_events = self.cache_events if issued_cache_op else None
142
143 # Wait for cache operations to finish.
144 # TODO(woosuk): Profile swapping overhead and optimize if needed.
145 if cache_events is not None:
146 for event in cache_events:
147 event.wait()
148 # If there is no input, we don't need to execute the model.
149 if not seq_group_metadata_list:
150 return {}
151
152 output = self.model_runner.execute_model(seq_group_metadata_list,
153 self.gpu_cache)
154 return output
155
156
157 def _init_distributed_environment(
158 parallel_config: ParallelConfig,
159 rank: int,
160 cupy_port: Optional[int],
161 distributed_init_method: Optional[str] = None,
162 ) -> None:
163 """Initialize the distributed environment."""
164 if torch.distributed.is_initialized():
165 torch_world_size = torch.distributed.get_world_size()
166 if torch_world_size != parallel_config.world_size:
167 raise RuntimeError(
168 "torch.distributed is already initialized but the torch world "
169 "size does not match parallel_config.world_size "
170 f"({torch_world_size} vs. {parallel_config.world_size}).")
171 elif not distributed_init_method:
172 raise ValueError(
173 "distributed_init_method must be set if torch.distributed "
174 "is not already initialized")
175 else:
176 torch.distributed.init_process_group(
177 backend="nccl",
178 world_size=parallel_config.world_size,
179 rank=rank,
180 init_method=distributed_init_method,
181 )
182
183 if cupy_utils.is_initialized():
184 cupy_world_size = cupy_utils.get_world_size()
185 if cupy_world_size != parallel_config.world_size:
186 raise RuntimeError(
187 "cupy.distributed is already initialized but the cupy world "
188 "size does not match parallel_config.world_size "
189 f"({cupy_world_size} vs. {parallel_config.world_size}).")
190 elif parallel_config.world_size > 1:
191 # NOTE(woosuk): We don't initialize CuPy process group when world size
192 # is 1.
193 # TODO(woosuk): Support multi-node connection.
194 cupy_utils.init_process_group(
195 world_size=parallel_config.world_size,
196 rank=rank,
197 host="localhost",
198 port=cupy_port,
199 )
200
201 if parallel_config.world_size > 1:
202 # A small all_reduce for warmup.
203 torch.distributed.all_reduce(torch.zeros(1).cuda())
204 cupy_utils.all_reduce(torch.zeros(1).cuda())
205
206 initialize_model_parallel(parallel_config.tensor_parallel_size,
207 parallel_config.pipeline_parallel_size)
208
209
210 def _check_if_gpu_supports_dtype(torch_dtype: torch.dtype):
211 # Check if the GPU supports the dtype.
212 if torch_dtype == torch.bfloat16:
213 compute_capability = torch.cuda.get_device_capability()
214 if compute_capability[0] < 8:
215 gpu_name = torch.cuda.get_device_name()
216 raise ValueError(
217 "Bfloat16 is only supported on GPUs with compute capability "
218 f"of at least 8.0. Your {gpu_name} GPU has compute capability "
219 f"{compute_capability[0]}.{compute_capability[1]}.")
220
[end of vllm/worker/worker.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/vllm/worker/worker.py b/vllm/worker/worker.py
--- a/vllm/worker/worker.py
+++ b/vllm/worker/worker.py
@@ -48,6 +48,14 @@
self.gpu_cache = None
def init_model(self, cupy_port: Optional[int] = None):
+ # torch.distributed.all_reduce does not free the input tensor until
+ # the synchronization point. This causes the memory usage to grow
+ # as the number of all_reduce calls increases. This env var disables
+ # this behavior.
+ # Related issue:
+ # https://discuss.pytorch.org/t/cuda-allocation-lifetime-for-inputs-to-distributed-all-reduce/191573
+ os.environ["TORCH_NCCL_AVOID_RECORD_STREAMS"] = "1"
+
# This env var set by Ray causes exceptions with graph building.
os.environ.pop("NCCL_ASYNC_ERROR_HANDLING", None)
# Env vars will be set by Ray.
| {"golden_diff": "diff --git a/vllm/worker/worker.py b/vllm/worker/worker.py\n--- a/vllm/worker/worker.py\n+++ b/vllm/worker/worker.py\n@@ -48,6 +48,14 @@\n self.gpu_cache = None\n \n def init_model(self, cupy_port: Optional[int] = None):\n+ # torch.distributed.all_reduce does not free the input tensor until\n+ # the synchronization point. This causes the memory usage to grow\n+ # as the number of all_reduce calls increases. This env var disables\n+ # this behavior.\n+ # Related issue:\n+ # https://discuss.pytorch.org/t/cuda-allocation-lifetime-for-inputs-to-distributed-all-reduce/191573\n+ os.environ[\"TORCH_NCCL_AVOID_RECORD_STREAMS\"] = \"1\"\n+\n # This env var set by Ray causes exceptions with graph building.\n os.environ.pop(\"NCCL_ASYNC_ERROR_HANDLING\", None)\n # Env vars will be set by Ray.\n", "issue": "torch.distributed.all_reduce does not free memory\n I've visualized the memory usage:\r\n\r\n* llama 7B, TP=1\r\n<img width=\"3346\" alt=\"Screenshot 2023-12-16 at 11 14 03\u202fPM\" src=\"https://github.com/vllm-project/vllm/assets/46394894/e6ed7069-2190-4823-8f25-8e27bd94fe35\">\r\n\r\nThe activation memory is reused after every layer.\r\n\r\n* llama-70B, TP=8\r\n<img width=\"3247\" alt=\"Screenshot 2023-12-16 at 11 20 10\u202fPM\" src=\"https://github.com/vllm-project/vllm/assets/46394894/b5f492bb-7262-4c06-a040-7796e0f7fc06\">\r\n\r\n**However, when using TP, the activation memory for all reduce is not reused**\r\n\r\n_Originally posted by @WoosukKwon in https://github.com/vllm-project/vllm/pull/2031#discussion_r1429046645_\r\n \n", "before_files": [{"content": "\"\"\"A GPU worker class.\"\"\"\nimport os\nfrom typing import Dict, List, Optional, Tuple\n\nimport torch\nimport torch.distributed\n\nfrom vllm.config import (CacheConfig, ModelConfig, ParallelConfig,\n SchedulerConfig)\nfrom vllm.model_executor import set_random_seed\nfrom vllm.model_executor.parallel_utils import cupy_utils\nfrom vllm.model_executor.parallel_utils.parallel_state import (\n initialize_model_parallel)\nfrom vllm.sequence import SamplerOutput, SequenceGroupMetadata\nfrom vllm.worker.cache_engine import CacheEngine\nfrom vllm.worker.model_runner import ModelRunner\n\n\nclass Worker:\n \"\"\"A worker class that executes (a partition of) the model on a GPU.\n\n Each worker is associated with a single GPU. The worker is responsible for\n maintaining the KV cache and executing the model on the GPU. In case of\n distributed inference, each worker is assigned a partition of the model.\n \"\"\"\n\n def __init__(\n self,\n model_config: ModelConfig,\n parallel_config: ParallelConfig,\n scheduler_config: SchedulerConfig,\n rank: Optional[int] = None,\n distributed_init_method: Optional[str] = None,\n ) -> None:\n self.model_config = model_config\n self.parallel_config = parallel_config\n self.scheduler_config = scheduler_config\n self.rank = rank\n self.distributed_init_method = distributed_init_method\n\n self.model_runner = ModelRunner(model_config, parallel_config,\n scheduler_config)\n # Uninitialized cache engine. 
Will be initialized by\n # self.init_cache_engine().\n self.cache_config = None\n self.cache_engine = None\n self.cache_events = None\n self.gpu_cache = None\n\n def init_model(self, cupy_port: Optional[int] = None):\n # This env var set by Ray causes exceptions with graph building.\n os.environ.pop(\"NCCL_ASYNC_ERROR_HANDLING\", None)\n # Env vars will be set by Ray.\n self.rank = self.rank if self.rank is not None else int(\n os.getenv(\"RANK\", \"-1\"))\n local_rank = int(os.getenv(\"LOCAL_RANK\", \"0\"))\n self.device = torch.device(f\"cuda:{local_rank}\")\n if self.rank < 0:\n raise ValueError(\"Invalid or unspecified rank.\")\n torch.cuda.set_device(self.device)\n\n _check_if_gpu_supports_dtype(self.model_config.dtype)\n\n # Initialize the distributed environment.\n _init_distributed_environment(self.parallel_config, self.rank,\n cupy_port, self.distributed_init_method)\n\n # Initialize the model.\n set_random_seed(self.model_config.seed)\n\n def load_model(self):\n self.model_runner.load_model()\n\n @torch.inference_mode()\n def profile_num_available_blocks(\n self,\n block_size: int,\n gpu_memory_utilization: float,\n cpu_swap_space: int,\n ) -> Tuple[int, int]:\n # Profile the memory usage of the model and get the maximum number of\n # cache blocks that can be allocated with the remaining free memory.\n torch.cuda.empty_cache()\n\n # Execute a forward pass with dummy inputs to profile the memory usage\n # of the model.\n self.model_runner.profile_run()\n\n # Calculate the number of blocks that can be allocated with the\n # profiled peak memory.\n torch.cuda.synchronize()\n free_gpu_memory, total_gpu_memory = torch.cuda.mem_get_info()\n peak_memory = total_gpu_memory - free_gpu_memory\n\n cache_block_size = CacheEngine.get_cache_block_size(\n block_size, self.model_config, self.parallel_config)\n num_gpu_blocks = int(\n (total_gpu_memory * gpu_memory_utilization - peak_memory) //\n cache_block_size)\n num_cpu_blocks = int(cpu_swap_space // cache_block_size)\n num_gpu_blocks = max(num_gpu_blocks, 0)\n num_cpu_blocks = max(num_cpu_blocks, 0)\n torch.cuda.empty_cache()\n return num_gpu_blocks, num_cpu_blocks\n\n def init_cache_engine(self, cache_config: CacheConfig) -> None:\n self.cache_config = cache_config\n self.cache_engine = CacheEngine(self.cache_config, self.model_config,\n self.parallel_config)\n self.cache_events = self.cache_engine.events\n self.gpu_cache = self.cache_engine.gpu_cache\n self.model_runner.set_block_size(self.cache_engine.block_size)\n\n def warm_up_model(self) -> None:\n if not self.model_config.enforce_eager:\n self.model_runner.capture_model(self.gpu_cache)\n # Reset the seed to ensure that the random state is not affected by\n # the model initialization and profiling.\n set_random_seed(self.model_config.seed)\n\n @torch.inference_mode()\n def execute_model(\n self,\n seq_group_metadata_list: List[SequenceGroupMetadata],\n blocks_to_swap_in: Dict[int, int],\n blocks_to_swap_out: Dict[int, int],\n blocks_to_copy: Dict[int, List[int]],\n ) -> SamplerOutput:\n # Issue cache operations.\n issued_cache_op = False\n if blocks_to_swap_in:\n self.cache_engine.swap_in(blocks_to_swap_in)\n issued_cache_op = True\n if blocks_to_swap_out:\n self.cache_engine.swap_out(blocks_to_swap_out)\n issued_cache_op = True\n if blocks_to_copy:\n self.cache_engine.copy(blocks_to_copy)\n issued_cache_op = True\n\n cache_events = self.cache_events if issued_cache_op else None\n\n # Wait for cache operations to finish.\n # TODO(woosuk): Profile swapping overhead and optimize if 
needed.\n if cache_events is not None:\n for event in cache_events:\n event.wait()\n # If there is no input, we don't need to execute the model.\n if not seq_group_metadata_list:\n return {}\n\n output = self.model_runner.execute_model(seq_group_metadata_list,\n self.gpu_cache)\n return output\n\n\ndef _init_distributed_environment(\n parallel_config: ParallelConfig,\n rank: int,\n cupy_port: Optional[int],\n distributed_init_method: Optional[str] = None,\n) -> None:\n \"\"\"Initialize the distributed environment.\"\"\"\n if torch.distributed.is_initialized():\n torch_world_size = torch.distributed.get_world_size()\n if torch_world_size != parallel_config.world_size:\n raise RuntimeError(\n \"torch.distributed is already initialized but the torch world \"\n \"size does not match parallel_config.world_size \"\n f\"({torch_world_size} vs. {parallel_config.world_size}).\")\n elif not distributed_init_method:\n raise ValueError(\n \"distributed_init_method must be set if torch.distributed \"\n \"is not already initialized\")\n else:\n torch.distributed.init_process_group(\n backend=\"nccl\",\n world_size=parallel_config.world_size,\n rank=rank,\n init_method=distributed_init_method,\n )\n\n if cupy_utils.is_initialized():\n cupy_world_size = cupy_utils.get_world_size()\n if cupy_world_size != parallel_config.world_size:\n raise RuntimeError(\n \"cupy.distributed is already initialized but the cupy world \"\n \"size does not match parallel_config.world_size \"\n f\"({cupy_world_size} vs. {parallel_config.world_size}).\")\n elif parallel_config.world_size > 1:\n # NOTE(woosuk): We don't initialize CuPy process group when world size\n # is 1.\n # TODO(woosuk): Support multi-node connection.\n cupy_utils.init_process_group(\n world_size=parallel_config.world_size,\n rank=rank,\n host=\"localhost\",\n port=cupy_port,\n )\n\n if parallel_config.world_size > 1:\n # A small all_reduce for warmup.\n torch.distributed.all_reduce(torch.zeros(1).cuda())\n cupy_utils.all_reduce(torch.zeros(1).cuda())\n\n initialize_model_parallel(parallel_config.tensor_parallel_size,\n parallel_config.pipeline_parallel_size)\n\n\ndef _check_if_gpu_supports_dtype(torch_dtype: torch.dtype):\n # Check if the GPU supports the dtype.\n if torch_dtype == torch.bfloat16:\n compute_capability = torch.cuda.get_device_capability()\n if compute_capability[0] < 8:\n gpu_name = torch.cuda.get_device_name()\n raise ValueError(\n \"Bfloat16 is only supported on GPUs with compute capability \"\n f\"of at least 8.0. Your {gpu_name} GPU has compute capability \"\n f\"{compute_capability[0]}.{compute_capability[1]}.\")\n", "path": "vllm/worker/worker.py"}]} | 3,194 | 232 |
gh_patches_debug_31866 | rasdani/github-patches | git_diff | intel__dffml-566 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
feature: add a load function in high level
Add a load function in `high_level.py` so the data from sources can be loaded.
After completing this, #555 can be worked on.
</issue>
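A hedged usage sketch of what the requested helper could look like from the caller's side, mirroring the `save` examples in the file below. The `load` name, its signature, and the import paths are assumptions taken from the feature request, not an existing API.

```python
import asyncio

from dffml.high_level import load          # assumed location, per the request
from dffml.source.csv import CSVSource     # path assumed from the sources used in the docstrings below

async def main():
    source = CSVSource(filename="save.csv", allowempty=True, readwrite=True)
    # All records held by the source
    async for record in load(source):
        print(record.export())
    # A specific record, if load also accepts record keys
    async for record in load(source, "myrecord"):
        print(record.export())

asyncio.run(main())
```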
<code>
[start of dffml/high_level.py]
1 """
2 High level abstraction interfaces to DFFML. These are probably going to be used
3 in a lot of quick and dirty python files.
4 """
5 import pathlib
6 from typing import Union, Dict, Any
7
8 from .record import Record
9 from .source.source import Sources, BaseSource
10 from .source.memory import MemorySource, MemorySourceConfig
11
12
13 def _records_to_sources(*args):
14 """
15 Create a memory source out of any records passed as a variable length list.
16 Add all sources found in the variable length list to a list of sources, and
17 the created source containing records, and return that list of sources.
18 """
19 # If the first arg is an instance of sources, append the rest to that.
20 if args and isinstance(args[0], Sources):
21 sources = args[0]
22 else:
23 sources = Sources(
24 *[arg for arg in args if isinstance(arg, BaseSource)]
25 )
26 # Records to add to memory source
27 records = []
28 # Make args mutable
29 args = list(args)
30 # Convert dicts to records
31 for i, arg in enumerate(args):
32 if isinstance(arg, dict):
33 arg = Record(i, data={"features": arg})
34 if isinstance(arg, Record):
35 records.append(arg)
36 if isinstance(arg, str) and "." in arg:
37 filepath = pathlib.Path(arg)
38 source = BaseSource.load(filepath.suffix.replace(".", ""))
39 sources.append(source(filename=arg))
40 # Create memory source if there are any records
41 if records:
42 sources.append(MemorySource(MemorySourceConfig(records=records)))
43 return sources
44
45
46 async def save(source: BaseSource, *args: Record) -> None:
47 """
48 Update a source's knowledge about given records.
49
50 For each record given, call
51 :py:func:`update <dffml.source.source.BaseSourceContext.update>` on the
52 source. Effectively saving all the records to the source.
53
54 Parameters
55 ----------
56 source : BaseSource
57 Data source to use. See :doc:`/plugins/dffml_source` for sources and
58 options.
59 *args : list
60 Records to be saved.
61
62 Examples
63 --------
64
65 >>> source = CSVSource(filename="save.csv", allowempty=True, readwrite=True)
66 >>>
67 >>> async def main():
68 ... await save(
69 ... source,
70 ... Record(
71 ... "myrecord",
72 ... data={
73 ... "features": {"Years": 0, "Expertise": 1, "Trust": 0.1},
74 ... "prediction": {"Salary": {"value": 10, "confidence": 1.0}},
75 ... }
76 ... )
77 ... )
78 ... print(pathlib.Path("save.csv").read_text().strip())
79 >>>
80 >>> asyncio.run(main())
81 key,tag,Expertise,Trust,Years,prediction_Salary,confidence_Salary
82 myrecord,untagged,1,0.1,0,10,1.0
83 """
84 async with source:
85 async with source() as sctx:
86 for record in args:
87 await sctx.update(record)
88
89
90 async def train(model, *args: Union[BaseSource, Record, Dict[str, Any]]):
91 """
92 Train a machine learning model.
93
94 Provide records to the model to train it. The model should be already
95 instantiated.
96
97 Parameters
98 ----------
99 model : Model
100 Machine Learning model to use. See :doc:`/plugins/dffml_model` for
101 models options.
102 *args : list
103 Input data for training. Could be a ``dict``, :py:class:`Record`,
104 filename, one of the data :doc:`/plugins/dffml_source`, or a filename
105 with the extension being one of the data sources.
106
107 Examples
108 --------
109
110 >>> model = LinearRegressionModel(
111 ... features=Features(
112 ... DefFeature("Years", int, 1),
113 ... DefFeature("Expertise", int, 1),
114 ... DefFeature("Trust", float, 1),
115 ... ),
116 ... predict=DefFeature("Salary", int, 1),
117 ... )
118 >>>
119 >>> async def main():
120 ... await train(
121 ... model,
122 ... {"Years": 0, "Expertise": 1, "Trust": 0.1, "Salary": 10},
123 ... {"Years": 1, "Expertise": 3, "Trust": 0.2, "Salary": 20},
124 ... {"Years": 2, "Expertise": 5, "Trust": 0.3, "Salary": 30},
125 ... {"Years": 3, "Expertise": 7, "Trust": 0.4, "Salary": 40},
126 ... )
127 >>>
128 >>> asyncio.run(main())
129 """
130 sources = _records_to_sources(*args)
131 async with sources as sources, model as model:
132 async with sources() as sctx, model() as mctx:
133 return await mctx.train(sctx)
134
135
136 async def accuracy(
137 model, *args: Union[BaseSource, Record, Dict[str, Any]]
138 ) -> float:
139 """
140 Assess the accuracy of a machine learning model.
141
142 Provide records to the model to assess the percent accuracy of its
143 prediction abilities. The model should be already instantiated and trained.
144
145 Parameters
146 ----------
147 model : Model
148 Machine Learning model to use. See :doc:`/plugins/dffml_model` for
149 models options.
150 *args : list
151 Input data for training. Could be a ``dict``, :py:class:`Record`,
152 filename, one of the data :doc:`/plugins/dffml_source`, or a filename
153 with the extension being one of the data sources.
154
155 Returns
156 -------
157 float
158 A decimal value representing the percent of the time the model made the
159 correct prediction. For some models this has another meaning. Please see
160 the documentation for the model your using for further details.
161
162 Examples
163 --------
164
165 >>> model = LinearRegressionModel(
166 ... features=Features(
167 ... DefFeature("Years", int, 1),
168 ... DefFeature("Expertise", int, 1),
169 ... DefFeature("Trust", float, 1),
170 ... ),
171 ... predict=DefFeature("Salary", int, 1),
172 ... )
173 >>>
174 >>> async def main():
175 ... print(
176 ... "Accuracy:",
177 ... await accuracy(
178 ... model,
179 ... {"Years": 4, "Expertise": 9, "Trust": 0.5, "Salary": 50},
180 ... {"Years": 5, "Expertise": 11, "Trust": 0.6, "Salary": 60},
181 ... ),
182 ... )
183 >>>
184 >>> asyncio.run(main())
185 Accuracy: 1.0
186 """
187 sources = _records_to_sources(*args)
188 async with sources as sources, model as model:
189 async with sources() as sctx, model() as mctx:
190 return float(await mctx.accuracy(sctx))
191
192
193 async def predict(
194 model,
195 *args: Union[BaseSource, Record, Dict[str, Any]],
196 update: bool = False,
197 keep_record: bool = False,
198 ):
199 """
200 Make a prediction using a machine learning model.
201
202 The model must be trained before using it to make a prediction.
203
204 Parameters
205 ----------
206 model : Model
207 Machine Learning model to use. See :doc:`/plugins/dffml_model` for
208 models options.
209 *args : list
210 Input data for prediction. Could be a ``dict``, :py:class:`Record`,
211 filename, or one of the data :doc:`/plugins/dffml_source`.
212 update : boolean, optional
213 If ``True`` prediction data within records will be written back to all
214 sources given. Defaults to ``False``.
215 keep_record : boolean, optional
216 If ``True`` the results will be kept as their ``Record`` objects instead
217 of being converted to a ``(record.key, features, predictions)`` tuple.
218 Defaults to ``False``.
219
220 Returns
221 -------
222 asynciterator
223 ``Record`` objects or ``(record.key, features, predictions)`` tuple.
224
225 Examples
226 --------
227
228 >>> model = LinearRegressionModel(
229 ... features=Features(
230 ... DefFeature("Years", int, 1),
231 ... DefFeature("Expertise", int, 1),
232 ... DefFeature("Trust", float, 1),
233 ... ),
234 ... predict=DefFeature("Salary", int, 1),
235 ... )
236 >>>
237 >>> async def main():
238 ... async for i, features, prediction in predict(
239 ... model,
240 ... {"Years": 6, "Expertise": 13, "Trust": 0.7},
241 ... {"Years": 7, "Expertise": 15, "Trust": 0.8},
242 ... ):
243 ... features["Salary"] = round(prediction["Salary"]["value"])
244 ... print(features)
245 >>>
246 >>> asyncio.run(main())
247 {'Years': 6, 'Expertise': 13, 'Trust': 0.7, 'Salary': 70.0}
248 {'Years': 7, 'Expertise': 15, 'Trust': 0.8, 'Salary': 80.0}
249 """
250 sources = _records_to_sources(*args)
251 async with sources as sources, model as model:
252 async with sources() as sctx, model() as mctx:
253 async for record in mctx.predict(sctx.records()):
254 yield record if keep_record else (
255 record.key,
256 record.features(),
257 record.predictions(),
258 )
259 if update:
260 await sctx.update(record)
261
[end of dffml/high_level.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dffml/high_level.py b/dffml/high_level.py
--- a/dffml/high_level.py
+++ b/dffml/high_level.py
@@ -3,7 +3,7 @@
in a lot of quick and dirty python files.
"""
import pathlib
-from typing import Union, Dict, Any
+from typing import Union, Dict, Any, AsyncIterator
from .record import Record
from .source.source import Sources, BaseSource
@@ -258,3 +258,60 @@
)
if update:
await sctx.update(record)
+
+
+async def load(source: BaseSource, *args: str) -> AsyncIterator[Record]:
+ """
+ Yields records from a source.
+
+ Yields all the records from the source, if record keys are given then only
+ those records are yielded.
+
+ Parameters
+ ----------
+ source : BaseSource
+ Data source to use. See :doc:`/plugins/dffml_source` for sources and
+ options.
+ *args : str
+ Records to be returned. If empty, all the records in a source will be returned.
+
+ Returns
+ -------
+ asynciterator
+ :py:class:`Record <dffml.record.Record>` object
+
+ Examples
+ --------
+
+ >>> source = CSVSource(filename="save.csv", allowempty=True, readwrite=True)
+ >>>
+ >>> async def main():
+ ... await save(
+ ... source,
+ ... Record("1", data={"features": {"A": 0, "B": 1}}),
+ ... Record("2", data={"features": {"A": 3, "B": 4}}),
+ ... )
+ ...
+ ... # All records in source
+ ... async for record in load(source):
+ ... print(record.export())
+ ...
+ ... # For specific records in a source
+ ... async for record in load(source, "1"):
+ ... print(record.export())
+ >>>
+ >>> asyncio.run(main())
+ {'key': '1', 'features': {'A': 0, 'B': 1}, 'extra': {}}
+ {'key': '2', 'features': {'A': 3, 'B': 4}, 'extra': {}}
+ {'key': '1', 'features': {'A': 0, 'B': 1}, 'extra': {}}
+ """
+ async with source:
+ async with source() as sctx:
+ if args:
+ # If specific records are to be loaded
+ for record in args:
+ yield await sctx.record(record)
+ else:
+ # All the records are loaded
+ async for record in sctx.records():
+ yield record
| {"golden_diff": "diff --git a/dffml/high_level.py b/dffml/high_level.py\n--- a/dffml/high_level.py\n+++ b/dffml/high_level.py\n@@ -3,7 +3,7 @@\n in a lot of quick and dirty python files.\n \"\"\"\n import pathlib\n-from typing import Union, Dict, Any\n+from typing import Union, Dict, Any, AsyncIterator\n \n from .record import Record\n from .source.source import Sources, BaseSource\n@@ -258,3 +258,60 @@\n )\n if update:\n await sctx.update(record)\n+\n+\n+async def load(source: BaseSource, *args: str) -> AsyncIterator[Record]:\n+ \"\"\"\n+ Yields records from a source.\n+\n+ Yields all the records from the source, if record keys are given then only\n+ those records are yielded.\n+\n+ Parameters\n+ ----------\n+ source : BaseSource\n+ Data source to use. See :doc:`/plugins/dffml_source` for sources and\n+ options.\n+ *args : str\n+ Records to be returned. If empty, all the records in a source will be returned.\n+\n+ Returns\n+ -------\n+ asynciterator\n+ :py:class:`Record <dffml.record.Record>` object\n+\n+ Examples\n+ --------\n+\n+ >>> source = CSVSource(filename=\"save.csv\", allowempty=True, readwrite=True)\n+ >>>\n+ >>> async def main():\n+ ... await save(\n+ ... source,\n+ ... Record(\"1\", data={\"features\": {\"A\": 0, \"B\": 1}}),\n+ ... Record(\"2\", data={\"features\": {\"A\": 3, \"B\": 4}}),\n+ ... )\n+ ...\n+ ... # All records in source\n+ ... async for record in load(source):\n+ ... print(record.export())\n+ ...\n+ ... # For specific records in a source\n+ ... async for record in load(source, \"1\"):\n+ ... print(record.export())\n+ >>>\n+ >>> asyncio.run(main())\n+ {'key': '1', 'features': {'A': 0, 'B': 1}, 'extra': {}}\n+ {'key': '2', 'features': {'A': 3, 'B': 4}, 'extra': {}}\n+ {'key': '1', 'features': {'A': 0, 'B': 1}, 'extra': {}}\n+ \"\"\"\n+ async with source:\n+ async with source() as sctx:\n+ if args:\n+ # If specific records are to be loaded\n+ for record in args:\n+ yield await sctx.record(record)\n+ else:\n+ # All the records are loaded\n+ async for record in sctx.records():\n+ yield record\n", "issue": "feature: add a load function in high level\nAdd a load function in `high_level.py` so the data from sources can be loaded.\r\nAfter completing this, #555 can be worked on.\n", "before_files": [{"content": "\"\"\"\nHigh level abstraction interfaces to DFFML. 
These are probably going to be used\nin a lot of quick and dirty python files.\n\"\"\"\nimport pathlib\nfrom typing import Union, Dict, Any\n\nfrom .record import Record\nfrom .source.source import Sources, BaseSource\nfrom .source.memory import MemorySource, MemorySourceConfig\n\n\ndef _records_to_sources(*args):\n \"\"\"\n Create a memory source out of any records passed as a variable length list.\n Add all sources found in the variable length list to a list of sources, and\n the created source containing records, and return that list of sources.\n \"\"\"\n # If the first arg is an instance of sources, append the rest to that.\n if args and isinstance(args[0], Sources):\n sources = args[0]\n else:\n sources = Sources(\n *[arg for arg in args if isinstance(arg, BaseSource)]\n )\n # Records to add to memory source\n records = []\n # Make args mutable\n args = list(args)\n # Convert dicts to records\n for i, arg in enumerate(args):\n if isinstance(arg, dict):\n arg = Record(i, data={\"features\": arg})\n if isinstance(arg, Record):\n records.append(arg)\n if isinstance(arg, str) and \".\" in arg:\n filepath = pathlib.Path(arg)\n source = BaseSource.load(filepath.suffix.replace(\".\", \"\"))\n sources.append(source(filename=arg))\n # Create memory source if there are any records\n if records:\n sources.append(MemorySource(MemorySourceConfig(records=records)))\n return sources\n\n\nasync def save(source: BaseSource, *args: Record) -> None:\n \"\"\"\n Update a source's knowledge about given records.\n\n For each record given, call\n :py:func:`update <dffml.source.source.BaseSourceContext.update>` on the\n source. Effectively saving all the records to the source.\n\n Parameters\n ----------\n source : BaseSource\n Data source to use. See :doc:`/plugins/dffml_source` for sources and\n options.\n *args : list\n Records to be saved.\n\n Examples\n --------\n\n >>> source = CSVSource(filename=\"save.csv\", allowempty=True, readwrite=True)\n >>>\n >>> async def main():\n ... await save(\n ... source,\n ... Record(\n ... \"myrecord\",\n ... data={\n ... \"features\": {\"Years\": 0, \"Expertise\": 1, \"Trust\": 0.1},\n ... \"prediction\": {\"Salary\": {\"value\": 10, \"confidence\": 1.0}},\n ... }\n ... )\n ... )\n ... print(pathlib.Path(\"save.csv\").read_text().strip())\n >>>\n >>> asyncio.run(main())\n key,tag,Expertise,Trust,Years,prediction_Salary,confidence_Salary\n myrecord,untagged,1,0.1,0,10,1.0\n \"\"\"\n async with source:\n async with source() as sctx:\n for record in args:\n await sctx.update(record)\n\n\nasync def train(model, *args: Union[BaseSource, Record, Dict[str, Any]]):\n \"\"\"\n Train a machine learning model.\n\n Provide records to the model to train it. The model should be already\n instantiated.\n\n Parameters\n ----------\n model : Model\n Machine Learning model to use. See :doc:`/plugins/dffml_model` for\n models options.\n *args : list\n Input data for training. Could be a ``dict``, :py:class:`Record`,\n filename, one of the data :doc:`/plugins/dffml_source`, or a filename\n with the extension being one of the data sources.\n\n Examples\n --------\n\n >>> model = LinearRegressionModel(\n ... features=Features(\n ... DefFeature(\"Years\", int, 1),\n ... DefFeature(\"Expertise\", int, 1),\n ... DefFeature(\"Trust\", float, 1),\n ... ),\n ... predict=DefFeature(\"Salary\", int, 1),\n ... )\n >>>\n >>> async def main():\n ... await train(\n ... model,\n ... {\"Years\": 0, \"Expertise\": 1, \"Trust\": 0.1, \"Salary\": 10},\n ... 
{\"Years\": 1, \"Expertise\": 3, \"Trust\": 0.2, \"Salary\": 20},\n ... {\"Years\": 2, \"Expertise\": 5, \"Trust\": 0.3, \"Salary\": 30},\n ... {\"Years\": 3, \"Expertise\": 7, \"Trust\": 0.4, \"Salary\": 40},\n ... )\n >>>\n >>> asyncio.run(main())\n \"\"\"\n sources = _records_to_sources(*args)\n async with sources as sources, model as model:\n async with sources() as sctx, model() as mctx:\n return await mctx.train(sctx)\n\n\nasync def accuracy(\n model, *args: Union[BaseSource, Record, Dict[str, Any]]\n) -> float:\n \"\"\"\n Assess the accuracy of a machine learning model.\n\n Provide records to the model to assess the percent accuracy of its\n prediction abilities. The model should be already instantiated and trained.\n\n Parameters\n ----------\n model : Model\n Machine Learning model to use. See :doc:`/plugins/dffml_model` for\n models options.\n *args : list\n Input data for training. Could be a ``dict``, :py:class:`Record`,\n filename, one of the data :doc:`/plugins/dffml_source`, or a filename\n with the extension being one of the data sources.\n\n Returns\n -------\n float\n A decimal value representing the percent of the time the model made the\n correct prediction. For some models this has another meaning. Please see\n the documentation for the model your using for further details.\n\n Examples\n --------\n\n >>> model = LinearRegressionModel(\n ... features=Features(\n ... DefFeature(\"Years\", int, 1),\n ... DefFeature(\"Expertise\", int, 1),\n ... DefFeature(\"Trust\", float, 1),\n ... ),\n ... predict=DefFeature(\"Salary\", int, 1),\n ... )\n >>>\n >>> async def main():\n ... print(\n ... \"Accuracy:\",\n ... await accuracy(\n ... model,\n ... {\"Years\": 4, \"Expertise\": 9, \"Trust\": 0.5, \"Salary\": 50},\n ... {\"Years\": 5, \"Expertise\": 11, \"Trust\": 0.6, \"Salary\": 60},\n ... ),\n ... )\n >>>\n >>> asyncio.run(main())\n Accuracy: 1.0\n \"\"\"\n sources = _records_to_sources(*args)\n async with sources as sources, model as model:\n async with sources() as sctx, model() as mctx:\n return float(await mctx.accuracy(sctx))\n\n\nasync def predict(\n model,\n *args: Union[BaseSource, Record, Dict[str, Any]],\n update: bool = False,\n keep_record: bool = False,\n):\n \"\"\"\n Make a prediction using a machine learning model.\n\n The model must be trained before using it to make a prediction.\n\n Parameters\n ----------\n model : Model\n Machine Learning model to use. See :doc:`/plugins/dffml_model` for\n models options.\n *args : list\n Input data for prediction. Could be a ``dict``, :py:class:`Record`,\n filename, or one of the data :doc:`/plugins/dffml_source`.\n update : boolean, optional\n If ``True`` prediction data within records will be written back to all\n sources given. Defaults to ``False``.\n keep_record : boolean, optional\n If ``True`` the results will be kept as their ``Record`` objects instead\n of being converted to a ``(record.key, features, predictions)`` tuple.\n Defaults to ``False``.\n\n Returns\n -------\n asynciterator\n ``Record`` objects or ``(record.key, features, predictions)`` tuple.\n\n Examples\n --------\n\n >>> model = LinearRegressionModel(\n ... features=Features(\n ... DefFeature(\"Years\", int, 1),\n ... DefFeature(\"Expertise\", int, 1),\n ... DefFeature(\"Trust\", float, 1),\n ... ),\n ... predict=DefFeature(\"Salary\", int, 1),\n ... )\n >>>\n >>> async def main():\n ... async for i, features, prediction in predict(\n ... model,\n ... {\"Years\": 6, \"Expertise\": 13, \"Trust\": 0.7},\n ... 
{\"Years\": 7, \"Expertise\": 15, \"Trust\": 0.8},\n ... ):\n ... features[\"Salary\"] = round(prediction[\"Salary\"][\"value\"])\n ... print(features)\n >>>\n >>> asyncio.run(main())\n {'Years': 6, 'Expertise': 13, 'Trust': 0.7, 'Salary': 70.0}\n {'Years': 7, 'Expertise': 15, 'Trust': 0.8, 'Salary': 80.0}\n \"\"\"\n sources = _records_to_sources(*args)\n async with sources as sources, model as model:\n async with sources() as sctx, model() as mctx:\n async for record in mctx.predict(sctx.records()):\n yield record if keep_record else (\n record.key,\n record.features(),\n record.predictions(),\n )\n if update:\n await sctx.update(record)\n", "path": "dffml/high_level.py"}]} | 3,423 | 634 |
gh_patches_debug_65041 | rasdani/github-patches | git_diff | PokemonGoF__PokemonGo-Bot-4931 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Error in Telegram: "AttributeError: 'module' object has no attribute 'now'"
### Expected Behavior
<!-- Tell us what you expect to happen -->
Bot running with Telegram enabled
### Actual Behavior
<!-- Tell us what is happening -->
Bot not starting due to error message
### Your FULL config.json (remove your username, password, gmapkey and any other private info)
<!-- Provide your FULL config file, feel free to use services such as pastebin.com to reduce clutter -->
http://pastebin.com/5nQC2ceh
### Output when issue occurred
<!-- Provide a reasonable sample from your output log (not just the error message), feel free to use services such as pastebin.com to reduce clutter -->
Traceback (most recent call last):
File "pokecli.py", line 781, in <module>
main()
File "pokecli.py", line 128, in main
bot = start_bot(bot, config)
File "pokecli.py", line 88, in start_bot
initialize_task(bot, config)
File "pokecli.py", line 79, in initialize_task
tree = TreeConfigBuilder(bot, config.raw_tasks).build()
File "/PokemonGo-Bot/pokemongo_bot/tree_config_builder.py", line 79, in build
instance = worker(self.bot, task_config)
File "/PokemonGo-Bot/pokemongo_bot/base_task.py", line 23, in __init__
self.initialize()
File "/PokemonGo-Bot/pokemongo_bot/cell_workers/telegram_task.py", line 42, in initialize
self.next_job=datetime.now() + timedelta(seconds=self.min_interval)
AttributeError: 'module' object has no attribute 'now'
### Steps to Reproduce
<!-- Tell us the steps you have taken to reproduce the issue -->
Start the bot with the above config.
### Other Information
OS: CentOS
<!-- Tell us what Operating system you're using -->
Branch: dev
<!-- dev or master -->
Git Commit: 9e81c6ed90d79e181599ec7f0a0cfa2ecd4d09f5
<!-- run 'git log -n 1 --pretty=format:"%H"' -->
Python Version: Python 2.7.5
<!-- run 'python -V' and paste it here) -->
Any other relevant files/configs (eg: path files)
<!-- Anything else which may be of relevance -->
<!-- ===============END OF ISSUE SECTION=============== -->
</issue>
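The root cause is that `telegram_task.py` imports the `datetime` module but calls `datetime.now()`, which only exists on the `datetime` class. A minimal reproduction and the working form (the imports the patch further down switches to) look like this:

```python
import datetime

try:
    datetime.now()                          # module, not the class
except AttributeError as err:
    print(err)                              # "'module' object has no attribute 'now'" on Python 2.7

from datetime import datetime, timedelta   # the form the fix imports

next_job = datetime.now() + timedelta(seconds=120)   # 120 is the task's min_interval default
print(next_job)
```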
<code>
[start of pokemongo_bot/cell_workers/telegram_task.py]
1 # -*- coding: utf-8 -*-
2 import datetime
3 import telegram
4 import os
5 import logging
6 import json
7 from pokemongo_bot.base_task import BaseTask
8 from pokemongo_bot.base_dir import _base_dir
9 from pokemongo_bot.event_handlers import TelegramHandler
10
11 from pprint import pprint
12 import re
13
14 class FileIOException(Exception):
15 pass
16
17 class TelegramTask(BaseTask):
18 SUPPORTED_TASK_API_VERSION = 1
19 update_id = None
20 tbot = None
21 min_interval=None
22 next_job=None
23
24 def initialize(self):
25 if not self.enabled:
26 return
27 api_key = self.bot.config.telegram_token
28 if api_key == None:
29 self.emit_event(
30 'config_error',
31 formatted='api_key not defined.'
32 )
33 return
34 self.tbot = telegram.Bot(api_key)
35 if self.config.get('master',None):
36 self.bot.event_manager.add_handler(TelegramHandler(self.tbot,self.config.get('master',None),self.config.get('alert_catch')))
37 try:
38 self.update_id = self.tbot.getUpdates()[0].update_id
39 except IndexError:
40 self.update_id = None
41 self.min_interval=self.config.get('min_interval',120)
42 self.next_job=datetime.now() + timedelta(seconds=self.min_interval)
43 def work(self):
44 if not self.enabled:
45 return
46 if datetime.now()<self.next_job:
47 return
48 self.next_job=datetime.now() + timedelta(seconds=self.min_interval)
49 for update in self.tbot.getUpdates(offset=self.update_id, timeout=10):
50 self.update_id = update.update_id+1
51 if update.message:
52 self.bot.logger.info("message from {} ({}): {}".format(update.message.from_user.username, update.message.from_user.id, update.message.text))
53 if self.config.get('master',None) and self.config.get('master',None) not in [update.message.from_user.id, "@{}".format(update.message.from_user.username)]:
54 self.emit_event(
55 'debug',
56 formatted="Master wrong: expecting {}, got {}({})".format(self.config.get('master',None), update.message.from_user.username, update.message.from_user.id))
57 continue
58 else:
59 if not re.match(r'^[0-9]+$', "{}".format(self.config['master'])): # master was not numeric...
60 self.config['master'] = update.message.chat_id
61 idx = (i for i,v in enumerate(self.bot.event_manager._handlers) if type(v) is TelegramHandler).next()
62 self.bot.event_manager._handlers[idx] = TelegramHandler(self.tbot,self.config['master'], self.config.get('alert_catch'))
63
64
65
66 if update.message.text == "/info":
67 stats = self._get_player_stats()
68 if stats:
69 with self.bot.database as conn:
70 cur = conn.cursor()
71 cur.execute("SELECT DISTINCT COUNT(encounter_id) FROM catch_log WHERE dated >= datetime('now','-1 day')")
72 catch_day = cur.fetchone()[0]
73 cur.execute("SELECT DISTINCT COUNT(pokestop) FROM pokestop_log WHERE dated >= datetime('now','-1 day')")
74 ps_day = cur.fetchone()[0]
75 res = (
76 "*"+self.bot.config.username+"*",
77 "_Level:_ "+str(stats["level"]),
78 "_XP:_ "+str(stats["experience"])+"/"+str(stats["next_level_xp"]),
79 "_Pokemons Captured:_ "+str(stats["pokemons_captured"])+" ("+str(catch_day)+" _last 24h_)",
80 "_Poke Stop Visits:_ "+str(stats["poke_stop_visits"])+" ("+str(ps_day)+" _last 24h_)",
81 "_KM Walked:_ "+str(stats["km_walked"])
82 )
83 self.tbot.sendMessage(chat_id=update.message.chat_id, parse_mode='Markdown', text="\n".join(res))
84 self.tbot.send_location(chat_id=update.message.chat_id, latitude=self.bot.api._position_lat, longitude=self.bot.api._position_lng)
85 else:
86 self.tbot.sendMessage(chat_id=update.message.chat_id, parse_mode='Markdown', text="Stats not loaded yet\n")
87 elif update.message.text == "/start" or update.message.text == "/help":
88 res = (
89 "Commands: ",
90 "/info - info about bot"
91 )
92 self.tbot.sendMessage(chat_id=update.message.chat_id, parse_mode='Markdown', text="\n".join(res))
93
94 def _get_player_stats(self):
95 """
96 Helper method parsing the bot inventory object and returning the player stats object.
97 :return: The player stats object.
98 :rtype: dict
99 """
100 web_inventory = os.path.join(_base_dir, "web", "inventory-%s.json" % self.bot.config.username)
101
102 try:
103 with open(web_inventory, "r") as infile:
104 json_inventory = json.load(infile)
105 except ValueError as e:
106 # Unable to read json from web inventory
107 # File may be corrupt. Create a new one.
108 self.bot.logger.info('[x] Error while opening inventory file for read: %s' % e)
109 json_inventory = []
110 except:
111 raise FileIOException("Unexpected error reading from {}".web_inventory)
112
113 return next((x["inventory_item_data"]["player_stats"]
114 for x in json_inventory
115 if x.get("inventory_item_data", {}).get("player_stats", {})),
116 None)
117
[end of pokemongo_bot/cell_workers/telegram_task.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pokemongo_bot/cell_workers/telegram_task.py b/pokemongo_bot/cell_workers/telegram_task.py
--- a/pokemongo_bot/cell_workers/telegram_task.py
+++ b/pokemongo_bot/cell_workers/telegram_task.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
-import datetime
+from datetime import datetime
+from datetime import timedelta
import telegram
import os
import logging
| {"golden_diff": "diff --git a/pokemongo_bot/cell_workers/telegram_task.py b/pokemongo_bot/cell_workers/telegram_task.py\n--- a/pokemongo_bot/cell_workers/telegram_task.py\n+++ b/pokemongo_bot/cell_workers/telegram_task.py\n@@ -1,5 +1,6 @@\n # -*- coding: utf-8 -*-\n-import datetime\n+from datetime import datetime\n+from datetime import timedelta\n import telegram\n import os\n import logging\n", "issue": "[BUG] Error in Telegram: \"AttributeError: 'module' object has no attribute 'now'\"\n### Expected Behavior\n\n<!-- Tell us what you expect to happen -->\n\nBot running with Telegram enabled\n### Actual Behavior\n\n<!-- Tell us what is happening -->\n\nBot not starting due to error message\n### Your FULL config.json (remove your username, password, gmapkey and any other private info)\n\n<!-- Provide your FULL config file, feel free to use services such as pastebin.com to reduce clutter -->\n\nhttp://pastebin.com/5nQC2ceh\n### Output when issue occurred\n\n<!-- Provide a reasonable sample from your output log (not just the error message), feel free to use services such as pastebin.com to reduce clutter -->\n\nTraceback (most recent call last):\n File \"pokecli.py\", line 781, in <module>\n main()\n File \"pokecli.py\", line 128, in main\n bot = start_bot(bot, config)\n File \"pokecli.py\", line 88, in start_bot\n initialize_task(bot, config)\n File \"pokecli.py\", line 79, in initialize_task\n tree = TreeConfigBuilder(bot, config.raw_tasks).build()\n File \"/PokemonGo-Bot/pokemongo_bot/tree_config_builder.py\", line 79, in build\n instance = worker(self.bot, task_config)\n File \"/PokemonGo-Bot/pokemongo_bot/base_task.py\", line 23, in **init**\n self.initialize()\n File \"/PokemonGo-Bot/pokemongo_bot/cell_workers/telegram_task.py\", line 42, in initialize\n self.next_job=datetime.now() + timedelta(seconds=self.min_interval)\nAttributeError: 'module' object has no attribute 'now'\n### Steps to Reproduce\n\n<!-- Tell us the steps you have taken to reproduce the issue -->\n\nStart the bot with the above config.\n### Other Information\n\nOS: CentOS\n\n<!-- Tell us what Operating system you're using --> \n\nBranch: dev\n\n<!-- dev or master --> \n\nGit Commit: 9e81c6ed90d79e181599ec7f0a0cfa2ecd4d09f5\n\n<!-- run 'git log -n 1 --pretty=format:\"%H\"' --> \n\nPython Version: Python 2.7.5\n\n<!-- run 'python -V' and paste it here) --> \n\nAny other relevant files/configs (eg: path files) \n\n<!-- Anything else which may be of relevance -->\n\n<!-- ===============END OF ISSUE SECTION=============== -->\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport datetime\nimport telegram\nimport os\nimport logging\nimport json\nfrom pokemongo_bot.base_task import BaseTask\nfrom pokemongo_bot.base_dir import _base_dir\nfrom pokemongo_bot.event_handlers import TelegramHandler\n\nfrom pprint import pprint\nimport re\n\nclass FileIOException(Exception):\n pass\n\nclass TelegramTask(BaseTask):\n SUPPORTED_TASK_API_VERSION = 1\n update_id = None\n tbot = None\n min_interval=None\n next_job=None\n \n def initialize(self):\n if not self.enabled:\n return\n api_key = self.bot.config.telegram_token\n if api_key == None:\n self.emit_event(\n 'config_error',\n formatted='api_key not defined.'\n )\n return\n self.tbot = telegram.Bot(api_key)\n if self.config.get('master',None):\n self.bot.event_manager.add_handler(TelegramHandler(self.tbot,self.config.get('master',None),self.config.get('alert_catch')))\n try:\n self.update_id = self.tbot.getUpdates()[0].update_id\n except IndexError:\n self.update_id = 
None\n self.min_interval=self.config.get('min_interval',120)\n self.next_job=datetime.now() + timedelta(seconds=self.min_interval)\n def work(self):\n if not self.enabled:\n return\n if datetime.now()<self.next_job:\n return\n self.next_job=datetime.now() + timedelta(seconds=self.min_interval)\n for update in self.tbot.getUpdates(offset=self.update_id, timeout=10):\n self.update_id = update.update_id+1\n if update.message:\n self.bot.logger.info(\"message from {} ({}): {}\".format(update.message.from_user.username, update.message.from_user.id, update.message.text))\n if self.config.get('master',None) and self.config.get('master',None) not in [update.message.from_user.id, \"@{}\".format(update.message.from_user.username)]:\n self.emit_event( \n 'debug', \n formatted=\"Master wrong: expecting {}, got {}({})\".format(self.config.get('master',None), update.message.from_user.username, update.message.from_user.id))\n continue\n else:\n if not re.match(r'^[0-9]+$', \"{}\".format(self.config['master'])): # master was not numeric...\n self.config['master'] = update.message.chat_id\n idx = (i for i,v in enumerate(self.bot.event_manager._handlers) if type(v) is TelegramHandler).next()\n self.bot.event_manager._handlers[idx] = TelegramHandler(self.tbot,self.config['master'], self.config.get('alert_catch'))\n \n\n\n if update.message.text == \"/info\":\n stats = self._get_player_stats()\n if stats:\n with self.bot.database as conn:\n cur = conn.cursor()\n cur.execute(\"SELECT DISTINCT COUNT(encounter_id) FROM catch_log WHERE dated >= datetime('now','-1 day')\")\n catch_day = cur.fetchone()[0]\n cur.execute(\"SELECT DISTINCT COUNT(pokestop) FROM pokestop_log WHERE dated >= datetime('now','-1 day')\")\n ps_day = cur.fetchone()[0]\n res = (\n \"*\"+self.bot.config.username+\"*\",\n \"_Level:_ \"+str(stats[\"level\"]),\n \"_XP:_ \"+str(stats[\"experience\"])+\"/\"+str(stats[\"next_level_xp\"]),\n \"_Pokemons Captured:_ \"+str(stats[\"pokemons_captured\"])+\" (\"+str(catch_day)+\" _last 24h_)\",\n \"_Poke Stop Visits:_ \"+str(stats[\"poke_stop_visits\"])+\" (\"+str(ps_day)+\" _last 24h_)\",\n \"_KM Walked:_ \"+str(stats[\"km_walked\"])\n )\n self.tbot.sendMessage(chat_id=update.message.chat_id, parse_mode='Markdown', text=\"\\n\".join(res))\n self.tbot.send_location(chat_id=update.message.chat_id, latitude=self.bot.api._position_lat, longitude=self.bot.api._position_lng)\n else:\n self.tbot.sendMessage(chat_id=update.message.chat_id, parse_mode='Markdown', text=\"Stats not loaded yet\\n\")\n elif update.message.text == \"/start\" or update.message.text == \"/help\":\n res = (\n \"Commands: \",\n \"/info - info about bot\"\n )\n self.tbot.sendMessage(chat_id=update.message.chat_id, parse_mode='Markdown', text=\"\\n\".join(res))\n\n def _get_player_stats(self):\n \"\"\"\n Helper method parsing the bot inventory object and returning the player stats object.\n :return: The player stats object.\n :rtype: dict\n \"\"\"\n web_inventory = os.path.join(_base_dir, \"web\", \"inventory-%s.json\" % self.bot.config.username)\n \n try:\n with open(web_inventory, \"r\") as infile:\n json_inventory = json.load(infile)\n except ValueError as e:\n # Unable to read json from web inventory\n # File may be corrupt. Create a new one. 
\n self.bot.logger.info('[x] Error while opening inventory file for read: %s' % e)\n json_inventory = []\n except:\n raise FileIOException(\"Unexpected error reading from {}\".web_inventory)\n \n return next((x[\"inventory_item_data\"][\"player_stats\"]\n for x in json_inventory\n if x.get(\"inventory_item_data\", {}).get(\"player_stats\", {})),\n None)\n", "path": "pokemongo_bot/cell_workers/telegram_task.py"}]} | 2,485 | 102 |
gh_patches_debug_6721 | rasdani/github-patches | git_diff | microsoft__Qcodes-5565 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cannot exit ipython with running qcodes.Monitor()
### Steps to reproduce
iPython version 8.10, qcodes version 0.42, Windows 10 Pro 22H2
1. open ipython
2. from qcodes import Monitor
monitor = Monitor()
3. exit ipython
It hangs instead of closing iPython. The terminal must be terminated.
</issue>
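The symptom matches a non-daemon background thread keeping the interpreter alive at shutdown. A stripped-down `threading` sketch (not the real Monitor, just the mechanism that the one-line fix below relies on):

```python
import threading
import time

def serve_forever() -> None:
    # Stands in for Monitor.run(): a loop that never returns on its own.
    while True:
        time.sleep(1)

DEMONSTRATE_HANG = False  # flip to True to see the interpreter refuse to exit

if DEMONSTRATE_HANG:
    # Non-daemon thread: Python waits for it at shutdown, so exiting hangs.
    threading.Thread(target=serve_forever).start()
else:
    # Daemon thread: shutdown no longer blocks on it. This is what the
    # one-line change below, super().__init__(daemon=True), does for Monitor.
    threading.Thread(target=serve_forever, daemon=True).start()

print("main thread exiting")
```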
<code>
[start of src/qcodes/monitor/monitor.py]
1 #! /usr/bin/env python
2 # vim:fenc=utf-8
3 #
4 # Copyright © 2017 unga <[email protected]>
5 #
6 # Distributed under terms of the MIT license.
7 """
8 Monitor a set of parameters in a background thread
9 stream output over websocket
10
11 To start monitor, run this file, or if qcodes is installed as a module:
12
13 ``% python -m qcodes.monitor.monitor``
14
15 Add parameters to monitor in your measurement by creating a new monitor with a
16 list of parameters to monitor:
17
18 ``monitor = qcodes.Monitor(param1, param2, param3, ...)``
19 """
20 from __future__ import annotations
21
22 import asyncio
23 import json
24 import logging
25 import os
26 import socketserver
27 import time
28 import webbrowser
29 from asyncio import CancelledError
30 from collections import defaultdict
31 from collections.abc import Awaitable, Sequence
32 from contextlib import suppress
33 from importlib.resources import as_file, files
34 from threading import Event, Thread
35 from typing import Any, Callable
36
37 import websockets
38 import websockets.exceptions
39 import websockets.server
40
41 from qcodes.parameters import Parameter
42
43 WEBSOCKET_PORT = 5678
44 SERVER_PORT = 3000
45
46 log = logging.getLogger(__name__)
47
48
49 def _get_metadata(
50 *parameters: Parameter, use_root_instrument: bool = True
51 ) -> dict[str, Any]:
52 """
53 Return a dictionary that contains the parameter metadata grouped by the
54 instrument it belongs to.
55 """
56 metadata_timestamp = time.time()
57 # group metadata by instrument
58 metas: dict[Any, Any] = defaultdict(list)
59 for parameter in parameters:
60 # Get the latest value from the parameter,
61 # respecting the max_val_age parameter
62 meta: dict[str, float | str | None] = {}
63 meta["value"] = str(parameter.get_latest())
64 timestamp = parameter.get_latest.get_timestamp()
65 if timestamp is not None:
66 meta["ts"] = timestamp.timestamp()
67 else:
68 meta["ts"] = None
69 meta["name"] = parameter.label or parameter.name
70 meta["unit"] = parameter.unit
71
72 # find the base instrument that this parameter belongs to
73 if use_root_instrument:
74 baseinst = parameter.root_instrument
75 else:
76 baseinst = parameter.instrument
77 if baseinst is None:
78 metas["Unbound Parameter"].append(meta)
79 else:
80 metas[str(baseinst)].append(meta)
81
82 # Create list of parameters, grouped by instrument
83 parameters_out = []
84 for instrument in metas:
85 temp = {"instrument": instrument, "parameters": metas[instrument]}
86 parameters_out.append(temp)
87
88 state = {"ts": metadata_timestamp, "parameters": parameters_out}
89 return state
90
91
92 def _handler(
93 parameters: Sequence[Parameter], interval: float, use_root_instrument: bool = True
94 ) -> Callable[[websockets.server.WebSocketServerProtocol, str], Awaitable[None]]:
95 """
96 Return the websockets server handler.
97 """
98
99 async def server_func(
100 websocket: websockets.server.WebSocketServerProtocol, _: str
101 ) -> None:
102 """
103 Create a websockets handler that sends parameter values to a listener
104 every "interval" seconds.
105 """
106 while True:
107 try:
108 # Update the parameter values
109 try:
110 meta = _get_metadata(
111 *parameters, use_root_instrument=use_root_instrument
112 )
113 except ValueError:
114 log.exception("Error getting parameters")
115 break
116 log.debug("sending.. to %r", websocket)
117 await websocket.send(json.dumps(meta))
118 # Wait for interval seconds and then send again
119 await asyncio.sleep(interval)
120 except (CancelledError, websockets.exceptions.ConnectionClosed):
121 log.debug("Got CancelledError or ConnectionClosed",
122 exc_info=True)
123 break
124 log.debug("Closing websockets connection")
125
126 return server_func
127
128
129 class Monitor(Thread):
130 """
131 QCodes Monitor - WebSockets server to monitor qcodes parameters.
132 """
133 running = None
134
135 def __init__(
136 self,
137 *parameters: Parameter,
138 interval: float = 1,
139 use_root_instrument: bool = True,
140 ):
141 """
142 Monitor qcodes parameters.
143
144 Args:
145 *parameters: Parameters to monitor.
146 interval: How often one wants to refresh the values.
147 use_root_instrument: Defines if parameters are grouped according to
148 parameter.root_instrument or parameter.instrument
149 """
150 super().__init__()
151
152 # Check that all values are valid parameters
153 for parameter in parameters:
154 if not isinstance(parameter, Parameter):
155 raise TypeError(f"We can only monitor QCodes "
156 f"Parameters, not {type(parameter)}")
157
158 self.loop: asyncio.AbstractEventLoop | None = None
159 self._stop_loop_future: asyncio.Future | None = None
160 self._parameters = parameters
161 self.loop_is_closed = Event()
162 self.server_is_started = Event()
163 self.handler = _handler(
164 parameters, interval=interval, use_root_instrument=use_root_instrument
165 )
166 log.debug("Start monitoring thread")
167 if Monitor.running:
168 # stop the old server
169 log.debug("Stopping and restarting server")
170 Monitor.running.stop()
171 self.start()
172
173 # Wait until the loop is running
174 self.server_is_started.wait(timeout=5)
175 if not self.server_is_started.is_set():
176 raise RuntimeError("Failed to start server")
177 Monitor.running = self
178
179 def run(self) -> None:
180 """
181 Start the event loop and run forever.
182 """
183 log.debug("Running Websocket server")
184
185 async def run_loop() -> None:
186 self.loop = asyncio.get_running_loop()
187 self._stop_loop_future = self.loop.create_future()
188
189 async with websockets.server.serve(
190 self.handler, "127.0.0.1", WEBSOCKET_PORT, close_timeout=1
191 ):
192 self.server_is_started.set()
193 try:
194 await self._stop_loop_future
195 except asyncio.CancelledError:
196 log.debug("Websocket server thread shutting down")
197
198 try:
199 asyncio.run(run_loop())
200 finally:
201 self.loop_is_closed.set()
202
203 def update_all(self) -> None:
204 """
205 Update all parameters in the monitor.
206 """
207 for parameter in self._parameters:
208 # call get if it can be called without arguments
209 with suppress(TypeError):
210 parameter.get()
211
212 def stop(self) -> None:
213 """
214 Shutdown the server, close the event loop and join the thread.
215 Setting active Monitor to ``None``.
216 """
217 self.join()
218 Monitor.running = None
219
220 def join(self, timeout: float | None = None) -> None:
221 """
222 Overwrite ``Thread.join`` to make sure server is stopped before
223 joining avoiding a potential deadlock.
224 """
225 log.debug("Shutting down server")
226 if not self.is_alive():
227 # we run this check before trying to run to prevent a cryptic
228 # error message
229 log.debug("monitor is dead")
230 return
231 try:
232 if self.loop is not None and self._stop_loop_future is not None:
233 log.debug("Instructing server to stop event loop.")
234 self.loop.call_soon_threadsafe(self._stop_loop_future.cancel)
235 else:
236 log.debug("No event loop found. Cannot stop event loop.")
237 except RuntimeError:
238 # the above may throw a runtime error if the loop is already
239 # stopped in which case there is nothing more to do
240 log.exception("Could not close loop")
241 self.loop_is_closed.wait(timeout=5)
242 if not self.loop_is_closed.is_set():
243 raise RuntimeError("Failed to join loop")
244 log.debug("Loop reported closed")
245 super().join(timeout=timeout)
246 log.debug("Monitor Thread has joined")
247
248 @staticmethod
249 def show() -> None:
250 """
251 Overwrite this method to show/raise your monitor GUI
252 F.ex.
253
254 ::
255
256 import webbrowser
257 url = "localhost:3000"
258 # Open URL in new window, raising the window if possible.
259 webbrowser.open_new(url)
260
261 """
262 webbrowser.open(f"http://localhost:{SERVER_PORT}")
263
264
265 def main() -> None:
266 import http.server
267
268 # If this file is run, create a simple webserver that serves a simple
269 # website that can be used to view monitored parameters.
270 # # https://github.com/python/mypy/issues/4182
271 parent_module = ".".join(__loader__.name.split(".")[:-1]) # type: ignore[name-defined]
272
273 static_dir = files(parent_module).joinpath("dist")
274 try:
275 with as_file(static_dir) as extracted_dir:
276 os.chdir(extracted_dir)
277 log.info("Starting HTTP Server at http://localhost:%i", SERVER_PORT)
278 with socketserver.TCPServer(
279 ("", SERVER_PORT), http.server.SimpleHTTPRequestHandler
280 ) as httpd:
281 log.debug("serving directory %s", static_dir)
282 webbrowser.open(f"http://localhost:{SERVER_PORT}")
283 httpd.serve_forever()
284 except KeyboardInterrupt:
285 log.info("Shutting Down HTTP Server")
286
287
288 if __name__ == "__main__":
289 main()
290
[end of src/qcodes/monitor/monitor.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/qcodes/monitor/monitor.py b/src/qcodes/monitor/monitor.py
--- a/src/qcodes/monitor/monitor.py
+++ b/src/qcodes/monitor/monitor.py
@@ -147,7 +147,7 @@
use_root_instrument: Defines if parameters are grouped according to
parameter.root_instrument or parameter.instrument
"""
- super().__init__()
+ super().__init__(daemon=True)
# Check that all values are valid parameters
for parameter in parameters:
| {"golden_diff": "diff --git a/src/qcodes/monitor/monitor.py b/src/qcodes/monitor/monitor.py\n--- a/src/qcodes/monitor/monitor.py\n+++ b/src/qcodes/monitor/monitor.py\n@@ -147,7 +147,7 @@\n use_root_instrument: Defines if parameters are grouped according to\n parameter.root_instrument or parameter.instrument\n \"\"\"\n- super().__init__()\n+ super().__init__(daemon=True)\n \n # Check that all values are valid parameters\n for parameter in parameters:\n", "issue": "Cannot exit ipython with running qcodes.Monitor()\n### Steps to reproduce\r\niPython version 8.10, qcodes version 0.42, Windows 10 Pro 22H2\r\n\r\n1. open ipython\r\n2. from qcodes import Monitor\r\nmonitor = Monitor()\r\n3. exit ipython\r\n\r\nIt hangs instead of closing iPython. The terminal must be terminated.\r\n\n", "before_files": [{"content": "#! /usr/bin/env python\n# vim:fenc=utf-8\n#\n# Copyright \u00a9 2017 unga <[email protected]>\n#\n# Distributed under terms of the MIT license.\n\"\"\"\nMonitor a set of parameters in a background thread\nstream output over websocket\n\nTo start monitor, run this file, or if qcodes is installed as a module:\n\n``% python -m qcodes.monitor.monitor``\n\nAdd parameters to monitor in your measurement by creating a new monitor with a\nlist of parameters to monitor:\n\n``monitor = qcodes.Monitor(param1, param2, param3, ...)``\n\"\"\"\nfrom __future__ import annotations\n\nimport asyncio\nimport json\nimport logging\nimport os\nimport socketserver\nimport time\nimport webbrowser\nfrom asyncio import CancelledError\nfrom collections import defaultdict\nfrom collections.abc import Awaitable, Sequence\nfrom contextlib import suppress\nfrom importlib.resources import as_file, files\nfrom threading import Event, Thread\nfrom typing import Any, Callable\n\nimport websockets\nimport websockets.exceptions\nimport websockets.server\n\nfrom qcodes.parameters import Parameter\n\nWEBSOCKET_PORT = 5678\nSERVER_PORT = 3000\n\nlog = logging.getLogger(__name__)\n\n\ndef _get_metadata(\n *parameters: Parameter, use_root_instrument: bool = True\n) -> dict[str, Any]:\n \"\"\"\n Return a dictionary that contains the parameter metadata grouped by the\n instrument it belongs to.\n \"\"\"\n metadata_timestamp = time.time()\n # group metadata by instrument\n metas: dict[Any, Any] = defaultdict(list)\n for parameter in parameters:\n # Get the latest value from the parameter,\n # respecting the max_val_age parameter\n meta: dict[str, float | str | None] = {}\n meta[\"value\"] = str(parameter.get_latest())\n timestamp = parameter.get_latest.get_timestamp()\n if timestamp is not None:\n meta[\"ts\"] = timestamp.timestamp()\n else:\n meta[\"ts\"] = None\n meta[\"name\"] = parameter.label or parameter.name\n meta[\"unit\"] = parameter.unit\n\n # find the base instrument that this parameter belongs to\n if use_root_instrument:\n baseinst = parameter.root_instrument\n else:\n baseinst = parameter.instrument\n if baseinst is None:\n metas[\"Unbound Parameter\"].append(meta)\n else:\n metas[str(baseinst)].append(meta)\n\n # Create list of parameters, grouped by instrument\n parameters_out = []\n for instrument in metas:\n temp = {\"instrument\": instrument, \"parameters\": metas[instrument]}\n parameters_out.append(temp)\n\n state = {\"ts\": metadata_timestamp, \"parameters\": parameters_out}\n return state\n\n\ndef _handler(\n parameters: Sequence[Parameter], interval: float, use_root_instrument: bool = True\n) -> Callable[[websockets.server.WebSocketServerProtocol, str], Awaitable[None]]:\n \"\"\"\n Return the websockets 
server handler.\n \"\"\"\n\n async def server_func(\n websocket: websockets.server.WebSocketServerProtocol, _: str\n ) -> None:\n \"\"\"\n Create a websockets handler that sends parameter values to a listener\n every \"interval\" seconds.\n \"\"\"\n while True:\n try:\n # Update the parameter values\n try:\n meta = _get_metadata(\n *parameters, use_root_instrument=use_root_instrument\n )\n except ValueError:\n log.exception(\"Error getting parameters\")\n break\n log.debug(\"sending.. to %r\", websocket)\n await websocket.send(json.dumps(meta))\n # Wait for interval seconds and then send again\n await asyncio.sleep(interval)\n except (CancelledError, websockets.exceptions.ConnectionClosed):\n log.debug(\"Got CancelledError or ConnectionClosed\",\n exc_info=True)\n break\n log.debug(\"Closing websockets connection\")\n\n return server_func\n\n\nclass Monitor(Thread):\n \"\"\"\n QCodes Monitor - WebSockets server to monitor qcodes parameters.\n \"\"\"\n running = None\n\n def __init__(\n self,\n *parameters: Parameter,\n interval: float = 1,\n use_root_instrument: bool = True,\n ):\n \"\"\"\n Monitor qcodes parameters.\n\n Args:\n *parameters: Parameters to monitor.\n interval: How often one wants to refresh the values.\n use_root_instrument: Defines if parameters are grouped according to\n parameter.root_instrument or parameter.instrument\n \"\"\"\n super().__init__()\n\n # Check that all values are valid parameters\n for parameter in parameters:\n if not isinstance(parameter, Parameter):\n raise TypeError(f\"We can only monitor QCodes \"\n f\"Parameters, not {type(parameter)}\")\n\n self.loop: asyncio.AbstractEventLoop | None = None\n self._stop_loop_future: asyncio.Future | None = None\n self._parameters = parameters\n self.loop_is_closed = Event()\n self.server_is_started = Event()\n self.handler = _handler(\n parameters, interval=interval, use_root_instrument=use_root_instrument\n )\n log.debug(\"Start monitoring thread\")\n if Monitor.running:\n # stop the old server\n log.debug(\"Stopping and restarting server\")\n Monitor.running.stop()\n self.start()\n\n # Wait until the loop is running\n self.server_is_started.wait(timeout=5)\n if not self.server_is_started.is_set():\n raise RuntimeError(\"Failed to start server\")\n Monitor.running = self\n\n def run(self) -> None:\n \"\"\"\n Start the event loop and run forever.\n \"\"\"\n log.debug(\"Running Websocket server\")\n\n async def run_loop() -> None:\n self.loop = asyncio.get_running_loop()\n self._stop_loop_future = self.loop.create_future()\n\n async with websockets.server.serve(\n self.handler, \"127.0.0.1\", WEBSOCKET_PORT, close_timeout=1\n ):\n self.server_is_started.set()\n try:\n await self._stop_loop_future\n except asyncio.CancelledError:\n log.debug(\"Websocket server thread shutting down\")\n\n try:\n asyncio.run(run_loop())\n finally:\n self.loop_is_closed.set()\n\n def update_all(self) -> None:\n \"\"\"\n Update all parameters in the monitor.\n \"\"\"\n for parameter in self._parameters:\n # call get if it can be called without arguments\n with suppress(TypeError):\n parameter.get()\n\n def stop(self) -> None:\n \"\"\"\n Shutdown the server, close the event loop and join the thread.\n Setting active Monitor to ``None``.\n \"\"\"\n self.join()\n Monitor.running = None\n\n def join(self, timeout: float | None = None) -> None:\n \"\"\"\n Overwrite ``Thread.join`` to make sure server is stopped before\n joining avoiding a potential deadlock.\n \"\"\"\n log.debug(\"Shutting down server\")\n if not self.is_alive():\n # we run 
this check before trying to run to prevent a cryptic\n # error message\n log.debug(\"monitor is dead\")\n return\n try:\n if self.loop is not None and self._stop_loop_future is not None:\n log.debug(\"Instructing server to stop event loop.\")\n self.loop.call_soon_threadsafe(self._stop_loop_future.cancel)\n else:\n log.debug(\"No event loop found. Cannot stop event loop.\")\n except RuntimeError:\n # the above may throw a runtime error if the loop is already\n # stopped in which case there is nothing more to do\n log.exception(\"Could not close loop\")\n self.loop_is_closed.wait(timeout=5)\n if not self.loop_is_closed.is_set():\n raise RuntimeError(\"Failed to join loop\")\n log.debug(\"Loop reported closed\")\n super().join(timeout=timeout)\n log.debug(\"Monitor Thread has joined\")\n\n @staticmethod\n def show() -> None:\n \"\"\"\n Overwrite this method to show/raise your monitor GUI\n F.ex.\n\n ::\n\n import webbrowser\n url = \"localhost:3000\"\n # Open URL in new window, raising the window if possible.\n webbrowser.open_new(url)\n\n \"\"\"\n webbrowser.open(f\"http://localhost:{SERVER_PORT}\")\n\n\ndef main() -> None:\n import http.server\n\n # If this file is run, create a simple webserver that serves a simple\n # website that can be used to view monitored parameters.\n # # https://github.com/python/mypy/issues/4182\n parent_module = \".\".join(__loader__.name.split(\".\")[:-1]) # type: ignore[name-defined]\n\n static_dir = files(parent_module).joinpath(\"dist\")\n try:\n with as_file(static_dir) as extracted_dir:\n os.chdir(extracted_dir)\n log.info(\"Starting HTTP Server at http://localhost:%i\", SERVER_PORT)\n with socketserver.TCPServer(\n (\"\", SERVER_PORT), http.server.SimpleHTTPRequestHandler\n ) as httpd:\n log.debug(\"serving directory %s\", static_dir)\n webbrowser.open(f\"http://localhost:{SERVER_PORT}\")\n httpd.serve_forever()\n except KeyboardInterrupt:\n log.info(\"Shutting Down HTTP Server\")\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "src/qcodes/monitor/monitor.py"}]} | 3,377 | 115 |
gh_patches_debug_21742 | rasdani/github-patches | git_diff | pypa__pip-7326 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Call for clarification: acceptable values for build-system.requires in pyproject.toml
**Environment**
* pip version: 19.0.3
* Python version: 3.7
* OS: GNU/Linux
<!-- Feel free to add more information about your environment here -->
**Description**
<!-- A clear and concise description of what the bug is. -->
It's not clear, nor is it clearly specified by PEP 518. But I needed to have an in-tree build back-end.
So I've hacked it via installing with a relative path.
*And it works!*
Yet, @pradyunsg has pointed out that the way I used probably doesn't conform to PEP 508.
So I tried some other ways to point to the in-tree distribution. And those didn't work.
**How to Reproduce**
<!-- Describe the steps to reproduce this bug. -->
(this works)
```toml
[build-system]
requires = ["./build-aiohttp", ]
build-backend = "build_aiohttp.api"
# not yet supported, so it doesn't influence anything, it's for forward-compat:
backend-path = "./build-aiohttp"
```
But if, instead of `"./build-aiohttp"` in `requires`, I try any of `"file://build-aiohttp"`, `"file:///./build-aiohttp"`, `"build_aiohttp @ file://./build-aiohttp"`, `"build_aiohttp @ file:./build-aiohttp"`, pip fails to recognize those as installable requirements.
**Expected behavior**
<!-- A clear and concise description of what you expected to happen. -->
I don't know what to expect. The method which works seems to be undefined in PEPs so I probably shouldn't rely on it.
Pip may either decide to improve the validation of the `requires` option or document it as being permissive...
P.S. Oh and, by the way, I was able to test my other PEP517 backend outside of the project tree via
```toml
[build-system]
requires = ["../fortunate_pkg"]
```
so this relative path feature proves to be quite useful for development/debugging purposes.
</issue>
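The underlying question is which of these strings are valid PEP 508 requirements at all. As a rough sketch (not pip's own code path, and assuming the third-party `packaging` library, which pip vendors for exactly this kind of parsing, is installed), the candidate strings from the report can be checked directly; anything that raises `InvalidRequirement` falls outside PEP 508 regardless of whether a particular pip version happens to accept it:

```python
# Illustrative check only; the exact set of accepted strings is the point under discussion.
from packaging.requirements import InvalidRequirement, Requirement

candidates = [
    "./build-aiohttp",                       # bare relative path from the report
    "file://build-aiohttp",
    "build_aiohttp @ file:./build-aiohttp",
    "setuptools>=40.8.0",                    # an unambiguously valid PEP 508 requirement
]

for candidate in candidates:
    try:
        req = Requirement(candidate)
    except InvalidRequirement as exc:
        print(f"invalid: {candidate!r} ({exc})")
    else:
        print(f"valid:   {candidate!r} -> name={req.name!r}, url={req.url!r}")
```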
<code>
[start of src/pip/_internal/pyproject.py]
1 from __future__ import absolute_import
2
3 import io
4 import os
5 import sys
6
7 from pip._vendor import pytoml, six
8
9 from pip._internal.exceptions import InstallationError
10 from pip._internal.utils.typing import MYPY_CHECK_RUNNING
11
12 if MYPY_CHECK_RUNNING:
13 from typing import Any, Tuple, Optional, List
14
15
16 def _is_list_of_str(obj):
17 # type: (Any) -> bool
18 return (
19 isinstance(obj, list) and
20 all(isinstance(item, six.string_types) for item in obj)
21 )
22
23
24 def make_pyproject_path(unpacked_source_directory):
25 # type: (str) -> str
26 path = os.path.join(unpacked_source_directory, 'pyproject.toml')
27
28 # Python2 __file__ should not be unicode
29 if six.PY2 and isinstance(path, six.text_type):
30 path = path.encode(sys.getfilesystemencoding())
31
32 return path
33
34
35 def load_pyproject_toml(
36 use_pep517, # type: Optional[bool]
37 pyproject_toml, # type: str
38 setup_py, # type: str
39 req_name # type: str
40 ):
41 # type: (...) -> Optional[Tuple[List[str], str, List[str]]]
42 """Load the pyproject.toml file.
43
44 Parameters:
45 use_pep517 - Has the user requested PEP 517 processing? None
46 means the user hasn't explicitly specified.
47 pyproject_toml - Location of the project's pyproject.toml file
48 setup_py - Location of the project's setup.py file
49 req_name - The name of the requirement we're processing (for
50 error reporting)
51
52 Returns:
53 None if we should use the legacy code path, otherwise a tuple
54 (
55 requirements from pyproject.toml,
56 name of PEP 517 backend,
57 requirements we should check are installed after setting
58 up the build environment
59 )
60 """
61 has_pyproject = os.path.isfile(pyproject_toml)
62 has_setup = os.path.isfile(setup_py)
63
64 if has_pyproject:
65 with io.open(pyproject_toml, encoding="utf-8") as f:
66 pp_toml = pytoml.load(f)
67 build_system = pp_toml.get("build-system")
68 else:
69 build_system = None
70
71 # The following cases must use PEP 517
72 # We check for use_pep517 being non-None and falsey because that means
73 # the user explicitly requested --no-use-pep517. The value 0 as
74 # opposed to False can occur when the value is provided via an
75 # environment variable or config file option (due to the quirk of
76 # strtobool() returning an integer in pip's configuration code).
77 if has_pyproject and not has_setup:
78 if use_pep517 is not None and not use_pep517:
79 raise InstallationError(
80 "Disabling PEP 517 processing is invalid: "
81 "project does not have a setup.py"
82 )
83 use_pep517 = True
84 elif build_system and "build-backend" in build_system:
85 if use_pep517 is not None and not use_pep517:
86 raise InstallationError(
87 "Disabling PEP 517 processing is invalid: "
88 "project specifies a build backend of {} "
89 "in pyproject.toml".format(
90 build_system["build-backend"]
91 )
92 )
93 use_pep517 = True
94
95 # If we haven't worked out whether to use PEP 517 yet,
96 # and the user hasn't explicitly stated a preference,
97 # we do so if the project has a pyproject.toml file.
98 elif use_pep517 is None:
99 use_pep517 = has_pyproject
100
101 # At this point, we know whether we're going to use PEP 517.
102 assert use_pep517 is not None
103
104 # If we're using the legacy code path, there is nothing further
105 # for us to do here.
106 if not use_pep517:
107 return None
108
109 if build_system is None:
110 # Either the user has a pyproject.toml with no build-system
111 # section, or the user has no pyproject.toml, but has opted in
112 # explicitly via --use-pep517.
113 # In the absence of any explicit backend specification, we
114 # assume the setuptools backend that most closely emulates the
115 # traditional direct setup.py execution, and require wheel and
116 # a version of setuptools that supports that backend.
117
118 build_system = {
119 "requires": ["setuptools>=40.8.0", "wheel"],
120 "build-backend": "setuptools.build_meta:__legacy__",
121 }
122
123 # If we're using PEP 517, we have build system information (either
124 # from pyproject.toml, or defaulted by the code above).
125 # Note that at this point, we do not know if the user has actually
126 # specified a backend, though.
127 assert build_system is not None
128
129 # Ensure that the build-system section in pyproject.toml conforms
130 # to PEP 518.
131 error_template = (
132 "{package} has a pyproject.toml file that does not comply "
133 "with PEP 518: {reason}"
134 )
135
136 # Specifying the build-system table but not the requires key is invalid
137 if "requires" not in build_system:
138 raise InstallationError(
139 error_template.format(package=req_name, reason=(
140 "it has a 'build-system' table but not "
141 "'build-system.requires' which is mandatory in the table"
142 ))
143 )
144
145 # Error out if requires is not a list of strings
146 requires = build_system["requires"]
147 if not _is_list_of_str(requires):
148 raise InstallationError(error_template.format(
149 package=req_name,
150 reason="'build-system.requires' is not a list of strings.",
151 ))
152
153 backend = build_system.get("build-backend")
154 check = [] # type: List[str]
155 if backend is None:
156 # If the user didn't specify a backend, we assume they want to use
157 # the setuptools backend. But we can't be sure they have included
158 # a version of setuptools which supplies the backend, or wheel
159 # (which is needed by the backend) in their requirements. So we
160 # make a note to check that those requirements are present once
161 # we have set up the environment.
162 # This is quite a lot of work to check for a very specific case. But
163 # the problem is, that case is potentially quite common - projects that
164 # adopted PEP 518 early for the ability to specify requirements to
165 # execute setup.py, but never considered needing to mention the build
166 # tools themselves. The original PEP 518 code had a similar check (but
167 # implemented in a different way).
168 backend = "setuptools.build_meta:__legacy__"
169 check = ["setuptools>=40.8.0", "wheel"]
170
171 return (requires, backend, check)
172
[end of src/pip/_internal/pyproject.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/pip/_internal/pyproject.py b/src/pip/_internal/pyproject.py
--- a/src/pip/_internal/pyproject.py
+++ b/src/pip/_internal/pyproject.py
@@ -5,6 +5,7 @@
import sys
from pip._vendor import pytoml, six
+from pip._vendor.packaging.requirements import InvalidRequirement, Requirement
from pip._internal.exceptions import InstallationError
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
@@ -150,6 +151,21 @@
reason="'build-system.requires' is not a list of strings.",
))
+ # Each requirement must be valid as per PEP 508
+ for requirement in requires:
+ try:
+ Requirement(requirement)
+ except InvalidRequirement:
+ raise InstallationError(
+ error_template.format(
+ package=req_name,
+ reason=(
+ "'build-system.requires' contains an invalid "
+ "requirement: {!r}".format(requirement)
+ ),
+ )
+ )
+
backend = build_system.get("build-backend")
check = [] # type: List[str]
if backend is None:
| {"golden_diff": "diff --git a/src/pip/_internal/pyproject.py b/src/pip/_internal/pyproject.py\n--- a/src/pip/_internal/pyproject.py\n+++ b/src/pip/_internal/pyproject.py\n@@ -5,6 +5,7 @@\n import sys\n \n from pip._vendor import pytoml, six\n+from pip._vendor.packaging.requirements import InvalidRequirement, Requirement\n \n from pip._internal.exceptions import InstallationError\n from pip._internal.utils.typing import MYPY_CHECK_RUNNING\n@@ -150,6 +151,21 @@\n reason=\"'build-system.requires' is not a list of strings.\",\n ))\n \n+ # Each requirement must be valid as per PEP 508\n+ for requirement in requires:\n+ try:\n+ Requirement(requirement)\n+ except InvalidRequirement:\n+ raise InstallationError(\n+ error_template.format(\n+ package=req_name,\n+ reason=(\n+ \"'build-system.requires' contains an invalid \"\n+ \"requirement: {!r}\".format(requirement)\n+ ),\n+ )\n+ )\n+\n backend = build_system.get(\"build-backend\")\n check = [] # type: List[str]\n if backend is None:\n", "issue": "Call for clarification: acceptable values for build-system.requires in pyproject.toml\n**Environment**\r\n\r\n* pip version: 19.0.3\r\n* Python version: 3.7\r\n* OS: GNU/Linux\r\n\r\n<!-- Feel free to add more information about your environment here -->\r\n\r\n**Description**\r\n<!-- A clear and concise description of what the bug is. -->\r\nIt's not clear nor is clearly specified by PEP518. But I was having a need to have an in-tree build back-end.\r\nSo I've hacked it via installing with a relative path.\r\n*And it works!*\r\nYet, @pradyunsg has pointed out that the way I used probably doesn't conform to PEP 508.\r\nSo I tried some other ways to point to the in-tree distribution. And those didn't work.\r\n\r\n**How to Reproduce**\r\n<!-- Describe the steps to reproduce this bug. -->\r\n(this works)\r\n```toml\r\n[build-system]\r\nrequires = [\"./build-aiohttp\", ]\r\nbuild-backend = \"build_aiohttp.api\"\r\n\r\n# not yet supported, so it doesn't influence anything, it's for forward-compat:\r\nbackend-path = \"./build-aiohttp\"\r\n```\r\n\r\nBut if instead of `\"./build-aiohttp\"` in `requires` I try any of `\"file://build-aiohttp\"`, `\"file:///./build-aiohttp\"`, `\"build_aiohttp @ file://./build-aiohttp\"`, `\"build_aiohttp @ file:./build-aiohttp\"` pip fails to recognize those as installables.\r\n\r\n**Expected behavior**\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\nI don't know what to expect. The method which works seems to be undefined in PEPs so I probably shouldn't rely on it.\r\n\r\nPip may either decide to improve the filtering of `requires` option or document it being permissive...\r\n\r\nP.S. 
Oh and, by the way, I was able to test my other PEP517 backend outside of the project tree via\r\n```toml\r\n[build-system]\r\nrequires = [\"../fortunate_pkg\"]\r\n```\r\nso this relative path feature proves to be quite useful for development/debugging purposes.\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport io\nimport os\nimport sys\n\nfrom pip._vendor import pytoml, six\n\nfrom pip._internal.exceptions import InstallationError\nfrom pip._internal.utils.typing import MYPY_CHECK_RUNNING\n\nif MYPY_CHECK_RUNNING:\n from typing import Any, Tuple, Optional, List\n\n\ndef _is_list_of_str(obj):\n # type: (Any) -> bool\n return (\n isinstance(obj, list) and\n all(isinstance(item, six.string_types) for item in obj)\n )\n\n\ndef make_pyproject_path(unpacked_source_directory):\n # type: (str) -> str\n path = os.path.join(unpacked_source_directory, 'pyproject.toml')\n\n # Python2 __file__ should not be unicode\n if six.PY2 and isinstance(path, six.text_type):\n path = path.encode(sys.getfilesystemencoding())\n\n return path\n\n\ndef load_pyproject_toml(\n use_pep517, # type: Optional[bool]\n pyproject_toml, # type: str\n setup_py, # type: str\n req_name # type: str\n):\n # type: (...) -> Optional[Tuple[List[str], str, List[str]]]\n \"\"\"Load the pyproject.toml file.\n\n Parameters:\n use_pep517 - Has the user requested PEP 517 processing? None\n means the user hasn't explicitly specified.\n pyproject_toml - Location of the project's pyproject.toml file\n setup_py - Location of the project's setup.py file\n req_name - The name of the requirement we're processing (for\n error reporting)\n\n Returns:\n None if we should use the legacy code path, otherwise a tuple\n (\n requirements from pyproject.toml,\n name of PEP 517 backend,\n requirements we should check are installed after setting\n up the build environment\n )\n \"\"\"\n has_pyproject = os.path.isfile(pyproject_toml)\n has_setup = os.path.isfile(setup_py)\n\n if has_pyproject:\n with io.open(pyproject_toml, encoding=\"utf-8\") as f:\n pp_toml = pytoml.load(f)\n build_system = pp_toml.get(\"build-system\")\n else:\n build_system = None\n\n # The following cases must use PEP 517\n # We check for use_pep517 being non-None and falsey because that means\n # the user explicitly requested --no-use-pep517. 
The value 0 as\n # opposed to False can occur when the value is provided via an\n # environment variable or config file option (due to the quirk of\n # strtobool() returning an integer in pip's configuration code).\n if has_pyproject and not has_setup:\n if use_pep517 is not None and not use_pep517:\n raise InstallationError(\n \"Disabling PEP 517 processing is invalid: \"\n \"project does not have a setup.py\"\n )\n use_pep517 = True\n elif build_system and \"build-backend\" in build_system:\n if use_pep517 is not None and not use_pep517:\n raise InstallationError(\n \"Disabling PEP 517 processing is invalid: \"\n \"project specifies a build backend of {} \"\n \"in pyproject.toml\".format(\n build_system[\"build-backend\"]\n )\n )\n use_pep517 = True\n\n # If we haven't worked out whether to use PEP 517 yet,\n # and the user hasn't explicitly stated a preference,\n # we do so if the project has a pyproject.toml file.\n elif use_pep517 is None:\n use_pep517 = has_pyproject\n\n # At this point, we know whether we're going to use PEP 517.\n assert use_pep517 is not None\n\n # If we're using the legacy code path, there is nothing further\n # for us to do here.\n if not use_pep517:\n return None\n\n if build_system is None:\n # Either the user has a pyproject.toml with no build-system\n # section, or the user has no pyproject.toml, but has opted in\n # explicitly via --use-pep517.\n # In the absence of any explicit backend specification, we\n # assume the setuptools backend that most closely emulates the\n # traditional direct setup.py execution, and require wheel and\n # a version of setuptools that supports that backend.\n\n build_system = {\n \"requires\": [\"setuptools>=40.8.0\", \"wheel\"],\n \"build-backend\": \"setuptools.build_meta:__legacy__\",\n }\n\n # If we're using PEP 517, we have build system information (either\n # from pyproject.toml, or defaulted by the code above).\n # Note that at this point, we do not know if the user has actually\n # specified a backend, though.\n assert build_system is not None\n\n # Ensure that the build-system section in pyproject.toml conforms\n # to PEP 518.\n error_template = (\n \"{package} has a pyproject.toml file that does not comply \"\n \"with PEP 518: {reason}\"\n )\n\n # Specifying the build-system table but not the requires key is invalid\n if \"requires\" not in build_system:\n raise InstallationError(\n error_template.format(package=req_name, reason=(\n \"it has a 'build-system' table but not \"\n \"'build-system.requires' which is mandatory in the table\"\n ))\n )\n\n # Error out if requires is not a list of strings\n requires = build_system[\"requires\"]\n if not _is_list_of_str(requires):\n raise InstallationError(error_template.format(\n package=req_name,\n reason=\"'build-system.requires' is not a list of strings.\",\n ))\n\n backend = build_system.get(\"build-backend\")\n check = [] # type: List[str]\n if backend is None:\n # If the user didn't specify a backend, we assume they want to use\n # the setuptools backend. But we can't be sure they have included\n # a version of setuptools which supplies the backend, or wheel\n # (which is needed by the backend) in their requirements. So we\n # make a note to check that those requirements are present once\n # we have set up the environment.\n # This is quite a lot of work to check for a very specific case. 
But\n # the problem is, that case is potentially quite common - projects that\n # adopted PEP 518 early for the ability to specify requirements to\n # execute setup.py, but never considered needing to mention the build\n # tools themselves. The original PEP 518 code had a similar check (but\n # implemented in a different way).\n backend = \"setuptools.build_meta:__legacy__\"\n check = [\"setuptools>=40.8.0\", \"wheel\"]\n\n return (requires, backend, check)\n", "path": "src/pip/_internal/pyproject.py"}]} | 3,004 | 261 |
gh_patches_debug_33161 | rasdani/github-patches | git_diff | sql-machine-learning__elasticdl-952 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use tf.function-decorated functions to accelerate model compute
</issue>
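In practice this means compiling the per-minibatch forward and backward pass into a TensorFlow graph instead of running it op by op in eager mode. A standalone sketch of the pattern (a generic Keras model and loss under eager TensorFlow 2.x, not ElasticDL's worker code) looks like this:

```python
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(10)])
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)


@tf.function  # traced into a graph on the first call, reused on later calls
def training_step(features, labels):
    with tf.GradientTape() as tape:
        outputs = model(features, training=True)
        loss = loss_fn(labels, outputs)
    grads = tape.gradient(loss, model.trainable_variables)
    return loss, grads


features = tf.random.normal([32, 4])
labels = tf.zeros([32], dtype=tf.int32)
loss, grads = training_step(features, labels)  # later calls reuse the traced graph
```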
<code>
[start of elasticdl/python/worker/worker.py]
1 import logging
2 import time
3 import traceback
4 from contextlib import closing
5
6 import recordio
7 import tensorflow as tf
8 from tensorflow.python.ops import math_ops
9
10 from elasticdl.proto import elasticdl_pb2, elasticdl_pb2_grpc
11 from elasticdl.python.common.model_helper import (
12 load_model_from_module,
13 load_module,
14 )
15 from elasticdl.python.common.ndarray import (
16 ndarray_to_tensor,
17 tensor_to_ndarray,
18 )
19
20 # The default maximum number of a minibatch retry as its results
21 # (e.g. gradients) are not accepted by master.
22 DEFAULT_MAX_MINIBATCH_RETRY_NUM = 64
23
24
25 class Worker(object):
26 """ElasticDL worker"""
27
28 def __init__(
29 self,
30 worker_id,
31 model_file,
32 input_fn="input_fn",
33 loss="loss",
34 optimizer="optimizer",
35 eval_metrics_fn="eval_metrics_fn",
36 channel=None,
37 model_def=None,
38 model_params="",
39 max_minibatch_retry_num=DEFAULT_MAX_MINIBATCH_RETRY_NUM,
40 ):
41 """
42 Arguments:
43 model_file: A module to define the model
44 channel: grpc channel
45 max_minibatch_retry_num: The maximum number of a minibatch retry
46 as its results (e.g. gradients) are not accepted by master.
47 """
48 self._logger = logging.getLogger(__name__)
49 self._worker_id = worker_id
50 model_module = load_module(model_file).__dict__
51 self._model = load_model_from_module(
52 model_def, model_module, model_params
53 )
54 self._var_created = self._model.built
55 self._input_fn = model_module[input_fn]
56 self._opt_fn = model_module[optimizer]
57 self._loss = model_module[loss]
58 self._eval_metrics_fn = model_module[eval_metrics_fn]
59
60 if channel is None:
61 self._stub = None
62 else:
63 self._stub = elasticdl_pb2_grpc.MasterStub(channel)
64 self._max_minibatch_retry_num = max_minibatch_retry_num
65 self._model_version = -1
66
67 def get_task(self):
68 """
69 get task from master
70 """
71 req = elasticdl_pb2.GetTaskRequest()
72 req.worker_id = self._worker_id
73
74 return self._stub.GetTask(req)
75
76 def get_model(self, version, method):
77 """
78 get model from master, and update model_version
79 """
80 req = elasticdl_pb2.GetModelRequest()
81 req.version = version
82 req.method = method
83 model = self._stub.GetModel(req)
84
85 for var in self._model.trainable_variables:
86 # Assumes all trainable variables exist in model.param.
87 var.assign(tensor_to_ndarray(model.param[var.name]))
88 self._model_version = model.version
89
90 def report_task_result(self, task_id, err_msg):
91 """
92 report task result to master
93 """
94 report = elasticdl_pb2.ReportTaskResultRequest()
95 report.task_id = task_id
96 report.err_message = err_msg
97 return self._stub.ReportTaskResult(report)
98
99 def report_variable(self):
100 """
101 report variable to ps.
102 """
103 req = elasticdl_pb2.ReportVariableRequest()
104 for v in self._model.trainable_variables:
105 req.variable[v.name].CopyFrom(ndarray_to_tensor(v.numpy()))
106 self._stub.ReportVariable(req)
107
108 def report_gradient(self, grads):
109 """
110 report gradient to ps, return (accepted, model_version) from rpc call.
111 """
112 req = elasticdl_pb2.ReportGradientRequest()
113 for g, v in zip(grads, self._model.trainable_variables):
114 req.gradient[v.name].CopyFrom(ndarray_to_tensor(g.numpy()))
115 req.model_version = self._model_version
116 res = self._stub.ReportGradient(req)
117 return res.accepted, res.model_version
118
119 def report_evaluation_metrics(self, evaluation_metrics):
120 """
121 report evaluation metrics to ps, return (accepted, model_version)
122 from rpc call.
123 """
124 req = elasticdl_pb2.ReportEvaluationMetricsRequest()
125 for k, v in evaluation_metrics.items():
126 v_np = v.numpy()
127 # If scalar, convert to numpy 1D array with size 1
128 if not v_np.shape:
129 v_np = v_np.reshape(1)
130 req.evaluation_metrics[k].CopyFrom(ndarray_to_tensor(v_np))
131 req.model_version = self._model_version
132 res = self._stub.ReportEvaluationMetrics(req)
133 return res.accepted, res.model_version
134
135 def report_prediction_outputs(self, predictions):
136 self._logger.info("Predicted: %f" % predictions.numpy())
137 # TODO: Decide whether we need to send results to master first
138 # or write results to destination directly from workers.
139 # Also, need to think about how users configure where to
140 # write results.
141 return True
142
143 def _get_batch(self, reader, batch_size):
144 res = []
145 for i in range(batch_size):
146 record = reader.record()
147 if record is None:
148 break
149 res.append(record)
150 return res
151
152 def _create_variable_and_report(self, features):
153 # Use model.call to create variables, then report to ps
154 _ = self._model.call(features)
155 self.report_variable()
156 self._var_created = True
157
158 def _run_training_task(self, features, labels):
159 with tf.GradientTape() as tape:
160 outputs = self._model.call(features, training=True)
161 loss = self._loss(outputs, labels)
162 # Add regularization loss if any
163 if self._model.losses:
164 loss += math_ops.add_n(self._model.losses)
165 grads = tape.gradient(loss, self._model.trainable_variables)
166 accepted, min_model_version = self.report_gradient(grads)
167 return accepted, min_model_version, loss
168
169 def _run_evaluation_task(self, features, labels):
170 outputs = self._model.call(features, training=False)
171 evaluation_metrics = self._eval_metrics_fn(outputs, labels)
172 return self.report_evaluation_metrics(evaluation_metrics)
173
174 def _run_prediction_task(self, features):
175 predictions = self._model.call(features, training=False)
176 return self.report_prediction_outputs(predictions)
177
178 def _handle_task(self, task):
179 min_model_version = task.model_version
180 with closing(
181 recordio.Scanner(
182 task.shard_file_name, task.start, task.end - task.start
183 )
184 ) as reader:
185 while True:
186 record_buf = self._get_batch(reader, task.minibatch_size)
187 if not record_buf:
188 break
189 min_model_version = self._process_minibatch(
190 task, record_buf, min_model_version
191 )
192
193 def _process_minibatch(self, task, record_buf, min_model_version):
194 # TODO: Discuss how we separate input_fn for different tasks
195 features, labels = self._input_fn(record_buf)
196 if not self._var_created:
197 self._create_variable_and_report(features)
198 for _ in range(self._max_minibatch_retry_num):
199 if task.type == elasticdl_pb2.EVALUATION:
200 if min_model_version == -1:
201 if self._model_version < 0:
202 self.get_model(0, elasticdl_pb2.MINIMUM)
203 elif self._model_version != min_model_version:
204 self.get_model(min_model_version, elasticdl_pb2.FIXED)
205 accepted, _ = self._run_evaluation_task(features, labels)
206 if accepted:
207 break
208 elif task.type == elasticdl_pb2.TRAINING:
209 # TODO: optimize the logic to avoid unnecessary
210 # get_model call.
211 self.get_model(
212 max(self._model_version, min_model_version),
213 elasticdl_pb2.MINIMUM,
214 )
215 accepted, min_model_version, loss = self._run_training_task(
216 features, labels
217 )
218 if accepted:
219 self._logger.info("Loss is %f" % loss.numpy())
220 break
221 elif task.type == elasticdl_pb2.PREDICTION:
222 if self._model_version != min_model_version:
223 self.get_model(min_model_version, elasticdl_pb2.FIXED)
224 accepted = self._run_prediction_task(features)
225 if accepted:
226 break
227 else:
228 raise RuntimeError("Unrecognized task type, %s" % task.type)
229 else:
230 # Worker got stuck, fail the task.
231 # TODO: stop the worker if it fails to make any
232 # progress for some time.
233 raise RuntimeError("Worker got stuck")
234 return min_model_version
235
236 def run(self):
237 """
238 Fetches task from master and performs training or evaluation.
239 """
240 while True:
241 task = self.get_task()
242 if not task.shard_file_name:
243 if task.type == elasticdl_pb2.WAIT:
244 # Wait a few seconds then try to get_task again
245 time.sleep(5)
246 continue
247 else:
248 # No more task
249 self._logger.info("No more task, stopping")
250 break
251 self._logger.info("Receive a new task: %d", task.task_id)
252 err_msg = ""
253 try:
254 self._handle_task(task)
255 except RuntimeError as err:
256 err_msg = str(err)
257 traceback.print_exc()
258 except Exception as ex:
259 err_msg = str(ex)
260 traceback.print_exc()
261 raise ex
262 self.report_task_result(task.task_id, err_msg)
263
[end of elasticdl/python/worker/worker.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/elasticdl/python/worker/worker.py b/elasticdl/python/worker/worker.py
--- a/elasticdl/python/worker/worker.py
+++ b/elasticdl/python/worker/worker.py
@@ -155,7 +155,8 @@
self.report_variable()
self._var_created = True
- def _run_training_task(self, features, labels):
+ @tf.function
+ def training_process(self, features, labels):
with tf.GradientTape() as tape:
outputs = self._model.call(features, training=True)
loss = self._loss(outputs, labels)
@@ -163,16 +164,30 @@
if self._model.losses:
loss += math_ops.add_n(self._model.losses)
grads = tape.gradient(loss, self._model.trainable_variables)
+ return loss, grads
+
+ @tf.function
+ def evaluation_process(self, features, labels):
+ outputs = self._model.call(features, training=False)
+ evaluation_metrics = self._eval_metrics_fn(outputs, labels)
+ return evaluation_metrics
+
+ @tf.function
+ def predict_process(self, features):
+ outputs = self._model.call(features, training=False)
+ return outputs
+
+ def _run_training_task(self, features, labels):
+ loss, grads = self.training_process(features, labels)
accepted, min_model_version = self.report_gradient(grads)
return accepted, min_model_version, loss
def _run_evaluation_task(self, features, labels):
- outputs = self._model.call(features, training=False)
- evaluation_metrics = self._eval_metrics_fn(outputs, labels)
+ evaluation_metrics = self.evaluation_process(features, labels)
return self.report_evaluation_metrics(evaluation_metrics)
def _run_prediction_task(self, features):
- predictions = self._model.call(features, training=False)
+ predictions = self.predict_process(features)
return self.report_prediction_outputs(predictions)
def _handle_task(self, task):
| {"golden_diff": "diff --git a/elasticdl/python/worker/worker.py b/elasticdl/python/worker/worker.py\n--- a/elasticdl/python/worker/worker.py\n+++ b/elasticdl/python/worker/worker.py\n@@ -155,7 +155,8 @@\n self.report_variable()\n self._var_created = True\n \n- def _run_training_task(self, features, labels):\n+ @tf.function\n+ def training_process(self, features, labels):\n with tf.GradientTape() as tape:\n outputs = self._model.call(features, training=True)\n loss = self._loss(outputs, labels)\n@@ -163,16 +164,30 @@\n if self._model.losses:\n loss += math_ops.add_n(self._model.losses)\n grads = tape.gradient(loss, self._model.trainable_variables)\n+ return loss, grads\n+\n+ @tf.function\n+ def evaluation_process(self, features, labels):\n+ outputs = self._model.call(features, training=False)\n+ evaluation_metrics = self._eval_metrics_fn(outputs, labels)\n+ return evaluation_metrics\n+\n+ @tf.function\n+ def predict_process(self, features):\n+ outputs = self._model.call(features, training=False)\n+ return outputs\n+\n+ def _run_training_task(self, features, labels):\n+ loss, grads = self.training_process(features, labels)\n accepted, min_model_version = self.report_gradient(grads)\n return accepted, min_model_version, loss\n \n def _run_evaluation_task(self, features, labels):\n- outputs = self._model.call(features, training=False)\n- evaluation_metrics = self._eval_metrics_fn(outputs, labels)\n+ evaluation_metrics = self.evaluation_process(features, labels)\n return self.report_evaluation_metrics(evaluation_metrics)\n \n def _run_prediction_task(self, features):\n- predictions = self._model.call(features, training=False)\n+ predictions = self.predict_process(features)\n return self.report_prediction_outputs(predictions)\n \n def _handle_task(self, task):\n", "issue": "Use tf.function-decorated functions to accelerate model compute\n\n", "before_files": [{"content": "import logging\nimport time\nimport traceback\nfrom contextlib import closing\n\nimport recordio\nimport tensorflow as tf\nfrom tensorflow.python.ops import math_ops\n\nfrom elasticdl.proto import elasticdl_pb2, elasticdl_pb2_grpc\nfrom elasticdl.python.common.model_helper import (\n load_model_from_module,\n load_module,\n)\nfrom elasticdl.python.common.ndarray import (\n ndarray_to_tensor,\n tensor_to_ndarray,\n)\n\n# The default maximum number of a minibatch retry as its results\n# (e.g. gradients) are not accepted by master.\nDEFAULT_MAX_MINIBATCH_RETRY_NUM = 64\n\n\nclass Worker(object):\n \"\"\"ElasticDL worker\"\"\"\n\n def __init__(\n self,\n worker_id,\n model_file,\n input_fn=\"input_fn\",\n loss=\"loss\",\n optimizer=\"optimizer\",\n eval_metrics_fn=\"eval_metrics_fn\",\n channel=None,\n model_def=None,\n model_params=\"\",\n max_minibatch_retry_num=DEFAULT_MAX_MINIBATCH_RETRY_NUM,\n ):\n \"\"\"\n Arguments:\n model_file: A module to define the model\n channel: grpc channel\n max_minibatch_retry_num: The maximum number of a minibatch retry\n as its results (e.g. 
gradients) are not accepted by master.\n \"\"\"\n self._logger = logging.getLogger(__name__)\n self._worker_id = worker_id\n model_module = load_module(model_file).__dict__\n self._model = load_model_from_module(\n model_def, model_module, model_params\n )\n self._var_created = self._model.built\n self._input_fn = model_module[input_fn]\n self._opt_fn = model_module[optimizer]\n self._loss = model_module[loss]\n self._eval_metrics_fn = model_module[eval_metrics_fn]\n\n if channel is None:\n self._stub = None\n else:\n self._stub = elasticdl_pb2_grpc.MasterStub(channel)\n self._max_minibatch_retry_num = max_minibatch_retry_num\n self._model_version = -1\n\n def get_task(self):\n \"\"\"\n get task from master\n \"\"\"\n req = elasticdl_pb2.GetTaskRequest()\n req.worker_id = self._worker_id\n\n return self._stub.GetTask(req)\n\n def get_model(self, version, method):\n \"\"\"\n get model from master, and update model_version\n \"\"\"\n req = elasticdl_pb2.GetModelRequest()\n req.version = version\n req.method = method\n model = self._stub.GetModel(req)\n\n for var in self._model.trainable_variables:\n # Assumes all trainable variables exist in model.param.\n var.assign(tensor_to_ndarray(model.param[var.name]))\n self._model_version = model.version\n\n def report_task_result(self, task_id, err_msg):\n \"\"\"\n report task result to master\n \"\"\"\n report = elasticdl_pb2.ReportTaskResultRequest()\n report.task_id = task_id\n report.err_message = err_msg\n return self._stub.ReportTaskResult(report)\n\n def report_variable(self):\n \"\"\"\n report variable to ps.\n \"\"\"\n req = elasticdl_pb2.ReportVariableRequest()\n for v in self._model.trainable_variables:\n req.variable[v.name].CopyFrom(ndarray_to_tensor(v.numpy()))\n self._stub.ReportVariable(req)\n\n def report_gradient(self, grads):\n \"\"\"\n report gradient to ps, return (accepted, model_version) from rpc call.\n \"\"\"\n req = elasticdl_pb2.ReportGradientRequest()\n for g, v in zip(grads, self._model.trainable_variables):\n req.gradient[v.name].CopyFrom(ndarray_to_tensor(g.numpy()))\n req.model_version = self._model_version\n res = self._stub.ReportGradient(req)\n return res.accepted, res.model_version\n\n def report_evaluation_metrics(self, evaluation_metrics):\n \"\"\"\n report evaluation metrics to ps, return (accepted, model_version)\n from rpc call.\n \"\"\"\n req = elasticdl_pb2.ReportEvaluationMetricsRequest()\n for k, v in evaluation_metrics.items():\n v_np = v.numpy()\n # If scalar, convert to numpy 1D array with size 1\n if not v_np.shape:\n v_np = v_np.reshape(1)\n req.evaluation_metrics[k].CopyFrom(ndarray_to_tensor(v_np))\n req.model_version = self._model_version\n res = self._stub.ReportEvaluationMetrics(req)\n return res.accepted, res.model_version\n\n def report_prediction_outputs(self, predictions):\n self._logger.info(\"Predicted: %f\" % predictions.numpy())\n # TODO: Decide whether we need to send results to master first\n # or write results to destination directly from workers.\n # Also, need to think about how users configure where to\n # write results.\n return True\n\n def _get_batch(self, reader, batch_size):\n res = []\n for i in range(batch_size):\n record = reader.record()\n if record is None:\n break\n res.append(record)\n return res\n\n def _create_variable_and_report(self, features):\n # Use model.call to create variables, then report to ps\n _ = self._model.call(features)\n self.report_variable()\n self._var_created = True\n\n def _run_training_task(self, features, labels):\n with tf.GradientTape() as 
tape:\n outputs = self._model.call(features, training=True)\n loss = self._loss(outputs, labels)\n # Add regularization loss if any\n if self._model.losses:\n loss += math_ops.add_n(self._model.losses)\n grads = tape.gradient(loss, self._model.trainable_variables)\n accepted, min_model_version = self.report_gradient(grads)\n return accepted, min_model_version, loss\n\n def _run_evaluation_task(self, features, labels):\n outputs = self._model.call(features, training=False)\n evaluation_metrics = self._eval_metrics_fn(outputs, labels)\n return self.report_evaluation_metrics(evaluation_metrics)\n\n def _run_prediction_task(self, features):\n predictions = self._model.call(features, training=False)\n return self.report_prediction_outputs(predictions)\n\n def _handle_task(self, task):\n min_model_version = task.model_version\n with closing(\n recordio.Scanner(\n task.shard_file_name, task.start, task.end - task.start\n )\n ) as reader:\n while True:\n record_buf = self._get_batch(reader, task.minibatch_size)\n if not record_buf:\n break\n min_model_version = self._process_minibatch(\n task, record_buf, min_model_version\n )\n\n def _process_minibatch(self, task, record_buf, min_model_version):\n # TODO: Discuss how we separate input_fn for different tasks\n features, labels = self._input_fn(record_buf)\n if not self._var_created:\n self._create_variable_and_report(features)\n for _ in range(self._max_minibatch_retry_num):\n if task.type == elasticdl_pb2.EVALUATION:\n if min_model_version == -1:\n if self._model_version < 0:\n self.get_model(0, elasticdl_pb2.MINIMUM)\n elif self._model_version != min_model_version:\n self.get_model(min_model_version, elasticdl_pb2.FIXED)\n accepted, _ = self._run_evaluation_task(features, labels)\n if accepted:\n break\n elif task.type == elasticdl_pb2.TRAINING:\n # TODO: optimize the logic to avoid unnecessary\n # get_model call.\n self.get_model(\n max(self._model_version, min_model_version),\n elasticdl_pb2.MINIMUM,\n )\n accepted, min_model_version, loss = self._run_training_task(\n features, labels\n )\n if accepted:\n self._logger.info(\"Loss is %f\" % loss.numpy())\n break\n elif task.type == elasticdl_pb2.PREDICTION:\n if self._model_version != min_model_version:\n self.get_model(min_model_version, elasticdl_pb2.FIXED)\n accepted = self._run_prediction_task(features)\n if accepted:\n break\n else:\n raise RuntimeError(\"Unrecognized task type, %s\" % task.type)\n else:\n # Worker got stuck, fail the task.\n # TODO: stop the worker if it fails to make any\n # progress for some time.\n raise RuntimeError(\"Worker got stuck\")\n return min_model_version\n\n def run(self):\n \"\"\"\n Fetches task from master and performs training or evaluation.\n \"\"\"\n while True:\n task = self.get_task()\n if not task.shard_file_name:\n if task.type == elasticdl_pb2.WAIT:\n # Wait a few seconds then try to get_task again\n time.sleep(5)\n continue\n else:\n # No more task\n self._logger.info(\"No more task, stopping\")\n break\n self._logger.info(\"Receive a new task: %d\", task.task_id)\n err_msg = \"\"\n try:\n self._handle_task(task)\n except RuntimeError as err:\n err_msg = str(err)\n traceback.print_exc()\n except Exception as ex:\n err_msg = str(ex)\n traceback.print_exc()\n raise ex\n self.report_task_result(task.task_id, err_msg)\n", "path": "elasticdl/python/worker/worker.py"}]} | 3,250 | 443 |
gh_patches_debug_16288 | rasdani/github-patches | git_diff | pytorch__vision-7702 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
to_grayscale gives non-actionable deprecation warning
_Originally reported in the [user forum](https://discuss.pytorch.org/t/cannot-find-convert-color-space/182591) by `@function2`._
> When I use to_grayscale, there’s a deprecation warning:
> ```
> UserWarning: The function `to_grayscale(...)` is deprecated in will be removed in a future release. Instead, please use `convert_color_space(..., color_space=datapoints.ColorSpace.GRAY)`.
> ```
> However, I can’t find this function in the current code base
---
Note that this only applies to `torchvision.transforms.v2.functional`
https://github.com/pytorch/vision/blob/52eb5039bed1a23eee14014ff4cd6fd9cc9b2b08/torchvision/transforms/v2/functional/_deprecated.py#L12-L22
since the v1 version, i.e. `torchvision.transforms.functional` does not emit the warning
https://github.com/pytorch/vision/blob/52eb5039bed1a23eee14014ff4cd6fd9cc9b2b08/torchvision/transforms/functional.py#L1249-L1253
Fixing the v2 warning was forgotten in #7120.
cc @vfdev-5
</issue>
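For anyone hitting the warning today, the replacement that actually exists is `rgb_to_grayscale`; a minimal sketch, assuming a torchvision release that ships the v2 transforms:

```python
import torch
from torchvision.transforms.v2 import functional as F

img = torch.randint(0, 256, (3, 64, 64), dtype=torch.uint8)  # C, H, W RGB tensor
gray = F.rgb_to_grayscale(img, num_output_channels=1)
print(gray.shape)  # torch.Size([1, 64, 64])
```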
<code>
[start of torchvision/transforms/v2/functional/_deprecated.py]
1 import warnings
2 from typing import Any, List, Union
3
4 import PIL.Image
5 import torch
6
7 from torchvision import datapoints
8 from torchvision.transforms import functional as _F
9
10
11 @torch.jit.unused
12 def to_grayscale(inpt: PIL.Image.Image, num_output_channels: int = 1) -> PIL.Image.Image:
13 call = ", num_output_channels=3" if num_output_channels == 3 else ""
14 replacement = "convert_color_space(..., color_space=datapoints.ColorSpace.GRAY)"
15 if num_output_channels == 3:
16 replacement = f"convert_color_space({replacement}, color_space=datapoints.ColorSpace.RGB)"
17 warnings.warn(
18 f"The function `to_grayscale(...{call})` is deprecated in will be removed in a future release. "
19 f"Instead, please use `{replacement}`.",
20 )
21
22 return _F.to_grayscale(inpt, num_output_channels=num_output_channels)
23
24
25 @torch.jit.unused
26 def to_tensor(inpt: Any) -> torch.Tensor:
27 warnings.warn(
28 "The function `to_tensor(...)` is deprecated and will be removed in a future release. "
29 "Instead, please use `to_image_tensor(...)` followed by `convert_image_dtype(...)`."
30 )
31 return _F.to_tensor(inpt)
32
33
34 def get_image_size(inpt: Union[datapoints._ImageTypeJIT, datapoints._VideoTypeJIT]) -> List[int]:
35 warnings.warn(
36 "The function `get_image_size(...)` is deprecated and will be removed in a future release. "
37 "Instead, please use `get_spatial_size(...)` which returns `[h, w]` instead of `[w, h]`."
38 )
39 return _F.get_image_size(inpt)
40
[end of torchvision/transforms/v2/functional/_deprecated.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/torchvision/transforms/v2/functional/_deprecated.py b/torchvision/transforms/v2/functional/_deprecated.py
--- a/torchvision/transforms/v2/functional/_deprecated.py
+++ b/torchvision/transforms/v2/functional/_deprecated.py
@@ -10,15 +10,10 @@
@torch.jit.unused
def to_grayscale(inpt: PIL.Image.Image, num_output_channels: int = 1) -> PIL.Image.Image:
- call = ", num_output_channels=3" if num_output_channels == 3 else ""
- replacement = "convert_color_space(..., color_space=datapoints.ColorSpace.GRAY)"
- if num_output_channels == 3:
- replacement = f"convert_color_space({replacement}, color_space=datapoints.ColorSpace.RGB)"
warnings.warn(
- f"The function `to_grayscale(...{call})` is deprecated in will be removed in a future release. "
- f"Instead, please use `{replacement}`.",
+ "The function `to_grayscale` is deprecated in will be removed in a future release. "
+ "Instead, please use `rgb_to_grayscale`.",
)
-
return _F.to_grayscale(inpt, num_output_channels=num_output_channels)
| {"golden_diff": "diff --git a/torchvision/transforms/v2/functional/_deprecated.py b/torchvision/transforms/v2/functional/_deprecated.py\n--- a/torchvision/transforms/v2/functional/_deprecated.py\n+++ b/torchvision/transforms/v2/functional/_deprecated.py\n@@ -10,15 +10,10 @@\n \n @torch.jit.unused\n def to_grayscale(inpt: PIL.Image.Image, num_output_channels: int = 1) -> PIL.Image.Image:\n- call = \", num_output_channels=3\" if num_output_channels == 3 else \"\"\n- replacement = \"convert_color_space(..., color_space=datapoints.ColorSpace.GRAY)\"\n- if num_output_channels == 3:\n- replacement = f\"convert_color_space({replacement}, color_space=datapoints.ColorSpace.RGB)\"\n warnings.warn(\n- f\"The function `to_grayscale(...{call})` is deprecated in will be removed in a future release. \"\n- f\"Instead, please use `{replacement}`.\",\n+ \"The function `to_grayscale` is deprecated in will be removed in a future release. \"\n+ \"Instead, please use `rgb_to_grayscale`.\",\n )\n-\n return _F.to_grayscale(inpt, num_output_channels=num_output_channels)\n", "issue": "to_grayscale gives non-actionable deprecation warning\n_Originally reported in the [user forum](https://discuss.pytorch.org/t/cannot-find-convert-color-space/182591) by `@function2`._\r\n\r\n> When I use to_grayscale, there\u2019s a deprecation warning:\r\n> ```\r\n> UserWarning: The function `to_grayscale(...)` is deprecated in will be removed in a future release. Instead, please use `convert_color_space(..., color_space=datapoints.ColorSpace.GRAY)`.\r\n> ```\r\n> However, I can\u2019t find this function in the current code base\r\n\r\n---\r\n\r\nNote that this only applies to `torchvision.transforms.v2.function`\r\n\r\nhttps://github.com/pytorch/vision/blob/52eb5039bed1a23eee14014ff4cd6fd9cc9b2b08/torchvision/transforms/v2/functional/_deprecated.py#L12-L22\r\n\r\nsince the v1 version, i.e. `torchvision.transforms.functional` does not emit the warning\r\n\r\nhttps://github.com/pytorch/vision/blob/52eb5039bed1a23eee14014ff4cd6fd9cc9b2b08/torchvision/transforms/functional.py#L1249-L1253\r\n\r\nFixing the v2 warning was forgotten in #7120.\r\n\n\ncc @vfdev-5\n", "before_files": [{"content": "import warnings\nfrom typing import Any, List, Union\n\nimport PIL.Image\nimport torch\n\nfrom torchvision import datapoints\nfrom torchvision.transforms import functional as _F\n\n\[email protected]\ndef to_grayscale(inpt: PIL.Image.Image, num_output_channels: int = 1) -> PIL.Image.Image:\n call = \", num_output_channels=3\" if num_output_channels == 3 else \"\"\n replacement = \"convert_color_space(..., color_space=datapoints.ColorSpace.GRAY)\"\n if num_output_channels == 3:\n replacement = f\"convert_color_space({replacement}, color_space=datapoints.ColorSpace.RGB)\"\n warnings.warn(\n f\"The function `to_grayscale(...{call})` is deprecated in will be removed in a future release. \"\n f\"Instead, please use `{replacement}`.\",\n )\n\n return _F.to_grayscale(inpt, num_output_channels=num_output_channels)\n\n\[email protected]\ndef to_tensor(inpt: Any) -> torch.Tensor:\n warnings.warn(\n \"The function `to_tensor(...)` is deprecated and will be removed in a future release. \"\n \"Instead, please use `to_image_tensor(...)` followed by `convert_image_dtype(...)`.\"\n )\n return _F.to_tensor(inpt)\n\n\ndef get_image_size(inpt: Union[datapoints._ImageTypeJIT, datapoints._VideoTypeJIT]) -> List[int]:\n warnings.warn(\n \"The function `get_image_size(...)` is deprecated and will be removed in a future release. 
\"\n \"Instead, please use `get_spatial_size(...)` which returns `[h, w]` instead of `[w, h]`.\"\n )\n return _F.get_image_size(inpt)\n", "path": "torchvision/transforms/v2/functional/_deprecated.py"}]} | 1,305 | 276 |
gh_patches_debug_28038 | rasdani/github-patches | git_diff | TheAlgorithms__Python-796 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
math CheckPrime is wrong
The current implementation doesn't support:
* Negative values: -1 is not a prime number, yet the current implementation raises a TypeError because `math.sqrt` is called on negative values.
The current implementation returns the wrong value for:
* 0, which doesn't have any divider; primes must have two.
* 1, which has just one divider; primes must have two.
</issue>
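The fix the report is asking for is essentially an early return for anything below 2, before `math.sqrt` is ever reached. A minimal sketch of that guard (not the patch that actually landed in the repository):

```python
import math


def prime_check(number: int) -> bool:
    # Primes need exactly two divisors, so negatives, 0 and 1 are all rejected
    # here, and math.sqrt is never called with a negative argument.
    if number < 2:
        return False
    if number % 2 == 0:
        return number == 2
    return all(number % i for i in range(3, int(math.sqrt(number)) + 1, 2))


assert not any(prime_check(n) for n in (-1, 0, 1))
assert prime_check(2) and prime_check(37) and not prime_check(77)
```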
<code>
[start of maths/PrimeCheck.py]
1 import math
2 def primeCheck(number):
3 if number % 2 == 0 and number > 2:
4 return False
5 return all(number % i for i in range(3, int(math.sqrt(number)) + 1, 2))
6
7 def main():
8 print(primeCheck(37))
9 print(primeCheck(100))
10 print(primeCheck(77))
11
12 if __name__ == '__main__':
13 main()
14
[end of maths/PrimeCheck.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/maths/PrimeCheck.py b/maths/PrimeCheck.py
--- a/maths/PrimeCheck.py
+++ b/maths/PrimeCheck.py
@@ -1,13 +1,54 @@
import math
+import unittest
+
+
def primeCheck(number):
- if number % 2 == 0 and number > 2:
+ """
+ A number is prime if it has exactly two dividers: 1 and itself.
+ """
+ if number < 2:
+ # Negatives, 0 and 1 are not primes
return False
- return all(number % i for i in range(3, int(math.sqrt(number)) + 1, 2))
+ if number < 4:
+ # 2 and 3 are primes
+ return True
+ if number % 2 == 0:
+ # Even values are not primes
+ return False
+
+ # Except 2, all primes are odd. If any odd value divide
+ # the number, then that number is not prime.
+ odd_numbers = range(3, int(math.sqrt(number)) + 1, 2)
+ return not any(number % i == 0 for i in odd_numbers)
+
+
+class Test(unittest.TestCase):
+ def test_primes(self):
+ self.assertTrue(primeCheck(2))
+ self.assertTrue(primeCheck(3))
+ self.assertTrue(primeCheck(5))
+ self.assertTrue(primeCheck(7))
+ self.assertTrue(primeCheck(11))
+ self.assertTrue(primeCheck(13))
+ self.assertTrue(primeCheck(17))
+ self.assertTrue(primeCheck(19))
+ self.assertTrue(primeCheck(23))
+ self.assertTrue(primeCheck(29))
+
+ def test_not_primes(self):
+ self.assertFalse(primeCheck(-19),
+ "Negative numbers are not prime.")
+ self.assertFalse(primeCheck(0),
+ "Zero doesn't have any divider, primes must have two")
+ self.assertFalse(primeCheck(1),
+ "One just have 1 divider, primes must have two.")
+ self.assertFalse(primeCheck(2 * 2))
+ self.assertFalse(primeCheck(2 * 3))
+ self.assertFalse(primeCheck(3 * 3))
+ self.assertFalse(primeCheck(3 * 5))
+ self.assertFalse(primeCheck(3 * 5 * 7))
-def main():
- print(primeCheck(37))
- print(primeCheck(100))
- print(primeCheck(77))
if __name__ == '__main__':
- main()
+ unittest.main()
+
| {"golden_diff": "diff --git a/maths/PrimeCheck.py b/maths/PrimeCheck.py\n--- a/maths/PrimeCheck.py\n+++ b/maths/PrimeCheck.py\n@@ -1,13 +1,54 @@\n import math\n+import unittest\n+\n+\n def primeCheck(number):\n- if number % 2 == 0 and number > 2: \n+ \"\"\"\n+ A number is prime if it has exactly two dividers: 1 and itself.\n+ \"\"\"\n+ if number < 2:\n+ # Negatives, 0 and 1 are not primes\n return False\n- return all(number % i for i in range(3, int(math.sqrt(number)) + 1, 2))\n+ if number < 4:\n+ # 2 and 3 are primes\n+ return True\n+ if number % 2 == 0:\n+ # Even values are not primes\n+ return False\n+\n+ # Except 2, all primes are odd. If any odd value divide\n+ # the number, then that number is not prime.\n+ odd_numbers = range(3, int(math.sqrt(number)) + 1, 2)\n+ return not any(number % i == 0 for i in odd_numbers)\n+\n+\n+class Test(unittest.TestCase):\n+ def test_primes(self):\n+ self.assertTrue(primeCheck(2))\n+ self.assertTrue(primeCheck(3))\n+ self.assertTrue(primeCheck(5))\n+ self.assertTrue(primeCheck(7))\n+ self.assertTrue(primeCheck(11))\n+ self.assertTrue(primeCheck(13))\n+ self.assertTrue(primeCheck(17))\n+ self.assertTrue(primeCheck(19))\n+ self.assertTrue(primeCheck(23))\n+ self.assertTrue(primeCheck(29))\n+\n+ def test_not_primes(self):\n+ self.assertFalse(primeCheck(-19),\n+ \"Negative numbers are not prime.\")\n+ self.assertFalse(primeCheck(0),\n+ \"Zero doesn't have any divider, primes must have two\")\n+ self.assertFalse(primeCheck(1),\n+ \"One just have 1 divider, primes must have two.\")\n+ self.assertFalse(primeCheck(2 * 2))\n+ self.assertFalse(primeCheck(2 * 3))\n+ self.assertFalse(primeCheck(3 * 3))\n+ self.assertFalse(primeCheck(3 * 5))\n+ self.assertFalse(primeCheck(3 * 5 * 7))\n \n-def main():\n- print(primeCheck(37))\n- print(primeCheck(100))\n- print(primeCheck(77))\n \n if __name__ == '__main__':\n-\tmain()\n+ unittest.main()\n+\n", "issue": "math CheckPrime is wrong\nThe current implementation doesn't support:\r\n\r\n* Negative values, -1 is not a prime number. Current implementation raise a TypeError due to `math.sqrt` on negative values.\r\n\r\nThe current implementation return the wrong value for:\r\n\r\n* 0, 0 doesn't have any divider, primes must have two.\r\n* 1, 1 just have one divider, primes must have two.\n", "before_files": [{"content": "import math\ndef primeCheck(number):\n if number % 2 == 0 and number > 2: \n return False\n return all(number % i for i in range(3, int(math.sqrt(number)) + 1, 2))\n\ndef main():\n print(primeCheck(37))\n print(primeCheck(100))\n print(primeCheck(77))\n\nif __name__ == '__main__':\n\tmain()\n", "path": "maths/PrimeCheck.py"}]} | 733 | 599 |
gh_patches_debug_38846 | rasdani/github-patches | git_diff | python-discord__bot-971 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove periodic ping in #verification
This creates "accept bombs" that may trigger our anti-raid and burst spam filters, and is not really necessary anymore. We have more than enough people joining without having to periodically nag at them.
</issue>
<code>
[start of bot/cogs/verification.py]
1 import logging
2 from contextlib import suppress
3 from datetime import datetime
4
5 from discord import Colour, Forbidden, Message, NotFound, Object
6 from discord.ext import tasks
7 from discord.ext.commands import Cog, Context, command
8
9 from bot import constants
10 from bot.bot import Bot
11 from bot.cogs.moderation import ModLog
12 from bot.decorators import in_whitelist, without_role
13 from bot.utils.checks import InWhitelistCheckFailure, without_role_check
14
15 log = logging.getLogger(__name__)
16
17 WELCOME_MESSAGE = f"""
18 Hello! Welcome to the server, and thanks for verifying yourself!
19
20 For your records, these are the documents you accepted:
21
22 `1)` Our rules, here: <https://pythondiscord.com/pages/rules>
23 `2)` Our privacy policy, here: <https://pythondiscord.com/pages/privacy> - you can find information on how to have \
24 your information removed here as well.
25
26 Feel free to review them at any point!
27
28 Additionally, if you'd like to receive notifications for the announcements \
29 we post in <#{constants.Channels.announcements}>
30 from time to time, you can send `!subscribe` to <#{constants.Channels.bot_commands}> at any time \
31 to assign yourself the **Announcements** role. We'll mention this role every time we make an announcement.
32
33 If you'd like to unsubscribe from the announcement notifications, simply send `!unsubscribe` to \
34 <#{constants.Channels.bot_commands}>.
35 """
36
37 if constants.DEBUG_MODE:
38 PERIODIC_PING = "Periodic checkpoint message successfully sent."
39 else:
40 PERIODIC_PING = (
41 f"@everyone To verify that you have read our rules, please type `{constants.Bot.prefix}accept`."
42 " If you encounter any problems during the verification process, "
43 f"send a direct message to a staff member."
44 )
45 BOT_MESSAGE_DELETE_DELAY = 10
46
47
48 class Verification(Cog):
49 """User verification and role self-management."""
50
51 def __init__(self, bot: Bot):
52 self.bot = bot
53 self.periodic_ping.start()
54
55 @property
56 def mod_log(self) -> ModLog:
57 """Get currently loaded ModLog cog instance."""
58 return self.bot.get_cog("ModLog")
59
60 @Cog.listener()
61 async def on_message(self, message: Message) -> None:
62 """Check new message event for messages to the checkpoint channel & process."""
63 if message.channel.id != constants.Channels.verification:
64 return # Only listen for #checkpoint messages
65
66 if message.author.bot:
67 # They're a bot, delete their message after the delay.
68 # But not the periodic ping; we like that one.
69 if message.content != PERIODIC_PING:
70 await message.delete(delay=BOT_MESSAGE_DELETE_DELAY)
71 return
72
73 # if a user mentions a role or guild member
74 # alert the mods in mod-alerts channel
75 if message.mentions or message.role_mentions:
76 log.debug(
77 f"{message.author} mentioned one or more users "
78 f"and/or roles in {message.channel.name}"
79 )
80
81 embed_text = (
82 f"{message.author.mention} sent a message in "
83 f"{message.channel.mention} that contained user and/or role mentions."
84 f"\n\n**Original message:**\n>>> {message.content}"
85 )
86
87 # Send pretty mod log embed to mod-alerts
88 await self.mod_log.send_log_message(
89 icon_url=constants.Icons.filtering,
90 colour=Colour(constants.Colours.soft_red),
91 title=f"User/Role mentioned in {message.channel.name}",
92 text=embed_text,
93 thumbnail=message.author.avatar_url_as(static_format="png"),
94 channel_id=constants.Channels.mod_alerts,
95 )
96
97 ctx: Context = await self.bot.get_context(message)
98 if ctx.command is not None and ctx.command.name == "accept":
99 return
100
101 if any(r.id == constants.Roles.verified for r in ctx.author.roles):
102 log.info(
103 f"{ctx.author} posted '{ctx.message.content}' "
104 "in the verification channel, but is already verified."
105 )
106 return
107
108 log.debug(
109 f"{ctx.author} posted '{ctx.message.content}' in the verification "
110 "channel. We are providing instructions how to verify."
111 )
112 await ctx.send(
113 f"{ctx.author.mention} Please type `!accept` to verify that you accept our rules, "
114 f"and gain access to the rest of the server.",
115 delete_after=20
116 )
117
118 log.trace(f"Deleting the message posted by {ctx.author}")
119 with suppress(NotFound):
120 await ctx.message.delete()
121
122 @command(name='accept', aliases=('verify', 'verified', 'accepted'), hidden=True)
123 @without_role(constants.Roles.verified)
124 @in_whitelist(channels=(constants.Channels.verification,))
125 async def accept_command(self, ctx: Context, *_) -> None: # We don't actually care about the args
126 """Accept our rules and gain access to the rest of the server."""
127 log.debug(f"{ctx.author} called !accept. Assigning the 'Developer' role.")
128 await ctx.author.add_roles(Object(constants.Roles.verified), reason="Accepted the rules")
129 try:
130 await ctx.author.send(WELCOME_MESSAGE)
131 except Forbidden:
132 log.info(f"Sending welcome message failed for {ctx.author}.")
133 finally:
134 log.trace(f"Deleting accept message by {ctx.author}.")
135 with suppress(NotFound):
136 self.mod_log.ignore(constants.Event.message_delete, ctx.message.id)
137 await ctx.message.delete()
138
139 @command(name='subscribe')
140 @in_whitelist(channels=(constants.Channels.bot_commands,))
141 async def subscribe_command(self, ctx: Context, *_) -> None: # We don't actually care about the args
142 """Subscribe to announcement notifications by assigning yourself the role."""
143 has_role = False
144
145 for role in ctx.author.roles:
146 if role.id == constants.Roles.announcements:
147 has_role = True
148 break
149
150 if has_role:
151 await ctx.send(f"{ctx.author.mention} You're already subscribed!")
152 return
153
154 log.debug(f"{ctx.author} called !subscribe. Assigning the 'Announcements' role.")
155 await ctx.author.add_roles(Object(constants.Roles.announcements), reason="Subscribed to announcements")
156
157 log.trace(f"Deleting the message posted by {ctx.author}.")
158
159 await ctx.send(
160 f"{ctx.author.mention} Subscribed to <#{constants.Channels.announcements}> notifications.",
161 )
162
163 @command(name='unsubscribe')
164 @in_whitelist(channels=(constants.Channels.bot_commands,))
165 async def unsubscribe_command(self, ctx: Context, *_) -> None: # We don't actually care about the args
166 """Unsubscribe from announcement notifications by removing the role from yourself."""
167 has_role = False
168
169 for role in ctx.author.roles:
170 if role.id == constants.Roles.announcements:
171 has_role = True
172 break
173
174 if not has_role:
175 await ctx.send(f"{ctx.author.mention} You're already unsubscribed!")
176 return
177
178 log.debug(f"{ctx.author} called !unsubscribe. Removing the 'Announcements' role.")
179 await ctx.author.remove_roles(Object(constants.Roles.announcements), reason="Unsubscribed from announcements")
180
181 log.trace(f"Deleting the message posted by {ctx.author}.")
182
183 await ctx.send(
184 f"{ctx.author.mention} Unsubscribed from <#{constants.Channels.announcements}> notifications."
185 )
186
187 # This cannot be static (must have a __func__ attribute).
188 async def cog_command_error(self, ctx: Context, error: Exception) -> None:
189 """Check for & ignore any InWhitelistCheckFailure."""
190 if isinstance(error, InWhitelistCheckFailure):
191 error.handled = True
192
193 @staticmethod
194 def bot_check(ctx: Context) -> bool:
195 """Block any command within the verification channel that is not !accept."""
196 if ctx.channel.id == constants.Channels.verification and without_role_check(ctx, *constants.MODERATION_ROLES):
197 return ctx.command.name == "accept"
198 else:
199 return True
200
201 @tasks.loop(hours=12)
202 async def periodic_ping(self) -> None:
203 """Every week, mention @everyone to remind them to verify."""
204 messages = self.bot.get_channel(constants.Channels.verification).history(limit=10)
205 need_to_post = True # True if a new message needs to be sent.
206
207 async for message in messages:
208 if message.author == self.bot.user and message.content == PERIODIC_PING:
209 delta = datetime.utcnow() - message.created_at # Time since last message.
210 if delta.days >= 7: # Message is older than a week.
211 await message.delete()
212 else:
213 need_to_post = False
214
215 break
216
217 if need_to_post:
218 await self.bot.get_channel(constants.Channels.verification).send(PERIODIC_PING)
219
220 @periodic_ping.before_loop
221 async def before_ping(self) -> None:
222 """Only start the loop when the bot is ready."""
223 await self.bot.wait_until_guild_available()
224
225 def cog_unload(self) -> None:
226 """Cancel the periodic ping task when the cog is unloaded."""
227 self.periodic_ping.cancel()
228
229
230 def setup(bot: Bot) -> None:
231 """Load the Verification cog."""
232 bot.add_cog(Verification(bot))
233
[end of bot/cogs/verification.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bot/cogs/verification.py b/bot/cogs/verification.py
--- a/bot/cogs/verification.py
+++ b/bot/cogs/verification.py
@@ -1,9 +1,7 @@
import logging
from contextlib import suppress
-from datetime import datetime
from discord import Colour, Forbidden, Message, NotFound, Object
-from discord.ext import tasks
from discord.ext.commands import Cog, Context, command
from bot import constants
@@ -34,14 +32,6 @@
<#{constants.Channels.bot_commands}>.
"""
-if constants.DEBUG_MODE:
- PERIODIC_PING = "Periodic checkpoint message successfully sent."
-else:
- PERIODIC_PING = (
- f"@everyone To verify that you have read our rules, please type `{constants.Bot.prefix}accept`."
- " If you encounter any problems during the verification process, "
- f"send a direct message to a staff member."
- )
BOT_MESSAGE_DELETE_DELAY = 10
@@ -50,7 +40,6 @@
def __init__(self, bot: Bot):
self.bot = bot
- self.periodic_ping.start()
@property
def mod_log(self) -> ModLog:
@@ -65,9 +54,7 @@
if message.author.bot:
# They're a bot, delete their message after the delay.
- # But not the periodic ping; we like that one.
- if message.content != PERIODIC_PING:
- await message.delete(delay=BOT_MESSAGE_DELETE_DELAY)
+ await message.delete(delay=BOT_MESSAGE_DELETE_DELAY)
return
# if a user mentions a role or guild member
@@ -198,34 +185,6 @@
else:
return True
- @tasks.loop(hours=12)
- async def periodic_ping(self) -> None:
- """Every week, mention @everyone to remind them to verify."""
- messages = self.bot.get_channel(constants.Channels.verification).history(limit=10)
- need_to_post = True # True if a new message needs to be sent.
-
- async for message in messages:
- if message.author == self.bot.user and message.content == PERIODIC_PING:
- delta = datetime.utcnow() - message.created_at # Time since last message.
- if delta.days >= 7: # Message is older than a week.
- await message.delete()
- else:
- need_to_post = False
-
- break
-
- if need_to_post:
- await self.bot.get_channel(constants.Channels.verification).send(PERIODIC_PING)
-
- @periodic_ping.before_loop
- async def before_ping(self) -> None:
- """Only start the loop when the bot is ready."""
- await self.bot.wait_until_guild_available()
-
- def cog_unload(self) -> None:
- """Cancel the periodic ping task when the cog is unloaded."""
- self.periodic_ping.cancel()
-
def setup(bot: Bot) -> None:
"""Load the Verification cog."""
| {"golden_diff": "diff --git a/bot/cogs/verification.py b/bot/cogs/verification.py\n--- a/bot/cogs/verification.py\n+++ b/bot/cogs/verification.py\n@@ -1,9 +1,7 @@\n import logging\n from contextlib import suppress\n-from datetime import datetime\n \n from discord import Colour, Forbidden, Message, NotFound, Object\n-from discord.ext import tasks\n from discord.ext.commands import Cog, Context, command\n \n from bot import constants\n@@ -34,14 +32,6 @@\n <#{constants.Channels.bot_commands}>.\n \"\"\"\n \n-if constants.DEBUG_MODE:\n- PERIODIC_PING = \"Periodic checkpoint message successfully sent.\"\n-else:\n- PERIODIC_PING = (\n- f\"@everyone To verify that you have read our rules, please type `{constants.Bot.prefix}accept`.\"\n- \" If you encounter any problems during the verification process, \"\n- f\"send a direct message to a staff member.\"\n- )\n BOT_MESSAGE_DELETE_DELAY = 10\n \n \n@@ -50,7 +40,6 @@\n \n def __init__(self, bot: Bot):\n self.bot = bot\n- self.periodic_ping.start()\n \n @property\n def mod_log(self) -> ModLog:\n@@ -65,9 +54,7 @@\n \n if message.author.bot:\n # They're a bot, delete their message after the delay.\n- # But not the periodic ping; we like that one.\n- if message.content != PERIODIC_PING:\n- await message.delete(delay=BOT_MESSAGE_DELETE_DELAY)\n+ await message.delete(delay=BOT_MESSAGE_DELETE_DELAY)\n return\n \n # if a user mentions a role or guild member\n@@ -198,34 +185,6 @@\n else:\n return True\n \n- @tasks.loop(hours=12)\n- async def periodic_ping(self) -> None:\n- \"\"\"Every week, mention @everyone to remind them to verify.\"\"\"\n- messages = self.bot.get_channel(constants.Channels.verification).history(limit=10)\n- need_to_post = True # True if a new message needs to be sent.\n-\n- async for message in messages:\n- if message.author == self.bot.user and message.content == PERIODIC_PING:\n- delta = datetime.utcnow() - message.created_at # Time since last message.\n- if delta.days >= 7: # Message is older than a week.\n- await message.delete()\n- else:\n- need_to_post = False\n-\n- break\n-\n- if need_to_post:\n- await self.bot.get_channel(constants.Channels.verification).send(PERIODIC_PING)\n-\n- @periodic_ping.before_loop\n- async def before_ping(self) -> None:\n- \"\"\"Only start the loop when the bot is ready.\"\"\"\n- await self.bot.wait_until_guild_available()\n-\n- def cog_unload(self) -> None:\n- \"\"\"Cancel the periodic ping task when the cog is unloaded.\"\"\"\n- self.periodic_ping.cancel()\n-\n \n def setup(bot: Bot) -> None:\n \"\"\"Load the Verification cog.\"\"\"\n", "issue": "Remove periodic ping in #verification\nThis creates \"accept bombs\" that may trigger our anti-raid and burst spam filters, and is not really necessary anymore. We have more than enough people joining without having to periodically nag at them.\n", "before_files": [{"content": "import logging\nfrom contextlib import suppress\nfrom datetime import datetime\n\nfrom discord import Colour, Forbidden, Message, NotFound, Object\nfrom discord.ext import tasks\nfrom discord.ext.commands import Cog, Context, command\n\nfrom bot import constants\nfrom bot.bot import Bot\nfrom bot.cogs.moderation import ModLog\nfrom bot.decorators import in_whitelist, without_role\nfrom bot.utils.checks import InWhitelistCheckFailure, without_role_check\n\nlog = logging.getLogger(__name__)\n\nWELCOME_MESSAGE = f\"\"\"\nHello! 
Welcome to the server, and thanks for verifying yourself!\n\nFor your records, these are the documents you accepted:\n\n`1)` Our rules, here: <https://pythondiscord.com/pages/rules>\n`2)` Our privacy policy, here: <https://pythondiscord.com/pages/privacy> - you can find information on how to have \\\nyour information removed here as well.\n\nFeel free to review them at any point!\n\nAdditionally, if you'd like to receive notifications for the announcements \\\nwe post in <#{constants.Channels.announcements}>\nfrom time to time, you can send `!subscribe` to <#{constants.Channels.bot_commands}> at any time \\\nto assign yourself the **Announcements** role. We'll mention this role every time we make an announcement.\n\nIf you'd like to unsubscribe from the announcement notifications, simply send `!unsubscribe` to \\\n<#{constants.Channels.bot_commands}>.\n\"\"\"\n\nif constants.DEBUG_MODE:\n PERIODIC_PING = \"Periodic checkpoint message successfully sent.\"\nelse:\n PERIODIC_PING = (\n f\"@everyone To verify that you have read our rules, please type `{constants.Bot.prefix}accept`.\"\n \" If you encounter any problems during the verification process, \"\n f\"send a direct message to a staff member.\"\n )\nBOT_MESSAGE_DELETE_DELAY = 10\n\n\nclass Verification(Cog):\n \"\"\"User verification and role self-management.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n self.periodic_ping.start()\n\n @property\n def mod_log(self) -> ModLog:\n \"\"\"Get currently loaded ModLog cog instance.\"\"\"\n return self.bot.get_cog(\"ModLog\")\n\n @Cog.listener()\n async def on_message(self, message: Message) -> None:\n \"\"\"Check new message event for messages to the checkpoint channel & process.\"\"\"\n if message.channel.id != constants.Channels.verification:\n return # Only listen for #checkpoint messages\n\n if message.author.bot:\n # They're a bot, delete their message after the delay.\n # But not the periodic ping; we like that one.\n if message.content != PERIODIC_PING:\n await message.delete(delay=BOT_MESSAGE_DELETE_DELAY)\n return\n\n # if a user mentions a role or guild member\n # alert the mods in mod-alerts channel\n if message.mentions or message.role_mentions:\n log.debug(\n f\"{message.author} mentioned one or more users \"\n f\"and/or roles in {message.channel.name}\"\n )\n\n embed_text = (\n f\"{message.author.mention} sent a message in \"\n f\"{message.channel.mention} that contained user and/or role mentions.\"\n f\"\\n\\n**Original message:**\\n>>> {message.content}\"\n )\n\n # Send pretty mod log embed to mod-alerts\n await self.mod_log.send_log_message(\n icon_url=constants.Icons.filtering,\n colour=Colour(constants.Colours.soft_red),\n title=f\"User/Role mentioned in {message.channel.name}\",\n text=embed_text,\n thumbnail=message.author.avatar_url_as(static_format=\"png\"),\n channel_id=constants.Channels.mod_alerts,\n )\n\n ctx: Context = await self.bot.get_context(message)\n if ctx.command is not None and ctx.command.name == \"accept\":\n return\n\n if any(r.id == constants.Roles.verified for r in ctx.author.roles):\n log.info(\n f\"{ctx.author} posted '{ctx.message.content}' \"\n \"in the verification channel, but is already verified.\"\n )\n return\n\n log.debug(\n f\"{ctx.author} posted '{ctx.message.content}' in the verification \"\n \"channel. 
We are providing instructions how to verify.\"\n )\n await ctx.send(\n f\"{ctx.author.mention} Please type `!accept` to verify that you accept our rules, \"\n f\"and gain access to the rest of the server.\",\n delete_after=20\n )\n\n log.trace(f\"Deleting the message posted by {ctx.author}\")\n with suppress(NotFound):\n await ctx.message.delete()\n\n @command(name='accept', aliases=('verify', 'verified', 'accepted'), hidden=True)\n @without_role(constants.Roles.verified)\n @in_whitelist(channels=(constants.Channels.verification,))\n async def accept_command(self, ctx: Context, *_) -> None: # We don't actually care about the args\n \"\"\"Accept our rules and gain access to the rest of the server.\"\"\"\n log.debug(f\"{ctx.author} called !accept. Assigning the 'Developer' role.\")\n await ctx.author.add_roles(Object(constants.Roles.verified), reason=\"Accepted the rules\")\n try:\n await ctx.author.send(WELCOME_MESSAGE)\n except Forbidden:\n log.info(f\"Sending welcome message failed for {ctx.author}.\")\n finally:\n log.trace(f\"Deleting accept message by {ctx.author}.\")\n with suppress(NotFound):\n self.mod_log.ignore(constants.Event.message_delete, ctx.message.id)\n await ctx.message.delete()\n\n @command(name='subscribe')\n @in_whitelist(channels=(constants.Channels.bot_commands,))\n async def subscribe_command(self, ctx: Context, *_) -> None: # We don't actually care about the args\n \"\"\"Subscribe to announcement notifications by assigning yourself the role.\"\"\"\n has_role = False\n\n for role in ctx.author.roles:\n if role.id == constants.Roles.announcements:\n has_role = True\n break\n\n if has_role:\n await ctx.send(f\"{ctx.author.mention} You're already subscribed!\")\n return\n\n log.debug(f\"{ctx.author} called !subscribe. Assigning the 'Announcements' role.\")\n await ctx.author.add_roles(Object(constants.Roles.announcements), reason=\"Subscribed to announcements\")\n\n log.trace(f\"Deleting the message posted by {ctx.author}.\")\n\n await ctx.send(\n f\"{ctx.author.mention} Subscribed to <#{constants.Channels.announcements}> notifications.\",\n )\n\n @command(name='unsubscribe')\n @in_whitelist(channels=(constants.Channels.bot_commands,))\n async def unsubscribe_command(self, ctx: Context, *_) -> None: # We don't actually care about the args\n \"\"\"Unsubscribe from announcement notifications by removing the role from yourself.\"\"\"\n has_role = False\n\n for role in ctx.author.roles:\n if role.id == constants.Roles.announcements:\n has_role = True\n break\n\n if not has_role:\n await ctx.send(f\"{ctx.author.mention} You're already unsubscribed!\")\n return\n\n log.debug(f\"{ctx.author} called !unsubscribe. 
Removing the 'Announcements' role.\")\n await ctx.author.remove_roles(Object(constants.Roles.announcements), reason=\"Unsubscribed from announcements\")\n\n log.trace(f\"Deleting the message posted by {ctx.author}.\")\n\n await ctx.send(\n f\"{ctx.author.mention} Unsubscribed from <#{constants.Channels.announcements}> notifications.\"\n )\n\n # This cannot be static (must have a __func__ attribute).\n async def cog_command_error(self, ctx: Context, error: Exception) -> None:\n \"\"\"Check for & ignore any InWhitelistCheckFailure.\"\"\"\n if isinstance(error, InWhitelistCheckFailure):\n error.handled = True\n\n @staticmethod\n def bot_check(ctx: Context) -> bool:\n \"\"\"Block any command within the verification channel that is not !accept.\"\"\"\n if ctx.channel.id == constants.Channels.verification and without_role_check(ctx, *constants.MODERATION_ROLES):\n return ctx.command.name == \"accept\"\n else:\n return True\n\n @tasks.loop(hours=12)\n async def periodic_ping(self) -> None:\n \"\"\"Every week, mention @everyone to remind them to verify.\"\"\"\n messages = self.bot.get_channel(constants.Channels.verification).history(limit=10)\n need_to_post = True # True if a new message needs to be sent.\n\n async for message in messages:\n if message.author == self.bot.user and message.content == PERIODIC_PING:\n delta = datetime.utcnow() - message.created_at # Time since last message.\n if delta.days >= 7: # Message is older than a week.\n await message.delete()\n else:\n need_to_post = False\n\n break\n\n if need_to_post:\n await self.bot.get_channel(constants.Channels.verification).send(PERIODIC_PING)\n\n @periodic_ping.before_loop\n async def before_ping(self) -> None:\n \"\"\"Only start the loop when the bot is ready.\"\"\"\n await self.bot.wait_until_guild_available()\n\n def cog_unload(self) -> None:\n \"\"\"Cancel the periodic ping task when the cog is unloaded.\"\"\"\n self.periodic_ping.cancel()\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Load the Verification cog.\"\"\"\n bot.add_cog(Verification(bot))\n", "path": "bot/cogs/verification.py"}]} | 3,202 | 682 |
gh_patches_debug_655 | rasdani/github-patches | git_diff | pex-tool__pex-2104 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.130
On the docket:
+ [x] Pex fails to lock - missing artifact #2098
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.129"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.129"
+__version__ = "2.1.130"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.129\"\n+__version__ = \"2.1.130\"\n", "issue": "Release 2.1.130\nOn the docket:\r\n+ [x] Pex fails to lock - missing artifact #2098 \n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.129\"\n", "path": "pex/version.py"}]} | 616 | 98 |
gh_patches_debug_22746 | rasdani/github-patches | git_diff | pre-commit__pre-commit-346 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Windows: Terminal width support
We detect terminal width on Unix-likes by running `tput cols`. This works fine for those platforms but doesn't work well on Windows. Maybe find a package which does this logic for us and depend on that.
</issue>
<code>
[start of pre_commit/output.py]
1 from __future__ import unicode_literals
2
3 import os
4 import subprocess
5 import sys
6
7 from pre_commit import color
8 from pre_commit import five
9
10
11 # TODO: smell: import side-effects
12 try:
13 if not os.environ.get('TERM'): # pragma: no cover (dumb terminal)
14 raise OSError('Cannot determine width without TERM')
15 else: # pragma no cover (windows)
16 COLS = int(
17 subprocess.Popen(
18 ('tput', 'cols'), stdout=subprocess.PIPE,
19 ).communicate()[0] or
20 # Default in the case of no terminal
21 80
22 )
23 except OSError: # pragma: no cover (windows)
24 COLS = 80
25
26
27 def get_hook_message(
28 start,
29 postfix='',
30 end_msg=None,
31 end_len=0,
32 end_color=None,
33 use_color=None,
34 cols=COLS,
35 ):
36 """Prints a message for running a hook.
37
38 This currently supports three approaches:
39
40 # Print `start` followed by dots, leaving 6 characters at the end
41 >>> print_hook_message('start', end_len=6)
42 start...............................................................
43
44 # Print `start` followed by dots with the end message colored if coloring
45 # is specified and a newline afterwards
46 >>> print_hook_message(
47 'start',
48 end_msg='end',
49 end_color=color.RED,
50 use_color=True,
51 )
52 start...................................................................end
53
54 # Print `start` followed by dots, followed by the `postfix` message
55 # uncolored, followed by the `end_msg` colored if specified and a newline
56 # afterwards
57 >>> print_hook_message(
58 'start',
59 postfix='postfix ',
60 end_msg='end',
61 end_color=color.RED,
62 use_color=True,
63 )
64 start...........................................................postfix end
65 """
66 if bool(end_msg) == bool(end_len):
67 raise ValueError('Expected one of (`end_msg`, `end_len`)')
68 if end_msg is not None and (end_color is None or use_color is None):
69 raise ValueError(
70 '`end_color` and `use_color` are required with `end_msg`'
71 )
72
73 if end_len:
74 return start + '.' * (cols - len(start) - end_len - 1)
75 else:
76 return '{0}{1}{2}{3}\n'.format(
77 start,
78 '.' * (cols - len(start) - len(postfix) - len(end_msg) - 1),
79 postfix,
80 color.format_color(end_msg, end_color, use_color),
81 )
82
83
84 stdout_byte_stream = getattr(sys.stdout, 'buffer', sys.stdout)
85
86
87 def sys_stdout_write_wrapper(s, stream=stdout_byte_stream):
88 stream.write(five.to_bytes(s))
89
[end of pre_commit/output.py]
[start of setup.py]
1 from setuptools import find_packages
2 from setuptools import setup
3
4
5 setup(
6 name='pre_commit',
7 description=(
8 'A framework for managing and maintaining multi-language pre-commit '
9 'hooks.'
10 ),
11 url='https://github.com/pre-commit/pre-commit',
12 version='0.7.6',
13
14 author='Anthony Sottile',
15 author_email='[email protected]',
16
17 platforms='linux',
18 classifiers=[
19 'License :: OSI Approved :: MIT License',
20 'Programming Language :: Python :: 2',
21 'Programming Language :: Python :: 2.6',
22 'Programming Language :: Python :: 2.7',
23 'Programming Language :: Python :: 3',
24 'Programming Language :: Python :: 3.3',
25 'Programming Language :: Python :: 3.4',
26 'Programming Language :: Python :: 3.5',
27 'Programming Language :: Python :: Implementation :: CPython',
28 'Programming Language :: Python :: Implementation :: PyPy',
29 ],
30
31 packages=find_packages('.', exclude=('tests*', 'testing*')),
32 package_data={
33 'pre_commit': [
34 'resources/hook-tmpl',
35 'resources/pre-push-tmpl',
36 'resources/rbenv.tar.gz',
37 'resources/ruby-build.tar.gz',
38 'resources/ruby-download.tar.gz',
39 ]
40 },
41 install_requires=[
42 'argparse',
43 'aspy.yaml',
44 'cached-property',
45 'jsonschema',
46 'nodeenv>=0.11.1',
47 'ordereddict',
48 'pyyaml',
49 'virtualenv',
50 ],
51 entry_points={
52 'console_scripts': [
53 'pre-commit = pre_commit.main:main',
54 'pre-commit-validate-config = pre_commit.clientlib.validate_config:run', # noqa
55 'pre-commit-validate-manifest = pre_commit.clientlib.validate_manifest:run', # noqa
56 ],
57 },
58 )
59
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pre_commit/output.py b/pre_commit/output.py
--- a/pre_commit/output.py
+++ b/pre_commit/output.py
@@ -1,27 +1,15 @@
from __future__ import unicode_literals
-import os
-import subprocess
import sys
+from backports.shutil_get_terminal_size import get_terminal_size
+
from pre_commit import color
from pre_commit import five
-
# TODO: smell: import side-effects
-try:
- if not os.environ.get('TERM'): # pragma: no cover (dumb terminal)
- raise OSError('Cannot determine width without TERM')
- else: # pragma no cover (windows)
- COLS = int(
- subprocess.Popen(
- ('tput', 'cols'), stdout=subprocess.PIPE,
- ).communicate()[0] or
- # Default in the case of no terminal
- 80
- )
-except OSError: # pragma: no cover (windows)
- COLS = 80
+# TODO: https://github.com/chrippa/backports.shutil_get_terminal_size/issues/4
+COLS = get_terminal_size().columns or 80
def get_hook_message(
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -41,6 +41,7 @@
install_requires=[
'argparse',
'aspy.yaml',
+ 'backports.shutil_get_terminal_size',
'cached-property',
'jsonschema',
'nodeenv>=0.11.1',
| {"golden_diff": "diff --git a/pre_commit/output.py b/pre_commit/output.py\n--- a/pre_commit/output.py\n+++ b/pre_commit/output.py\n@@ -1,27 +1,15 @@\n from __future__ import unicode_literals\n \n-import os\n-import subprocess\n import sys\n \n+from backports.shutil_get_terminal_size import get_terminal_size\n+\n from pre_commit import color\n from pre_commit import five\n \n-\n # TODO: smell: import side-effects\n-try:\n- if not os.environ.get('TERM'): # pragma: no cover (dumb terminal)\n- raise OSError('Cannot determine width without TERM')\n- else: # pragma no cover (windows)\n- COLS = int(\n- subprocess.Popen(\n- ('tput', 'cols'), stdout=subprocess.PIPE,\n- ).communicate()[0] or\n- # Default in the case of no terminal\n- 80\n- )\n-except OSError: # pragma: no cover (windows)\n- COLS = 80\n+# TODO: https://github.com/chrippa/backports.shutil_get_terminal_size/issues/4\n+COLS = get_terminal_size().columns or 80\n \n \n def get_hook_message(\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -41,6 +41,7 @@\n install_requires=[\n 'argparse',\n 'aspy.yaml',\n+ 'backports.shutil_get_terminal_size',\n 'cached-property',\n 'jsonschema',\n 'nodeenv>=0.11.1',\n", "issue": "Windows: Terminal width support\nWe detect terminal width in unixlikes by running `tput cols`. This works fine for those platforms but doesn't work well for windows. Maybe find a package which does this logic for us and depend on that.\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport os\nimport subprocess\nimport sys\n\nfrom pre_commit import color\nfrom pre_commit import five\n\n\n# TODO: smell: import side-effects\ntry:\n if not os.environ.get('TERM'): # pragma: no cover (dumb terminal)\n raise OSError('Cannot determine width without TERM')\n else: # pragma no cover (windows)\n COLS = int(\n subprocess.Popen(\n ('tput', 'cols'), stdout=subprocess.PIPE,\n ).communicate()[0] or\n # Default in the case of no terminal\n 80\n )\nexcept OSError: # pragma: no cover (windows)\n COLS = 80\n\n\ndef get_hook_message(\n start,\n postfix='',\n end_msg=None,\n end_len=0,\n end_color=None,\n use_color=None,\n cols=COLS,\n):\n \"\"\"Prints a message for running a hook.\n\n This currently supports three approaches:\n\n # Print `start` followed by dots, leaving 6 characters at the end\n >>> print_hook_message('start', end_len=6)\n start...............................................................\n\n # Print `start` followed by dots with the end message colored if coloring\n # is specified and a newline afterwards\n >>> print_hook_message(\n 'start',\n end_msg='end',\n end_color=color.RED,\n use_color=True,\n )\n start...................................................................end\n\n # Print `start` followed by dots, followed by the `postfix` message\n # uncolored, followed by the `end_msg` colored if specified and a newline\n # afterwards\n >>> print_hook_message(\n 'start',\n postfix='postfix ',\n end_msg='end',\n end_color=color.RED,\n use_color=True,\n )\n start...........................................................postfix end\n \"\"\"\n if bool(end_msg) == bool(end_len):\n raise ValueError('Expected one of (`end_msg`, `end_len`)')\n if end_msg is not None and (end_color is None or use_color is None):\n raise ValueError(\n '`end_color` and `use_color` are required with `end_msg`'\n )\n\n if end_len:\n return start + '.' * (cols - len(start) - end_len - 1)\n else:\n return '{0}{1}{2}{3}\\n'.format(\n start,\n '.' 
* (cols - len(start) - len(postfix) - len(end_msg) - 1),\n postfix,\n color.format_color(end_msg, end_color, use_color),\n )\n\n\nstdout_byte_stream = getattr(sys.stdout, 'buffer', sys.stdout)\n\n\ndef sys_stdout_write_wrapper(s, stream=stdout_byte_stream):\n stream.write(five.to_bytes(s))\n", "path": "pre_commit/output.py"}, {"content": "from setuptools import find_packages\nfrom setuptools import setup\n\n\nsetup(\n name='pre_commit',\n description=(\n 'A framework for managing and maintaining multi-language pre-commit '\n 'hooks.'\n ),\n url='https://github.com/pre-commit/pre-commit',\n version='0.7.6',\n\n author='Anthony Sottile',\n author_email='[email protected]',\n\n platforms='linux',\n classifiers=[\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n ],\n\n packages=find_packages('.', exclude=('tests*', 'testing*')),\n package_data={\n 'pre_commit': [\n 'resources/hook-tmpl',\n 'resources/pre-push-tmpl',\n 'resources/rbenv.tar.gz',\n 'resources/ruby-build.tar.gz',\n 'resources/ruby-download.tar.gz',\n ]\n },\n install_requires=[\n 'argparse',\n 'aspy.yaml',\n 'cached-property',\n 'jsonschema',\n 'nodeenv>=0.11.1',\n 'ordereddict',\n 'pyyaml',\n 'virtualenv',\n ],\n entry_points={\n 'console_scripts': [\n 'pre-commit = pre_commit.main:main',\n 'pre-commit-validate-config = pre_commit.clientlib.validate_config:run', # noqa\n 'pre-commit-validate-manifest = pre_commit.clientlib.validate_manifest:run', # noqa\n ],\n },\n)\n", "path": "setup.py"}]} | 1,875 | 339 |
gh_patches_debug_60612 | rasdani/github-patches | git_diff | cloudtools__troposphere-2037 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add support for additional Flink runtimes in Kinesis Data Analytics.
Kinesis supports additional Flink runtimes (FLINK-1_13, ZEPPELIN-FLINK-1_0, ZEPPELIN-FLINK-2_0); see https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-application.html.
</issue>
<code>
[start of troposphere/validators/kinesisanalyticsv2.py]
1 # Copyright (c) 2012-2022, Mark Peek <[email protected]>
2 # All rights reserved.
3 #
4 # See LICENSE file for full license.
5
6
7 def validate_runtime_environment(runtime_environment):
8 """
9 Validate RuntimeEnvironment for Application
10 Property: Application.RuntimeEnvironment
11 """
12
13 VALID_RUNTIME_ENVIRONMENTS = ("SQL-1_0", "FLINK-1_6", "FLINK-1_8", "FLINK-1_11")
14
15 if runtime_environment not in VALID_RUNTIME_ENVIRONMENTS:
16 raise ValueError(
17 "Application RuntimeEnvironment must be one of: %s"
18 % ", ".join(VALID_RUNTIME_ENVIRONMENTS)
19 )
20 return runtime_environment
21
[end of troposphere/validators/kinesisanalyticsv2.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/troposphere/validators/kinesisanalyticsv2.py b/troposphere/validators/kinesisanalyticsv2.py
--- a/troposphere/validators/kinesisanalyticsv2.py
+++ b/troposphere/validators/kinesisanalyticsv2.py
@@ -10,7 +10,15 @@
Property: Application.RuntimeEnvironment
"""
- VALID_RUNTIME_ENVIRONMENTS = ("SQL-1_0", "FLINK-1_6", "FLINK-1_8", "FLINK-1_11")
+ VALID_RUNTIME_ENVIRONMENTS = (
+ "FLINK-1_6",
+ "FLINK-1_8",
+ "FLINK-1_11",
+ "FLINK-1_13",
+ "SQL-1_0",
+ "ZEPPELIN-FLINK-1_0",
+ "ZEPPELIN-FLINK-2_0",
+ )
if runtime_environment not in VALID_RUNTIME_ENVIRONMENTS:
raise ValueError(
| {"golden_diff": "diff --git a/troposphere/validators/kinesisanalyticsv2.py b/troposphere/validators/kinesisanalyticsv2.py\n--- a/troposphere/validators/kinesisanalyticsv2.py\n+++ b/troposphere/validators/kinesisanalyticsv2.py\n@@ -10,7 +10,15 @@\n Property: Application.RuntimeEnvironment\n \"\"\"\n \n- VALID_RUNTIME_ENVIRONMENTS = (\"SQL-1_0\", \"FLINK-1_6\", \"FLINK-1_8\", \"FLINK-1_11\")\n+ VALID_RUNTIME_ENVIRONMENTS = (\n+ \"FLINK-1_6\",\n+ \"FLINK-1_8\",\n+ \"FLINK-1_11\",\n+ \"FLINK-1_13\",\n+ \"SQL-1_0\",\n+ \"ZEPPELIN-FLINK-1_0\",\n+ \"ZEPPELIN-FLINK-2_0\",\n+ )\n \n if runtime_environment not in VALID_RUNTIME_ENVIRONMENTS:\n raise ValueError(\n", "issue": "Add support for additional Flink runtimes in Kinesis Data Analytics.\nKinesis supports additional Flink runtimes (FLINK-1_13, ZEPPELIN-FLINK-1_0, ZEPPELIN-FLINK-2_0), see https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-application.html.\n", "before_files": [{"content": "# Copyright (c) 2012-2022, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\n\ndef validate_runtime_environment(runtime_environment):\n \"\"\"\n Validate RuntimeEnvironment for Application\n Property: Application.RuntimeEnvironment\n \"\"\"\n\n VALID_RUNTIME_ENVIRONMENTS = (\"SQL-1_0\", \"FLINK-1_6\", \"FLINK-1_8\", \"FLINK-1_11\")\n\n if runtime_environment not in VALID_RUNTIME_ENVIRONMENTS:\n raise ValueError(\n \"Application RuntimeEnvironment must be one of: %s\"\n % \", \".join(VALID_RUNTIME_ENVIRONMENTS)\n )\n return runtime_environment\n", "path": "troposphere/validators/kinesisanalyticsv2.py"}]} | 815 | 233 |
gh_patches_debug_979 | rasdani/github-patches | git_diff | acl-org__acl-anthology-3022 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Paper Metadata: 2023.findings-emnlp.1054
### Confirm that this is a metadata correction
- [X] I want to file corrections to make the metadata match the PDF file hosted on the ACL Anthology.
### Anthology ID
2023.findings-emnlp.1054
### Type of Paper Metadata Correction
- [X] Paper Title
- [ ] Paper Abstract
- [ ] Author Name(s)
### Correction to Paper Title
Please change the paper title that appears in Cite (ACL) and Cite (Informal) to "Measuring Pointwise 𝒱-Usable Information In-Context-ly"
### Correction to Paper Abstract
_No response_
### Correction to Author Name(s)
_No response_
</issue>
<code>
[start of bin/anthology/texmath.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright 2019 Marcel Bollmann <[email protected]>
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16
17 from lxml import etree
18 import csv
19 import logging as log
20 import os
21 from TexSoup import TexSoup
22 from TexSoup.data import TexCmd, TexText, TexGroup
23
24 FUNCTION_NAMES = ("lim", "log")
25 TEX_TO_HTML = {
26 "mathrm": ("span", {"class": "font-weight-normal"}),
27 "textrm": ("span", {"class": "font-weight-normal"}),
28 "text": ("span", {"class": "font-weight-normal"}),
29 "mathbf": ("strong", {}),
30 "textbf": ("strong", {}),
31 "boldsymbol": ("strong", {}),
32 "mathit": ("em", {}),
33 "textit": ("em", {}),
34 "emph": ("em", {}),
35 "textsc": ("span", {"style": "font-variant: small-caps;"}),
36 "texttt": ("span", {"class": "text-monospace"}),
37 "textsubscript": ("sub", {}),
38 "textsuperscript": ("sup", {}),
39 }
40 REMOVED_COMMANDS = ("bf", "rm", "it", "sc")
41
42
43 def _append_text(text, trg):
44 if not text:
45 return
46 if len(trg):
47 if trg[-1].tail is not None:
48 trg[-1].tail += text
49 else:
50 trg[-1].tail = text
51 else:
52 if trg.text is not None:
53 trg.text += text
54 else:
55 trg.text = text
56
57
58 class TexMath:
59 """Interpreter and converter for TeX inline math expressions.
60
61 This class uses TexSoup (https://github.com/alvinwan/TexSoup) to parse a TeX
62 expression and converts it to valid HTML. The conversion combines a small
63 number of handwritten rules with a mapping of LaTeX math mode commands to
64 Unicode symbols (http://milde.users.sourceforge.net/LUCR/Math/). Parts that
65 cannot be interpreted using this simple method are preserved as raw LaTeX.
66 """
67
68 def __init__(self, symbolsfile=None):
69 self.cmd_map = {}
70 if symbolsfile is None:
71 symbolsfile = os.path.join(
72 os.path.dirname(os.path.abspath(__file__)), "unimathsymbols.txt"
73 )
74 self.load_symbols(symbolsfile)
75
76 def load_symbols(self, filename):
77 with open(filename, "r", encoding="utf8") as f:
78 reader = csv.reader(f, delimiter="^")
79 for row in reader:
80 if row[0].startswith("#"): # comment
81 continue
82 assert len(row) == 8, "Expect eight-column format"
83 char, cmd = row[1], row[2]
84 if cmd.startswith("\\"):
85 self.cmd_map[cmd[1:]] = char
86 if row[-1].startswith("= ") and ", " in row[-1]:
87 # last column sometimes contains alternative command
88 cmd = row[-1][2:].split(", ")[0]
89 if cmd.startswith("\\"):
90 self.cmd_map[cmd[1:]] = char
91
92 def _parse(self, everything, trg):
93 """Parses a list of TeX constituents into an lxml.etree._Element.
94
95 Arguments:
96 everything: An iterator over TeX constituents as provided by TexSoup
97 trg: The lxml.etree._Element to parse the expression into
98
99 The approach of iterating over the TeX constituents roughly follows
100 <https://github.com/alvinwan/TexSoup/blob/master/examples/list_everything.py>.
101 """
102 sxscript = False # Tracks whether we're in a subscript/superscript
103 for code in everything:
104 if isinstance(code, TexCmd):
105 # code is a TeX command
106 self._parse_command(code, trg)
107 elif isinstance(code, TexText) or isinstance(code, str):
108 # code is text
109 sxscript = self._parse_text(code, trg)
110 elif isinstance(code, TexGroup):
111 # If in subscript/superscript, wrap the entire element in respective tag
112 if sxscript:
113 my_trg = etree.Element(sxscript)
114 self._parse(code.contents, my_trg)
115 trg.append(my_trg)
116 sxscript = False
117 # Otherwise, just parse it normally
118 else:
119 self._parse(code.contents, trg)
120 else:
121 log.error(f"TeX-math parser got unhandled element: {type(code)}")
122
123 def _parse_command(self, code, trg):
124 args = list(code.args)
125 name = str(code.name)
126 # Check if the command is in the list of known Unicode mappings
127 if name in self.cmd_map:
128 _append_text(self.cmd_map[name], trg)
129 self._parse(args, trg)
130 # Check if command + arguments is in the list of known Unicode mappings
131 # (this covers commands like "\mathcal{A}", which have their own entries)
132 elif str(code)[1:] in self.cmd_map:
133 _append_text(self.cmd_map[str(code)[1:]], trg)
134 # Check if command is a known function name (e.g. "log")
135 elif name in FUNCTION_NAMES:
136 sx = etree.Element("span")
137 sx.attrib["class"] = "tex-math-function"
138 sx.text = str(name)
139 trg.append(sx)
140 self._parse(args, trg)
141 # Handle fractions
142 elif name == "frac":
143 self._parse_fraction(args, trg)
144 # Handle commands with simple HTML tag substitutions
145 elif name in TEX_TO_HTML:
146 elem_name, elem_attrib = TEX_TO_HTML[name]
147 sx = etree.Element(elem_name, attrib=elem_attrib)
148 self._parse(args, sx)
149 trg.append(sx)
150 # Known, but unsupported formatting tags that will just be removed
151 elif name in REMOVED_COMMANDS and not args:
152 pass
153 # Give up, but preserve element
154 else:
155 log.warn(f"Unknown TeX-math command: {code}")
156 self._append_unparsed(code, trg)
157
158 def _parse_fraction(self, args, trg):
159 if len(args) != 2:
160 log.warn(f"Couldn't parse \\frac: got {len(args)} arguments, expected 2")
161 self._append_unparsed({'name': 'frac', 'args': args}, trg)
162 else:
163 # Represent numerator of fraction as superscript
164 sx = etree.Element("sup")
165 self._parse([args[0]], sx)
166 trg.append(sx)
167 # Unicode symbol for fraction slash
168 _append_text("\u2044", trg)
169 # Represent denominator of fraction as subscript
170 sx = etree.Element("sub")
171 self._parse([args[1]], sx)
172 trg.append(sx)
173
174 def _parse_text(self, code, trg):
175 text = str(code)
176 # TexSoup doesn't parse any non-alpha command as a command. Ex: \$
177 # However it does seperate them into their own text part. Ex: 'r\\&dd' -> ['r', '\\&', 'dd']
178 # Therefore try to do command mapping replacement of all text beginning with \ and of length 2
179 if len(text) == 2 and text[0] == '\\':
180 text = self.cmd_map.get(text[1], text)
181 _append_text(text, trg)
182 return
183 # parse ^ and _ (won't get recognized as separate nodes by TexSoup)
184 sxscript = False
185 if "^" in text or "_" in text:
186 buf = ""
187 for char in text:
188 if char == "^" or char == "_":
189 _append_text(buf, trg)
190 buf = ""
191 sxscript = "sup" if char == "^" else "sub"
192 elif sxscript:
193 sx = etree.Element(sxscript)
194 sx.text = char
195 trg.append(sx)
196 sxscript = False
197 else:
198 buf += char
199 text = buf
200 # Append as text
201 _append_text(text, trg)
202 return sxscript
203
204 def _append_unparsed(self, code, trg):
205 pre = etree.Element("span")
206 pre.attrib["class"] = "tex-math-unparsed"
207 pre.text = f"\\{code.name}{code.args}"
208 trg.append(pre)
209
210 def etree_to_html(self, element):
211 result = etree.Element("span")
212 result.attrib["class"] = "tex-math"
213 result.tail = element.tail # Preserve tail
214 self._parse(TexSoup(element.text).expr.all, result)
215 return result
216
217 def to_html(self, element):
218 """Converts a TeX math expression to HTML markup."""
219 if isinstance(element, etree._Element):
220 return self.etree_to_html(element)
221 elif isinstance(element, str):
222 value = self.etree_to_html(etree.fromstring(f"<span>{element}</span>"))
223 return etree.tostring(value)
224 raise NotImplementedError(f"Cannot convert elements of type {type(element)}")
225
226 def to_unicode(self, element):
227 """Converts a TeX math expression to a Unicode string.
228
229 This will perform the same conversions as `to_html()`, but strip out the
230 HTML tags afterwards.
231 """
232 element = self.to_html(element)
233 return etree.tostring(element, encoding="unicode", method="text")
234
[end of bin/anthology/texmath.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bin/anthology/texmath.py b/bin/anthology/texmath.py
--- a/bin/anthology/texmath.py
+++ b/bin/anthology/texmath.py
@@ -230,4 +230,4 @@
HTML tags afterwards.
"""
element = self.to_html(element)
- return etree.tostring(element, encoding="unicode", method="text")
+ return etree.tostring(element, encoding="unicode", method="text", with_tail=False)
| {"golden_diff": "diff --git a/bin/anthology/texmath.py b/bin/anthology/texmath.py\n--- a/bin/anthology/texmath.py\n+++ b/bin/anthology/texmath.py\n@@ -230,4 +230,4 @@\n HTML tags afterwards.\n \"\"\"\n element = self.to_html(element)\n- return etree.tostring(element, encoding=\"unicode\", method=\"text\")\n+ return etree.tostring(element, encoding=\"unicode\", method=\"text\", with_tail=False)\n", "issue": "Paper Metadata: 2023.findings-emnlp.1054\n### Confirm that this is a metadata correction\n\n- [X] I want to file corrections to make the metadata match the PDF file hosted on the ACL Anthology.\n\n### Anthology ID\n\n2023.findings-emnlp.1054\n\n### Type of Paper Metadata Correction\n\n- [X] Paper Title\n- [ ] Paper Abstract\n- [ ] Author Name(s)\n\n### Correction to Paper Title\n\nPlease change the paper title appeared in Cite (ACL) and Cite (Informal) to \"Measuring Pointwise \ud835\udcb1-Usable Information In-Context-ly\"\n\n### Correction to Paper Abstract\n\n_No response_\n\n### Correction to Author Name(s)\n\n_No response_\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2019 Marcel Bollmann <[email protected]>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom lxml import etree\nimport csv\nimport logging as log\nimport os\nfrom TexSoup import TexSoup\nfrom TexSoup.data import TexCmd, TexText, TexGroup\n\nFUNCTION_NAMES = (\"lim\", \"log\")\nTEX_TO_HTML = {\n \"mathrm\": (\"span\", {\"class\": \"font-weight-normal\"}),\n \"textrm\": (\"span\", {\"class\": \"font-weight-normal\"}),\n \"text\": (\"span\", {\"class\": \"font-weight-normal\"}),\n \"mathbf\": (\"strong\", {}),\n \"textbf\": (\"strong\", {}),\n \"boldsymbol\": (\"strong\", {}),\n \"mathit\": (\"em\", {}),\n \"textit\": (\"em\", {}),\n \"emph\": (\"em\", {}),\n \"textsc\": (\"span\", {\"style\": \"font-variant: small-caps;\"}),\n \"texttt\": (\"span\", {\"class\": \"text-monospace\"}),\n \"textsubscript\": (\"sub\", {}),\n \"textsuperscript\": (\"sup\", {}),\n}\nREMOVED_COMMANDS = (\"bf\", \"rm\", \"it\", \"sc\")\n\n\ndef _append_text(text, trg):\n if not text:\n return\n if len(trg):\n if trg[-1].tail is not None:\n trg[-1].tail += text\n else:\n trg[-1].tail = text\n else:\n if trg.text is not None:\n trg.text += text\n else:\n trg.text = text\n\n\nclass TexMath:\n \"\"\"Interpreter and converter for TeX inline math expressions.\n\n This class uses TexSoup (https://github.com/alvinwan/TexSoup) to parse a TeX\n expression and converts it to valid HTML. The conversion combines a small\n number of handwritten rules with a mapping of LaTeX math mode commands to\n Unicode symbols (http://milde.users.sourceforge.net/LUCR/Math/). 
Parts that\n cannot be interpreted using this simple method are preserved as raw LaTeX.\n \"\"\"\n\n def __init__(self, symbolsfile=None):\n self.cmd_map = {}\n if symbolsfile is None:\n symbolsfile = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"unimathsymbols.txt\"\n )\n self.load_symbols(symbolsfile)\n\n def load_symbols(self, filename):\n with open(filename, \"r\", encoding=\"utf8\") as f:\n reader = csv.reader(f, delimiter=\"^\")\n for row in reader:\n if row[0].startswith(\"#\"): # comment\n continue\n assert len(row) == 8, \"Expect eight-column format\"\n char, cmd = row[1], row[2]\n if cmd.startswith(\"\\\\\"):\n self.cmd_map[cmd[1:]] = char\n if row[-1].startswith(\"= \") and \", \" in row[-1]:\n # last column sometimes contains alternative command\n cmd = row[-1][2:].split(\", \")[0]\n if cmd.startswith(\"\\\\\"):\n self.cmd_map[cmd[1:]] = char\n\n def _parse(self, everything, trg):\n \"\"\"Parses a list of TeX constituents into an lxml.etree._Element.\n\n Arguments:\n everything: An iterator over TeX constituents as provided by TexSoup\n trg: The lxml.etree._Element to parse the expression into\n\n The approach of iterating over the TeX constituents roughly follows\n <https://github.com/alvinwan/TexSoup/blob/master/examples/list_everything.py>.\n \"\"\"\n sxscript = False # Tracks whether we're in a subscript/superscript\n for code in everything:\n if isinstance(code, TexCmd):\n # code is a TeX command\n self._parse_command(code, trg)\n elif isinstance(code, TexText) or isinstance(code, str):\n # code is text\n sxscript = self._parse_text(code, trg)\n elif isinstance(code, TexGroup):\n # If in subscript/superscript, wrap the entire element in respective tag\n if sxscript:\n my_trg = etree.Element(sxscript)\n self._parse(code.contents, my_trg)\n trg.append(my_trg)\n sxscript = False\n # Otherwise, just parse it normally\n else:\n self._parse(code.contents, trg)\n else:\n log.error(f\"TeX-math parser got unhandled element: {type(code)}\")\n\n def _parse_command(self, code, trg):\n args = list(code.args)\n name = str(code.name)\n # Check if the command is in the list of known Unicode mappings\n if name in self.cmd_map:\n _append_text(self.cmd_map[name], trg)\n self._parse(args, trg)\n # Check if command + arguments is in the list of known Unicode mappings\n # (this covers commands like \"\\mathcal{A}\", which have their own entries)\n elif str(code)[1:] in self.cmd_map:\n _append_text(self.cmd_map[str(code)[1:]], trg)\n # Check if command is a known function name (e.g. 
\"log\")\n elif name in FUNCTION_NAMES:\n sx = etree.Element(\"span\")\n sx.attrib[\"class\"] = \"tex-math-function\"\n sx.text = str(name)\n trg.append(sx)\n self._parse(args, trg)\n # Handle fractions\n elif name == \"frac\":\n self._parse_fraction(args, trg)\n # Handle commands with simple HTML tag substitutions\n elif name in TEX_TO_HTML:\n elem_name, elem_attrib = TEX_TO_HTML[name]\n sx = etree.Element(elem_name, attrib=elem_attrib)\n self._parse(args, sx)\n trg.append(sx)\n # Known, but unsupported formatting tags that will just be removed\n elif name in REMOVED_COMMANDS and not args:\n pass\n # Give up, but preserve element\n else:\n log.warn(f\"Unknown TeX-math command: {code}\")\n self._append_unparsed(code, trg)\n\n def _parse_fraction(self, args, trg):\n if len(args) != 2:\n log.warn(f\"Couldn't parse \\\\frac: got {len(args)} arguments, expected 2\")\n self._append_unparsed({'name': 'frac', 'args': args}, trg)\n else:\n # Represent numerator of fraction as superscript\n sx = etree.Element(\"sup\")\n self._parse([args[0]], sx)\n trg.append(sx)\n # Unicode symbol for fraction slash\n _append_text(\"\\u2044\", trg)\n # Represent denominator of fraction as subscript\n sx = etree.Element(\"sub\")\n self._parse([args[1]], sx)\n trg.append(sx)\n\n def _parse_text(self, code, trg):\n text = str(code)\n # TexSoup doesn't parse any non-alpha command as a command. Ex: \\$\n # However it does seperate them into their own text part. Ex: 'r\\\\&dd' -> ['r', '\\\\&', 'dd']\n # Therefore try to do command mapping replacement of all text beginning with \\ and of length 2\n if len(text) == 2 and text[0] == '\\\\':\n text = self.cmd_map.get(text[1], text)\n _append_text(text, trg)\n return\n # parse ^ and _ (won't get recognized as separate nodes by TexSoup)\n sxscript = False\n if \"^\" in text or \"_\" in text:\n buf = \"\"\n for char in text:\n if char == \"^\" or char == \"_\":\n _append_text(buf, trg)\n buf = \"\"\n sxscript = \"sup\" if char == \"^\" else \"sub\"\n elif sxscript:\n sx = etree.Element(sxscript)\n sx.text = char\n trg.append(sx)\n sxscript = False\n else:\n buf += char\n text = buf\n # Append as text\n _append_text(text, trg)\n return sxscript\n\n def _append_unparsed(self, code, trg):\n pre = etree.Element(\"span\")\n pre.attrib[\"class\"] = \"tex-math-unparsed\"\n pre.text = f\"\\\\{code.name}{code.args}\"\n trg.append(pre)\n\n def etree_to_html(self, element):\n result = etree.Element(\"span\")\n result.attrib[\"class\"] = \"tex-math\"\n result.tail = element.tail # Preserve tail\n self._parse(TexSoup(element.text).expr.all, result)\n return result\n\n def to_html(self, element):\n \"\"\"Converts a TeX math expression to HTML markup.\"\"\"\n if isinstance(element, etree._Element):\n return self.etree_to_html(element)\n elif isinstance(element, str):\n value = self.etree_to_html(etree.fromstring(f\"<span>{element}</span>\"))\n return etree.tostring(value)\n raise NotImplementedError(f\"Cannot convert elements of type {type(element)}\")\n\n def to_unicode(self, element):\n \"\"\"Converts a TeX math expression to a Unicode string.\n\n This will perform the same conversions as `to_html()`, but strip out the\n HTML tags afterwards.\n \"\"\"\n element = self.to_html(element)\n return etree.tostring(element, encoding=\"unicode\", method=\"text\")\n", "path": "bin/anthology/texmath.py"}]} | 3,437 | 109 |
gh_patches_debug_6154 | rasdani/github-patches | git_diff | litestar-org__litestar-1659 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
StaticFilesConfig and virtual directories
I'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem.
This is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.
https://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32
</issue>
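To illustrate the existence check the issue describes, here is a small pydantic sketch. The class and path names are invented for illustration and are not Starlite's actual `StaticFilesConfig`.

```python
from pathlib import Path
from typing import List

from pydantic import BaseModel, DirectoryPath, ValidationError


class StrictConfig(BaseModel):
    directories: List[DirectoryPath]  # DirectoryPath requires an existing local directory


class RelaxedConfig(BaseModel):
    directories: List[Path]  # plain Path skips the existence check


try:
    StrictConfig(directories=["my_pkg/static"])  # fails unless the folder exists on disk
except ValidationError as exc:
    print(exc)

RelaxedConfig(directories=["my_pkg/static"])  # accepted, so a virtual/zip-backed
                                              # location can reach the FileSystemProtocol
```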
<code>
[start of litestar/contrib/repository/filters.py]
1 """Collection filter datastructures."""
2 from __future__ import annotations
3
4 from dataclasses import dataclass
5 from datetime import datetime # noqa: TCH003
6 from typing import TYPE_CHECKING, Generic, Literal, TypeVar
7
8 if TYPE_CHECKING:
9 from collections import abc
10
11
12 T = TypeVar("T")
13
14 __all__ = ["BeforeAfter", "CollectionFilter", "LimitOffset", "OrderBy", "SearchFilter"]
15
16
17 @dataclass
18 class BeforeAfter:
19 """Data required to filter a query on a ``datetime`` column."""
20
21 field_name: str
22 """Name of the model attribute to filter on."""
23 before: datetime | None
24 """Filter results where field earlier than this."""
25 after: datetime | None
26 """Filter results where field later than this."""
27
28
29 @dataclass
30 class CollectionFilter(Generic[T]):
31 """Data required to construct a ``WHERE ... IN (...)`` clause."""
32
33 field_name: str
34 """Name of the model attribute to filter on."""
35 values: abc.Collection[T]
36 """Values for ``IN`` clause."""
37
38
39 @dataclass
40 class LimitOffset:
41 """Data required to add limit/offset filtering to a query."""
42
43 limit: int
44 """Value for ``LIMIT`` clause of query."""
45 offset: int
46 """Value for ``OFFSET`` clause of query."""
47
48
49 @dataclass
50 class OrderBy:
51 """Data required to construct a ``ORDER BY ...`` clause."""
52
53 field_name: str
54 """Name of the model attribute to sort on."""
55 sort_order: Literal["asc", "desc"] = "asc"
56 """Sort ascending or descending"""
57
58
59 @dataclass
60 class SearchFilter:
61 """Data required to construct a ``WHERE field_name LIKE '%' || :value || '%'`` clause."""
62
63 field_name: str
64 """Name of the model attribute to sort on."""
65 value: str
66 """Values for ``LIKE`` clause."""
67 ignore_case: bool | None = False
68 """Should the search be case insensitive."""
69
[end of litestar/contrib/repository/filters.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/litestar/contrib/repository/filters.py b/litestar/contrib/repository/filters.py
--- a/litestar/contrib/repository/filters.py
+++ b/litestar/contrib/repository/filters.py
@@ -1,13 +1,10 @@
"""Collection filter datastructures."""
from __future__ import annotations
+from collections import abc # noqa: TCH003
from dataclasses import dataclass
from datetime import datetime # noqa: TCH003
-from typing import TYPE_CHECKING, Generic, Literal, TypeVar
-
-if TYPE_CHECKING:
- from collections import abc
-
+from typing import Generic, Literal, TypeVar
T = TypeVar("T")
| {"golden_diff": "diff --git a/litestar/contrib/repository/filters.py b/litestar/contrib/repository/filters.py\n--- a/litestar/contrib/repository/filters.py\n+++ b/litestar/contrib/repository/filters.py\n@@ -1,13 +1,10 @@\n \"\"\"Collection filter datastructures.\"\"\"\n from __future__ import annotations\n \n+from collections import abc # noqa: TCH003\n from dataclasses import dataclass\n from datetime import datetime # noqa: TCH003\n-from typing import TYPE_CHECKING, Generic, Literal, TypeVar\n-\n-if TYPE_CHECKING:\n- from collections import abc\n-\n+from typing import Generic, Literal, TypeVar\n \n T = TypeVar(\"T\")\n", "issue": "StaticFilesConfig and virtual directories\nI'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. \r\n\r\nThis is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.\r\n\r\nhttps://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32\n", "before_files": [{"content": "\"\"\"Collection filter datastructures.\"\"\"\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom datetime import datetime # noqa: TCH003\nfrom typing import TYPE_CHECKING, Generic, Literal, TypeVar\n\nif TYPE_CHECKING:\n from collections import abc\n\n\nT = TypeVar(\"T\")\n\n__all__ = [\"BeforeAfter\", \"CollectionFilter\", \"LimitOffset\", \"OrderBy\", \"SearchFilter\"]\n\n\n@dataclass\nclass BeforeAfter:\n \"\"\"Data required to filter a query on a ``datetime`` column.\"\"\"\n\n field_name: str\n \"\"\"Name of the model attribute to filter on.\"\"\"\n before: datetime | None\n \"\"\"Filter results where field earlier than this.\"\"\"\n after: datetime | None\n \"\"\"Filter results where field later than this.\"\"\"\n\n\n@dataclass\nclass CollectionFilter(Generic[T]):\n \"\"\"Data required to construct a ``WHERE ... IN (...)`` clause.\"\"\"\n\n field_name: str\n \"\"\"Name of the model attribute to filter on.\"\"\"\n values: abc.Collection[T]\n \"\"\"Values for ``IN`` clause.\"\"\"\n\n\n@dataclass\nclass LimitOffset:\n \"\"\"Data required to add limit/offset filtering to a query.\"\"\"\n\n limit: int\n \"\"\"Value for ``LIMIT`` clause of query.\"\"\"\n offset: int\n \"\"\"Value for ``OFFSET`` clause of query.\"\"\"\n\n\n@dataclass\nclass OrderBy:\n \"\"\"Data required to construct a ``ORDER BY ...`` clause.\"\"\"\n\n field_name: str\n \"\"\"Name of the model attribute to sort on.\"\"\"\n sort_order: Literal[\"asc\", \"desc\"] = \"asc\"\n \"\"\"Sort ascending or descending\"\"\"\n\n\n@dataclass\nclass SearchFilter:\n \"\"\"Data required to construct a ``WHERE field_name LIKE '%' || :value || '%'`` clause.\"\"\"\n\n field_name: str\n \"\"\"Name of the model attribute to sort on.\"\"\"\n value: str\n \"\"\"Values for ``LIKE`` clause.\"\"\"\n ignore_case: bool | None = False\n \"\"\"Should the search be case insensitive.\"\"\"\n", "path": "litestar/contrib/repository/filters.py"}]} | 1,273 | 155 |
gh_patches_debug_26852 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-1999 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
tiles on plans and container: blue corner missing for external projects
For external projects, the little blue corner is missing.
Observed on Mac, in both Chrome and Firefox.
<img width="400" alt="bildschirmfoto 2019-02-11 um 16 45 01" src="https://user-images.githubusercontent.com/35491681/52574395-7d708980-2e1c-11e9-8cfd-b9f8be74ea16.png">
</issue>
<code>
[start of meinberlin/apps/dashboard/__init__.py]
1 from adhocracy4.dashboard import components
2 from adhocracy4.dashboard import ProjectDashboard
3 from meinberlin.apps.projects import get_project_type
4
5
6 default_app_config = 'meinberlin.apps.dashboard.apps.Config'
7
8
9 class TypedProjectDashboard(ProjectDashboard):
10 def __init__(self, project):
11 self.project_type = get_project_type(project)
12 if self.project_type == 'bplan':
13 project = project.externalproject.bplan
14 elif self.project_type == 'external':
15 project = project.externalproject
16 elif self.project_type == 'container':
17 project = project.projectcontainer
18 super().__init__(project)
19
20 def get_project_components(self):
21 if self.project_type == 'bplan':
22 return [components.projects.get('bplan'),
23 components.projects.get('adminlog')]
24 elif self.project_type == 'external':
25 return [components.projects.get('external'),
26 components.projects.get('adminlog')]
27 elif self.project_type == 'container':
28 return [components.projects.get('container-basic'),
29 components.projects.get('container-information'),
30 components.projects.get('topics'),
31 components.projects.get('point'),
32 components.projects.get('container-projects')]
33
34 return [component for component in components.get_project_components()
35 if component.is_effective(self.project)]
36
37 def get_module_components(self):
38 if self.project_type == 'bplan':
39 return []
40 elif self.project_type == 'external':
41 return []
42 elif self.project_type == 'container':
43 return []
44
45 return components.get_module_components()
46
[end of meinberlin/apps/dashboard/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/meinberlin/apps/dashboard/__init__.py b/meinberlin/apps/dashboard/__init__.py
--- a/meinberlin/apps/dashboard/__init__.py
+++ b/meinberlin/apps/dashboard/__init__.py
@@ -20,15 +20,20 @@
def get_project_components(self):
if self.project_type == 'bplan':
return [components.projects.get('bplan'),
+ components.projects.get('plans'),
components.projects.get('adminlog')]
elif self.project_type == 'external':
return [components.projects.get('external'),
+ components.projects.get('topics'),
+ components.projects.get('point'),
+ components.projects.get('plans'),
components.projects.get('adminlog')]
elif self.project_type == 'container':
return [components.projects.get('container-basic'),
components.projects.get('container-information'),
components.projects.get('topics'),
components.projects.get('point'),
+ components.projects.get('plans'),
components.projects.get('container-projects')]
return [component for component in components.get_project_components()
| {"golden_diff": "diff --git a/meinberlin/apps/dashboard/__init__.py b/meinberlin/apps/dashboard/__init__.py\n--- a/meinberlin/apps/dashboard/__init__.py\n+++ b/meinberlin/apps/dashboard/__init__.py\n@@ -20,15 +20,20 @@\n def get_project_components(self):\n if self.project_type == 'bplan':\n return [components.projects.get('bplan'),\n+ components.projects.get('plans'),\n components.projects.get('adminlog')]\n elif self.project_type == 'external':\n return [components.projects.get('external'),\n+ components.projects.get('topics'),\n+ components.projects.get('point'),\n+ components.projects.get('plans'),\n components.projects.get('adminlog')]\n elif self.project_type == 'container':\n return [components.projects.get('container-basic'),\n components.projects.get('container-information'),\n components.projects.get('topics'),\n components.projects.get('point'),\n+ components.projects.get('plans'),\n components.projects.get('container-projects')]\n \n return [component for component in components.get_project_components()\n", "issue": "tiles on plans and container: blue corner missing for external projects\nfor external projects the little blue corner is missing\r\n\r\nmac on chrome and firefox\r\n\r\n<img width=\"400\" alt=\"bildschirmfoto 2019-02-11 um 16 45 01\" src=\"https://user-images.githubusercontent.com/35491681/52574395-7d708980-2e1c-11e9-8cfd-b9f8be74ea16.png\">\r\n\n", "before_files": [{"content": "from adhocracy4.dashboard import components\nfrom adhocracy4.dashboard import ProjectDashboard\nfrom meinberlin.apps.projects import get_project_type\n\n\ndefault_app_config = 'meinberlin.apps.dashboard.apps.Config'\n\n\nclass TypedProjectDashboard(ProjectDashboard):\n def __init__(self, project):\n self.project_type = get_project_type(project)\n if self.project_type == 'bplan':\n project = project.externalproject.bplan\n elif self.project_type == 'external':\n project = project.externalproject\n elif self.project_type == 'container':\n project = project.projectcontainer\n super().__init__(project)\n\n def get_project_components(self):\n if self.project_type == 'bplan':\n return [components.projects.get('bplan'),\n components.projects.get('adminlog')]\n elif self.project_type == 'external':\n return [components.projects.get('external'),\n components.projects.get('adminlog')]\n elif self.project_type == 'container':\n return [components.projects.get('container-basic'),\n components.projects.get('container-information'),\n components.projects.get('topics'),\n components.projects.get('point'),\n components.projects.get('container-projects')]\n\n return [component for component in components.get_project_components()\n if component.is_effective(self.project)]\n\n def get_module_components(self):\n if self.project_type == 'bplan':\n return []\n elif self.project_type == 'external':\n return []\n elif self.project_type == 'container':\n return []\n\n return components.get_module_components()\n", "path": "meinberlin/apps/dashboard/__init__.py"}]} | 1,072 | 230 |
gh_patches_debug_53786 | rasdani/github-patches | git_diff | psychopy__psychopy-667 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Inconsistent method signature (or missing hook?)
Windows XP, Psychopy 1.80.06
window.flip calls dispatch_events() for each entry in _eventDispatchers (pyglet winType), but DirectInputDevice (hardware/joystick/pyglet_input/directinput.py) only exposes that method under a different name: _dispatch_events. Needs to be fixed somewhere.
</issue>
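A self-contained sketch of the mismatch follows; the class names are stand-ins, not the real pyglet or PsychoPy classes. `Window.flip()` expects every registered dispatcher to offer a public `dispatch_events()`, while the joystick device only defines `_dispatch_events()`; a thin alias makes the two compatible.

```python
class WindowDispatcher:           # stand-in for a pyglet window
    def dispatch_events(self):
        print("window events pumped")


class JoystickDevice:             # stand-in for DirectInputDevice
    def _dispatch_events(self):
        print("joystick events pumped")

    def dispatch_events(self):    # public name that flip() expects
        self._dispatch_events()


for dispatcher in (WindowDispatcher(), JoystickDevice()):
    dispatcher.dispatch_events()  # works for both once the alias exists
```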
<code>
[start of psychopy/hardware/joystick/pyglet_input/directinput.py]
1 #!/usr/bin/env python2
2 # $Id:$
3
4 import ctypes
5
6 import app
7 import base
8 import pyglet
9 from pyglet.window import win32
10 import dinput
11 from pyglet.window.win32 import _kernel32
12
13 # These instance names are not defined anywhere, obtained by experiment. The
14 # GUID names (which seem to be ideally what are needed) are wrong/missing for
15 # most of my devices.
16
17 _abs_instance_names = {
18 0: 'x',
19 1: 'y',
20 2: 'z',
21 3: 'rx',
22 4: 'ry',
23 5: 'rz',
24 }
25
26 _rel_instance_names = {
27 0: 'x',
28 1: 'y',
29 2: 'wheel',
30 }
31
32 _btn_instance_names = {}
33
34 def _create_control(object_instance):
35 raw_name = object_instance.tszName
36 type = object_instance.dwType
37 instance = dinput.DIDFT_GETINSTANCE(type)
38
39 if type & dinput.DIDFT_ABSAXIS:
40 name = _abs_instance_names.get(instance)
41 control = base.AbsoluteAxis(name, 0, 0xffff, raw_name)
42 elif type & dinput.DIDFT_RELAXIS:
43 name = _rel_instance_names.get(instance)
44 control = base.RelativeAxis(name, raw_name)
45 elif type & dinput.DIDFT_BUTTON:
46 name = _btn_instance_names.get(instance)
47 control = base.Button(name, raw_name)
48 elif type & dinput.DIDFT_POV:
49 control = base.AbsoluteAxis(base.AbsoluteAxis.HAT,
50 0, 0xffffffff, raw_name)
51 else:
52 return
53
54 control._type = object_instance.dwType
55 return control
56
57 class DirectInputDevice(base.Device):
58 def __init__(self, display, device, device_instance):
59 name = device_instance.tszInstanceName
60 super(DirectInputDevice, self).__init__(display, name)
61
62 self._type = device_instance.dwDevType & 0xff
63 self._subtype = device_instance.dwDevType & 0xff00
64
65 self._device = device
66 self._init_controls()
67 self._set_format()
68
69 def _init_controls(self):
70 self.controls = []
71 self._device.EnumObjects(
72 dinput.LPDIENUMDEVICEOBJECTSCALLBACK(self._object_enum),
73 None, dinput.DIDFT_ALL)
74
75 def _object_enum(self, object_instance, arg):
76 control = _create_control(object_instance.contents)
77 if control:
78 self.controls.append(control)
79 return dinput.DIENUM_CONTINUE
80
81 def _set_format(self):
82 if not self.controls:
83 return
84
85 object_formats = (dinput.DIOBJECTDATAFORMAT * len(self.controls))()
86 offset = 0
87 for object_format, control in zip(object_formats, self.controls):
88 object_format.dwOfs = offset
89 object_format.dwType = control._type
90 offset += 4
91
92 format = dinput.DIDATAFORMAT()
93 format.dwSize = ctypes.sizeof(format)
94 format.dwObjSize = ctypes.sizeof(dinput.DIOBJECTDATAFORMAT)
95 format.dwFlags = 0
96 format.dwDataSize = offset
97 format.dwNumObjs = len(object_formats)
98 format.rgodf = ctypes.cast(ctypes.pointer(object_formats),
99 dinput.LPDIOBJECTDATAFORMAT)
100 self._device.SetDataFormat(format)
101
102 prop = dinput.DIPROPDWORD()
103 prop.diph.dwSize = ctypes.sizeof(prop)
104 prop.diph.dwHeaderSize = ctypes.sizeof(prop.diph)
105 prop.diph.dwObj = 0
106 prop.diph.dwHow = dinput.DIPH_DEVICE
107 prop.dwData = 64 * ctypes.sizeof(dinput.DIDATAFORMAT)
108 self._device.SetProperty(dinput.DIPROP_BUFFERSIZE,
109 ctypes.byref(prop.diph))
110
111 def open(self, window=None, exclusive=False):
112 if not self.controls:
113 return
114
115 if window is None:
116 # Pick any open window, or the shadow window if no windows
117 # have been created yet.
118 window = pyglet.gl._shadow_window
119 for window in app.windows:
120 break
121
122 flags = dinput.DISCL_BACKGROUND
123 if exclusive:
124 flags |= dinput.DISCL_EXCLUSIVE
125 else:
126 flags |= dinput.DISCL_NONEXCLUSIVE
127
128 self._wait_object = _kernel32.CreateEventW(None, False, False, None)
129 self._device.SetEventNotification(self._wait_object)
130 app.platform_event_loop.add_wait_object(self._wait_object,
131 self._dispatch_events)
132
133 self._device.SetCooperativeLevel(window._hwnd, flags)
134 self._device.Acquire()
135
136 def close(self):
137 if not self.controls:
138 return
139
140 app.platform_event_loop.remove_wait_object(self._wait_object)
141
142 self._device.Unacquire()
143 self._device.SetEventNotification(None)
144
145 _kernel32.CloseHandle(self._wait_object)
146
147 def get_controls(self):
148 return self.controls
149
150 def _dispatch_events(self):
151 if not self.controls:
152 return
153
154 events = (dinput.DIDEVICEOBJECTDATA * 64)()
155 n_events = win32.DWORD(len(events))
156 self._device.GetDeviceData(ctypes.sizeof(dinput.DIDEVICEOBJECTDATA),
157 ctypes.cast(ctypes.pointer(events),
158 dinput.LPDIDEVICEOBJECTDATA),
159 ctypes.byref(n_events),
160 0)
161 for event in events[:n_events.value]:
162 index = event.dwOfs // 4
163 self.controls[index]._set_value(event.dwData)
164
165 _i_dinput = None
166
167 def _init_directinput():
168 global _i_dinput
169 if _i_dinput:
170 return
171
172 _i_dinput = dinput.IDirectInput8()
173 module = _kernel32.GetModuleHandleW(None)
174 dinput.DirectInput8Create(module, dinput.DIRECTINPUT_VERSION,
175 dinput.IID_IDirectInput8W,
176 ctypes.byref(_i_dinput), None)
177
178 def get_devices(display=None):
179 _init_directinput()
180 _devices = []
181
182 def _device_enum(device_instance, arg):
183 device = dinput.IDirectInputDevice8()
184 _i_dinput.CreateDevice(device_instance.contents.guidInstance,
185 ctypes.byref(device),
186 None)
187 _devices.append(DirectInputDevice(display,
188 device, device_instance.contents))
189
190 return dinput.DIENUM_CONTINUE
191
192 _i_dinput.EnumDevices(dinput.DI8DEVCLASS_ALL,
193 dinput.LPDIENUMDEVICESCALLBACK(_device_enum),
194 None, dinput.DIEDFL_ATTACHEDONLY)
195 return _devices
196
197 def _create_joystick(device):
198 if device._type in (dinput.DI8DEVTYPE_JOYSTICK,
199 dinput.DI8DEVTYPE_GAMEPAD):
200 return base.Joystick(device)
201
202 def get_joysticks(display=None):
203 return filter(None, [_create_joystick(d) for d in get_devices(display)])
204
[end of psychopy/hardware/joystick/pyglet_input/directinput.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/psychopy/hardware/joystick/pyglet_input/directinput.py b/psychopy/hardware/joystick/pyglet_input/directinput.py
--- a/psychopy/hardware/joystick/pyglet_input/directinput.py
+++ b/psychopy/hardware/joystick/pyglet_input/directinput.py
@@ -162,6 +162,10 @@
index = event.dwOfs // 4
self.controls[index]._set_value(event.dwData)
+ def dispatch_events(self):
+ # standardize the method signature
+ self._dispatch_events()
+
_i_dinput = None
def _init_directinput():
| {"golden_diff": "diff --git a/psychopy/hardware/joystick/pyglet_input/directinput.py b/psychopy/hardware/joystick/pyglet_input/directinput.py\n--- a/psychopy/hardware/joystick/pyglet_input/directinput.py\n+++ b/psychopy/hardware/joystick/pyglet_input/directinput.py\n@@ -162,6 +162,10 @@\n index = event.dwOfs // 4\n self.controls[index]._set_value(event.dwData)\n \n+ def dispatch_events(self):\n+ # standardize the method signature\n+ self._dispatch_events()\n+\n _i_dinput = None\n \n def _init_directinput():\n", "issue": "Inconsistent method signature (or missing hook?)\nWindows XP, Psychopy 1.80.06\n\nwindow.flip calls dispatch_events() for each _eventDispatchers (pyglet winType), but DirectInputDevice (hardware/joystick/pyglet_input/directinput.py) has a different signature for that method: _dispatch_event. Needs to be fixed somewhere.\n\n", "before_files": [{"content": "#!/usr/bin/env python2\n# $Id:$\n\nimport ctypes\n\nimport app\nimport base\nimport pyglet\nfrom pyglet.window import win32\nimport dinput\nfrom pyglet.window.win32 import _kernel32\n\n# These instance names are not defined anywhere, obtained by experiment. The\n# GUID names (which seem to be ideally what are needed) are wrong/missing for\n# most of my devices.\n\n_abs_instance_names = {\n 0: 'x',\n 1: 'y',\n 2: 'z',\n 3: 'rx',\n 4: 'ry',\n 5: 'rz',\n}\n\n_rel_instance_names = {\n 0: 'x',\n 1: 'y',\n 2: 'wheel',\n}\n\n_btn_instance_names = {}\n\ndef _create_control(object_instance):\n raw_name = object_instance.tszName\n type = object_instance.dwType\n instance = dinput.DIDFT_GETINSTANCE(type)\n\n if type & dinput.DIDFT_ABSAXIS:\n name = _abs_instance_names.get(instance)\n control = base.AbsoluteAxis(name, 0, 0xffff, raw_name)\n elif type & dinput.DIDFT_RELAXIS:\n name = _rel_instance_names.get(instance)\n control = base.RelativeAxis(name, raw_name)\n elif type & dinput.DIDFT_BUTTON:\n name = _btn_instance_names.get(instance)\n control = base.Button(name, raw_name)\n elif type & dinput.DIDFT_POV:\n control = base.AbsoluteAxis(base.AbsoluteAxis.HAT,\n 0, 0xffffffff, raw_name)\n else:\n return\n\n control._type = object_instance.dwType\n return control\n\nclass DirectInputDevice(base.Device):\n def __init__(self, display, device, device_instance):\n name = device_instance.tszInstanceName\n super(DirectInputDevice, self).__init__(display, name)\n\n self._type = device_instance.dwDevType & 0xff\n self._subtype = device_instance.dwDevType & 0xff00\n\n self._device = device\n self._init_controls()\n self._set_format()\n\n def _init_controls(self):\n self.controls = []\n self._device.EnumObjects(\n dinput.LPDIENUMDEVICEOBJECTSCALLBACK(self._object_enum),\n None, dinput.DIDFT_ALL)\n\n def _object_enum(self, object_instance, arg):\n control = _create_control(object_instance.contents)\n if control:\n self.controls.append(control)\n return dinput.DIENUM_CONTINUE\n\n def _set_format(self):\n if not self.controls:\n return\n\n object_formats = (dinput.DIOBJECTDATAFORMAT * len(self.controls))()\n offset = 0\n for object_format, control in zip(object_formats, self.controls):\n object_format.dwOfs = offset\n object_format.dwType = control._type\n offset += 4\n\n format = dinput.DIDATAFORMAT()\n format.dwSize = ctypes.sizeof(format)\n format.dwObjSize = ctypes.sizeof(dinput.DIOBJECTDATAFORMAT)\n format.dwFlags = 0\n format.dwDataSize = offset\n format.dwNumObjs = len(object_formats)\n format.rgodf = ctypes.cast(ctypes.pointer(object_formats),\n dinput.LPDIOBJECTDATAFORMAT)\n self._device.SetDataFormat(format)\n\n prop = 
dinput.DIPROPDWORD()\n prop.diph.dwSize = ctypes.sizeof(prop)\n prop.diph.dwHeaderSize = ctypes.sizeof(prop.diph)\n prop.diph.dwObj = 0\n prop.diph.dwHow = dinput.DIPH_DEVICE\n prop.dwData = 64 * ctypes.sizeof(dinput.DIDATAFORMAT)\n self._device.SetProperty(dinput.DIPROP_BUFFERSIZE,\n ctypes.byref(prop.diph))\n\n def open(self, window=None, exclusive=False):\n if not self.controls:\n return\n\n if window is None:\n # Pick any open window, or the shadow window if no windows\n # have been created yet.\n window = pyglet.gl._shadow_window\n for window in app.windows:\n break\n\n flags = dinput.DISCL_BACKGROUND\n if exclusive:\n flags |= dinput.DISCL_EXCLUSIVE\n else:\n flags |= dinput.DISCL_NONEXCLUSIVE\n\n self._wait_object = _kernel32.CreateEventW(None, False, False, None)\n self._device.SetEventNotification(self._wait_object)\n app.platform_event_loop.add_wait_object(self._wait_object,\n self._dispatch_events)\n\n self._device.SetCooperativeLevel(window._hwnd, flags)\n self._device.Acquire()\n\n def close(self):\n if not self.controls:\n return\n\n app.platform_event_loop.remove_wait_object(self._wait_object)\n\n self._device.Unacquire()\n self._device.SetEventNotification(None)\n\n _kernel32.CloseHandle(self._wait_object)\n\n def get_controls(self):\n return self.controls\n\n def _dispatch_events(self):\n if not self.controls:\n return\n\n events = (dinput.DIDEVICEOBJECTDATA * 64)()\n n_events = win32.DWORD(len(events))\n self._device.GetDeviceData(ctypes.sizeof(dinput.DIDEVICEOBJECTDATA),\n ctypes.cast(ctypes.pointer(events),\n dinput.LPDIDEVICEOBJECTDATA),\n ctypes.byref(n_events),\n 0)\n for event in events[:n_events.value]:\n index = event.dwOfs // 4\n self.controls[index]._set_value(event.dwData)\n\n_i_dinput = None\n\ndef _init_directinput():\n global _i_dinput\n if _i_dinput:\n return\n\n _i_dinput = dinput.IDirectInput8()\n module = _kernel32.GetModuleHandleW(None)\n dinput.DirectInput8Create(module, dinput.DIRECTINPUT_VERSION,\n dinput.IID_IDirectInput8W,\n ctypes.byref(_i_dinput), None)\n\ndef get_devices(display=None):\n _init_directinput()\n _devices = []\n\n def _device_enum(device_instance, arg):\n device = dinput.IDirectInputDevice8()\n _i_dinput.CreateDevice(device_instance.contents.guidInstance,\n ctypes.byref(device),\n None)\n _devices.append(DirectInputDevice(display,\n device, device_instance.contents))\n\n return dinput.DIENUM_CONTINUE\n\n _i_dinput.EnumDevices(dinput.DI8DEVCLASS_ALL,\n dinput.LPDIENUMDEVICESCALLBACK(_device_enum),\n None, dinput.DIEDFL_ATTACHEDONLY)\n return _devices\n\ndef _create_joystick(device):\n if device._type in (dinput.DI8DEVTYPE_JOYSTICK,\n dinput.DI8DEVTYPE_GAMEPAD):\n return base.Joystick(device)\n\ndef get_joysticks(display=None):\n return filter(None, [_create_joystick(d) for d in get_devices(display)])\n", "path": "psychopy/hardware/joystick/pyglet_input/directinput.py"}]} | 2,679 | 147 |
gh_patches_debug_4607 | rasdani/github-patches | git_diff | CTFd__CTFd-1726 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Incorrect update alert in Admin panel
<!--
If this is a bug report please fill out the template below.
If this is a feature request please describe the behavior that you'd like to see.
-->
**Environment**:
- CTFd Version/Commit: 3.1.1
- Operating System: Ubuntu 20.4
- Web Browser and Version: Chrome 85
**What happened?**
The admin panel shows an alert: "A new CTFd version is available!", which links to "https://github.com/CTFd/CTFd/releases/tag/2.4.2". I encountered the issue with version 3.0.2 as well. After a complete reinstall and an upgrade to version 3.1.1, the problem persisted.
**What did you expect to happen?**
I expected no alert, as my CTFd version is the newest, and certainly newer than 2.4.2.
**How to reproduce your issue**
Go to the admin pages.
**Any associated stack traces or error logs**
No
</issue>
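For reference, the version comparison used by CTFd's update check behaves as expected for these numbers, which suggests the banner reflects a stale stored `version_latest` value rather than a faulty comparison (a plausible reading, not confirmed here):

```python
from distutils.version import StrictVersion

print(StrictVersion("2.4.2") > StrictVersion("3.1.1"))  # False: no banner should be set
print(StrictVersion("3.1.1") > StrictVersion("3.1.1"))  # False: equal versions clear it
```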
<code>
[start of CTFd/utils/updates/__init__.py]
1 import sys
2 import time
3 from distutils.version import StrictVersion
4 from platform import python_version
5
6 import requests
7 from flask import current_app as app
8
9 from CTFd.models import Challenges, Teams, Users, db
10 from CTFd.utils import get_app_config, get_config, set_config
11 from CTFd.utils.config import is_setup
12 from CTFd.utils.crypto import sha256
13
14
15 def update_check(force=False):
16 """
17 Makes a request to ctfd.io to check if there is a new version of CTFd available. The service is provided in return
18 for users opting in to anonymous usage data collection. Users can opt-out of update checks by specifying
19 UPDATE_CHECK = False in config.py
20
21 :param force:
22 :return:
23 """
24 # If UPDATE_CHECK is disabled don't check for updates at all.
25 if app.config.get("UPDATE_CHECK") is False:
26 return
27
28 # Don't do an update check if not setup
29 if is_setup() is False:
30 return
31
32 # Get when we should check for updates next.
33 next_update_check = get_config("next_update_check") or 0
34
35 # If we have passed our saved time or we are forcing we should check.
36 update = (next_update_check < time.time()) or force
37
38 if update:
39 try:
40 name = str(get_config("ctf_name")) or ""
41 params = {
42 "ctf_id": sha256(name),
43 "current": app.VERSION,
44 "python_version_raw": sys.hexversion,
45 "python_version": python_version(),
46 "db_driver": db.session.bind.dialect.name,
47 "challenge_count": Challenges.query.count(),
48 "user_mode": get_config("user_mode"),
49 "user_count": Users.query.count(),
50 "team_count": Teams.query.count(),
51 "theme": get_config("ctf_theme"),
52 "upload_provider": get_app_config("UPLOAD_PROVIDER"),
53 "channel": app.CHANNEL,
54 }
55 check = requests.get(
56 "https://versioning.ctfd.io/check", params=params, timeout=0.1
57 ).json()
58 except requests.exceptions.RequestException:
59 pass
60 except ValueError:
61 pass
62 else:
63 try:
64 latest = check["resource"]["tag"]
65 html_url = check["resource"]["html_url"]
66 if StrictVersion(latest) > StrictVersion(app.VERSION):
67 set_config("version_latest", html_url)
68 elif StrictVersion(latest) <= StrictVersion(app.VERSION):
69 set_config("version_latest", None)
70 next_update_check_time = check["resource"].get(
71 "next", int(time.time() + 43200)
72 )
73 set_config("next_update_check", next_update_check_time)
74 except KeyError:
75 set_config("version_latest", None)
76
[end of CTFd/utils/updates/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/CTFd/utils/updates/__init__.py b/CTFd/utils/updates/__init__.py
--- a/CTFd/utils/updates/__init__.py
+++ b/CTFd/utils/updates/__init__.py
@@ -53,7 +53,7 @@
"channel": app.CHANNEL,
}
check = requests.get(
- "https://versioning.ctfd.io/check", params=params, timeout=0.1
+ "https://versioning.ctfd.io/check", params=params, timeout=3
).json()
except requests.exceptions.RequestException:
pass
| {"golden_diff": "diff --git a/CTFd/utils/updates/__init__.py b/CTFd/utils/updates/__init__.py\n--- a/CTFd/utils/updates/__init__.py\n+++ b/CTFd/utils/updates/__init__.py\n@@ -53,7 +53,7 @@\n \"channel\": app.CHANNEL,\n }\n check = requests.get(\n- \"https://versioning.ctfd.io/check\", params=params, timeout=0.1\n+ \"https://versioning.ctfd.io/check\", params=params, timeout=3\n ).json()\n except requests.exceptions.RequestException:\n pass\n", "issue": "Incorrect update alert in Admin panel\n<!--\r\nIf this is a bug report please fill out the template below.\r\n\r\nIf this is a feature request please describe the behavior that you'd like to see.\r\n-->\r\n\r\n**Environment**:\r\n\r\n- CTFd Version/Commit: 3.1.1\r\n- Operating System: Ubuntu 20.4\r\n- Web Browser and Version: Chrome 85\r\n\r\n**What happened?**\r\nThe admin panel shows an alert: \"A new CTFd version is available!\", which links to \"https://github.com/CTFd/CTFd/releases/tag/2.4.2\". I encountered the issue with version 3.0.2. as well. After complete reinstall and upgrade to version 3.1.1 the problem persisted\r\n\r\n**What did you expect to happen?**\r\nI expected no alert, as my CTFd version is the newest, and certainly newer than 2.4.2.\r\n\r\n**How to reproduce your issue**\r\nGo to the admin pages.\r\n\r\n**Any associated stack traces or error logs**\r\nNo\n", "before_files": [{"content": "import sys\nimport time\nfrom distutils.version import StrictVersion\nfrom platform import python_version\n\nimport requests\nfrom flask import current_app as app\n\nfrom CTFd.models import Challenges, Teams, Users, db\nfrom CTFd.utils import get_app_config, get_config, set_config\nfrom CTFd.utils.config import is_setup\nfrom CTFd.utils.crypto import sha256\n\n\ndef update_check(force=False):\n \"\"\"\n Makes a request to ctfd.io to check if there is a new version of CTFd available. The service is provided in return\n for users opting in to anonymous usage data collection. 
Users can opt-out of update checks by specifying\n UPDATE_CHECK = False in config.py\n\n :param force:\n :return:\n \"\"\"\n # If UPDATE_CHECK is disabled don't check for updates at all.\n if app.config.get(\"UPDATE_CHECK\") is False:\n return\n\n # Don't do an update check if not setup\n if is_setup() is False:\n return\n\n # Get when we should check for updates next.\n next_update_check = get_config(\"next_update_check\") or 0\n\n # If we have passed our saved time or we are forcing we should check.\n update = (next_update_check < time.time()) or force\n\n if update:\n try:\n name = str(get_config(\"ctf_name\")) or \"\"\n params = {\n \"ctf_id\": sha256(name),\n \"current\": app.VERSION,\n \"python_version_raw\": sys.hexversion,\n \"python_version\": python_version(),\n \"db_driver\": db.session.bind.dialect.name,\n \"challenge_count\": Challenges.query.count(),\n \"user_mode\": get_config(\"user_mode\"),\n \"user_count\": Users.query.count(),\n \"team_count\": Teams.query.count(),\n \"theme\": get_config(\"ctf_theme\"),\n \"upload_provider\": get_app_config(\"UPLOAD_PROVIDER\"),\n \"channel\": app.CHANNEL,\n }\n check = requests.get(\n \"https://versioning.ctfd.io/check\", params=params, timeout=0.1\n ).json()\n except requests.exceptions.RequestException:\n pass\n except ValueError:\n pass\n else:\n try:\n latest = check[\"resource\"][\"tag\"]\n html_url = check[\"resource\"][\"html_url\"]\n if StrictVersion(latest) > StrictVersion(app.VERSION):\n set_config(\"version_latest\", html_url)\n elif StrictVersion(latest) <= StrictVersion(app.VERSION):\n set_config(\"version_latest\", None)\n next_update_check_time = check[\"resource\"].get(\n \"next\", int(time.time() + 43200)\n )\n set_config(\"next_update_check\", next_update_check_time)\n except KeyError:\n set_config(\"version_latest\", None)\n", "path": "CTFd/utils/updates/__init__.py"}]} | 1,506 | 134 |
gh_patches_debug_35103 | rasdani/github-patches | git_diff | comic__grand-challenge.org-1001 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
libvips chokes on dzi conversion with sparse inputs
Upload a tif file which is sparse. In `tiff.py`, line 212:
```
image = pyvips.Image.new_from_file(
str(tiff_file.path.absolute()), access="sequential"
)
pyvips.Image.dzsave(
image, dzi_output, tile_size=settings.DZI_TILE_SIZE
)
```
The `dzsave` will fail with empty tiles, eg:
```
["Image can't be converted to dzi: unable to call dzsave\n TIFFFillTile: 0: Invalid tile byte count, tile 1\nTIFFFillTile: 0: Invalid tile byte count, tile 3\nTIFFFillTile: 0: Invalid tile byte count, tile 4\nTIFFFillTile: 0: Invalid tile byte count, tile 5\nTIFFFillTile: 0: Invalid tile byte count, tile 6\nTIFFFillTile: 0: Invalid tile byte count, tile 7\nTIFFFillTile: 0: Invalid tile byte count, tile 8\nTIFFFillTile: 0: Invalid tile byte count, tile 9\nTIFFFillTile: 0: Invalid tile byte count, tile 10\nTIFFFillTile: 0: Invalid tile byte count, tile 11\nTIFFFillTile: 0: Invalid tile byte count, tile 12\nTIFFFillTile: 0: Invalid tile byte count, tile 13\nTIFFFillTile: 0: Invalid tile byte count, tile 14\nTIFFFillTile: 0: Invalid tile byte count, tile 15\nTIFFFillTile: 0: Invalid tile byte count, tile 16\nTIFFFillTile: 0: Invalid tile byte count, tile 17\nTIFFFillTile: 0: Invalid tile byte count, tile 18\nTIFFFillTile: 0: Invalid tile byte count, tile 19\nTIFFFillTile: 0: Invalid tile byte count, tile 20\nTIFFFillTile: 0: Invalid tile byte count, tile 23\n"]
```
Test image shared with Miriam on slack.
</issue>
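A minimal reproduction sketch of the failing call, with the file names invented for illustration; the tile size of 254 is libvips's usual default and stands in for the project's `DZI_TILE_SIZE` setting.

```python
import pyvips


def try_dzsave(tiff_path: str, output_basename: str, tile_size: int = 254) -> None:
    image = pyvips.Image.new_from_file(tiff_path, access="sequential")
    try:
        image.dzsave(output_basename, tile_size=tile_size)
    except pyvips.Error as exc:
        # the TIFFFillTile "Invalid tile byte count" messages surface here
        print(f"dzsave failed for {tiff_path}: {exc}")


try_dzsave("sparse_example.tif", "sparse_example_dzi")
```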
<code>
[start of app/grandchallenge/cases/image_builders/tiff.py]
1 from pathlib import Path
2 from tempfile import TemporaryFile
3 from typing import NamedTuple
4 from uuid import uuid4
5
6 import pyvips
7 import tifffile
8 from django.conf import settings
9 from django.core.exceptions import ValidationError
10 from django.core.files import File
11
12 from grandchallenge.cases.image_builders import ImageBuilderResult
13 from grandchallenge.cases.models import FolderUpload, Image, ImageFile
14
15
16 class GrandChallengeTiffFileTags(NamedTuple):
17 image_width: int
18 image_height: int
19 resolution_levels: int
20 color_space: str
21
22
23 class GrandChallengeTiffFile(NamedTuple):
24 path: Path
25 tags: GrandChallengeTiffFileTags
26
27
28 def load_tiff_file(*, path: Path) -> GrandChallengeTiffFile:
29 """
30 Loads and validates a file using tifffile
31 :param path: The path to the potential tiff file
32 :return: A tiff file that can be used in the rest of grand challenge
33 """
34 try:
35 file = tifffile.TiffFile(str(path.absolute()))
36 except ValueError:
37 raise ValidationError("Image isn't a TIFF file")
38
39 tags = _validate_tifffile(pages=file.pages)
40
41 return GrandChallengeTiffFile(path=path, tags=tags)
42
43
44 def _validate_tifffile( # noqa: C901
45 *, pages: tifffile.tifffile.TiffPages
46 ) -> GrandChallengeTiffFileTags:
47 """
48 Validates a tiff file loaded with tifffile for use in grand challenge
49 :param pages: The pages and tags from tiffile
50 :return: The extracted tags that are needed by the rest of the framework
51 """
52 required_tile_tags = ("TileOffsets", "TileByteCounts")
53
54 forbidden_description_tags = ("dicom", "xml")
55
56 tags = pages[0].tags
57
58 # Checks if the image description exists,
59 # if so, ensure there's no DICOM or XML data
60 try:
61 image_description = str(tags["ImageDescription"].value).lower()
62 for forbidden in forbidden_description_tags:
63 if forbidden in image_description:
64 raise ValidationError(
65 "Image contains unauthorized information"
66 )
67 except KeyError:
68 pass
69
70 # Fails if the image doesn't have all required tile tags
71 if not all(tag in tags for tag in required_tile_tags):
72 raise ValidationError("Image has incomplete tile information")
73
74 # Fails if the image only has a single resolution page
75 resolution_levels = len(pages)
76 if resolution_levels == 1:
77 raise ValidationError("Image only has a single resolution level")
78
79 # Fails if the image doesn't have the chunky format
80 if str(tags["PlanarConfiguration"].value) != "PLANARCONFIG.CONTIG":
81 raise ValidationError(
82 "Image planar configuration isn't configured as 'Chunky' format"
83 )
84
85 # Fails if the color space isn't supported
86 try:
87 color_space = get_color_space(
88 str(tags["PhotometricInterpretation"].value)
89 )
90 except KeyError:
91 raise ValidationError("Image lacks color space information")
92
93 # Fails if the amount of bytes per sample doesn't correspond to the
94 # colour space
95 tif_color_channels = tags["SamplesPerPixel"].value
96 if Image.COLOR_SPACE_COMPONENTS[color_space] != tif_color_channels:
97 raise ValidationError("Image contains invalid amount of channels.")
98
99 try:
100 image_width = tags["ImageWidth"].value
101 image_height = tags["ImageLength"].value
102 except KeyError:
103 raise ValidationError("Missing tags in tiff file")
104
105 return GrandChallengeTiffFileTags(
106 image_width=image_width,
107 image_height=image_height,
108 color_space=color_space,
109 resolution_levels=resolution_levels,
110 )
111
112
113 def get_color_space(color_space_string) -> Image.COLOR_SPACES:
114 color_space_string = color_space_string.split(".")[1].upper()
115
116 if color_space_string == "MINISBLACK":
117 color_space = Image.COLOR_SPACE_GRAY
118 else:
119 try:
120 color_space = dict(Image.COLOR_SPACES)[color_space_string]
121 except KeyError:
122 raise ValidationError("Invalid color space")
123
124 return color_space
125
126
127 def image_builder_tiff(path: Path) -> ImageBuilderResult:
128 new_images = []
129 new_image_files = []
130 consumed_files = set()
131 invalid_file_errors = {}
132 new_folder_upload = []
133
134 for file_path in path.iterdir():
135 pk = uuid4()
136
137 try:
138 tiff_file = load_tiff_file(path=file_path)
139 dzi_output = create_dzi_images(tiff_file=tiff_file, pk=pk)
140 except ValidationError as e:
141 invalid_file_errors[file_path.name] = str(e)
142 continue
143
144 image = create_tiff_image_entry(tiff_file=tiff_file, pk=pk)
145
146 temp_file = TemporaryFile()
147 with open(tiff_file.path.absolute(), "rb") as open_file:
148 buffer = True
149 while buffer:
150 buffer = open_file.read(1024)
151 temp_file.write(buffer)
152
153 new_image_files.append(
154 ImageFile(
155 image=image,
156 image_type=ImageFile.IMAGE_TYPE_TIFF,
157 file=File(temp_file, name=f"{image.pk}.tif"),
158 )
159 )
160
161 temp_dzi_file = TemporaryFile()
162 with open(dzi_output + ".dzi", "rb") as open_file:
163 buffer = True
164 while buffer:
165 buffer = open_file.read(1024)
166 temp_dzi_file.write(buffer)
167
168 new_image_files.append(
169 ImageFile(
170 image=image,
171 image_type=ImageFile.IMAGE_TYPE_DZI,
172 file=File(temp_dzi_file, name=f"{image.pk}.dzi"),
173 )
174 )
175
176 dzi_folder_upload = FolderUpload(
177 folder=dzi_output + "_files", image=image
178 )
179 new_images.append(image)
180 consumed_files.add(tiff_file.path.name)
181 new_folder_upload.append(dzi_folder_upload)
182
183 return ImageBuilderResult(
184 consumed_files=consumed_files,
185 file_errors_map=invalid_file_errors,
186 new_images=new_images,
187 new_image_files=new_image_files,
188 new_folder_upload=new_folder_upload,
189 )
190
191
192 def create_tiff_image_entry(*, tiff_file: GrandChallengeTiffFile, pk) -> Image:
193 # Builds a new Image model item
194 return Image(
195 pk=pk,
196 name=tiff_file.path.name,
197 width=tiff_file.tags.image_width,
198 height=tiff_file.tags.image_height,
199 depth=1,
200 resolution_levels=tiff_file.tags.resolution_levels,
201 color_space=tiff_file.tags.color_space,
202 eye_choice=Image.EYE_UNKNOWN,
203 stereoscopic_choice=Image.STEREOSCOPIC_UNKNOWN,
204 field_of_view=Image.FOV_UNKNOWN,
205 )
206
207
208 def create_dzi_images(*, tiff_file: GrandChallengeTiffFile, pk) -> str:
209 # Creates a dzi file(out.dzi) and corresponding tiles in folder {pk}_files
210 dzi_output = str(tiff_file.path.parent / str(pk))
211 try:
212 image = pyvips.Image.new_from_file(
213 str(tiff_file.path.absolute()), access="sequential"
214 )
215
216 pyvips.Image.dzsave(
217 image, dzi_output, tile_size=settings.DZI_TILE_SIZE
218 )
219 except Exception as e:
220 raise ValidationError("Image can't be converted to dzi: " + str(e))
221
222 return dzi_output
223
[end of app/grandchallenge/cases/image_builders/tiff.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/grandchallenge/cases/image_builders/tiff.py b/app/grandchallenge/cases/image_builders/tiff.py
--- a/app/grandchallenge/cases/image_builders/tiff.py
+++ b/app/grandchallenge/cases/image_builders/tiff.py
@@ -133,14 +133,18 @@
for file_path in path.iterdir():
pk = uuid4()
-
+ dzi_output = None
try:
tiff_file = load_tiff_file(path=file_path)
- dzi_output = create_dzi_images(tiff_file=tiff_file, pk=pk)
except ValidationError as e:
invalid_file_errors[file_path.name] = str(e)
continue
+ try:
+ dzi_output = create_dzi_images(tiff_file=tiff_file, pk=pk)
+ except ValidationError as e:
+ invalid_file_errors[file_path.name] = str(e)
+
image = create_tiff_image_entry(tiff_file=tiff_file, pk=pk)
temp_file = TemporaryFile()
@@ -158,27 +162,29 @@
)
)
- temp_dzi_file = TemporaryFile()
- with open(dzi_output + ".dzi", "rb") as open_file:
- buffer = True
- while buffer:
- buffer = open_file.read(1024)
- temp_dzi_file.write(buffer)
+ if dzi_output:
+ temp_dzi_file = TemporaryFile()
+ with open(dzi_output + ".dzi", "rb") as open_file:
+ buffer = True
+ while buffer:
+ buffer = open_file.read(1024)
+ temp_dzi_file.write(buffer)
+
+ new_image_files.append(
+ ImageFile(
+ image=image,
+ image_type=ImageFile.IMAGE_TYPE_DZI,
+ file=File(temp_dzi_file, name=f"{image.pk}.dzi"),
+ )
+ )
- new_image_files.append(
- ImageFile(
- image=image,
- image_type=ImageFile.IMAGE_TYPE_DZI,
- file=File(temp_dzi_file, name=f"{image.pk}.dzi"),
+ dzi_folder_upload = FolderUpload(
+ folder=dzi_output + "_files", image=image
)
- )
+ new_folder_upload.append(dzi_folder_upload)
- dzi_folder_upload = FolderUpload(
- folder=dzi_output + "_files", image=image
- )
new_images.append(image)
consumed_files.add(tiff_file.path.name)
- new_folder_upload.append(dzi_folder_upload)
return ImageBuilderResult(
consumed_files=consumed_files,
| {"golden_diff": "diff --git a/app/grandchallenge/cases/image_builders/tiff.py b/app/grandchallenge/cases/image_builders/tiff.py\n--- a/app/grandchallenge/cases/image_builders/tiff.py\n+++ b/app/grandchallenge/cases/image_builders/tiff.py\n@@ -133,14 +133,18 @@\n \n for file_path in path.iterdir():\n pk = uuid4()\n-\n+ dzi_output = None\n try:\n tiff_file = load_tiff_file(path=file_path)\n- dzi_output = create_dzi_images(tiff_file=tiff_file, pk=pk)\n except ValidationError as e:\n invalid_file_errors[file_path.name] = str(e)\n continue\n \n+ try:\n+ dzi_output = create_dzi_images(tiff_file=tiff_file, pk=pk)\n+ except ValidationError as e:\n+ invalid_file_errors[file_path.name] = str(e)\n+\n image = create_tiff_image_entry(tiff_file=tiff_file, pk=pk)\n \n temp_file = TemporaryFile()\n@@ -158,27 +162,29 @@\n )\n )\n \n- temp_dzi_file = TemporaryFile()\n- with open(dzi_output + \".dzi\", \"rb\") as open_file:\n- buffer = True\n- while buffer:\n- buffer = open_file.read(1024)\n- temp_dzi_file.write(buffer)\n+ if dzi_output:\n+ temp_dzi_file = TemporaryFile()\n+ with open(dzi_output + \".dzi\", \"rb\") as open_file:\n+ buffer = True\n+ while buffer:\n+ buffer = open_file.read(1024)\n+ temp_dzi_file.write(buffer)\n+\n+ new_image_files.append(\n+ ImageFile(\n+ image=image,\n+ image_type=ImageFile.IMAGE_TYPE_DZI,\n+ file=File(temp_dzi_file, name=f\"{image.pk}.dzi\"),\n+ )\n+ )\n \n- new_image_files.append(\n- ImageFile(\n- image=image,\n- image_type=ImageFile.IMAGE_TYPE_DZI,\n- file=File(temp_dzi_file, name=f\"{image.pk}.dzi\"),\n+ dzi_folder_upload = FolderUpload(\n+ folder=dzi_output + \"_files\", image=image\n )\n- )\n+ new_folder_upload.append(dzi_folder_upload)\n \n- dzi_folder_upload = FolderUpload(\n- folder=dzi_output + \"_files\", image=image\n- )\n new_images.append(image)\n consumed_files.add(tiff_file.path.name)\n- new_folder_upload.append(dzi_folder_upload)\n \n return ImageBuilderResult(\n consumed_files=consumed_files,\n", "issue": "libvips chokes on dzi conversion with sparse inputs\nUpload a tif file which is sparse, in `tiff.py` line 212:\r\n\r\n```\r\nimage = pyvips.Image.new_from_file(\r\n str(tiff_file.path.absolute()), access=\"sequential\"\r\n )\r\n\r\n pyvips.Image.dzsave(\r\n image, dzi_output, tile_size=settings.DZI_TILE_SIZE\r\n )\r\n```\r\n\r\nThe `dzsave` will fail with empty tiles, eg:\r\n\r\n```\r\n[\"Image can't be converted to dzi: unable to call dzsave\\n TIFFFillTile: 0: Invalid tile byte count, tile 1\\nTIFFFillTile: 0: Invalid tile byte count, tile 3\\nTIFFFillTile: 0: Invalid tile byte count, tile 4\\nTIFFFillTile: 0: Invalid tile byte count, tile 5\\nTIFFFillTile: 0: Invalid tile byte count, tile 6\\nTIFFFillTile: 0: Invalid tile byte count, tile 7\\nTIFFFillTile: 0: Invalid tile byte count, tile 8\\nTIFFFillTile: 0: Invalid tile byte count, tile 9\\nTIFFFillTile: 0: Invalid tile byte count, tile 10\\nTIFFFillTile: 0: Invalid tile byte count, tile 11\\nTIFFFillTile: 0: Invalid tile byte count, tile 12\\nTIFFFillTile: 0: Invalid tile byte count, tile 13\\nTIFFFillTile: 0: Invalid tile byte count, tile 14\\nTIFFFillTile: 0: Invalid tile byte count, tile 15\\nTIFFFillTile: 0: Invalid tile byte count, tile 16\\nTIFFFillTile: 0: Invalid tile byte count, tile 17\\nTIFFFillTile: 0: Invalid tile byte count, tile 18\\nTIFFFillTile: 0: Invalid tile byte count, tile 19\\nTIFFFillTile: 0: Invalid tile byte count, tile 20\\nTIFFFillTile: 0: Invalid tile byte count, tile 23\\n\"]\r\n```\r\n\r\nTest image shared with Miriam on slack.\n", "before_files": [{"content": "from 
pathlib import Path\nfrom tempfile import TemporaryFile\nfrom typing import NamedTuple\nfrom uuid import uuid4\n\nimport pyvips\nimport tifffile\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError\nfrom django.core.files import File\n\nfrom grandchallenge.cases.image_builders import ImageBuilderResult\nfrom grandchallenge.cases.models import FolderUpload, Image, ImageFile\n\n\nclass GrandChallengeTiffFileTags(NamedTuple):\n image_width: int\n image_height: int\n resolution_levels: int\n color_space: str\n\n\nclass GrandChallengeTiffFile(NamedTuple):\n path: Path\n tags: GrandChallengeTiffFileTags\n\n\ndef load_tiff_file(*, path: Path) -> GrandChallengeTiffFile:\n \"\"\"\n Loads and validates a file using tifffile\n :param path: The path to the potential tiff file\n :return: A tiff file that can be used in the rest of grand challenge\n \"\"\"\n try:\n file = tifffile.TiffFile(str(path.absolute()))\n except ValueError:\n raise ValidationError(\"Image isn't a TIFF file\")\n\n tags = _validate_tifffile(pages=file.pages)\n\n return GrandChallengeTiffFile(path=path, tags=tags)\n\n\ndef _validate_tifffile( # noqa: C901\n *, pages: tifffile.tifffile.TiffPages\n) -> GrandChallengeTiffFileTags:\n \"\"\"\n Validates a tiff file loaded with tifffile for use in grand challenge\n :param pages: The pages and tags from tiffile\n :return: The extracted tags that are needed by the rest of the framework\n \"\"\"\n required_tile_tags = (\"TileOffsets\", \"TileByteCounts\")\n\n forbidden_description_tags = (\"dicom\", \"xml\")\n\n tags = pages[0].tags\n\n # Checks if the image description exists,\n # if so, ensure there's no DICOM or XML data\n try:\n image_description = str(tags[\"ImageDescription\"].value).lower()\n for forbidden in forbidden_description_tags:\n if forbidden in image_description:\n raise ValidationError(\n \"Image contains unauthorized information\"\n )\n except KeyError:\n pass\n\n # Fails if the image doesn't have all required tile tags\n if not all(tag in tags for tag in required_tile_tags):\n raise ValidationError(\"Image has incomplete tile information\")\n\n # Fails if the image only has a single resolution page\n resolution_levels = len(pages)\n if resolution_levels == 1:\n raise ValidationError(\"Image only has a single resolution level\")\n\n # Fails if the image doesn't have the chunky format\n if str(tags[\"PlanarConfiguration\"].value) != \"PLANARCONFIG.CONTIG\":\n raise ValidationError(\n \"Image planar configuration isn't configured as 'Chunky' format\"\n )\n\n # Fails if the color space isn't supported\n try:\n color_space = get_color_space(\n str(tags[\"PhotometricInterpretation\"].value)\n )\n except KeyError:\n raise ValidationError(\"Image lacks color space information\")\n\n # Fails if the amount of bytes per sample doesn't correspond to the\n # colour space\n tif_color_channels = tags[\"SamplesPerPixel\"].value\n if Image.COLOR_SPACE_COMPONENTS[color_space] != tif_color_channels:\n raise ValidationError(\"Image contains invalid amount of channels.\")\n\n try:\n image_width = tags[\"ImageWidth\"].value\n image_height = tags[\"ImageLength\"].value\n except KeyError:\n raise ValidationError(\"Missing tags in tiff file\")\n\n return GrandChallengeTiffFileTags(\n image_width=image_width,\n image_height=image_height,\n color_space=color_space,\n resolution_levels=resolution_levels,\n )\n\n\ndef get_color_space(color_space_string) -> Image.COLOR_SPACES:\n color_space_string = color_space_string.split(\".\")[1].upper()\n\n if color_space_string 
== \"MINISBLACK\":\n color_space = Image.COLOR_SPACE_GRAY\n else:\n try:\n color_space = dict(Image.COLOR_SPACES)[color_space_string]\n except KeyError:\n raise ValidationError(\"Invalid color space\")\n\n return color_space\n\n\ndef image_builder_tiff(path: Path) -> ImageBuilderResult:\n new_images = []\n new_image_files = []\n consumed_files = set()\n invalid_file_errors = {}\n new_folder_upload = []\n\n for file_path in path.iterdir():\n pk = uuid4()\n\n try:\n tiff_file = load_tiff_file(path=file_path)\n dzi_output = create_dzi_images(tiff_file=tiff_file, pk=pk)\n except ValidationError as e:\n invalid_file_errors[file_path.name] = str(e)\n continue\n\n image = create_tiff_image_entry(tiff_file=tiff_file, pk=pk)\n\n temp_file = TemporaryFile()\n with open(tiff_file.path.absolute(), \"rb\") as open_file:\n buffer = True\n while buffer:\n buffer = open_file.read(1024)\n temp_file.write(buffer)\n\n new_image_files.append(\n ImageFile(\n image=image,\n image_type=ImageFile.IMAGE_TYPE_TIFF,\n file=File(temp_file, name=f\"{image.pk}.tif\"),\n )\n )\n\n temp_dzi_file = TemporaryFile()\n with open(dzi_output + \".dzi\", \"rb\") as open_file:\n buffer = True\n while buffer:\n buffer = open_file.read(1024)\n temp_dzi_file.write(buffer)\n\n new_image_files.append(\n ImageFile(\n image=image,\n image_type=ImageFile.IMAGE_TYPE_DZI,\n file=File(temp_dzi_file, name=f\"{image.pk}.dzi\"),\n )\n )\n\n dzi_folder_upload = FolderUpload(\n folder=dzi_output + \"_files\", image=image\n )\n new_images.append(image)\n consumed_files.add(tiff_file.path.name)\n new_folder_upload.append(dzi_folder_upload)\n\n return ImageBuilderResult(\n consumed_files=consumed_files,\n file_errors_map=invalid_file_errors,\n new_images=new_images,\n new_image_files=new_image_files,\n new_folder_upload=new_folder_upload,\n )\n\n\ndef create_tiff_image_entry(*, tiff_file: GrandChallengeTiffFile, pk) -> Image:\n # Builds a new Image model item\n return Image(\n pk=pk,\n name=tiff_file.path.name,\n width=tiff_file.tags.image_width,\n height=tiff_file.tags.image_height,\n depth=1,\n resolution_levels=tiff_file.tags.resolution_levels,\n color_space=tiff_file.tags.color_space,\n eye_choice=Image.EYE_UNKNOWN,\n stereoscopic_choice=Image.STEREOSCOPIC_UNKNOWN,\n field_of_view=Image.FOV_UNKNOWN,\n )\n\n\ndef create_dzi_images(*, tiff_file: GrandChallengeTiffFile, pk) -> str:\n # Creates a dzi file(out.dzi) and corresponding tiles in folder {pk}_files\n dzi_output = str(tiff_file.path.parent / str(pk))\n try:\n image = pyvips.Image.new_from_file(\n str(tiff_file.path.absolute()), access=\"sequential\"\n )\n\n pyvips.Image.dzsave(\n image, dzi_output, tile_size=settings.DZI_TILE_SIZE\n )\n except Exception as e:\n raise ValidationError(\"Image can't be converted to dzi: \" + str(e))\n\n return dzi_output\n", "path": "app/grandchallenge/cases/image_builders/tiff.py"}]} | 3,203 | 585 |
gh_patches_debug_32432 | rasdani/github-patches | git_diff | optuna__optuna-3087 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add missing sampler and pruner classes to a tutorial page
<!-- Please write a clear and concise description of what content in https://optuna.readthedocs.io/ is an issue. -->
The [Efficient Optimization Algorithms](https://optuna.readthedocs.io/en/latest/tutorial/10_key_features/003_efficient_optimization_algorithms.html) tutorial provides lists of samplers/pruners implemented in Optuna. However, a few classes are not on the list. It would be great to mention them comprehensively in the tutorial pages for Optuna users.
More concretely, please add the following entries to the tutorial page.
## Samplers
- `optuna.samplers.PartialFixedSampler`
- `optuna.samplers.NSGAIISampler`
## Pruners
- `optuna.pruners.PatientPruner`
- `optuna.pruners.PercentilePruner`
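
For context, a minimal sketch of how these classes are instantiated (argument values here are illustrative, not prescriptive):

```python
import optuna

# Samplers currently missing from the tutorial list
partial_fixed = optuna.samplers.PartialFixedSampler(
    fixed_params={"x": 0.5}, base_sampler=optuna.samplers.TPESampler()
)
nsga2 = optuna.samplers.NSGAIISampler()  # genetic algorithm for multi-objective studies

# Pruners currently missing from the tutorial list
patient = optuna.pruners.PatientPruner(optuna.pruners.MedianPruner(), patience=3)
percentile = optuna.pruners.PercentilePruner(25.0)

study = optuna.create_study(sampler=partial_fixed, pruner=patient)
```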
</issue>
<code>
[start of tutorial/10_key_features/003_efficient_optimization_algorithms.py]
1 """
2 .. _pruning:
3
4 Efficient Optimization Algorithms
5 =================================
6
7 Optuna enables efficient hyperparameter optimization by
8 adopting state-of-the-art algorithms for sampling hyperparameters and
9 pruning efficiently unpromising trials.
10
11 Sampling Algorithms
12 -------------------
13
14 Samplers basically continually narrow down the search space using the records of suggested parameter values and evaluated objective values,
15 leading to an optimal search space which gives off parameters leading to better objective values.
16 More detailed explanation of how samplers suggest parameters is in :class:`optuna.samplers.BaseSampler`.
17
18 Optuna provides the following sampling algorithms:
19
20 - Tree-structured Parzen Estimator algorithm implemented in :class:`optuna.samplers.TPESampler`
21
22 - CMA-ES based algorithm implemented in :class:`optuna.samplers.CmaEsSampler`
23
24 - Grid Search implemented in :class:`optuna.samplers.GridSampler`
25
26 - Random Search implemented in :class:`optuna.samplers.RandomSampler`
27
28 The default sampler is :class:`optuna.samplers.TPESampler`.
29
30 Switching Samplers
31 ------------------
32
33 """
34
35 import optuna
36
37
38 ###################################################################################################
39 # By default, Optuna uses :class:`~optuna.samplers.TPESampler` as follows.
40
41 study = optuna.create_study()
42 print(f"Sampler is {study.sampler.__class__.__name__}")
43
44 ###################################################################################################
45 # If you want to use different samplers for example :class:`~optuna.samplers.RandomSampler`
46 # and :class:`~optuna.samplers.CmaEsSampler`,
47
48 study = optuna.create_study(sampler=optuna.samplers.RandomSampler())
49 print(f"Sampler is {study.sampler.__class__.__name__}")
50
51 study = optuna.create_study(sampler=optuna.samplers.CmaEsSampler())
52 print(f"Sampler is {study.sampler.__class__.__name__}")
53
54
55 ###################################################################################################
56 # Pruning Algorithms
57 # ------------------
58 #
59 # ``Pruners`` automatically stop unpromising trials at the early stages of the training (a.k.a., automated early-stopping).
60 #
61 # Optuna provides the following pruning algorithms:
62 #
63 # - Asynchronous Successive Halving algorithm implemented in :class:`optuna.pruners.SuccessiveHalvingPruner`
64 #
65 # - Hyperband algorithm implemented in :class:`optuna.pruners.HyperbandPruner`
66 #
67 # - Median pruning algorithm implemented in :class:`optuna.pruners.MedianPruner`
68 #
69 # - Threshold pruning algorithm implemented in :class:`optuna.pruners.ThresholdPruner`
70 #
71 # We use :class:`optuna.pruners.MedianPruner` in most examples,
72 # though basically it is outperformed by :class:`optuna.pruners.SuccessiveHalvingPruner` and
73 # :class:`optuna.pruners.HyperbandPruner` as in `this benchmark result <https://github.com/optuna/optuna/wiki/Benchmarks-with-Kurobako>`_.
74 #
75 #
76 # Activating Pruners
77 # ------------------
78 # To turn on the pruning feature, you need to call :func:`~optuna.trial.Trial.report` and :func:`~optuna.trial.Trial.should_prune` after each step of the iterative training.
79 # :func:`~optuna.trial.Trial.report` periodically monitors the intermediate objective values.
80 # :func:`~optuna.trial.Trial.should_prune` decides termination of the trial that does not meet a predefined condition.
81 #
82 # We would recommend using integration modules for major machine learning frameworks.
83 # Exclusive list is :mod:`optuna.integration` and usecases are available in `optuna/examples <https://github.com/optuna/optuna-examples/>`_.
84
85
86 import logging
87 import sys
88
89 import sklearn.datasets
90 import sklearn.linear_model
91 import sklearn.model_selection
92
93
94 def objective(trial):
95 iris = sklearn.datasets.load_iris()
96 classes = list(set(iris.target))
97 train_x, valid_x, train_y, valid_y = sklearn.model_selection.train_test_split(
98 iris.data, iris.target, test_size=0.25, random_state=0
99 )
100
101 alpha = trial.suggest_float("alpha", 1e-5, 1e-1, log=True)
102 clf = sklearn.linear_model.SGDClassifier(alpha=alpha)
103
104 for step in range(100):
105 clf.partial_fit(train_x, train_y, classes=classes)
106
107 # Report intermediate objective value.
108 intermediate_value = 1.0 - clf.score(valid_x, valid_y)
109 trial.report(intermediate_value, step)
110
111 # Handle pruning based on the intermediate value.
112 if trial.should_prune():
113 raise optuna.TrialPruned()
114
115 return 1.0 - clf.score(valid_x, valid_y)
116
117
118 ###################################################################################################
119 # Set up the median stopping rule as the pruning condition.
120
121 # Add stream handler of stdout to show the messages
122 optuna.logging.get_logger("optuna").addHandler(logging.StreamHandler(sys.stdout))
123 study = optuna.create_study(pruner=optuna.pruners.MedianPruner())
124 study.optimize(objective, n_trials=20)
125
126 ###################################################################################################
127 # As you can see, several trials were pruned (stopped) before they finished all of the iterations.
128 # The format of message is ``"Trial <Trial Number> pruned."``.
129
130 ###################################################################################################
131 # Which Sampler and Pruner Should be Used?
132 # ----------------------------------------
133 #
134 # From the benchmark results which are available at `optuna/optuna - wiki "Benchmarks with Kurobako" <https://github.com/optuna/optuna/wiki/Benchmarks-with-Kurobako>`_, at least for not deep learning tasks, we would say that
135 #
136 # * For :class:`optuna.samplers.RandomSampler`, :class:`optuna.pruners.MedianPruner` is the best.
137 # * For :class:`optuna.samplers.TPESampler`, :class:`optuna.pruners.HyperbandPruner` is the best.
138 #
139 # However, note that the benchmark is not deep learning.
140 # For deep learning tasks,
141 # consult the below table.
142 # This table is from the `Ozaki et al., Hyperparameter Optimization Methods: Overview and Characteristics, in IEICE Trans, Vol.J103-D No.9 pp.615-631, 2020 <https://doi.org/10.14923/transinfj.2019JDR0003>`_ paper,
143 # which is written in Japanese.
144 #
145 # +---------------------------+-----------------------------------------+---------------------------------------------------------------+
146 # | Parallel Compute Resource | Categorical/Conditional Hyperparameters | Recommended Algorithms |
147 # +===========================+=========================================+===============================================================+
148 # | Limited | No | TPE. GP-EI if search space is low-dimensional and continuous. |
149 # + +-----------------------------------------+---------------------------------------------------------------+
150 # | | Yes | TPE. GP-EI if search space is low-dimensional and continuous |
151 # +---------------------------+-----------------------------------------+---------------------------------------------------------------+
152 # | Sufficient | No | CMA-ES, Random Search |
153 # + +-----------------------------------------+---------------------------------------------------------------+
154 # | | Yes | Random Search or Genetic Algorithm |
155 # +---------------------------+-----------------------------------------+---------------------------------------------------------------+
156 #
157
158 ###################################################################################################
159 # Integration Modules for Pruning
160 # -------------------------------
161 # To implement pruning mechanism in much simpler forms, Optuna provides integration modules for the following libraries.
162 #
163 # For the complete list of Optuna's integration modules, see :mod:`optuna.integration`.
164 #
165 # For example, :class:`~optuna.integration.XGBoostPruningCallback` introduces pruning without directly changing the logic of training iteration.
166 # (See also `example <https://github.com/optuna/optuna-examples/tree/main/xgboost/xgboost_integration.py>`_ for the entire script.)
167 #
168 # .. code-block:: python
169 #
170 # pruning_callback = optuna.integration.XGBoostPruningCallback(trial, 'validation-error')
171 # bst = xgb.train(param, dtrain, evals=[(dvalid, 'validation')], callbacks=[pruning_callback])
172
[end of tutorial/10_key_features/003_efficient_optimization_algorithms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/tutorial/10_key_features/003_efficient_optimization_algorithms.py b/tutorial/10_key_features/003_efficient_optimization_algorithms.py
--- a/tutorial/10_key_features/003_efficient_optimization_algorithms.py
+++ b/tutorial/10_key_features/003_efficient_optimization_algorithms.py
@@ -17,13 +17,17 @@
Optuna provides the following sampling algorithms:
+- Grid Search implemented in :class:`optuna.samplers.GridSampler`
+
+- Random Search implemented in :class:`optuna.samplers.RandomSampler`
+
- Tree-structured Parzen Estimator algorithm implemented in :class:`optuna.samplers.TPESampler`
- CMA-ES based algorithm implemented in :class:`optuna.samplers.CmaEsSampler`
-- Grid Search implemented in :class:`optuna.samplers.GridSampler`
+- Algorithm to enable partial fixed parameters implemented in :class:`optuna.samplers.PartialFixedSampler`
-- Random Search implemented in :class:`optuna.samplers.RandomSampler`
+- Nondominated Sorting Genetic Algorithm II implemented in :class:`optuna.samplers.NSGAIISampler`
The default sampler is :class:`optuna.samplers.TPESampler`.
@@ -60,12 +64,18 @@
#
# Optuna provides the following pruning algorithms:
#
+# - Median pruning algorithm implemented in :class:`optuna.pruners.MedianPruner`
+#
+# - Non-pruning algorithm implemented in :class:`optuna.pruners.NopPruner`
+#
+# - Algorithm to operate pruner with tolerance implemented in :class:`optuna.pruners.PatientPruner`
+#
+# - Algorithm to prune specified percentile of trials implemented in :class:`optuna.pruners.PercentilePruner`
+#
# - Asynchronous Successive Halving algorithm implemented in :class:`optuna.pruners.SuccessiveHalvingPruner`
#
# - Hyperband algorithm implemented in :class:`optuna.pruners.HyperbandPruner`
#
-# - Median pruning algorithm implemented in :class:`optuna.pruners.MedianPruner`
-#
# - Threshold pruning algorithm implemented in :class:`optuna.pruners.ThresholdPruner`
#
# We use :class:`optuna.pruners.MedianPruner` in most examples,
| {"golden_diff": "diff --git a/tutorial/10_key_features/003_efficient_optimization_algorithms.py b/tutorial/10_key_features/003_efficient_optimization_algorithms.py\n--- a/tutorial/10_key_features/003_efficient_optimization_algorithms.py\n+++ b/tutorial/10_key_features/003_efficient_optimization_algorithms.py\n@@ -17,13 +17,17 @@\n \n Optuna provides the following sampling algorithms:\n \n+- Grid Search implemented in :class:`optuna.samplers.GridSampler`\n+\n+- Random Search implemented in :class:`optuna.samplers.RandomSampler`\n+\n - Tree-structured Parzen Estimator algorithm implemented in :class:`optuna.samplers.TPESampler`\n \n - CMA-ES based algorithm implemented in :class:`optuna.samplers.CmaEsSampler`\n \n-- Grid Search implemented in :class:`optuna.samplers.GridSampler`\n+- Algorithm to enable partial fixed parameters implemented in :class:`optuna.samplers.PartialFixedSampler`\n \n-- Random Search implemented in :class:`optuna.samplers.RandomSampler`\n+- Nondominated Sorting Genetic Algorithm II implemented in :class:`optuna.samplers.NSGAIISampler`\n \n The default sampler is :class:`optuna.samplers.TPESampler`.\n \n@@ -60,12 +64,18 @@\n #\n # Optuna provides the following pruning algorithms:\n #\n+# - Median pruning algorithm implemented in :class:`optuna.pruners.MedianPruner`\n+#\n+# - Non-pruning algorithm implementd in :class:`optuna.pruners.NopPruner`\n+#\n+# - Algorithm to operate pruner with tolerance implemented in :class:`optuna.pruners.PatientPruner`\n+#\n+# - Algorithm to prune specified percentile of trials implemented in :class:`optuna.pruners.PercentilePruner`\n+#\n # - Asynchronous Successive Halving algorithm implemented in :class:`optuna.pruners.SuccessiveHalvingPruner`\n #\n # - Hyperband algorithm implemented in :class:`optuna.pruners.HyperbandPruner`\n #\n-# - Median pruning algorithm implemented in :class:`optuna.pruners.MedianPruner`\n-#\n # - Threshold pruning algorithm implemented in :class:`optuna.pruners.ThresholdPruner`\n #\n # We use :class:`optuna.pruners.MedianPruner` in most examples,\n", "issue": "Add missing sampler and pruner classes to a tutorial page\n<!-- Please write a clear and concise description of what content in https://optuna.readthedocs.io/ is an issue. -->\r\n\r\n[Efficient Optimization Algorithms](https://optuna.readthedocs.io/en/latest/tutorial/10_key_features/003_efficient_optimization_algorithms.html) tutorial provides lists of samplers/pruners implemented in Optuna. However a few classes are not on the list. It would be great to mention them comprehensively from the tutorial pages for Optuna users. \r\n\r\n\r\nMore concretely, please add the following entries to the tutorial page.\r\n\r\n## Samplers\r\n\r\n- `optuna.samplers.PartialFixedSampler`\r\n- `optuna.samplers.NSGAIISampler`\r\n\r\n## Pruners\r\n- `optuna.pruners.PatientPruner`\r\n- `optuna.pruners.PercentilePruner`\r\n\n", "before_files": [{"content": "\"\"\"\n.. 
_pruning:\n\nEfficient Optimization Algorithms\n=================================\n\nOptuna enables efficient hyperparameter optimization by\nadopting state-of-the-art algorithms for sampling hyperparameters and\npruning efficiently unpromising trials.\n\nSampling Algorithms\n-------------------\n\nSamplers basically continually narrow down the search space using the records of suggested parameter values and evaluated objective values,\nleading to an optimal search space which giving off parameters leading to better objective values.\nMore detailed explanation of how samplers suggest parameters is in :class:`optuna.samplers.BaseSampler`.\n\nOptuna provides the following sampling algorithms:\n\n- Tree-structured Parzen Estimator algorithm implemented in :class:`optuna.samplers.TPESampler`\n\n- CMA-ES based algorithm implemented in :class:`optuna.samplers.CmaEsSampler`\n\n- Grid Search implemented in :class:`optuna.samplers.GridSampler`\n\n- Random Search implemented in :class:`optuna.samplers.RandomSampler`\n\nThe default sampler is :class:`optuna.samplers.TPESampler`.\n\nSwitching Samplers\n------------------\n\n\"\"\"\n\nimport optuna\n\n\n###################################################################################################\n# By default, Optuna uses :class:`~optuna.samplers.TPESampler` as follows.\n\nstudy = optuna.create_study()\nprint(f\"Sampler is {study.sampler.__class__.__name__}\")\n\n###################################################################################################\n# If you want to use different samplers for example :class:`~optuna.samplers.RandomSampler`\n# and :class:`~optuna.samplers.CmaEsSampler`,\n\nstudy = optuna.create_study(sampler=optuna.samplers.RandomSampler())\nprint(f\"Sampler is {study.sampler.__class__.__name__}\")\n\nstudy = optuna.create_study(sampler=optuna.samplers.CmaEsSampler())\nprint(f\"Sampler is {study.sampler.__class__.__name__}\")\n\n\n###################################################################################################\n# Pruning Algorithms\n# ------------------\n#\n# ``Pruners`` automatically stop unpromising trials at the early stages of the training (a.k.a., automated early-stopping).\n#\n# Optuna provides the following pruning algorithms:\n#\n# - Asynchronous Successive Halving algorithm implemented in :class:`optuna.pruners.SuccessiveHalvingPruner`\n#\n# - Hyperband algorithm implemented in :class:`optuna.pruners.HyperbandPruner`\n#\n# - Median pruning algorithm implemented in :class:`optuna.pruners.MedianPruner`\n#\n# - Threshold pruning algorithm implemented in :class:`optuna.pruners.ThresholdPruner`\n#\n# We use :class:`optuna.pruners.MedianPruner` in most examples,\n# though basically it is outperformed by :class:`optuna.pruners.SuccessiveHalvingPruner` and\n# :class:`optuna.pruners.HyperbandPruner` as in `this benchmark result <https://github.com/optuna/optuna/wiki/Benchmarks-with-Kurobako>`_.\n#\n#\n# Activating Pruners\n# ------------------\n# To turn on the pruning feature, you need to call :func:`~optuna.trial.Trial.report` and :func:`~optuna.trial.Trial.should_prune` after each step of the iterative training.\n# :func:`~optuna.trial.Trial.report` periodically monitors the intermediate objective values.\n# :func:`~optuna.trial.Trial.should_prune` decides termination of the trial that does not meet a predefined condition.\n#\n# We would recommend using integration modules for major machine learning frameworks.\n# Exclusive list is :mod:`optuna.integration` and usecases are available in 
`optuna/examples <https://github.com/optuna/optuna-examples/>`_.\n\n\nimport logging\nimport sys\n\nimport sklearn.datasets\nimport sklearn.linear_model\nimport sklearn.model_selection\n\n\ndef objective(trial):\n iris = sklearn.datasets.load_iris()\n classes = list(set(iris.target))\n train_x, valid_x, train_y, valid_y = sklearn.model_selection.train_test_split(\n iris.data, iris.target, test_size=0.25, random_state=0\n )\n\n alpha = trial.suggest_float(\"alpha\", 1e-5, 1e-1, log=True)\n clf = sklearn.linear_model.SGDClassifier(alpha=alpha)\n\n for step in range(100):\n clf.partial_fit(train_x, train_y, classes=classes)\n\n # Report intermediate objective value.\n intermediate_value = 1.0 - clf.score(valid_x, valid_y)\n trial.report(intermediate_value, step)\n\n # Handle pruning based on the intermediate value.\n if trial.should_prune():\n raise optuna.TrialPruned()\n\n return 1.0 - clf.score(valid_x, valid_y)\n\n\n###################################################################################################\n# Set up the median stopping rule as the pruning condition.\n\n# Add stream handler of stdout to show the messages\noptuna.logging.get_logger(\"optuna\").addHandler(logging.StreamHandler(sys.stdout))\nstudy = optuna.create_study(pruner=optuna.pruners.MedianPruner())\nstudy.optimize(objective, n_trials=20)\n\n###################################################################################################\n# As you can see, several trials were pruned (stopped) before they finished all of the iterations.\n# The format of message is ``\"Trial <Trial Number> pruned.\"``.\n\n###################################################################################################\n# Which Sampler and Pruner Should be Used?\n# ----------------------------------------\n#\n# From the benchmark results which are available at `optuna/optuna - wiki \"Benchmarks with Kurobako\" <https://github.com/optuna/optuna/wiki/Benchmarks-with-Kurobako>`_, at least for not deep learning tasks, we would say that\n#\n# * For :class:`optuna.samplers.RandomSampler`, :class:`optuna.pruners.MedianPruner` is the best.\n# * For :class:`optuna.samplers.TPESampler`, :class:`optuna.pruners.Hyperband` is the best.\n#\n# However, note that the benchmark is not deep learning.\n# For deep learning tasks,\n# consult the below table.\n# This table is from the `Ozaki et al., Hyperparameter Optimization Methods: Overview and Characteristics, in IEICE Trans, Vol.J103-D No.9 pp.615-631, 2020 <https://doi.org/10.14923/transinfj.2019JDR0003>`_ paper,\n# which is written in Japanese.\n#\n# +---------------------------+-----------------------------------------+---------------------------------------------------------------+\n# | Parallel Compute Resource | Categorical/Conditional Hyperparameters | Recommended Algorithms |\n# +===========================+=========================================+===============================================================+\n# | Limited | No | TPE. GP-EI if search space is low-dimensional and continuous. |\n# + +-----------------------------------------+---------------------------------------------------------------+\n# | | Yes | TPE. 
GP-EI if search space is low-dimensional and continuous |\n# +---------------------------+-----------------------------------------+---------------------------------------------------------------+\n# | Sufficient | No | CMA-ES, Random Search |\n# + +-----------------------------------------+---------------------------------------------------------------+\n# | | Yes | Random Search or Genetic Algorithm |\n# +---------------------------+-----------------------------------------+---------------------------------------------------------------+\n#\n\n###################################################################################################\n# Integration Modules for Pruning\n# -------------------------------\n# To implement pruning mechanism in much simpler forms, Optuna provides integration modules for the following libraries.\n#\n# For the complete list of Optuna's integration modules, see :mod:`optuna.integration`.\n#\n# For example, :class:`~optuna.integration.XGBoostPruningCallback` introduces pruning without directly changing the logic of training iteration.\n# (See also `example <https://github.com/optuna/optuna-examples/tree/main/xgboost/xgboost_integration.py>`_ for the entire script.)\n#\n# .. code-block:: python\n#\n# pruning_callback = optuna.integration.XGBoostPruningCallback(trial, 'validation-error')\n# bst = xgb.train(param, dtrain, evals=[(dvalid, 'validation')], callbacks=[pruning_callback])\n", "path": "tutorial/10_key_features/003_efficient_optimization_algorithms.py"}]} | 2,868 | 510 |
gh_patches_debug_44031 | rasdani/github-patches | git_diff | strawberry-graphql__strawberry-26 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support forward references
See: https://www.python.org/dev/peps/pep-0563/#forward-references
Right now the following code would break:
```python
from __future__ import annotations
import strawberry
import typing
@strawberry.type
class User:
name: str
friend: typing.Optional[User]
```
This is the error we get:
```
File "/Users/patrickarminio/Documents/personal/temp/stra/app.py", line 7, in <module>
from schema import schema
File "/Users/patrickarminio/Documents/personal/temp/stra/schema.py", line 10, in <module>
@strawberry.type
File "/Users/patrickarminio/.virtualenvs/stra-so-aNvo2/lib/python3.7/site-packages/strawberry/type.py", line 60, in type
return wrap()
File "/Users/patrickarminio/.virtualenvs/stra-so-aNvo2/lib/python3.7/site-packages/strawberry/type.py", line 55, in wrap
cls._fields = _get_fields(cls)
File "/Users/patrickarminio/.virtualenvs/stra-so-aNvo2/lib/python3.7/site-packages/strawberry/type.py", line 27, in _get_fields
cls_annotations = typing.get_type_hints(cls)
File "/Users/patrickarminio/.pyenv/versions/3.7.0/lib/python3.7/typing.py", line 973, in get_type_hints
value = _eval_type(value, base_globals, localns)
File "/Users/patrickarminio/.pyenv/versions/3.7.0/lib/python3.7/typing.py", line 260, in _eval_type
return t._evaluate(globalns, localns)
File "/Users/patrickarminio/.pyenv/versions/3.7.0/lib/python3.7/typing.py", line 464, in _evaluate
eval(self.__forward_code__, globalns, localns),
File "<string>", line 1, in <module>
NameError: name 'User' is not defined
```
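
For what it is worth, `typing.get_type_hints` accepts a local namespace, so one way out (the approach the patch below takes with its `REGISTRY`) is to register each decorated class by name and pass that mapping when annotations are resolved. A rough sketch, assuming a module-level registry:

```python
import typing

REGISTRY = {}  # name -> class, filled in as types are decorated


def register_and_resolve(cls):
    REGISTRY[cls.__name__] = cls
    # Passing REGISTRY as localns lets the "User" forward reference evaluate
    # against the class we just registered.
    return typing.get_type_hints(cls, None, REGISTRY)
```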
</issue>
<code>
[start of strawberry/type.py]
1 import typing
2
3 from dataclasses import dataclass
4 from graphql import GraphQLField, GraphQLObjectType
5 from graphql.utilities.schema_printer import print_type
6
7 from .constants import IS_STRAWBERRY_FIELD
8 from .type_converter import get_graphql_type_for_annotation
9
10
11 def _get_resolver(cls, field_name):
12 def _resolver(obj, info):
13 # TODO: can we make this nicer?
14 # does it work in all the cases?
15
16 field_resolver = getattr(cls(**(obj.__dict__ if obj else {})), field_name)
17
18 if getattr(field_resolver, IS_STRAWBERRY_FIELD, False):
19 return field_resolver(obj, info)
20
21 return field_resolver
22
23 return _resolver
24
25
26 def _get_fields(cls):
27 cls_annotations = typing.get_type_hints(cls)
28
29 fields = {
30 key: GraphQLField(
31 get_graphql_type_for_annotation(value, field_name=key),
32 resolve=_get_resolver(cls, key),
33 )
34 for key, value in cls_annotations.items()
35 }
36
37 fields.update(
38 {
39 key: value.field
40 for key, value in cls.__dict__.items()
41 if getattr(value, IS_STRAWBERRY_FIELD, False)
42 }
43 )
44
45 return fields
46
47
48 def type(cls):
49 def wrap():
50 def repr_(self):
51 return print_type(self.field)
52
53 setattr(cls, "__repr__", repr_)
54
55 cls._fields = _get_fields(cls)
56 cls.field = GraphQLObjectType(name=cls.__name__, fields=cls._fields)
57
58 return dataclass(cls, repr=False)
59
60 return wrap()
61
[end of strawberry/type.py]
[start of strawberry/type_converter.py]
1 from graphql import (
2 GraphQLBoolean,
3 GraphQLFloat,
4 GraphQLID,
5 GraphQLInt,
6 GraphQLList,
7 GraphQLNonNull,
8 GraphQLString,
9 GraphQLUnionType,
10 )
11
12 from .scalars import ID
13
14
15 TYPE_MAP = {
16 str: GraphQLString,
17 int: GraphQLInt,
18 float: GraphQLFloat,
19 bool: GraphQLBoolean,
20 ID: GraphQLID,
21 }
22
23
24 # TODO: make so that we don't pass force optional
25 # we use that when trying to get the type for a
26 # option field (which can either be a scalar or an object type)
27 def get_graphql_type_for_annotation(
28 annotation, field_name: str, force_optional: bool = False
29 ):
30 # TODO: nice error
31
32 is_optional = False
33
34 # TODO: this might lead to issues with types that have a field value
35 if hasattr(annotation, "field"):
36 graphql_type = annotation.field
37 else:
38 annotation_name = getattr(annotation, "_name", None)
39
40 if annotation_name == "List":
41 list_of_type = get_graphql_type_for_annotation(
42 annotation.__args__[0], field_name
43 )
44
45 return GraphQLList(list_of_type)
46
47 # for some reason _name is None for Optional and Union types, so we check if we
48 # have __args__ populated, there might be some edge cases where __args__ is
49 # populated but the type is not an Union, like in the above case with Lists
50 if hasattr(annotation, "__args__"):
51 types = annotation.__args__
52 non_none_types = [x for x in types if x != type(None)] # noqa:E721
53
54 # optionals are represented as Union[type, None]
55 if len(non_none_types) == 1:
56 is_optional = True
57 graphql_type = get_graphql_type_for_annotation(
58 non_none_types[0], field_name, force_optional=True
59 )
60 else:
61 is_optional = type(None) in types
62
63 # TODO: union types don't work with scalar types
64 # so we want to return a nice error
65 # also we want to make sure we have been passed
66 # strawberry types
67 graphql_type = GraphQLUnionType(
68 field_name, [type.field for type in types]
69 )
70 else:
71 graphql_type = TYPE_MAP.get(annotation)
72
73 if not graphql_type:
74 raise ValueError(f"Unable to get GraphQL type for {annotation}")
75
76 if is_optional or force_optional:
77 return graphql_type
78
79 return GraphQLNonNull(graphql_type)
80
[end of strawberry/type_converter.py]
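As a quick illustration of how the converter above handles optionals (a usage sketch based on the listing, not a file from the repository):

```python
import typing

from strawberry.type_converter import get_graphql_type_for_annotation

# A bare scalar annotation comes back wrapped in GraphQLNonNull ("String!"),
# while Optional[...] drops the NonNull wrapper ("String").
print(get_graphql_type_for_annotation(str, "name"))
print(get_graphql_type_for_annotation(typing.Optional[str], "name"))
```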
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/strawberry/type.py b/strawberry/type.py
--- a/strawberry/type.py
+++ b/strawberry/type.py
@@ -5,7 +5,7 @@
from graphql.utilities.schema_printer import print_type
from .constants import IS_STRAWBERRY_FIELD
-from .type_converter import get_graphql_type_for_annotation
+from .type_converter import REGISTRY, get_graphql_type_for_annotation
def _get_resolver(cls, field_name):
@@ -23,37 +23,39 @@
return _resolver
-def _get_fields(cls):
- cls_annotations = typing.get_type_hints(cls)
-
- fields = {
- key: GraphQLField(
- get_graphql_type_for_annotation(value, field_name=key),
- resolve=_get_resolver(cls, key),
- )
- for key, value in cls_annotations.items()
- }
-
- fields.update(
- {
- key: value.field
- for key, value in cls.__dict__.items()
- if getattr(value, IS_STRAWBERRY_FIELD, False)
- }
- )
-
- return fields
-
-
def type(cls):
def wrap():
+ name = cls.__name__
+ REGISTRY[name] = cls
+
def repr_(self):
return print_type(self.field)
setattr(cls, "__repr__", repr_)
- cls._fields = _get_fields(cls)
- cls.field = GraphQLObjectType(name=cls.__name__, fields=cls._fields)
+ annotations = typing.get_type_hints(cls, None, REGISTRY)
+
+ def _get_fields():
+
+ fields = {
+ key: GraphQLField(
+ get_graphql_type_for_annotation(value, key),
+ resolve=_get_resolver(cls, key),
+ )
+ for key, value in annotations.items()
+ }
+
+ fields.update(
+ {
+ key: value.field
+ for key, value in cls.__dict__.items()
+ if getattr(value, IS_STRAWBERRY_FIELD, False)
+ }
+ )
+
+ return fields
+
+ cls.field = GraphQLObjectType(name, lambda: _get_fields())
return dataclass(cls, repr=False)
diff --git a/strawberry/type_converter.py b/strawberry/type_converter.py
--- a/strawberry/type_converter.py
+++ b/strawberry/type_converter.py
@@ -12,7 +12,7 @@
from .scalars import ID
-TYPE_MAP = {
+REGISTRY = {
str: GraphQLString,
int: GraphQLInt,
float: GraphQLFloat,
@@ -27,11 +27,9 @@
def get_graphql_type_for_annotation(
annotation, field_name: str, force_optional: bool = False
):
- # TODO: nice error
-
- is_optional = False
-
# TODO: this might lead to issues with types that have a field value
+ is_optional = force_optional
+
if hasattr(annotation, "field"):
graphql_type = annotation.field
else:
@@ -49,7 +47,7 @@
# populated but the type is not an Union, like in the above case with Lists
if hasattr(annotation, "__args__"):
types = annotation.__args__
- non_none_types = [x for x in types if x != type(None)] # noqa:E721
+ non_none_types = [x for x in types if x != None.__class__] # noqa:E721
# optionals are represented as Union[type, None]
if len(non_none_types) == 1:
@@ -58,7 +56,7 @@
non_none_types[0], field_name, force_optional=True
)
else:
- is_optional = type(None) in types
+ is_optional = None.__class__ in types
# TODO: union types don't work with scalar types
# so we want to return a nice error
@@ -68,12 +66,12 @@
field_name, [type.field for type in types]
)
else:
- graphql_type = TYPE_MAP.get(annotation)
+ graphql_type = REGISTRY.get(annotation)
if not graphql_type:
raise ValueError(f"Unable to get GraphQL type for {annotation}")
- if is_optional or force_optional:
+ if is_optional:
return graphql_type
return GraphQLNonNull(graphql_type)
| {"golden_diff": "diff --git a/strawberry/type.py b/strawberry/type.py\n--- a/strawberry/type.py\n+++ b/strawberry/type.py\n@@ -5,7 +5,7 @@\n from graphql.utilities.schema_printer import print_type\n \n from .constants import IS_STRAWBERRY_FIELD\n-from .type_converter import get_graphql_type_for_annotation\n+from .type_converter import REGISTRY, get_graphql_type_for_annotation\n \n \n def _get_resolver(cls, field_name):\n@@ -23,37 +23,39 @@\n return _resolver\n \n \n-def _get_fields(cls):\n- cls_annotations = typing.get_type_hints(cls)\n-\n- fields = {\n- key: GraphQLField(\n- get_graphql_type_for_annotation(value, field_name=key),\n- resolve=_get_resolver(cls, key),\n- )\n- for key, value in cls_annotations.items()\n- }\n-\n- fields.update(\n- {\n- key: value.field\n- for key, value in cls.__dict__.items()\n- if getattr(value, IS_STRAWBERRY_FIELD, False)\n- }\n- )\n-\n- return fields\n-\n-\n def type(cls):\n def wrap():\n+ name = cls.__name__\n+ REGISTRY[name] = cls\n+\n def repr_(self):\n return print_type(self.field)\n \n setattr(cls, \"__repr__\", repr_)\n \n- cls._fields = _get_fields(cls)\n- cls.field = GraphQLObjectType(name=cls.__name__, fields=cls._fields)\n+ annotations = typing.get_type_hints(cls, None, REGISTRY)\n+\n+ def _get_fields():\n+\n+ fields = {\n+ key: GraphQLField(\n+ get_graphql_type_for_annotation(value, key),\n+ resolve=_get_resolver(cls, key),\n+ )\n+ for key, value in annotations.items()\n+ }\n+\n+ fields.update(\n+ {\n+ key: value.field\n+ for key, value in cls.__dict__.items()\n+ if getattr(value, IS_STRAWBERRY_FIELD, False)\n+ }\n+ )\n+\n+ return fields\n+\n+ cls.field = GraphQLObjectType(name, lambda: _get_fields())\n \n return dataclass(cls, repr=False)\n \ndiff --git a/strawberry/type_converter.py b/strawberry/type_converter.py\n--- a/strawberry/type_converter.py\n+++ b/strawberry/type_converter.py\n@@ -12,7 +12,7 @@\n from .scalars import ID\n \n \n-TYPE_MAP = {\n+REGISTRY = {\n str: GraphQLString,\n int: GraphQLInt,\n float: GraphQLFloat,\n@@ -27,11 +27,9 @@\n def get_graphql_type_for_annotation(\n annotation, field_name: str, force_optional: bool = False\n ):\n- # TODO: nice error\n-\n- is_optional = False\n-\n # TODO: this might lead to issues with types that have a field value\n+ is_optional = force_optional\n+\n if hasattr(annotation, \"field\"):\n graphql_type = annotation.field\n else:\n@@ -49,7 +47,7 @@\n # populated but the type is not an Union, like in the above case with Lists\n if hasattr(annotation, \"__args__\"):\n types = annotation.__args__\n- non_none_types = [x for x in types if x != type(None)] # noqa:E721\n+ non_none_types = [x for x in types if x != None.__class__] # noqa:E721\n \n # optionals are represented as Union[type, None]\n if len(non_none_types) == 1:\n@@ -58,7 +56,7 @@\n non_none_types[0], field_name, force_optional=True\n )\n else:\n- is_optional = type(None) in types\n+ is_optional = None.__class__ in types\n \n # TODO: union types don't work with scalar types\n # so we want to return a nice error\n@@ -68,12 +66,12 @@\n field_name, [type.field for type in types]\n )\n else:\n- graphql_type = TYPE_MAP.get(annotation)\n+ graphql_type = REGISTRY.get(annotation)\n \n if not graphql_type:\n raise ValueError(f\"Unable to get GraphQL type for {annotation}\")\n \n- if is_optional or force_optional:\n+ if is_optional:\n return graphql_type\n \n return GraphQLNonNull(graphql_type)\n", "issue": "Support forward references\nSee: https://www.python.org/dev/peps/pep-0563/#forward-references\r\n\r\nRight now the following code would 
break:\r\n\r\n```python\r\nfrom __future__ import annotations\r\n\r\nimport strawberry\r\nimport typing\r\n\r\[email protected]\r\nclass User:\r\n name: str\r\n friend: typing.Optional[User]\r\n```\r\n\r\nThis is the error we get:\r\n\r\n```\r\n File \"/Users/patrickarminio/Documents/personal/temp/stra/app.py\", line 7, in <module>\r\n from schema import schema\r\n File \"/Users/patrickarminio/Documents/personal/temp/stra/schema.py\", line 10, in <module>\r\n @strawberry.type\r\n File \"/Users/patrickarminio/.virtualenvs/stra-so-aNvo2/lib/python3.7/site-packages/strawberry/type.py\", line 60, in type\r\n return wrap()\r\n File \"/Users/patrickarminio/.virtualenvs/stra-so-aNvo2/lib/python3.7/site-packages/strawberry/type.py\", line 55, in wrap\r\n cls._fields = _get_fields(cls)\r\n File \"/Users/patrickarminio/.virtualenvs/stra-so-aNvo2/lib/python3.7/site-packages/strawberry/type.py\", line 27, in _get_fields\r\n cls_annotations = typing.get_type_hints(cls)\r\n File \"/Users/patrickarminio/.pyenv/versions/3.7.0/lib/python3.7/typing.py\", line 973, in get_type_hints\r\n value = _eval_type(value, base_globals, localns)\r\n File \"/Users/patrickarminio/.pyenv/versions/3.7.0/lib/python3.7/typing.py\", line 260, in _eval_type\r\n return t._evaluate(globalns, localns)\r\n File \"/Users/patrickarminio/.pyenv/versions/3.7.0/lib/python3.7/typing.py\", line 464, in _evaluate\r\n eval(self.__forward_code__, globalns, localns),\r\n File \"<string>\", line 1, in <module>\r\nNameError: name 'User' is not defined\r\n```\n", "before_files": [{"content": "import typing\n\nfrom dataclasses import dataclass\nfrom graphql import GraphQLField, GraphQLObjectType\nfrom graphql.utilities.schema_printer import print_type\n\nfrom .constants import IS_STRAWBERRY_FIELD\nfrom .type_converter import get_graphql_type_for_annotation\n\n\ndef _get_resolver(cls, field_name):\n def _resolver(obj, info):\n # TODO: can we make this nicer?\n # does it work in all the cases?\n\n field_resolver = getattr(cls(**(obj.__dict__ if obj else {})), field_name)\n\n if getattr(field_resolver, IS_STRAWBERRY_FIELD, False):\n return field_resolver(obj, info)\n\n return field_resolver\n\n return _resolver\n\n\ndef _get_fields(cls):\n cls_annotations = typing.get_type_hints(cls)\n\n fields = {\n key: GraphQLField(\n get_graphql_type_for_annotation(value, field_name=key),\n resolve=_get_resolver(cls, key),\n )\n for key, value in cls_annotations.items()\n }\n\n fields.update(\n {\n key: value.field\n for key, value in cls.__dict__.items()\n if getattr(value, IS_STRAWBERRY_FIELD, False)\n }\n )\n\n return fields\n\n\ndef type(cls):\n def wrap():\n def repr_(self):\n return print_type(self.field)\n\n setattr(cls, \"__repr__\", repr_)\n\n cls._fields = _get_fields(cls)\n cls.field = GraphQLObjectType(name=cls.__name__, fields=cls._fields)\n\n return dataclass(cls, repr=False)\n\n return wrap()\n", "path": "strawberry/type.py"}, {"content": "from graphql import (\n GraphQLBoolean,\n GraphQLFloat,\n GraphQLID,\n GraphQLInt,\n GraphQLList,\n GraphQLNonNull,\n GraphQLString,\n GraphQLUnionType,\n)\n\nfrom .scalars import ID\n\n\nTYPE_MAP = {\n str: GraphQLString,\n int: GraphQLInt,\n float: GraphQLFloat,\n bool: GraphQLBoolean,\n ID: GraphQLID,\n}\n\n\n# TODO: make so that we don't pass force optional\n# we use that when trying to get the type for a\n# option field (which can either be a scalar or an object type)\ndef get_graphql_type_for_annotation(\n annotation, field_name: str, force_optional: bool = False\n):\n # TODO: nice error\n\n is_optional = 
False\n\n # TODO: this might lead to issues with types that have a field value\n if hasattr(annotation, \"field\"):\n graphql_type = annotation.field\n else:\n annotation_name = getattr(annotation, \"_name\", None)\n\n if annotation_name == \"List\":\n list_of_type = get_graphql_type_for_annotation(\n annotation.__args__[0], field_name\n )\n\n return GraphQLList(list_of_type)\n\n # for some reason _name is None for Optional and Union types, so we check if we\n # have __args__ populated, there might be some edge cases where __args__ is\n # populated but the type is not an Union, like in the above case with Lists\n if hasattr(annotation, \"__args__\"):\n types = annotation.__args__\n non_none_types = [x for x in types if x != type(None)] # noqa:E721\n\n # optionals are represented as Union[type, None]\n if len(non_none_types) == 1:\n is_optional = True\n graphql_type = get_graphql_type_for_annotation(\n non_none_types[0], field_name, force_optional=True\n )\n else:\n is_optional = type(None) in types\n\n # TODO: union types don't work with scalar types\n # so we want to return a nice error\n # also we want to make sure we have been passed\n # strawberry types\n graphql_type = GraphQLUnionType(\n field_name, [type.field for type in types]\n )\n else:\n graphql_type = TYPE_MAP.get(annotation)\n\n if not graphql_type:\n raise ValueError(f\"Unable to get GraphQL type for {annotation}\")\n\n if is_optional or force_optional:\n return graphql_type\n\n return GraphQLNonNull(graphql_type)\n", "path": "strawberry/type_converter.py"}]} | 2,194 | 982 |
gh_patches_debug_14469 | rasdani/github-patches | git_diff | openstates__openstates-scrapers-2343 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
MO failing since at least 2018-05-24
MO has been failing since 2018-05-24
Based on automated runs it appears that MO has not run successfully in 2 days (2018-05-24).
```
06:09:50 INFO billy: Save committee on Government Oversight, Standing
06:09:50 INFO billy: Save committee on Tax Policy, Statutory
06:09:50 INFO billy: Save committee on Litigation Reform, Standing
06:09:50 INFO billy: Save committee Rules - Legislative Oversight, Standing
06:09:50 INFO billy: Save committee Investigative on Oversight, Standing
06:09:50 INFO billy: Save committee Agriculture Policy, Standing
06:09:50 INFO billy: Save committee Transportation, Infrastructure and Public Safety
06:09:50 INFO billy: Save committee Agriculture, Food Production, and Outdoor Resources
06:09:50 INFO billy: Save committee Professional Registration
06:09:50 INFO billy: Save committee on Employment Security, Standing
06:09:50 INFO billy: Save committee Fiscal Oversight
06:09:50 INFO billy: Save committee Workforce Development, Standing
06:09:50 INFO billy: Save committee General Laws, Standing
06:09:50 INFO billy: Save committee Subcommittee on Appropriations - Public Safety, Corrections, Transportation, and Revenue, Subcommittee
06:09:50 INFO billy: Save committee Subcommittee on Mass Transit Security, Subcommittee
06:09:50 INFO billy: Save committee Subcommittee on Second Amendment Preservation, Subcommittee
06:09:50 INFO billy: Save committee on Homeland Security, Standing
06:09:50 INFO billy: Save committee Ways and Means
06:09:50 INFO billy: Save committee Judiciary, Standing
06:09:50 INFO billy: Save committee Subcommittee on Ports, Subcommittee
06:09:50 INFO billy: Save committee Corrections and Public Institutions, Standing
06:09:50 INFO billy: Save bill lower 2018S1: HR 2
06:09:50 WARNING billy: Value u'2018S1' for field '<obj>.session' is not in the enumeration: ['2012', '2013', '2014', '2015', '2016', '2017', '2017S1', '2017S2', '2018']
06:09:50 INFO billy: Save bill lower 2018S1: HR 1
06:09:50 WARNING billy: Value u'2018S1' for field '<obj>.session' is not in the enumeration: ['2012', '2013', '2014', '2015', '2016', '2017', '2017S1', '2017S2', '2018']
06:09:50 INFO billy: Save vote 2018S1 lower: HR 2 'Reported Do Pass (H) - '
06:09:50 WARNING billy: Value None for field '<obj>.yes_count' is not of type integer
06:09:52 INFO billy: billy-update abbr=mo
actions=import,report
types=bills,legislators,votes,committees,alldata
sessions=2018
terms=2017-2018
06:09:52 INFO billy: Finished importing 194 legislator files.
06:09:53 INFO billy: imported 1 vote files
Traceback (most recent call last):
File "/opt/openstates/venv-billy//bin/billy-update", line 11, in <module>
load_entry_point('billy', 'console_scripts', 'billy-update')()
File "/opt/openstates/venv-billy/src/billy/billy/bin/update.py", line 413, in main
import_report = _do_imports(abbrev, args)
File "/opt/openstates/venv-billy/src/billy/billy/bin/update.py", line 152, in _do_imports
report['bills'] = import_bills(abbrev, settings.BILLY_DATA_DIR)
File "/opt/openstates/venv-billy/src/billy/billy/importers/bills.py", line 413, in import_bills
ret = import_bill(data, votes, categorizer)
File "/opt/openstates/venv-billy/src/billy/billy/importers/bills.py", line 231, in import_bill
match_sponsor_ids(abbr, data)
File "/opt/openstates/venv-billy/src/billy/billy/importers/bills.py", line 37, in match_sponsor_ids
sponsor['name'])
File "/opt/openstates/venv-billy/src/billy/billy/importers/names.py", line 26, in get_legislator_id
raise Exception("bad session: " + session)
Exception: bad session: 2018S1
```
Visit http://bobsled.openstates.org for more info.
</issue>
<code>
[start of billy_metadata/mo.py]
1 import datetime
2
3 metadata = dict(
4 name='Missouri',
5 abbreviation='mo',
6 legislature_name='Missouri General Assembly',
7 legislature_url='http://www.moga.mo.gov/',
8 capitol_timezone='America/Chicago',
9 chambers={
10 'upper': {
11 'name': 'Senate',
12 'title': 'Senator'
13 },
14 'lower': {
15 'name': 'House',
16 'title': 'Representative'
17 },
18 },
19 terms=[
20 {
21 'name': '2011-2012',
22 'sessions': ['2012'],
23 'start_year': 2011,
24 'end_year': 2012,
25 },
26 {
27 'name': '2013-2014',
28 'sessions': ['2013', '2014'],
29 'start_year': 2013,
30 'end_year': 2014,
31 },
32 {
33 'name': '2015-2016',
34 'sessions': ['2015', '2016'],
35 'start_year': 2015,
36 'end_year': 2016,
37 },
38 {
39 'name': '2017-2018',
40 'sessions': ['2017', '2017S1', '2017S2', '2018'],
41 'start_year': 2017,
42 'end_year': 2018,
43 },
44 ],
45 # General Assembly sessions convene the Wed. following the first Mon.
46 # of January and adjourn May 30.
47 # http://www.house.mo.gov/content.aspx?info=/info/howbill.htm
48 session_details={
49 '2012': {
50 'type': 'primary',
51 'start_date': datetime.date(2012,1,4),
52 'end_date': datetime.date(2012,5,30),
53 'display_name': '2012 Regular Session',
54 '_scraped_name': '2012 - 96th General Assembly - 2nd Regular Session',
55 },
56 '2013': {
57 'type': 'primary',
58 'start_date': datetime.date(2013,1,9),
59 'end_date': datetime.date(2013,5,30),
60 'display_name': '2013 Regular Session',
61 '_scraped_name': '2013 - 97th General Assembly - 1st Regular Session',
62 },
63 '2014': {
64 'type': 'primary',
65 'start_date': datetime.date(2014,1,8),
66 'end_date': datetime.date(2014,5,30),
67 'display_name': '2014 Regular Session',
68 '_scraped_name': '2014 - 97th General Assembly - 2nd Regular Session',
69 },
70 '2015': {
71 'type': 'primary',
72 'start_date': datetime.date(2015,1,7),
73 'end_date': datetime.date(2015,5,30),
74 'display_name': '2015 Regular Session',
75 '_scraped_name': '2015 - 98th General Assembly - 1st Regular Session',
76 },
77 '2016': {
78 'type': 'primary',
79 'start_date': datetime.date(2016,1,6),
80 'end_date': datetime.date(2016,5,30),
81 'display_name': '2016 Regular Session',
82 },
83 '2017': {
84 'type': 'primary',
85 'start_date': datetime.date(2017,1,4),
86 'end_date': datetime.date(2017,5,12),
87 'display_name': '2017 Regular Session',
88 },
89 '2017S1': {
90 'type': 'special',
91 'display_name': '2017 First Extraordinary Session',
92 },
93 '2017S2': {
94 'type': 'special',
95 'display_name': '2017 Second Extraordinary Session',
96 },
97 '2018': {
98 'type': 'primary',
99 'display_name': '2018 Regular Session',
100 },
101 '2018S1': {
102 'type': 'special',
103 'display_name': '2018 First Extraordinary Session',
104 },
105 },
106 feature_flags=['subjects', 'influenceexplorer'],
107 _ignored_scraped_sessions=[
108 '2014 - 97th General Assembly - 2nd Regular Session',
109 '2013 - 97th General Assembly - 1st Regular Session',
110 '2012 - 96th General Assembly - 2nd Regular Session',
111 '2011 - 96th General Assembly - 1st Regular Session',
112 '2010 - 95th General Assembly - 2nd Regular Session',
113 '2009 - 95th General Assembly - 1st Regular Session',
114 '2008 - 94th General Assembly - 2nd Regular Session',
115 '2007 - 94th General Assembly - 1st Regular Session',
116 '2006 - 93rd General Assembly - 2nd Regular Session',
117 '2005 - 93rd General Assembly - 1st Regular Session',
118 '2004 - 92nd General Assembly - 2nd Regular Session',
119 '2003 - 92nd General Assembly - 1st Regular Session',
120 '2002 - 91st General Assembly - 2nd Regular Session',
121 '2001 - 91st General Assembly - 1st Regular Session',
122 '2000 - 90th General Assembly - 2nd Regular Session',
123 '1999 - 90th General Assembly - 1st Regular Session',
124 '1998 - 89th General Assembly - 2nd Regular Session',
125 '1997 - 89th General Assembly - 1st Regular Session',
126 '1996 - 88th General Assembly - 2nd Regular Session',
127 '1995 - 88th General Assembly - 1st Regular Session'
128 ]
129 )
130
[end of billy_metadata/mo.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/billy_metadata/mo.py b/billy_metadata/mo.py
--- a/billy_metadata/mo.py
+++ b/billy_metadata/mo.py
@@ -37,7 +37,7 @@
},
{
'name': '2017-2018',
- 'sessions': ['2017', '2017S1', '2017S2', '2018'],
+ 'sessions': ['2017', '2017S1', '2017S2', '2018', '2018S1'],
'start_year': 2017,
'end_year': 2018,
},
@@ -98,7 +98,7 @@
'type': 'primary',
'display_name': '2018 Regular Session',
},
- '2018S2': {
+ '2018S1': {
'type': 'special',
'display_name': '2018 First Extraordinary Session',
},
| {"golden_diff": "diff --git a/billy_metadata/mo.py b/billy_metadata/mo.py\n--- a/billy_metadata/mo.py\n+++ b/billy_metadata/mo.py\n@@ -37,7 +37,7 @@\n },\n {\n 'name': '2017-2018',\n- 'sessions': ['2017', '2017S1', '2017S2', '2018'],\n+ 'sessions': ['2017', '2017S1', '2017S2', '2018', '2018S1'],\n 'start_year': 2017,\n 'end_year': 2018,\n },\n@@ -98,7 +98,7 @@\n 'type': 'primary',\n 'display_name': '2018 Regular Session',\n },\n- '2018S2': {\n+ '2018S1': {\n 'type': 'special',\n 'display_name': '2018 First Extraordinary Session',\n },\n", "issue": "MO failing since at least 2018-05-24\nMO has been failing since 2018-05-24\n\nBased on automated runs it appears that MO has not run successfully in 2 days (2018-05-24).\n\n\n```\n 06:09:50 INFO billy: Save committee on Government Oversight, Standing\n06:09:50 INFO billy: Save committee on Tax Policy, Statutory\n06:09:50 INFO billy: Save committee on Litigation Reform, Standing\n06:09:50 INFO billy: Save committee Rules - Legislative Oversight, Standing\n06:09:50 INFO billy: Save committee Investigative on Oversight, Standing\n06:09:50 INFO billy: Save committee Agriculture Policy, Standing\n06:09:50 INFO billy: Save committee Transportation, Infrastructure and Public Safety\n06:09:50 INFO billy: Save committee Agriculture, Food Production, and Outdoor Resources\n06:09:50 INFO billy: Save committee Professional Registration\n06:09:50 INFO billy: Save committee on Employment Security, Standing\n06:09:50 INFO billy: Save committee Fiscal Oversight\n06:09:50 INFO billy: Save committee Workforce Development, Standing\n06:09:50 INFO billy: Save committee General Laws, Standing\n06:09:50 INFO billy: Save committee Subcommittee on Appropriations - Public Safety, Corrections, Transportation, and Revenue, Subcommittee\n06:09:50 INFO billy: Save committee Subcommittee on Mass Transit Security, Subcommittee\n06:09:50 INFO billy: Save committee Subcommittee on Second Amendment Preservation, Subcommittee\n06:09:50 INFO billy: Save committee on Homeland Security, Standing\n06:09:50 INFO billy: Save committee Ways and Means\n06:09:50 INFO billy: Save committee Judiciary, Standing\n06:09:50 INFO billy: Save committee Subcommittee on Ports, Subcommittee\n06:09:50 INFO billy: Save committee Corrections and Public Institutions, Standing\n06:09:50 INFO billy: Save bill lower 2018S1: HR 2\n06:09:50 WARNING billy: Value u'2018S1' for field '<obj>.session' is not in the enumeration: ['2012', '2013', '2014', '2015', '2016', '2017', '2017S1', '2017S2', '2018']\n06:09:50 INFO billy: Save bill lower 2018S1: HR 1\n06:09:50 WARNING billy: Value u'2018S1' for field '<obj>.session' is not in the enumeration: ['2012', '2013', '2014', '2015', '2016', '2017', '2017S1', '2017S2', '2018']\n06:09:50 INFO billy: Save vote 2018S1 lower: HR 2 'Reported Do Pass (H) - '\n06:09:50 WARNING billy: Value None for field '<obj>.yes_count' is not of type integer\n06:09:52 INFO billy: billy-update abbr=mo\n actions=import,report\n types=bills,legislators,votes,committees,alldata\n sessions=2018\n terms=2017-2018\n06:09:52 INFO billy: Finished importing 194 legislator files.\n06:09:53 INFO billy: imported 1 vote files\nTraceback (most recent call last):\n File \"/opt/openstates/venv-billy//bin/billy-update\", line 11, in <module>\n load_entry_point('billy', 'console_scripts', 'billy-update')()\n File \"/opt/openstates/venv-billy/src/billy/billy/bin/update.py\", line 413, in main\n import_report = _do_imports(abbrev, args)\n File \"/opt/openstates/venv-billy/src/billy/billy/bin/update.py\", 
line 152, in _do_imports\n report['bills'] = import_bills(abbrev, settings.BILLY_DATA_DIR)\n File \"/opt/openstates/venv-billy/src/billy/billy/importers/bills.py\", line 413, in import_bills\n ret = import_bill(data, votes, categorizer)\n File \"/opt/openstates/venv-billy/src/billy/billy/importers/bills.py\", line 231, in import_bill\n match_sponsor_ids(abbr, data)\n File \"/opt/openstates/venv-billy/src/billy/billy/importers/bills.py\", line 37, in match_sponsor_ids\n sponsor['name'])\n File \"/opt/openstates/venv-billy/src/billy/billy/importers/names.py\", line 26, in get_legislator_id\n raise Exception(\"bad session: \" + session)\nException: bad session: 2018S1\n```\n\nVisit http://bobsled.openstates.org for more info.\n\n", "before_files": [{"content": "import datetime\n\nmetadata = dict(\n name='Missouri',\n abbreviation='mo',\n legislature_name='Missouri General Assembly',\n legislature_url='http://www.moga.mo.gov/',\n capitol_timezone='America/Chicago',\n chambers={\n 'upper': {\n 'name': 'Senate',\n 'title': 'Senator'\n },\n 'lower': {\n 'name': 'House',\n 'title': 'Representative'\n },\n },\n terms=[\n {\n 'name': '2011-2012',\n 'sessions': ['2012'],\n 'start_year': 2011,\n 'end_year': 2012,\n },\n {\n 'name': '2013-2014',\n 'sessions': ['2013', '2014'],\n 'start_year': 2013,\n 'end_year': 2014,\n },\n {\n 'name': '2015-2016',\n 'sessions': ['2015', '2016'],\n 'start_year': 2015,\n 'end_year': 2016,\n },\n {\n 'name': '2017-2018',\n 'sessions': ['2017', '2017S1', '2017S2', '2018'],\n 'start_year': 2017,\n 'end_year': 2018,\n },\n ],\n # General Assembly sessions convene the Wed. following the first Mon.\n # of January and adjourn May 30.\n # http://www.house.mo.gov/content.aspx?info=/info/howbill.htm\n session_details={\n '2012': {\n 'type': 'primary',\n 'start_date': datetime.date(2012,1,4),\n 'end_date': datetime.date(2012,5,30),\n 'display_name': '2012 Regular Session',\n '_scraped_name': '2012 - 96th General Assembly - 2nd Regular Session',\n },\n '2013': {\n 'type': 'primary',\n 'start_date': datetime.date(2013,1,9),\n 'end_date': datetime.date(2013,5,30),\n 'display_name': '2013 Regular Session',\n '_scraped_name': '2013 - 97th General Assembly - 1st Regular Session',\n },\n '2014': {\n 'type': 'primary',\n 'start_date': datetime.date(2014,1,8),\n 'end_date': datetime.date(2014,5,30),\n 'display_name': '2014 Regular Session',\n '_scraped_name': '2014 - 97th General Assembly - 2nd Regular Session',\n },\n '2015': {\n 'type': 'primary',\n 'start_date': datetime.date(2015,1,7),\n 'end_date': datetime.date(2015,5,30),\n 'display_name': '2015 Regular Session',\n '_scraped_name': '2015 - 98th General Assembly - 1st Regular Session',\n },\n '2016': {\n 'type': 'primary',\n 'start_date': datetime.date(2016,1,6),\n 'end_date': datetime.date(2016,5,30),\n 'display_name': '2016 Regular Session',\n },\n '2017': {\n 'type': 'primary',\n 'start_date': datetime.date(2017,1,4),\n 'end_date': datetime.date(2017,5,12),\n 'display_name': '2017 Regular Session',\n },\n '2017S1': {\n 'type': 'special',\n 'display_name': '2017 First Extraordinary Session',\n },\n '2017S2': {\n 'type': 'special',\n 'display_name': '2017 Second Extraordinary Session',\n },\n '2018': {\n 'type': 'primary',\n 'display_name': '2018 Regular Session',\n },\n '2018S1': {\n 'type': 'special',\n 'display_name': '2018 First Extraordinary Session',\n },\n },\n feature_flags=['subjects', 'influenceexplorer'],\n _ignored_scraped_sessions=[\n '2014 - 97th General Assembly - 2nd Regular Session',\n '2013 - 97th General Assembly 
- 1st Regular Session',\n '2012 - 96th General Assembly - 2nd Regular Session',\n '2011 - 96th General Assembly - 1st Regular Session',\n '2010 - 95th General Assembly - 2nd Regular Session',\n '2009 - 95th General Assembly - 1st Regular Session',\n '2008 - 94th General Assembly - 2nd Regular Session',\n '2007 - 94th General Assembly - 1st Regular Session',\n '2006 - 93rd General Assembly - 2nd Regular Session',\n '2005 - 93rd General Assembly - 1st Regular Session',\n '2004 - 92nd General Assembly - 2nd Regular Session',\n '2003 - 92nd General Assembly - 1st Regular Session',\n '2002 - 91st General Assembly - 2nd Regular Session',\n '2001 - 91st General Assembly - 1st Regular Session',\n '2000 - 90th General Assembly - 2nd Regular Session',\n '1999 - 90th General Assembly - 1st Regular Session',\n '1998 - 89th General Assembly - 2nd Regular Session',\n '1997 - 89th General Assembly - 1st Regular Session',\n '1996 - 88th General Assembly - 2nd Regular Session',\n '1995 - 88th General Assembly - 1st Regular Session'\n ]\n)\n", "path": "billy_metadata/mo.py"}]} | 3,515 | 245 |
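The `bad session: 2018S1` failure in the log above is a mismatch between `session_details` and the `terms` list, which the golden diff reconciles. A small consistency check along these lines — a sketch that assumes the metadata dict has the same shape as the `billy_metadata/mo.py` shown in the prompt — would catch such gaps before an import run:

```python
def find_unlisted_sessions(metadata):
    """Return session keys present in session_details but listed under no term."""
    term_sessions = {
        session
        for term in metadata["terms"]
        for session in term["sessions"]
    }
    return sorted(set(metadata["session_details"]) - term_sessions)


# With the pre-fix metadata this would return ['2018S1'], matching the
# "bad session: 2018S1" exception in the traceback above.
```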
gh_patches_debug_1657 | rasdani/github-patches | git_diff | kubeflow__pipelines-5054 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
TypeErro occurs in gcp/automl/create_dataset_for_tables component
### What steps did you take:
[A clear and concise description of what the bug is.]
[gcp/automl/create_dataset_for_tables component](https://github.com/kubeflow/pipelines/tree/master/components/gcp/automl/create_dataset_for_tables)'s `create_time` output is declared as a string:
https://github.com/kubeflow/pipelines/blob/ecb14f40bb819c0678589b6458892ece5369fa71/components/gcp/automl/create_dataset_for_tables/component.yaml#L15
however, `google.protobuf.timestamp_pb2.Timestamp` is returned in actual fact:
https://github.com/kubeflow/pipelines/blob/ecb14f40bb819c0678589b6458892ece5369fa71/components/gcp/automl/create_dataset_for_tables/component.py#L54
FYI: The `dataset` object is an instance of `google.cloud.automl_v1beta1.types.Dataset` class and its [document](https://googleapis.dev/python/automl/0.4.0/gapic/v1beta1/types.html#google.cloud.automl_v1beta1.types.Dataset.create_time) says:
> **create_time**
> Output only. Timestamp when this dataset was created.
### What happened:
`TypeError` occurs

### What did you expect to happen:
Work.
### Environment:
<!-- Please fill in those that seem relevant. -->
How did you deploy Kubeflow Pipelines (KFP)? AI Platform Pipelines
<!-- If you are not sure, here's [an introduction of all options](https://www.kubeflow.org/docs/pipelines/installation/overview/). -->
KFP version: 1.0.4 <!-- If you are not sure, build commit shows on bottom of KFP UI left sidenav. -->
KFP SDK version: 1.3.0 <!-- Please attach the output of this shell command: $pip list | grep kfp -->
### Anything else you would like to add:
[Miscellaneous information that will assist in solving the issue.]
/kind bug
<!-- Please include labels by uncommenting them to help us better triage issues, choose from the following -->
<!--
// /area frontend
// /area backend
// /area sdk
// /area testing
// /area engprod
-->
</issue>
<code>
[start of components/gcp/automl/create_dataset_for_tables/component.py]
1 # Copyright 2019 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from typing import NamedTuple
16
17
18 def automl_create_dataset_for_tables(
19 gcp_project_id: str,
20 gcp_region: str,
21 display_name: str,
22 description: str = None,
23 tables_dataset_metadata: dict = {},
24 retry=None, #=google.api_core.gapic_v1.method.DEFAULT,
25 timeout: float = None, #=google.api_core.gapic_v1.method.DEFAULT,
26 metadata: dict = None,
27 ) -> NamedTuple('Outputs', [('dataset_path', str), ('create_time', str), ('dataset_id', str), ('dataset_url', 'URI')]):
28 '''automl_create_dataset_for_tables creates an empty Dataset for AutoML tables
29 '''
30 import google
31 from google.cloud import automl
32 client = automl.AutoMlClient()
33
34 location_path = client.location_path(gcp_project_id, gcp_region)
35 dataset_dict = {
36 'display_name': display_name,
37 'description': description,
38 'tables_dataset_metadata': tables_dataset_metadata,
39 }
40 dataset = client.create_dataset(
41 location_path,
42 dataset_dict,
43 retry or google.api_core.gapic_v1.method.DEFAULT,
44 timeout or google.api_core.gapic_v1.method.DEFAULT,
45 metadata,
46 )
47 print(dataset)
48 dataset_id = dataset.name.rsplit('/', 1)[-1]
49 dataset_url = 'https://console.cloud.google.com/automl-tables/locations/{region}/datasets/{dataset_id}/schemav2?project={project_id}'.format(
50 project_id=gcp_project_id,
51 region=gcp_region,
52 dataset_id=dataset_id,
53 )
54 return (dataset.name, dataset.create_time, dataset_id, dataset_url)
55
56
57 if __name__ == '__main__':
58 import kfp
59 kfp.components.func_to_container_op(
60 automl_create_dataset_for_tables,
61 output_component_file='component.yaml',
62 base_image='python:3.7',
63 packages_to_install=['google-cloud-automl==0.4.0']
64 )
65
[end of components/gcp/automl/create_dataset_for_tables/component.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/components/gcp/automl/create_dataset_for_tables/component.py b/components/gcp/automl/create_dataset_for_tables/component.py
--- a/components/gcp/automl/create_dataset_for_tables/component.py
+++ b/components/gcp/automl/create_dataset_for_tables/component.py
@@ -51,7 +51,7 @@
region=gcp_region,
dataset_id=dataset_id,
)
- return (dataset.name, dataset.create_time, dataset_id, dataset_url)
+ return (dataset.name, str(dataset.create_time), dataset_id, dataset_url)
if __name__ == '__main__':
| {"golden_diff": "diff --git a/components/gcp/automl/create_dataset_for_tables/component.py b/components/gcp/automl/create_dataset_for_tables/component.py\n--- a/components/gcp/automl/create_dataset_for_tables/component.py\n+++ b/components/gcp/automl/create_dataset_for_tables/component.py\n@@ -51,7 +51,7 @@\n region=gcp_region,\n dataset_id=dataset_id,\n )\n- return (dataset.name, dataset.create_time, dataset_id, dataset_url)\n+ return (dataset.name, str(dataset.create_time), dataset_id, dataset_url)\n \n \n if __name__ == '__main__':\n", "issue": "TypeErro occurs in gcp/automl/create_dataset_for_tables component\n### What steps did you take:\r\n[A clear and concise description of what the bug is.]\r\n\r\n[gcp/automl/create_dataset_for_tables component](https://github.com/kubeflow/pipelines/tree/master/components/gcp/automl/create_dataset_for_tables)'s `create_time` output is declared as a string:\r\n\r\nhttps://github.com/kubeflow/pipelines/blob/ecb14f40bb819c0678589b6458892ece5369fa71/components/gcp/automl/create_dataset_for_tables/component.yaml#L15\r\n\r\nhowever, `google.protobuf.timestamp_pb2.Timestamp` is returned in actual fact:\r\n\r\nhttps://github.com/kubeflow/pipelines/blob/ecb14f40bb819c0678589b6458892ece5369fa71/components/gcp/automl/create_dataset_for_tables/component.py#L54\r\n\r\nFYI: The `dataset` object is an instance of `google.cloud.automl_v1beta1.types.Dataset` class and its [document](https://googleapis.dev/python/automl/0.4.0/gapic/v1beta1/types.html#google.cloud.automl_v1beta1.types.Dataset.create_time) says:\r\n\r\n> **create_time**\r\n> Output only. Timestamp when this dataset was created.\r\n\r\n### What happened:\r\n\r\n`TypeError` occurs\r\n\r\n\r\n\r\n### What did you expect to happen:\r\n\r\nWork.\r\n\r\n### Environment:\r\n<!-- Please fill in those that seem relevant. -->\r\n\r\nHow did you deploy Kubeflow Pipelines (KFP)? AI Platform Pipelines\r\n<!-- If you are not sure, here's [an introduction of all options](https://www.kubeflow.org/docs/pipelines/installation/overview/). -->\r\n\r\nKFP version: 1.0.4 <!-- If you are not sure, build commit shows on bottom of KFP UI left sidenav. 
-->\r\n\r\nKFP SDK version: 1.3.0 <!-- Please attach the output of this shell command: $pip list | grep kfp -->\r\n\r\n\r\n### Anything else you would like to add:\r\n[Miscellaneous information that will assist in solving the issue.]\r\n\r\n/kind bug\r\n<!-- Please include labels by uncommenting them to help us better triage issues, choose from the following -->\r\n<!--\r\n// /area frontend\r\n// /area backend\r\n// /area sdk\r\n// /area testing\r\n// /area engprod\r\n-->\r\n\n", "before_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import NamedTuple\n\n\ndef automl_create_dataset_for_tables(\n gcp_project_id: str,\n gcp_region: str,\n display_name: str,\n description: str = None,\n tables_dataset_metadata: dict = {},\n retry=None, #=google.api_core.gapic_v1.method.DEFAULT,\n timeout: float = None, #=google.api_core.gapic_v1.method.DEFAULT,\n metadata: dict = None,\n) -> NamedTuple('Outputs', [('dataset_path', str), ('create_time', str), ('dataset_id', str), ('dataset_url', 'URI')]):\n '''automl_create_dataset_for_tables creates an empty Dataset for AutoML tables\n '''\n import google\n from google.cloud import automl\n client = automl.AutoMlClient()\n\n location_path = client.location_path(gcp_project_id, gcp_region)\n dataset_dict = {\n 'display_name': display_name,\n 'description': description,\n 'tables_dataset_metadata': tables_dataset_metadata,\n }\n dataset = client.create_dataset(\n location_path,\n dataset_dict,\n retry or google.api_core.gapic_v1.method.DEFAULT,\n timeout or google.api_core.gapic_v1.method.DEFAULT,\n metadata,\n )\n print(dataset)\n dataset_id = dataset.name.rsplit('/', 1)[-1]\n dataset_url = 'https://console.cloud.google.com/automl-tables/locations/{region}/datasets/{dataset_id}/schemav2?project={project_id}'.format(\n project_id=gcp_project_id,\n region=gcp_region,\n dataset_id=dataset_id,\n )\n return (dataset.name, dataset.create_time, dataset_id, dataset_url)\n\n\nif __name__ == '__main__':\n import kfp\n kfp.components.func_to_container_op(\n automl_create_dataset_for_tables,\n output_component_file='component.yaml',\n base_image='python:3.7',\n packages_to_install=['google-cloud-automl==0.4.0']\n )\n", "path": "components/gcp/automl/create_dataset_for_tables/component.py"}]} | 1,826 | 131 |
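The TypeError in this record comes from returning a protobuf `Timestamp` where the component declares a string output. A minimal conversion sketch — `str()` mirrors the one-line fix in the golden diff above, while `ToJsonString()` is mentioned only as the usual protobuf helper if an RFC 3339 string were preferred — is:

```python
from google.protobuf.timestamp_pb2 import Timestamp


def create_time_as_str(create_time: Timestamp) -> str:
    # str() matches the fix in the diff; create_time.ToJsonString() would be
    # an alternative when a strict RFC 3339 timestamp string is wanted.
    return str(create_time)
```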
gh_patches_debug_35912 | rasdani/github-patches | git_diff | cookiecutter__cookiecutter-531 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
output_dir for python API invocation + parametrize output_dir tests
Need this for a cookiecutter project that's tested using [python api](http://cookiecutter.readthedocs.org/en/latest/advanced_usage.html#calling-cookiecutter-functions-from-python).
Also parametrize output_dir tests and add testcase for cookiecutter python object.
</issue>
<code>
[start of cookiecutter/cli.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 """
5 cookiecutter.cli
6 -----------------
7
8 Main `cookiecutter` CLI.
9 """
10
11 import os
12 import sys
13 import logging
14
15 import click
16
17 from cookiecutter import __version__
18 from cookiecutter.main import cookiecutter
19 from cookiecutter.exceptions import (
20 OutputDirExistsException, InvalidModeException
21 )
22
23 logger = logging.getLogger(__name__)
24
25
26 def version_msg():
27 python_version = sys.version[:3]
28 location = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
29 message = u'Cookiecutter %(version)s from {} (Python {})'
30 return message.format(location, python_version)
31
32
33 @click.command()
34 @click.version_option(__version__, u'-V', u'--version', message=version_msg())
35 @click.argument(u'template')
36 @click.option(
37 u'--no-input', is_flag=True,
38 help=u'Do not prompt for parameters and only use cookiecutter.json '
39 u'file content',
40 )
41 @click.option(
42 u'-c', u'--checkout',
43 help=u'branch, tag or commit to checkout after git clone',
44 )
45 @click.option(
46 '-v', '--verbose',
47 is_flag=True, help='Print debug information', default=False
48 )
49 @click.option(
50 u'--replay', is_flag=True,
51 help=u'Do not prompt for parameters and only use information entered '
52 u'previously',
53 )
54 @click.option(
55 u'-f', u'--overwrite-if-exists', is_flag=True,
56 help=u'Overwrite the contents of the output directory if it already exists'
57 )
58 def main(template, no_input, checkout, verbose, replay, overwrite_if_exists):
59 """Create a project from a Cookiecutter project template (TEMPLATE)."""
60 if verbose:
61 logging.basicConfig(
62 format=u'%(levelname)s %(filename)s: %(message)s',
63 level=logging.DEBUG
64 )
65 else:
66 # Log info and above to console
67 logging.basicConfig(
68 format=u'%(levelname)s: %(message)s',
69 level=logging.INFO
70 )
71
72 try:
73 cookiecutter(template, checkout, no_input, replay=replay,
74 overwrite_if_exists=overwrite_if_exists)
75 except (OutputDirExistsException, InvalidModeException) as e:
76 click.echo(e)
77 sys.exit(1)
78
79 if __name__ == "__main__":
80 main()
81
[end of cookiecutter/cli.py]
[start of cookiecutter/main.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 """
5 cookiecutter.main
6 -----------------
7
8 Main entry point for the `cookiecutter` command.
9
10 The code in this module is also a good example of how to use Cookiecutter as a
11 library rather than a script.
12 """
13
14 from __future__ import unicode_literals
15 import logging
16 import os
17 import re
18
19 from .config import get_user_config
20 from .exceptions import InvalidModeException
21 from .prompt import prompt_for_config
22 from .generate import generate_context, generate_files
23 from .vcs import clone
24 from .replay import dump, load
25
26 logger = logging.getLogger(__name__)
27
28 builtin_abbreviations = {
29 'gh': 'https://github.com/{0}.git',
30 'bb': 'https://bitbucket.org/{0}',
31 }
32
33 REPO_REGEX = """
34 (
35 ((git|ssh|https|http):(//)?) # something like git:// ssh:// etc.
36 | # or
37 (\w+@[\w\.]+) # something like user@...
38 )
39 .*
40 """
41
42
43 def is_repo_url(value):
44 """Return True if value is a repository URL."""
45 return bool(re.match(REPO_REGEX, value, re.VERBOSE))
46
47
48 def expand_abbreviations(template, config_dict):
49 """
50 Expand abbreviations in a template name.
51
52 :param template: The project template name.
53 :param config_dict: The user config, which will contain abbreviation
54 definitions.
55 """
56
57 abbreviations = builtin_abbreviations.copy()
58 abbreviations.update(config_dict.get('abbreviations', {}))
59
60 if template in abbreviations:
61 return abbreviations[template]
62
63 # Split on colon. If there is no colon, rest will be empty
64 # and prefix will be the whole template
65 prefix, sep, rest = template.partition(':')
66 if prefix in abbreviations:
67 return abbreviations[prefix].format(rest)
68
69 return template
70
71
72 def cookiecutter(
73 template, checkout=None, no_input=False, extra_context=None,
74 replay=False, overwrite_if_exists=False):
75 """
76 API equivalent to using Cookiecutter at the command line.
77
78 :param template: A directory containing a project template directory,
79 or a URL to a git repository.
80 :param checkout: The branch, tag or commit ID to checkout after clone.
81 :param no_input: Prompt the user at command line for manual configuration?
82 :param extra_context: A dictionary of context that overrides default
83 and user configuration.
84 :param: overwrite_if_exists: Overwrite the contents of output directory
85 if it exists
86 """
87 if replay and ((no_input is not False) or (extra_context is not None)):
88 err_msg = (
89 "You can not use both replay and no_input or extra_context "
90 "at the same time."
91 )
92 raise InvalidModeException(err_msg)
93
94 # Get user config from ~/.cookiecutterrc or equivalent
95 # If no config file, sensible defaults from config.DEFAULT_CONFIG are used
96 config_dict = get_user_config()
97
98 template = expand_abbreviations(template, config_dict)
99
100 if is_repo_url(template):
101 repo_dir = clone(
102 repo_url=template,
103 checkout=checkout,
104 clone_to_dir=config_dict['cookiecutters_dir'],
105 no_input=no_input
106 )
107 else:
108 # If it's a local repo, no need to clone or copy to your
109 # cookiecutters_dir
110 repo_dir = template
111
112 template_name = os.path.basename(template)
113
114 if replay:
115 context = load(template_name)
116 else:
117 context_file = os.path.join(repo_dir, 'cookiecutter.json')
118 logging.debug('context_file is {0}'.format(context_file))
119
120 context = generate_context(
121 context_file=context_file,
122 default_context=config_dict['default_context'],
123 extra_context=extra_context,
124 )
125
126 # prompt the user to manually configure at the command line.
127 # except when 'no-input' flag is set
128 context['cookiecutter'] = prompt_for_config(context, no_input)
129
130 dump(template_name, context)
131
132 # Create project from local context and project template.
133 generate_files(
134 repo_dir=repo_dir,
135 context=context,
136 overwrite_if_exists=overwrite_if_exists
137 )
138
[end of cookiecutter/main.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cookiecutter/cli.py b/cookiecutter/cli.py
--- a/cookiecutter/cli.py
+++ b/cookiecutter/cli.py
@@ -55,7 +55,12 @@
u'-f', u'--overwrite-if-exists', is_flag=True,
help=u'Overwrite the contents of the output directory if it already exists'
)
-def main(template, no_input, checkout, verbose, replay, overwrite_if_exists):
[email protected](
+ u'-o', u'--output-dir', default='.', type=click.Path(),
+ help=u'Where to output the generated project dir into'
+)
+def main(template, no_input, checkout, verbose, replay, overwrite_if_exists,
+ output_dir):
"""Create a project from a Cookiecutter project template (TEMPLATE)."""
if verbose:
logging.basicConfig(
@@ -70,8 +75,12 @@
)
try:
- cookiecutter(template, checkout, no_input, replay=replay,
- overwrite_if_exists=overwrite_if_exists)
+ cookiecutter(
+ template, checkout, no_input,
+ replay=replay,
+ overwrite_if_exists=overwrite_if_exists,
+ output_dir=output_dir
+ )
except (OutputDirExistsException, InvalidModeException) as e:
click.echo(e)
sys.exit(1)
diff --git a/cookiecutter/main.py b/cookiecutter/main.py
--- a/cookiecutter/main.py
+++ b/cookiecutter/main.py
@@ -71,7 +71,7 @@
def cookiecutter(
template, checkout=None, no_input=False, extra_context=None,
- replay=False, overwrite_if_exists=False):
+ replay=False, overwrite_if_exists=False, output_dir='.'):
"""
API equivalent to using Cookiecutter at the command line.
@@ -83,6 +83,7 @@
and user configuration.
:param: overwrite_if_exists: Overwrite the contents of output directory
if it exists
+ :param output_dir: Where to output the generated project dir into.
"""
if replay and ((no_input is not False) or (extra_context is not None)):
err_msg = (
@@ -133,5 +134,6 @@
generate_files(
repo_dir=repo_dir,
context=context,
- overwrite_if_exists=overwrite_if_exists
+ overwrite_if_exists=overwrite_if_exists,
+ output_dir=output_dir
)
| {"golden_diff": "diff --git a/cookiecutter/cli.py b/cookiecutter/cli.py\n--- a/cookiecutter/cli.py\n+++ b/cookiecutter/cli.py\n@@ -55,7 +55,12 @@\n u'-f', u'--overwrite-if-exists', is_flag=True,\n help=u'Overwrite the contents of the output directory if it already exists'\n )\n-def main(template, no_input, checkout, verbose, replay, overwrite_if_exists):\[email protected](\n+ u'-o', u'--output-dir', default='.', type=click.Path(),\n+ help=u'Where to output the generated project dir into'\n+)\n+def main(template, no_input, checkout, verbose, replay, overwrite_if_exists,\n+ output_dir):\n \"\"\"Create a project from a Cookiecutter project template (TEMPLATE).\"\"\"\n if verbose:\n logging.basicConfig(\n@@ -70,8 +75,12 @@\n )\n \n try:\n- cookiecutter(template, checkout, no_input, replay=replay,\n- overwrite_if_exists=overwrite_if_exists)\n+ cookiecutter(\n+ template, checkout, no_input,\n+ replay=replay,\n+ overwrite_if_exists=overwrite_if_exists,\n+ output_dir=output_dir\n+ )\n except (OutputDirExistsException, InvalidModeException) as e:\n click.echo(e)\n sys.exit(1)\ndiff --git a/cookiecutter/main.py b/cookiecutter/main.py\n--- a/cookiecutter/main.py\n+++ b/cookiecutter/main.py\n@@ -71,7 +71,7 @@\n \n def cookiecutter(\n template, checkout=None, no_input=False, extra_context=None,\n- replay=False, overwrite_if_exists=False):\n+ replay=False, overwrite_if_exists=False, output_dir='.'):\n \"\"\"\n API equivalent to using Cookiecutter at the command line.\n \n@@ -83,6 +83,7 @@\n and user configuration.\n :param: overwrite_if_exists: Overwrite the contents of output directory\n if it exists\n+ :param output_dir: Where to output the generated project dir into.\n \"\"\"\n if replay and ((no_input is not False) or (extra_context is not None)):\n err_msg = (\n@@ -133,5 +134,6 @@\n generate_files(\n repo_dir=repo_dir,\n context=context,\n- overwrite_if_exists=overwrite_if_exists\n+ overwrite_if_exists=overwrite_if_exists,\n+ output_dir=output_dir\n )\n", "issue": "output_dir for python API invocation + parametrize output_dir tests\nNeed this for a cookiecutter project that's tested using [python api](http://cookiecutter.readthedocs.org/en/latest/advanced_usage.html#calling-cookiecutter-functions-from-python).\n\nAlso parametrize output_dir tests and add testcase for cookiecutter python object.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.cli\n-----------------\n\nMain `cookiecutter` CLI.\n\"\"\"\n\nimport os\nimport sys\nimport logging\n\nimport click\n\nfrom cookiecutter import __version__\nfrom cookiecutter.main import cookiecutter\nfrom cookiecutter.exceptions import (\n OutputDirExistsException, InvalidModeException\n)\n\nlogger = logging.getLogger(__name__)\n\n\ndef version_msg():\n python_version = sys.version[:3]\n location = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n message = u'Cookiecutter %(version)s from {} (Python {})'\n return message.format(location, python_version)\n\n\[email protected]()\[email protected]_option(__version__, u'-V', u'--version', message=version_msg())\[email protected](u'template')\[email protected](\n u'--no-input', is_flag=True,\n help=u'Do not prompt for parameters and only use cookiecutter.json '\n u'file content',\n)\[email protected](\n u'-c', u'--checkout',\n help=u'branch, tag or commit to checkout after git clone',\n)\[email protected](\n '-v', '--verbose',\n is_flag=True, help='Print debug information', default=False\n)\[email protected](\n u'--replay', is_flag=True,\n 
help=u'Do not prompt for parameters and only use information entered '\n u'previously',\n)\[email protected](\n u'-f', u'--overwrite-if-exists', is_flag=True,\n help=u'Overwrite the contents of the output directory if it already exists'\n)\ndef main(template, no_input, checkout, verbose, replay, overwrite_if_exists):\n \"\"\"Create a project from a Cookiecutter project template (TEMPLATE).\"\"\"\n if verbose:\n logging.basicConfig(\n format=u'%(levelname)s %(filename)s: %(message)s',\n level=logging.DEBUG\n )\n else:\n # Log info and above to console\n logging.basicConfig(\n format=u'%(levelname)s: %(message)s',\n level=logging.INFO\n )\n\n try:\n cookiecutter(template, checkout, no_input, replay=replay,\n overwrite_if_exists=overwrite_if_exists)\n except (OutputDirExistsException, InvalidModeException) as e:\n click.echo(e)\n sys.exit(1)\n\nif __name__ == \"__main__\":\n main()\n", "path": "cookiecutter/cli.py"}, {"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.main\n-----------------\n\nMain entry point for the `cookiecutter` command.\n\nThe code in this module is also a good example of how to use Cookiecutter as a\nlibrary rather than a script.\n\"\"\"\n\nfrom __future__ import unicode_literals\nimport logging\nimport os\nimport re\n\nfrom .config import get_user_config\nfrom .exceptions import InvalidModeException\nfrom .prompt import prompt_for_config\nfrom .generate import generate_context, generate_files\nfrom .vcs import clone\nfrom .replay import dump, load\n\nlogger = logging.getLogger(__name__)\n\nbuiltin_abbreviations = {\n 'gh': 'https://github.com/{0}.git',\n 'bb': 'https://bitbucket.org/{0}',\n}\n\nREPO_REGEX = \"\"\"\n(\n((git|ssh|https|http):(//)?) # something like git:// ssh:// etc.\n | # or\n (\\w+@[\\w\\.]+) # something like user@...\n)\n.*\n\"\"\"\n\n\ndef is_repo_url(value):\n \"\"\"Return True if value is a repository URL.\"\"\"\n return bool(re.match(REPO_REGEX, value, re.VERBOSE))\n\n\ndef expand_abbreviations(template, config_dict):\n \"\"\"\n Expand abbreviations in a template name.\n\n :param template: The project template name.\n :param config_dict: The user config, which will contain abbreviation\n definitions.\n \"\"\"\n\n abbreviations = builtin_abbreviations.copy()\n abbreviations.update(config_dict.get('abbreviations', {}))\n\n if template in abbreviations:\n return abbreviations[template]\n\n # Split on colon. 
If there is no colon, rest will be empty\n # and prefix will be the whole template\n prefix, sep, rest = template.partition(':')\n if prefix in abbreviations:\n return abbreviations[prefix].format(rest)\n\n return template\n\n\ndef cookiecutter(\n template, checkout=None, no_input=False, extra_context=None,\n replay=False, overwrite_if_exists=False):\n \"\"\"\n API equivalent to using Cookiecutter at the command line.\n\n :param template: A directory containing a project template directory,\n or a URL to a git repository.\n :param checkout: The branch, tag or commit ID to checkout after clone.\n :param no_input: Prompt the user at command line for manual configuration?\n :param extra_context: A dictionary of context that overrides default\n and user configuration.\n :param: overwrite_if_exists: Overwrite the contents of output directory\n if it exists\n \"\"\"\n if replay and ((no_input is not False) or (extra_context is not None)):\n err_msg = (\n \"You can not use both replay and no_input or extra_context \"\n \"at the same time.\"\n )\n raise InvalidModeException(err_msg)\n\n # Get user config from ~/.cookiecutterrc or equivalent\n # If no config file, sensible defaults from config.DEFAULT_CONFIG are used\n config_dict = get_user_config()\n\n template = expand_abbreviations(template, config_dict)\n\n if is_repo_url(template):\n repo_dir = clone(\n repo_url=template,\n checkout=checkout,\n clone_to_dir=config_dict['cookiecutters_dir'],\n no_input=no_input\n )\n else:\n # If it's a local repo, no need to clone or copy to your\n # cookiecutters_dir\n repo_dir = template\n\n template_name = os.path.basename(template)\n\n if replay:\n context = load(template_name)\n else:\n context_file = os.path.join(repo_dir, 'cookiecutter.json')\n logging.debug('context_file is {0}'.format(context_file))\n\n context = generate_context(\n context_file=context_file,\n default_context=config_dict['default_context'],\n extra_context=extra_context,\n )\n\n # prompt the user to manually configure at the command line.\n # except when 'no-input' flag is set\n context['cookiecutter'] = prompt_for_config(context, no_input)\n\n dump(template_name, context)\n\n # Create project from local context and project template.\n generate_files(\n repo_dir=repo_dir,\n context=context,\n overwrite_if_exists=overwrite_if_exists\n )\n", "path": "cookiecutter/main.py"}]} | 2,522 | 547 |
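With the diff above applied, the Python API gains the same `output_dir` knob as the new `-o/--output-dir` CLI flag. A usage sketch — the template abbreviation and target path here are illustrative choices, not taken from the record — might look like:

```python
from cookiecutter.main import cookiecutter

# Generate a project into a chosen directory instead of the current one.
cookiecutter(
    "gh:audreyr/cookiecutter-pypackage",  # illustrative template using the gh: abbreviation
    no_input=True,
    overwrite_if_exists=True,
    output_dir="build/generated",         # hypothetical output location
)
```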
gh_patches_debug_25391 | rasdani/github-patches | git_diff | jdb78__pytorch-forecasting-300 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Default trainer kwargs in temporal_fusion_transformer/tuning.py cannot be overwritten
```python
[I 2021-01-27 09:42:03,226] A new study created in memory with name: no-name-8f54548f-a847-4450-9a1c-93acfdc5609e
[W 2021-01-27 09:42:03,229] Trial 0 failed because of the following error: TypeError("ABCMeta object got multiple values for keyword argument 'progress_bar_refresh_rate'",)
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/optuna/_optimize.py", line 198, in _run_trial
value_or_values = func(trial)
File "/usr/local/lib/python3.6/dist-packages/pytorch_forecasting/models/temporal_fusion_transformer/tuning.py", line 145, in objective
**trainer_kwargs,
TypeError: ABCMeta object got multiple values for keyword argument 'progress_bar_refresh_rate'
```
</issue>
<code>
[start of pytorch_forecasting/models/temporal_fusion_transformer/tuning.py]
1 """
2 Hyperparameters can be efficiently tuned with `optuna <https://optuna.readthedocs.io/>`_.
3 """
4 import copy
5 import logging
6 import os
7 from typing import Any, Dict, Tuple, Union
8
9 import numpy as np
10 import optuna
11 from optuna.integration import PyTorchLightningPruningCallback, TensorBoardCallback
12 import optuna.logging
13 import pytorch_lightning as pl
14 from pytorch_lightning import Callback
15 from pytorch_lightning.callbacks import LearningRateMonitor
16 from pytorch_lightning.loggers import TensorBoardLogger
17 import statsmodels.api as sm
18 import torch
19 from torch.utils.data import DataLoader
20
21 from pytorch_forecasting import TemporalFusionTransformer
22 from pytorch_forecasting.data import TimeSeriesDataSet
23 from pytorch_forecasting.metrics import QuantileLoss
24
25 optuna_logger = logging.getLogger("optuna")
26
27
28 class MetricsCallback(Callback):
29 """PyTorch Lightning metric callback."""
30
31 def __init__(self):
32 super().__init__()
33 self.metrics = []
34
35 def on_validation_end(self, trainer, pl_module):
36 self.metrics.append(trainer.callback_metrics)
37
38
39 def optimize_hyperparameters(
40 train_dataloader: DataLoader,
41 val_dataloader: DataLoader,
42 model_path: str,
43 max_epochs: int = 20,
44 n_trials: int = 100,
45 timeout: float = 3600 * 8.0, # 8 hours
46 gradient_clip_val_range: Tuple[float, float] = (0.01, 100.0),
47 hidden_size_range: Tuple[int, int] = (16, 265),
48 hidden_continuous_size_range: Tuple[int, int] = (8, 64),
49 attention_head_size_range: Tuple[int, int] = (1, 4),
50 dropout_range: Tuple[float, float] = (0.1, 0.3),
51 learning_rate_range: Tuple[float, float] = (1e-5, 1.0),
52 use_learning_rate_finder: bool = True,
53 trainer_kwargs: Dict[str, Any] = {},
54 log_dir: str = "lightning_logs",
55 study: optuna.Study = None,
56 verbose: Union[int, bool] = None,
57 **kwargs,
58 ) -> optuna.Study:
59 """
60 Optimize Temporal Fusion Transformer hyperparameters.
61
62 Run hyperparameter optimization. Learning rate for is determined with
63 the PyTorch Lightning learning rate finder.
64
65 Args:
66 train_dataloader (DataLoader): dataloader for training model
67 val_dataloader (DataLoader): dataloader for validating model
68 model_path (str): folder to which model checkpoints are saved
69 max_epochs (int, optional): Maximum number of epochs to run training. Defaults to 20.
70 n_trials (int, optional): Number of hyperparameter trials to run. Defaults to 100.
71 timeout (float, optional): Time in seconds after which training is stopped regardless of number of epochs
72 or validation metric. Defaults to 3600*8.0.
73 hidden_size_range (Tuple[int, int], optional): Minimum and maximum of ``hidden_size`` hyperparameter. Defaults
74 to (16, 265).
75 hidden_continuous_size_range (Tuple[int, int], optional): Minimum and maximum of ``hidden_continuous_size``
76 hyperparameter. Defaults to (8, 64).
77 attention_head_size_range (Tuple[int, int], optional): Minimum and maximum of ``attention_head_size``
78 hyperparameter. Defaults to (1, 4).
79 dropout_range (Tuple[float, float], optional): Minimum and maximum of ``dropout`` hyperparameter. Defaults to
80 (0.1, 0.3).
81 learning_rate_range (Tuple[float, float], optional): Learning rate range. Defaults to (1e-5, 1.0).
82 use_learning_rate_finder (bool): If to use learning rate finder or optimize as part of hyperparameters.
83 Defaults to True.
84 trainer_kwargs (Dict[str, Any], optional): Additional arguments to the
85 `PyTorch Lightning trainer <https://pytorch-lightning.readthedocs.io/en/latest/trainer.html>`_ such
86 as ``limit_train_batches``. Defaults to {}.
87 log_dir (str, optional): Folder into which to log results for tensorboard. Defaults to "lightning_logs".
88 study (optuna.Study, optional): study to resume. Will create new study by default.
89 verbose (Union[int, bool]): level of verbosity.
90 * None: no change in verbosity level (equivalent to verbose=1 by optuna-set default).
91 * 0 or False: log only warnings.
92 * 1 or True: log pruning events.
93 * 2: optuna logging level at debug level.
94 Defaults to None.
95
96 **kwargs: Additional arguments for the :py:class:`~TemporalFusionTransformer`.
97
98 Returns:
99 optuna.Study: optuna study results
100 """
101 assert isinstance(train_dataloader.dataset, TimeSeriesDataSet) and isinstance(
102 val_dataloader.dataset, TimeSeriesDataSet
103 ), "dataloaders must be built from timeseriesdataset"
104
105 logging_level = {
106 None: optuna.logging.get_verbosity(),
107 0: optuna.logging.WARNING,
108 1: optuna.logging.INFO,
109 2: optuna.logging.DEBUG,
110 }
111 optuna_verbose = logging_level[verbose]
112 optuna.logging.set_verbosity(optuna_verbose)
113
114 loss = kwargs.get(
115 "loss", QuantileLoss()
116 ) # need a deepcopy of loss as it will otherwise propagate from one trial to the next
117
118 # create objective function
119 def objective(trial: optuna.Trial) -> float:
120 # Filenames for each trial must be made unique in order to access each checkpoint.
121 checkpoint_callback = pl.callbacks.ModelCheckpoint(
122 dirpath=os.path.join(model_path, "trial_{}".format(trial.number)), filename="{epoch}", monitor="val_loss"
123 )
124
125 # The default logger in PyTorch Lightning writes to event files to be consumed by
126 # TensorBoard. We don't use any logger here as it requires us to implement several abstract
127 # methods. Instead we setup a simple callback, that saves metrics from each validation step.
128 metrics_callback = MetricsCallback()
129 learning_rate_callback = LearningRateMonitor()
130 logger = TensorBoardLogger(log_dir, name="optuna", version=trial.number)
131 gradient_clip_val = trial.suggest_loguniform("gradient_clip_val", *gradient_clip_val_range)
132 trainer_kwargs.setdefault("gpus", [0] if torch.cuda.is_available() else None)
133 trainer = pl.Trainer(
134 max_epochs=max_epochs,
135 gradient_clip_val=gradient_clip_val,
136 callbacks=[
137 metrics_callback,
138 learning_rate_callback,
139 checkpoint_callback,
140 PyTorchLightningPruningCallback(trial, monitor="val_loss"),
141 ],
142 logger=logger,
143 progress_bar_refresh_rate=[0, 1][optuna_verbose < optuna.logging.INFO],
144 weights_summary=[None, "top"][optuna_verbose < optuna.logging.INFO],
145 **trainer_kwargs,
146 )
147
148 # create model
149 hidden_size = trial.suggest_int("hidden_size", *hidden_size_range, log=True)
150 kwargs["loss"] = copy.deepcopy(loss)
151 model = TemporalFusionTransformer.from_dataset(
152 train_dataloader.dataset,
153 dropout=trial.suggest_uniform("dropout", *dropout_range),
154 hidden_size=hidden_size,
155 hidden_continuous_size=trial.suggest_int(
156 "hidden_continuous_size",
157 hidden_continuous_size_range[0],
158 min(hidden_continuous_size_range[1], hidden_size),
159 log=True,
160 ),
161 attention_head_size=trial.suggest_int("attention_head_size", *attention_head_size_range),
162 log_interval=-1,
163 **kwargs,
164 )
165 # find good learning rate
166 if use_learning_rate_finder:
167 lr_trainer = pl.Trainer(
168 gradient_clip_val=gradient_clip_val,
169 gpus=[0] if torch.cuda.is_available() else None,
170 logger=False,
171 progress_bar_refresh_rate=0,
172 weights_summary=None,
173 )
174 res = lr_trainer.tuner.lr_find(
175 model,
176 train_dataloader=train_dataloader,
177 val_dataloaders=val_dataloader,
178 early_stop_threshold=10000,
179 min_lr=learning_rate_range[0],
180 num_training=100,
181 max_lr=learning_rate_range[1],
182 )
183
184 loss_finite = np.isfinite(res.results["loss"])
185 if loss_finite.sum() > 3: # at least 3 valid values required for learning rate finder
186 lr_smoothed, loss_smoothed = sm.nonparametric.lowess(
187 np.asarray(res.results["loss"])[loss_finite],
188 np.asarray(res.results["lr"])[loss_finite],
189 frac=1.0 / 10.0,
190 )[min(loss_finite.sum() - 3, 10) : -1].T
191 optimal_idx = np.gradient(loss_smoothed).argmin()
192 optimal_lr = lr_smoothed[optimal_idx]
193 else:
194 optimal_idx = np.asarray(res.results["loss"]).argmin()
195 optimal_lr = res.results["lr"][optimal_idx]
196 optuna_logger.info(f"Using learning rate of {optimal_lr:.3g}")
197 # add learning rate artificially
198 model.hparams.learning_rate = trial.suggest_uniform("learning_rate", optimal_lr, optimal_lr)
199 else:
200 model.hparams.learning_rate = trial.suggest_loguniform("learning_rate", *learning_rate_range)
201
202 # fit
203 trainer.fit(model, train_dataloader=train_dataloader, val_dataloaders=val_dataloader)
204
205 # report result
206 return metrics_callback.metrics[-1]["val_loss"].item()
207
208 # setup optuna and run
209 pruner = optuna.pruners.SuccessiveHalvingPruner()
210 if study is None:
211 study = optuna.create_study(direction="minimize", pruner=pruner)
212 study.optimize(objective, n_trials=n_trials, timeout=timeout)
213 return study
214
[end of pytorch_forecasting/models/temporal_fusion_transformer/tuning.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pytorch_forecasting/models/temporal_fusion_transformer/tuning.py b/pytorch_forecasting/models/temporal_fusion_transformer/tuning.py
--- a/pytorch_forecasting/models/temporal_fusion_transformer/tuning.py
+++ b/pytorch_forecasting/models/temporal_fusion_transformer/tuning.py
@@ -129,8 +129,8 @@
learning_rate_callback = LearningRateMonitor()
logger = TensorBoardLogger(log_dir, name="optuna", version=trial.number)
gradient_clip_val = trial.suggest_loguniform("gradient_clip_val", *gradient_clip_val_range)
- trainer_kwargs.setdefault("gpus", [0] if torch.cuda.is_available() else None)
- trainer = pl.Trainer(
+ default_trainer_kwargs = dict(
+ gpus=[0] if torch.cuda.is_available() else None,
max_epochs=max_epochs,
gradient_clip_val=gradient_clip_val,
callbacks=[
@@ -142,7 +142,10 @@
logger=logger,
progress_bar_refresh_rate=[0, 1][optuna_verbose < optuna.logging.INFO],
weights_summary=[None, "top"][optuna_verbose < optuna.logging.INFO],
- **trainer_kwargs,
+ )
+ default_trainer_kwargs.update(trainer_kwargs)
+ trainer = pl.Trainer(
+ **default_trainer_kwargs,
)
# create model
| {"golden_diff": "diff --git a/pytorch_forecasting/models/temporal_fusion_transformer/tuning.py b/pytorch_forecasting/models/temporal_fusion_transformer/tuning.py\n--- a/pytorch_forecasting/models/temporal_fusion_transformer/tuning.py\n+++ b/pytorch_forecasting/models/temporal_fusion_transformer/tuning.py\n@@ -129,8 +129,8 @@\n learning_rate_callback = LearningRateMonitor()\n logger = TensorBoardLogger(log_dir, name=\"optuna\", version=trial.number)\n gradient_clip_val = trial.suggest_loguniform(\"gradient_clip_val\", *gradient_clip_val_range)\n- trainer_kwargs.setdefault(\"gpus\", [0] if torch.cuda.is_available() else None)\n- trainer = pl.Trainer(\n+ default_trainer_kwargs = dict(\n+ gpus=[0] if torch.cuda.is_available() else None,\n max_epochs=max_epochs,\n gradient_clip_val=gradient_clip_val,\n callbacks=[\n@@ -142,7 +142,10 @@\n logger=logger,\n progress_bar_refresh_rate=[0, 1][optuna_verbose < optuna.logging.INFO],\n weights_summary=[None, \"top\"][optuna_verbose < optuna.logging.INFO],\n- **trainer_kwargs,\n+ )\n+ default_trainer_kwargs.update(trainer_kwargs)\n+ trainer = pl.Trainer(\n+ **default_trainer_kwargs,\n )\n \n # create model\n", "issue": "Default trainer kwargs in temporal_fusion_transformer/tuning.py cannot be overwritten\n```python\r\n[I 2021-01-27 09:42:03,226] A new study created in memory with name: no-name-8f54548f-a847-4450-9a1c-93acfdc5609e\r\n[W 2021-01-27 09:42:03,229] Trial 0 failed because of the following error: TypeError(\"ABCMeta object got multiple values for keyword argument 'progress_bar_refresh_rate'\",)\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.6/dist-packages/optuna/_optimize.py\", line 198, in _run_trial\r\n value_or_values = func(trial)\r\n File \"/usr/local/lib/python3.6/dist-packages/pytorch_forecasting/models/temporal_fusion_transformer/tuning.py\", line 145, in objective\r\n **trainer_kwargs,\r\nTypeError: ABCMeta object got multiple values for keyword argument 'progress_bar_refresh_rate'\r\n```\n", "before_files": [{"content": "\"\"\"\nHyperparameters can be efficiently tuned with `optuna <https://optuna.readthedocs.io/>`_.\n\"\"\"\nimport copy\nimport logging\nimport os\nfrom typing import Any, Dict, Tuple, Union\n\nimport numpy as np\nimport optuna\nfrom optuna.integration import PyTorchLightningPruningCallback, TensorBoardCallback\nimport optuna.logging\nimport pytorch_lightning as pl\nfrom pytorch_lightning import Callback\nfrom pytorch_lightning.callbacks import LearningRateMonitor\nfrom pytorch_lightning.loggers import TensorBoardLogger\nimport statsmodels.api as sm\nimport torch\nfrom torch.utils.data import DataLoader\n\nfrom pytorch_forecasting import TemporalFusionTransformer\nfrom pytorch_forecasting.data import TimeSeriesDataSet\nfrom pytorch_forecasting.metrics import QuantileLoss\n\noptuna_logger = logging.getLogger(\"optuna\")\n\n\nclass MetricsCallback(Callback):\n \"\"\"PyTorch Lightning metric callback.\"\"\"\n\n def __init__(self):\n super().__init__()\n self.metrics = []\n\n def on_validation_end(self, trainer, pl_module):\n self.metrics.append(trainer.callback_metrics)\n\n\ndef optimize_hyperparameters(\n train_dataloader: DataLoader,\n val_dataloader: DataLoader,\n model_path: str,\n max_epochs: int = 20,\n n_trials: int = 100,\n timeout: float = 3600 * 8.0, # 8 hours\n gradient_clip_val_range: Tuple[float, float] = (0.01, 100.0),\n hidden_size_range: Tuple[int, int] = (16, 265),\n hidden_continuous_size_range: Tuple[int, int] = (8, 64),\n attention_head_size_range: Tuple[int, int] = (1, 
4),\n dropout_range: Tuple[float, float] = (0.1, 0.3),\n learning_rate_range: Tuple[float, float] = (1e-5, 1.0),\n use_learning_rate_finder: bool = True,\n trainer_kwargs: Dict[str, Any] = {},\n log_dir: str = \"lightning_logs\",\n study: optuna.Study = None,\n verbose: Union[int, bool] = None,\n **kwargs,\n) -> optuna.Study:\n \"\"\"\n Optimize Temporal Fusion Transformer hyperparameters.\n\n Run hyperparameter optimization. Learning rate for is determined with\n the PyTorch Lightning learning rate finder.\n\n Args:\n train_dataloader (DataLoader): dataloader for training model\n val_dataloader (DataLoader): dataloader for validating model\n model_path (str): folder to which model checkpoints are saved\n max_epochs (int, optional): Maximum number of epochs to run training. Defaults to 20.\n n_trials (int, optional): Number of hyperparameter trials to run. Defaults to 100.\n timeout (float, optional): Time in seconds after which training is stopped regardless of number of epochs\n or validation metric. Defaults to 3600*8.0.\n hidden_size_range (Tuple[int, int], optional): Minimum and maximum of ``hidden_size`` hyperparameter. Defaults\n to (16, 265).\n hidden_continuous_size_range (Tuple[int, int], optional): Minimum and maximum of ``hidden_continuous_size``\n hyperparameter. Defaults to (8, 64).\n attention_head_size_range (Tuple[int, int], optional): Minimum and maximum of ``attention_head_size``\n hyperparameter. Defaults to (1, 4).\n dropout_range (Tuple[float, float], optional): Minimum and maximum of ``dropout`` hyperparameter. Defaults to\n (0.1, 0.3).\n learning_rate_range (Tuple[float, float], optional): Learning rate range. Defaults to (1e-5, 1.0).\n use_learning_rate_finder (bool): If to use learning rate finder or optimize as part of hyperparameters.\n Defaults to True.\n trainer_kwargs (Dict[str, Any], optional): Additional arguments to the\n `PyTorch Lightning trainer <https://pytorch-lightning.readthedocs.io/en/latest/trainer.html>`_ such\n as ``limit_train_batches``. Defaults to {}.\n log_dir (str, optional): Folder into which to log results for tensorboard. Defaults to \"lightning_logs\".\n study (optuna.Study, optional): study to resume. 
Will create new study by default.\n verbose (Union[int, bool]): level of verbosity.\n * None: no change in verbosity level (equivalent to verbose=1 by optuna-set default).\n * 0 or False: log only warnings.\n * 1 or True: log pruning events.\n * 2: optuna logging level at debug level.\n Defaults to None.\n\n **kwargs: Additional arguments for the :py:class:`~TemporalFusionTransformer`.\n\n Returns:\n optuna.Study: optuna study results\n \"\"\"\n assert isinstance(train_dataloader.dataset, TimeSeriesDataSet) and isinstance(\n val_dataloader.dataset, TimeSeriesDataSet\n ), \"dataloaders must be built from timeseriesdataset\"\n\n logging_level = {\n None: optuna.logging.get_verbosity(),\n 0: optuna.logging.WARNING,\n 1: optuna.logging.INFO,\n 2: optuna.logging.DEBUG,\n }\n optuna_verbose = logging_level[verbose]\n optuna.logging.set_verbosity(optuna_verbose)\n\n loss = kwargs.get(\n \"loss\", QuantileLoss()\n ) # need a deepcopy of loss as it will otherwise propagate from one trial to the next\n\n # create objective function\n def objective(trial: optuna.Trial) -> float:\n # Filenames for each trial must be made unique in order to access each checkpoint.\n checkpoint_callback = pl.callbacks.ModelCheckpoint(\n dirpath=os.path.join(model_path, \"trial_{}\".format(trial.number)), filename=\"{epoch}\", monitor=\"val_loss\"\n )\n\n # The default logger in PyTorch Lightning writes to event files to be consumed by\n # TensorBoard. We don't use any logger here as it requires us to implement several abstract\n # methods. Instead we setup a simple callback, that saves metrics from each validation step.\n metrics_callback = MetricsCallback()\n learning_rate_callback = LearningRateMonitor()\n logger = TensorBoardLogger(log_dir, name=\"optuna\", version=trial.number)\n gradient_clip_val = trial.suggest_loguniform(\"gradient_clip_val\", *gradient_clip_val_range)\n trainer_kwargs.setdefault(\"gpus\", [0] if torch.cuda.is_available() else None)\n trainer = pl.Trainer(\n max_epochs=max_epochs,\n gradient_clip_val=gradient_clip_val,\n callbacks=[\n metrics_callback,\n learning_rate_callback,\n checkpoint_callback,\n PyTorchLightningPruningCallback(trial, monitor=\"val_loss\"),\n ],\n logger=logger,\n progress_bar_refresh_rate=[0, 1][optuna_verbose < optuna.logging.INFO],\n weights_summary=[None, \"top\"][optuna_verbose < optuna.logging.INFO],\n **trainer_kwargs,\n )\n\n # create model\n hidden_size = trial.suggest_int(\"hidden_size\", *hidden_size_range, log=True)\n kwargs[\"loss\"] = copy.deepcopy(loss)\n model = TemporalFusionTransformer.from_dataset(\n train_dataloader.dataset,\n dropout=trial.suggest_uniform(\"dropout\", *dropout_range),\n hidden_size=hidden_size,\n hidden_continuous_size=trial.suggest_int(\n \"hidden_continuous_size\",\n hidden_continuous_size_range[0],\n min(hidden_continuous_size_range[1], hidden_size),\n log=True,\n ),\n attention_head_size=trial.suggest_int(\"attention_head_size\", *attention_head_size_range),\n log_interval=-1,\n **kwargs,\n )\n # find good learning rate\n if use_learning_rate_finder:\n lr_trainer = pl.Trainer(\n gradient_clip_val=gradient_clip_val,\n gpus=[0] if torch.cuda.is_available() else None,\n logger=False,\n progress_bar_refresh_rate=0,\n weights_summary=None,\n )\n res = lr_trainer.tuner.lr_find(\n model,\n train_dataloader=train_dataloader,\n val_dataloaders=val_dataloader,\n early_stop_threshold=10000,\n min_lr=learning_rate_range[0],\n num_training=100,\n max_lr=learning_rate_range[1],\n )\n\n loss_finite = np.isfinite(res.results[\"loss\"])\n if 
loss_finite.sum() > 3: # at least 3 valid values required for learning rate finder\n lr_smoothed, loss_smoothed = sm.nonparametric.lowess(\n np.asarray(res.results[\"loss\"])[loss_finite],\n np.asarray(res.results[\"lr\"])[loss_finite],\n frac=1.0 / 10.0,\n )[min(loss_finite.sum() - 3, 10) : -1].T\n optimal_idx = np.gradient(loss_smoothed).argmin()\n optimal_lr = lr_smoothed[optimal_idx]\n else:\n optimal_idx = np.asarray(res.results[\"loss\"]).argmin()\n optimal_lr = res.results[\"lr\"][optimal_idx]\n optuna_logger.info(f\"Using learning rate of {optimal_lr:.3g}\")\n # add learning rate artificially\n model.hparams.learning_rate = trial.suggest_uniform(\"learning_rate\", optimal_lr, optimal_lr)\n else:\n model.hparams.learning_rate = trial.suggest_loguniform(\"learning_rate\", *learning_rate_range)\n\n # fit\n trainer.fit(model, train_dataloader=train_dataloader, val_dataloaders=val_dataloader)\n\n # report result\n return metrics_callback.metrics[-1][\"val_loss\"].item()\n\n # setup optuna and run\n pruner = optuna.pruners.SuccessiveHalvingPruner()\n if study is None:\n study = optuna.create_study(direction=\"minimize\", pruner=pruner)\n study.optimize(objective, n_trials=n_trials, timeout=timeout)\n return study\n", "path": "pytorch_forecasting/models/temporal_fusion_transformer/tuning.py"}]} | 3,511 | 305 |
gh_patches_debug_13674 | rasdani/github-patches | git_diff | DataDog__dd-trace-py-2717 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
UnicodeDecodeError for invalid headers in ASGI middleware
When the ASGI middleware receives headers with invalid encoding, for example as the result of somebody fuzzing an endpoint as is the case here, it introduces a Unicode decode error into the HTTP pipeline. It shouldn't be doing this.
### Which version of dd-trace-py are you using?
0.46.0
### Which version of the libraries are you using?
What I believe are the potentially relevant libraries are listed below.
```
datadog==0.40.1
...
ddtrace==0.46.0
...
fastapi==0.63.0
...
httpcore==0.12.3
httptools==0.1.1
...
starlette==0.13.6
...
uvicorn==0.13.4
uvloop==0.15.2
```
### How can we reproduce your problem?
```python
from ddtrace.contrib.asgi.middleware import _extract_headers
def test_ddtrace_invalid_header_name_encoding():
assert _extract_headers({"headers": [((bytes.fromhex("c0")), "test")]}) == {}
def test_ddtrace_invalid_header_value_encoding():
assert _extract_headers({"headers": [("test", (bytes.fromhex("c0")))]}) == {}
```
### What is the result that you get?
The middleware blows up with a `UnicodeDecodeError: 'utf-8' codec can't decode byte 0xc0 in position 0: invalid start byte` exception.
### What is the result that you expected?
This is trace middleware, so it should not introduce exceptions into request processing. For a header it can't decode, I'd expect it to either:
1. Omit the invalid header from the result and continue attempting to decode the remainder, or
2. Substitute a sentinel value for the unprocessable header to indicate an error
I've suggested (1) in the repro above, as that seems like it'd be the least likely to cause any problems.
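For what it's worth, a minimal sketch of option (1) — an illustration only, not necessarily how the library should implement it — would decode header pairs one at a time and drop any pair that raises. `bytes_to_str` is the helper already defined in `ddtrace/contrib/asgi/middleware.py`; the function name below is made up for the sketch.

```python
def _extract_headers_tolerant(scope):
    """Best-effort variant: headers that cannot be decoded are skipped."""
    headers = {}
    for key, value in scope.get("headers") or ():
        try:
            headers[bytes_to_str(key)] = bytes_to_str(value)
        except UnicodeDecodeError:
            # Invalid bytes in the name or value: drop this header rather
            # than failing the whole request.
            continue
    return headers
```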
</issue>
<code>
[start of ddtrace/contrib/asgi/middleware.py]
1 import sys
2
3 import ddtrace
4 from ddtrace import config
5 from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY
6 from ddtrace.ext import SpanTypes
7 from ddtrace.ext import http
8
9 from .. import trace_utils
10 from ...internal.compat import reraise
11 from ...internal.logger import get_logger
12 from .utils import guarantee_single_callable
13
14
15 log = get_logger(__name__)
16
17 config._add(
18 "asgi",
19 dict(service_name=config._get_service(default="asgi"), request_span_name="asgi.request", distributed_tracing=True),
20 )
21
22 ASGI_VERSION = "asgi.version"
23 ASGI_SPEC_VERSION = "asgi.spec_version"
24
25
26 def bytes_to_str(str_or_bytes):
27 return str_or_bytes.decode() if isinstance(str_or_bytes, bytes) else str_or_bytes
28
29
30 def _extract_versions_from_scope(scope, integration_config):
31 tags = {}
32
33 http_version = scope.get("http_version")
34 if http_version:
35 tags[http.VERSION] = http_version
36
37 scope_asgi = scope.get("asgi")
38
39 if scope_asgi and "version" in scope_asgi:
40 tags[ASGI_VERSION] = scope_asgi["version"]
41
42 if scope_asgi and "spec_version" in scope_asgi:
43 tags[ASGI_SPEC_VERSION] = scope_asgi["spec_version"]
44
45 return tags
46
47
48 def _extract_headers(scope):
49 headers = scope.get("headers")
50 if headers:
51 # headers: (Iterable[[byte string, byte string]])
52 return dict((bytes_to_str(k), bytes_to_str(v)) for (k, v) in headers)
53 return {}
54
55
56 def _default_handle_exception_span(exc, span):
57 """Default handler for exception for span"""
58 span.set_tag(http.STATUS_CODE, 500)
59
60
61 class TraceMiddleware:
62 """
63 ASGI application middleware that traces the requests.
64
65 Args:
66 app: The ASGI application.
67 tracer: Custom tracer. Defaults to the global tracer.
68 """
69
70 def __init__(
71 self,
72 app,
73 tracer=None,
74 integration_config=config.asgi,
75 handle_exception_span=_default_handle_exception_span,
76 span_modifier=None,
77 ):
78 self.app = guarantee_single_callable(app)
79 self.tracer = tracer or ddtrace.tracer
80 self.integration_config = integration_config
81 self.handle_exception_span = handle_exception_span
82 self.span_modifier = span_modifier
83
84 async def __call__(self, scope, receive, send):
85 if scope["type"] != "http":
86 return await self.app(scope, receive, send)
87
88 headers = _extract_headers(scope)
89
90 trace_utils.activate_distributed_headers(
91 self.tracer, int_config=self.integration_config, request_headers=headers
92 )
93
94 resource = "{} {}".format(scope["method"], scope["path"])
95
96 span = self.tracer.trace(
97 name=self.integration_config.get("request_span_name", "asgi.request"),
98 service=trace_utils.int_service(None, self.integration_config),
99 resource=resource,
100 span_type=SpanTypes.WEB,
101 )
102
103 if self.span_modifier:
104 self.span_modifier(span, scope)
105
106 sample_rate = self.integration_config.get_analytics_sample_rate(use_global_config=True)
107 if sample_rate is not None:
108 span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, sample_rate)
109
110 method = scope.get("method")
111 server = scope.get("server")
112 if server and len(server) == 2:
113 port = server[1]
114 server_host = server[0] + (":" + str(port) if port is not None and port != 80 else "")
115 full_path = scope.get("root_path", "") + scope.get("path", "")
116 url = scope.get("scheme", "http") + "://" + server_host + full_path
117 else:
118 url = None
119
120 if self.integration_config.trace_query_string:
121 query_string = scope.get("query_string")
122 if len(query_string) > 0:
123 query_string = bytes_to_str(query_string)
124 else:
125 query_string = None
126
127 trace_utils.set_http_meta(
128 span, self.integration_config, method=method, url=url, query=query_string, request_headers=headers
129 )
130
131 tags = _extract_versions_from_scope(scope, self.integration_config)
132 span.set_tags(tags)
133
134 async def wrapped_send(message):
135 if span and message.get("type") == "http.response.start" and "status" in message:
136 status_code = message["status"]
137 else:
138 status_code = None
139
140 if "headers" in message:
141 response_headers = message["headers"]
142 else:
143 response_headers = None
144
145 trace_utils.set_http_meta(
146 span, self.integration_config, status_code=status_code, response_headers=response_headers
147 )
148
149 return await send(message)
150
151 try:
152 return await self.app(scope, receive, wrapped_send)
153 except Exception as exc:
154 (exc_type, exc_val, exc_tb) = sys.exc_info()
155 span.set_exc_info(exc_type, exc_val, exc_tb)
156 self.handle_exception_span(exc, span)
157 reraise(exc_type, exc_val, exc_tb)
158 finally:
159 span.finish()
160
[end of ddtrace/contrib/asgi/middleware.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ddtrace/contrib/asgi/middleware.py b/ddtrace/contrib/asgi/middleware.py
--- a/ddtrace/contrib/asgi/middleware.py
+++ b/ddtrace/contrib/asgi/middleware.py
@@ -85,11 +85,15 @@
if scope["type"] != "http":
return await self.app(scope, receive, send)
- headers = _extract_headers(scope)
-
- trace_utils.activate_distributed_headers(
- self.tracer, int_config=self.integration_config, request_headers=headers
- )
+ try:
+ headers = _extract_headers(scope)
+ except Exception:
+ log.warning("failed to decode headers for distributed tracing", exc_info=True)
+ headers = {}
+ else:
+ trace_utils.activate_distributed_headers(
+ self.tracer, int_config=self.integration_config, request_headers=headers
+ )
resource = "{} {}".format(scope["method"], scope["path"])
| {"golden_diff": "diff --git a/ddtrace/contrib/asgi/middleware.py b/ddtrace/contrib/asgi/middleware.py\n--- a/ddtrace/contrib/asgi/middleware.py\n+++ b/ddtrace/contrib/asgi/middleware.py\n@@ -85,11 +85,15 @@\n if scope[\"type\"] != \"http\":\n return await self.app(scope, receive, send)\n \n- headers = _extract_headers(scope)\n-\n- trace_utils.activate_distributed_headers(\n- self.tracer, int_config=self.integration_config, request_headers=headers\n- )\n+ try:\n+ headers = _extract_headers(scope)\n+ except Exception:\n+ log.warning(\"failed to decode headers for distributed tracing\", exc_info=True)\n+ headers = {}\n+ else:\n+ trace_utils.activate_distributed_headers(\n+ self.tracer, int_config=self.integration_config, request_headers=headers\n+ )\n \n resource = \"{} {}\".format(scope[\"method\"], scope[\"path\"])\n", "issue": "UnicodeDecodeError for invalid headers in ASGI middleware\nWhen the ASGI middleware receives headers with invalid encoding, for example as the result of somebody fuzzing an endpoint as is the case here, it introduces a Unicode decode error into the HTTP pipeline. It shouldn't be doing this.\r\n\r\n### Which version of dd-trace-py are you using?\r\n\r\n0.46.0\r\n\r\n### Which version of the libraries are you using?\r\n\r\nWhat I believe are the potentially relevant libraries are listed below.\r\n\r\n```\r\ndatadog==0.40.1\r\n...\r\nddtrace==0.46.0\r\n...\r\nfastapi==0.63.0\r\n...\r\nhttpcore==0.12.3\r\nhttptools==0.1.1\r\n...\r\nstarlette==0.13.6\r\n...\r\nuvicorn==0.13.4\r\nuvloop==0.15.2\r\n```\r\n\r\n### How can we reproduce your problem?\r\n\r\n```python\r\nfrom ddtrace.contrib.asgi.middleware import _extract_headers\r\n\r\n\r\ndef test_ddtrace_invalid_header_name_encoding():\r\n assert _extract_headers({\"headers\": [((bytes.fromhex(\"c0\")), \"test\")]}) == {}\r\n\r\n\r\ndef test_ddtrace_invalid_header_value_encoding():\r\n assert _extract_headers({\"headers\": [(\"test\", (bytes.fromhex(\"c0\")))]}) == {}\r\n```\r\n\r\n### What is the result that you get?\r\n\r\nThe middleware blows up with a `UnicodeDecodeError: 'utf-8' codec can't decode byte 0xc0 in position 0: invalid start byte` exception.\r\n\r\n### What is the result that you expected?\r\n\r\nThis is trace middleware so it should not introduce exceptions into request processing. In the case of a header it can't decode then I'd expect it to either:\r\n\r\n1. Omit the invalid header from the result and continue attempting to decode the remainder, or\r\n2. Substitute a sentinel value for the unprocessable header to indicate an error\r\n\r\nI've suggested (1) in the repro above, as that seems like it'd be the least likely to cause any problems.\n", "before_files": [{"content": "import sys\n\nimport ddtrace\nfrom ddtrace import config\nfrom ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY\nfrom ddtrace.ext import SpanTypes\nfrom ddtrace.ext import http\n\nfrom .. 
import trace_utils\nfrom ...internal.compat import reraise\nfrom ...internal.logger import get_logger\nfrom .utils import guarantee_single_callable\n\n\nlog = get_logger(__name__)\n\nconfig._add(\n \"asgi\",\n dict(service_name=config._get_service(default=\"asgi\"), request_span_name=\"asgi.request\", distributed_tracing=True),\n)\n\nASGI_VERSION = \"asgi.version\"\nASGI_SPEC_VERSION = \"asgi.spec_version\"\n\n\ndef bytes_to_str(str_or_bytes):\n return str_or_bytes.decode() if isinstance(str_or_bytes, bytes) else str_or_bytes\n\n\ndef _extract_versions_from_scope(scope, integration_config):\n tags = {}\n\n http_version = scope.get(\"http_version\")\n if http_version:\n tags[http.VERSION] = http_version\n\n scope_asgi = scope.get(\"asgi\")\n\n if scope_asgi and \"version\" in scope_asgi:\n tags[ASGI_VERSION] = scope_asgi[\"version\"]\n\n if scope_asgi and \"spec_version\" in scope_asgi:\n tags[ASGI_SPEC_VERSION] = scope_asgi[\"spec_version\"]\n\n return tags\n\n\ndef _extract_headers(scope):\n headers = scope.get(\"headers\")\n if headers:\n # headers: (Iterable[[byte string, byte string]])\n return dict((bytes_to_str(k), bytes_to_str(v)) for (k, v) in headers)\n return {}\n\n\ndef _default_handle_exception_span(exc, span):\n \"\"\"Default handler for exception for span\"\"\"\n span.set_tag(http.STATUS_CODE, 500)\n\n\nclass TraceMiddleware:\n \"\"\"\n ASGI application middleware that traces the requests.\n\n Args:\n app: The ASGI application.\n tracer: Custom tracer. Defaults to the global tracer.\n \"\"\"\n\n def __init__(\n self,\n app,\n tracer=None,\n integration_config=config.asgi,\n handle_exception_span=_default_handle_exception_span,\n span_modifier=None,\n ):\n self.app = guarantee_single_callable(app)\n self.tracer = tracer or ddtrace.tracer\n self.integration_config = integration_config\n self.handle_exception_span = handle_exception_span\n self.span_modifier = span_modifier\n\n async def __call__(self, scope, receive, send):\n if scope[\"type\"] != \"http\":\n return await self.app(scope, receive, send)\n\n headers = _extract_headers(scope)\n\n trace_utils.activate_distributed_headers(\n self.tracer, int_config=self.integration_config, request_headers=headers\n )\n\n resource = \"{} {}\".format(scope[\"method\"], scope[\"path\"])\n\n span = self.tracer.trace(\n name=self.integration_config.get(\"request_span_name\", \"asgi.request\"),\n service=trace_utils.int_service(None, self.integration_config),\n resource=resource,\n span_type=SpanTypes.WEB,\n )\n\n if self.span_modifier:\n self.span_modifier(span, scope)\n\n sample_rate = self.integration_config.get_analytics_sample_rate(use_global_config=True)\n if sample_rate is not None:\n span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, sample_rate)\n\n method = scope.get(\"method\")\n server = scope.get(\"server\")\n if server and len(server) == 2:\n port = server[1]\n server_host = server[0] + (\":\" + str(port) if port is not None and port != 80 else \"\")\n full_path = scope.get(\"root_path\", \"\") + scope.get(\"path\", \"\")\n url = scope.get(\"scheme\", \"http\") + \"://\" + server_host + full_path\n else:\n url = None\n\n if self.integration_config.trace_query_string:\n query_string = scope.get(\"query_string\")\n if len(query_string) > 0:\n query_string = bytes_to_str(query_string)\n else:\n query_string = None\n\n trace_utils.set_http_meta(\n span, self.integration_config, method=method, url=url, query=query_string, request_headers=headers\n )\n\n tags = _extract_versions_from_scope(scope, self.integration_config)\n 
span.set_tags(tags)\n\n async def wrapped_send(message):\n if span and message.get(\"type\") == \"http.response.start\" and \"status\" in message:\n status_code = message[\"status\"]\n else:\n status_code = None\n\n if \"headers\" in message:\n response_headers = message[\"headers\"]\n else:\n response_headers = None\n\n trace_utils.set_http_meta(\n span, self.integration_config, status_code=status_code, response_headers=response_headers\n )\n\n return await send(message)\n\n try:\n return await self.app(scope, receive, wrapped_send)\n except Exception as exc:\n (exc_type, exc_val, exc_tb) = sys.exc_info()\n span.set_exc_info(exc_type, exc_val, exc_tb)\n self.handle_exception_span(exc, span)\n reraise(exc_type, exc_val, exc_tb)\n finally:\n span.finish()\n", "path": "ddtrace/contrib/asgi/middleware.py"}]} | 2,459 | 209 |
gh_patches_debug_7432 | rasdani/github-patches | git_diff | pulp__pulpcore-3412 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
0077_move_remote_url_credentials.py fails on Remotes that have @ in path, not netloc
**Version**
3.18.10
**Describe the bug**
Migration 0077 fails when you have a remote that has an @ somewhere in the path
```
Applying core.0077_move_remote_url_credentials...Traceback (most recent call last):
File "/usr/bin/pulpcore-manager", line 33, in <module>
sys.exit(load_entry_point('pulpcore==3.18.10', 'console_scripts', 'pulpcore-manager')())
File "/usr/lib/python3.9/site-packages/pulpcore/app/manage.py", line 11, in manage
execute_from_command_line(sys.argv)
File "/usr/lib/python3.9/site-packages/django/core/management/__init__.py", line 419, in execute_from_command_line
utility.execute()
File "/usr/lib/python3.9/site-packages/django/core/management/__init__.py", line 413, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/usr/lib/python3.9/site-packages/django/core/management/base.py", line 354, in run_from_argv
self.execute(*args, **cmd_options)
File "/usr/lib/python3.9/site-packages/django/core/management/base.py", line 398, in execute
output = self.handle(*args, **options)
File "/usr/lib/python3.9/site-packages/django/core/management/base.py", line 89, in wrapped
res = handle_func(*args, **kwargs)
File "/usr/lib/python3.9/site-packages/django/core/management/commands/migrate.py", line 244, in handle
post_migrate_state = executor.migrate(
File "/usr/lib/python3.9/site-packages/django/db/migrations/executor.py", line 117, in migrate
state = self._migrate_all_forwards(state, plan, full_plan, fake=fake, fake_initial=fake_initial)
File "/usr/lib/python3.9/site-packages/django/db/migrations/executor.py", line 147, in _migrate_all_forwards
state = self.apply_migration(state, migration, fake=fake, fake_initial=fake_initial)
File "/usr/lib/python3.9/site-packages/django/db/migrations/executor.py", line 227, in apply_migration
state = migration.apply(state, schema_editor)
File "/usr/lib/python3.9/site-packages/django/db/migrations/migration.py", line 126, in apply
operation.database_forwards(self.app_label, schema_editor, old_state, project_state)
File "/usr/lib/python3.9/site-packages/django/db/migrations/operations/special.py", line 190, in database_forwards
self.code(from_state.apps, schema_editor)
File "/usr/lib/python3.9/site-packages/pulpcore/app/migrations/0077_move_remote_url_credentials.py", line 19, in move_remote_url_credentials
_, url_split = url.netloc.rsplit("@", maxsplit=1)
ValueError: not enough values to unpack (expected 2, got 1)
```
**To Reproduce**
Steps to reproduce the behavior:
* Have a remote `https://download.copr.fedorainfracloud.org/results/@caddy/caddy/epel-8-x86_64/`
* Try to migrate 0077
**Expected behavior**
migration applies
**Additional context**
https://community.theforeman.org/t/foreman-3-3-katello-4-5-upgrade-failed-pulpcore-manager-migrate-noinput/31088
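As a sketch of a guard that avoids the crash (helper name invented for illustration): only treat the `@` as a credential separator when it actually appears in the netloc, so an `@` that is merely part of the path — as in the copr URL above — is left untouched.

```python
from urllib.parse import urlparse, urlunparse


def split_credentials(remote_url):
    """Return (username, password, url_without_credentials).

    A no-op when the '@' only occurs in the path, e.g.
    https://download.copr.fedorainfracloud.org/results/@caddy/caddy/epel-8-x86_64/
    """
    url = urlparse(remote_url)
    if "@" not in url.netloc:
        # Nothing to strip; the '@' is not part of user:password@host
        return None, None, remote_url
    _, netloc = url.netloc.rsplit("@", maxsplit=1)
    return url.username, url.password, urlunparse(url._replace(netloc=netloc))
```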
</issue>
<code>
[start of pulpcore/app/migrations/0077_move_remote_url_credentials.py]
1 # Generated by Django 3.2.6 on 2021-09-29 14:00
2
3 from urllib.parse import urlparse, urlunparse
4
5 from django.db import migrations
6
7
8 def move_remote_url_credentials(apps, schema_editor):
9 Remote = apps.get_model("core", "Remote")
10
11 for remote in Remote.objects.filter(url__contains="@").iterator():
12 url = urlparse(remote.url)
13
14 if not remote.username:
15 remote.username = url.username
16 if not remote.password:
17 remote.password = url.password
18
19 _, url_split = url.netloc.rsplit("@", maxsplit=1)
20 remote.url = urlunparse(url._replace(netloc=url_split))
21 remote.save()
22
23
24 class Migration(migrations.Migration):
25
26 dependencies = [
27 ('core', '0076_remove_reserved_resource'),
28 ]
29
30 operations = [
31 migrations.RunPython(
32 code=move_remote_url_credentials,
33 reverse_code=migrations.RunPython.noop,
34 elidable=True,
35 )
36 ]
37
[end of pulpcore/app/migrations/0077_move_remote_url_credentials.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pulpcore/app/migrations/0077_move_remote_url_credentials.py b/pulpcore/app/migrations/0077_move_remote_url_credentials.py
--- a/pulpcore/app/migrations/0077_move_remote_url_credentials.py
+++ b/pulpcore/app/migrations/0077_move_remote_url_credentials.py
@@ -11,6 +11,11 @@
for remote in Remote.objects.filter(url__contains="@").iterator():
url = urlparse(remote.url)
+ if '@' not in url.netloc:
+ # URLs can have an @ in other places than the netloc,
+ # but those do not indicate credentials
+ continue
+
if not remote.username:
remote.username = url.username
if not remote.password:
| {"golden_diff": "diff --git a/pulpcore/app/migrations/0077_move_remote_url_credentials.py b/pulpcore/app/migrations/0077_move_remote_url_credentials.py\n--- a/pulpcore/app/migrations/0077_move_remote_url_credentials.py\n+++ b/pulpcore/app/migrations/0077_move_remote_url_credentials.py\n@@ -11,6 +11,11 @@\n for remote in Remote.objects.filter(url__contains=\"@\").iterator():\n url = urlparse(remote.url)\n \n+ if '@' not in url.netloc:\n+ # URLs can have an @ in other places than the netloc,\n+ # but those do not indicate credentials\n+ continue\n+\n if not remote.username:\n remote.username = url.username\n if not remote.password:\n", "issue": "0077_move_remote_url_credentials.py fails on Remotes that have @ in path, not netloc\n**Version**\r\n3.18.10\r\n\r\n**Describe the bug**\r\nMigration 0077 fails when you have a remote that has an @ somewhere in the path\r\n\r\n```\r\n Applying core.0077_move_remote_url_credentials...Traceback (most recent call last):\r\n File \"/usr/bin/pulpcore-manager\", line 33, in <module>\r\n sys.exit(load_entry_point('pulpcore==3.18.10', 'console_scripts', 'pulpcore-manager')())\r\n File \"/usr/lib/python3.9/site-packages/pulpcore/app/manage.py\", line 11, in manage\r\n execute_from_command_line(sys.argv)\r\n File \"/usr/lib/python3.9/site-packages/django/core/management/__init__.py\", line 419, in execute_from_command_line\r\n utility.execute()\r\n File \"/usr/lib/python3.9/site-packages/django/core/management/__init__.py\", line 413, in execute\r\n self.fetch_command(subcommand).run_from_argv(self.argv)\r\n File \"/usr/lib/python3.9/site-packages/django/core/management/base.py\", line 354, in run_from_argv\r\n self.execute(*args, **cmd_options)\r\n File \"/usr/lib/python3.9/site-packages/django/core/management/base.py\", line 398, in execute\r\n output = self.handle(*args, **options)\r\n File \"/usr/lib/python3.9/site-packages/django/core/management/base.py\", line 89, in wrapped\r\n res = handle_func(*args, **kwargs)\r\n File \"/usr/lib/python3.9/site-packages/django/core/management/commands/migrate.py\", line 244, in handle\r\n post_migrate_state = executor.migrate(\r\n File \"/usr/lib/python3.9/site-packages/django/db/migrations/executor.py\", line 117, in migrate\r\n state = self._migrate_all_forwards(state, plan, full_plan, fake=fake, fake_initial=fake_initial)\r\n File \"/usr/lib/python3.9/site-packages/django/db/migrations/executor.py\", line 147, in _migrate_all_forwards\r\n state = self.apply_migration(state, migration, fake=fake, fake_initial=fake_initial)\r\n File \"/usr/lib/python3.9/site-packages/django/db/migrations/executor.py\", line 227, in apply_migration\r\n state = migration.apply(state, schema_editor)\r\n File \"/usr/lib/python3.9/site-packages/django/db/migrations/migration.py\", line 126, in apply\r\n operation.database_forwards(self.app_label, schema_editor, old_state, project_state)\r\n File \"/usr/lib/python3.9/site-packages/django/db/migrations/operations/special.py\", line 190, in database_forwards\r\n self.code(from_state.apps, schema_editor)\r\n File \"/usr/lib/python3.9/site-packages/pulpcore/app/migrations/0077_move_remote_url_credentials.py\", line 19, in move_remote_url_credentials\r\n _, url_split = url.netloc.rsplit(\"@\", maxsplit=1)\r\nValueError: not enough values to unpack (expected 2, got 1)\r\n```\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n* Have a remote `https://download.copr.fedorainfracloud.org/results/@caddy/caddy/epel-8-x86_64/`\r\n* Try to migrate 0077\r\n\r\n**Expected 
behavior**\r\nmigration aplies\r\n\r\n**Additional context**\r\nhttps://community.theforeman.org/t/foreman-3-3-katello-4-5-upgrade-failed-pulpcore-manager-migrate-noinput/31088\r\n\n", "before_files": [{"content": "# Generated by Django 3.2.6 on 2021-09-29 14:00\n\nfrom urllib.parse import urlparse, urlunparse\n\nfrom django.db import migrations\n\n\ndef move_remote_url_credentials(apps, schema_editor):\n Remote = apps.get_model(\"core\", \"Remote\")\n\n for remote in Remote.objects.filter(url__contains=\"@\").iterator():\n url = urlparse(remote.url)\n\n if not remote.username:\n remote.username = url.username\n if not remote.password:\n remote.password = url.password\n\n _, url_split = url.netloc.rsplit(\"@\", maxsplit=1)\n remote.url = urlunparse(url._replace(netloc=url_split))\n remote.save()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0076_remove_reserved_resource'),\n ]\n\n operations = [\n migrations.RunPython(\n code=move_remote_url_credentials,\n reverse_code=migrations.RunPython.noop,\n elidable=True,\n )\n ]\n", "path": "pulpcore/app/migrations/0077_move_remote_url_credentials.py"}]} | 1,673 | 172 |
gh_patches_debug_30344 | rasdani/github-patches | git_diff | CTFd__CTFd-760 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
dynamic challenges do not work when using user mode
**Environment**:
 - CTFd Version/Commit: `2bd310b5d99f4305f8b61076f9af3b7e3c91ba8f`
- Operating System: `ubuntu 16.04`
- Web Browser and Version: `Chrome`
when using user mode, `solve_count` always gets 0
https://github.com/CTFd/CTFd/blob/master/CTFd/plugins/dynamic_challenges/__init__.py#L95
```
solve_count = Solves.query \
.join(Teams, Solves.team_id == Teams.id) \
.filter(Solves.challenge_id == challenge.id, Teams.banned == False) \
.count()
```
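A mode-agnostic count — sketched here along the lines of resolving the account model at runtime instead of hard-coding `Teams`; treat the exact filter set as an assumption — would join on `Solves.account_id` instead:

```python
from CTFd.utils.modes import get_model

Model = get_model()  # Users in user mode, Teams in team mode

solve_count = (
    Solves.query
    .join(Model, Solves.account_id == Model.id)
    .filter(
        Solves.challenge_id == challenge.id,
        Model.hidden == False,  # noqa: E712 -- SQLAlchemy expression, not Python bool
        Model.banned == False,  # noqa: E712
    )
    .count()
)
```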
</issue>
<code>
[start of CTFd/plugins/dynamic_challenges/__init__.py]
1 from __future__ import division # Use floating point for math calculations
2 from CTFd.plugins.challenges import BaseChallenge, CHALLENGE_CLASSES
3 from CTFd.plugins import register_plugin_assets_directory
4 from CTFd.plugins.flags import get_flag_class
5 from CTFd.models import db, Solves, Fails, Flags, Challenges, ChallengeFiles, Tags, Teams, Hints
6 from CTFd import utils
7 from CTFd.utils.migrations import upgrade
8 from CTFd.utils.user import get_ip
9 from CTFd.utils.uploads import upload_file, delete_file
10 from flask import Blueprint
11 import math
12
13
14 class DynamicValueChallenge(BaseChallenge):
15 id = "dynamic" # Unique identifier used to register challenges
16 name = "dynamic" # Name of a challenge type
17 templates = { # Handlebars templates used for each aspect of challenge editing & viewing
18 'create': '/plugins/dynamic_challenges/assets/create.html',
19 'update': '/plugins/dynamic_challenges/assets/update.html',
20 'view': '/plugins/dynamic_challenges/assets/view.html',
21 }
22 scripts = { # Scripts that are loaded when a template is loaded
23 'create': '/plugins/dynamic_challenges/assets/create.js',
24 'update': '/plugins/dynamic_challenges/assets/update.js',
25 'view': '/plugins/dynamic_challenges/assets/view.js',
26 }
27 # Route at which files are accessible. This must be registered using register_plugin_assets_directory()
28 route = '/plugins/dynamic_challenges/assets/'
29 # Blueprint used to access the static_folder directory.
30 blueprint = Blueprint('dynamic_challenges', __name__, template_folder='templates', static_folder='assets')
31
32 @staticmethod
33 def create(request):
34 """
35 This method is used to process the challenge creation request.
36
37 :param request:
38 :return:
39 """
40 data = request.form or request.get_json()
41 challenge = DynamicChallenge(**data)
42
43 db.session.add(challenge)
44 db.session.commit()
45
46 return challenge
47
48 @staticmethod
49 def read(challenge):
50 """
51 This method is in used to access the data of a challenge in a format processable by the front end.
52
53 :param challenge:
54 :return: Challenge object, data dictionary to be returned to the user
55 """
56 challenge = DynamicChallenge.query.filter_by(id=challenge.id).first()
57 data = {
58 'id': challenge.id,
59 'name': challenge.name,
60 'value': challenge.value,
61 'initial': challenge.initial,
62 'decay': challenge.decay,
63 'minimum': challenge.minimum,
64 'description': challenge.description,
65 'category': challenge.category,
66 'state': challenge.state,
67 'max_attempts': challenge.max_attempts,
68 'type': challenge.type,
69 'type_data': {
70 'id': DynamicValueChallenge.id,
71 'name': DynamicValueChallenge.name,
72 'templates': DynamicValueChallenge.templates,
73 'scripts': DynamicValueChallenge.scripts,
74 }
75 }
76 return data
77
78 @staticmethod
79 def update(challenge, request):
80 """
81 This method is used to update the information associated with a challenge. This should be kept strictly to the
82 Challenges table and any child tables.
83
84 :param challenge:
85 :param request:
86 :return:
87 """
88 data = request.form or request.get_json()
89 data['initial'] = float(data.get('initial', 0))
90 data['minimum'] = float(data.get('minimum', 0))
91 data['decay'] = float(data.get('decay', 0))
92 for attr, value in data.items():
93 setattr(challenge, attr, value)
94
95 solve_count = Solves.query \
96 .join(Teams, Solves.team_id == Teams.id) \
97 .filter(Solves.challenge_id == challenge.id, Teams.banned == False) \
98 .count()
99
100 # It is important that this calculation takes into account floats.
101 # Hence this file uses from __future__ import division
102 value = (((challenge.minimum - challenge.initial) / (challenge.decay ** 2)) * (solve_count ** 2)) + challenge.initial
103
104 value = math.ceil(value)
105
106 if value < challenge.minimum:
107 value = challenge.minimum
108
109 challenge.value = value
110
111 db.session.commit()
112 return challenge
113
114 @staticmethod
115 def delete(challenge):
116 """
117 This method is used to delete the resources used by a challenge.
118
119 :param challenge:
120 :return:
121 """
122 Fails.query.filter_by(challenge_id=challenge.id).delete()
123 Solves.query.filter_by(challenge_id=challenge.id).delete()
124 Flags.query.filter_by(challenge_id=challenge.id).delete()
125 files = ChallengeFiles.query.filter_by(challenge_id=challenge.id).all()
126 for f in files:
127 delete_file(f.id)
128 ChallengeFiles.query.filter_by(challenge_id=challenge.id).delete()
129 Tags.query.filter_by(challenge_id=challenge.id).delete()
130 Hints.query.filter_by(challenge_id=challenge.id).delete()
131 DynamicChallenge.query.filter_by(id=challenge.id).delete()
132 Challenges.query.filter_by(id=challenge.id).delete()
133 db.session.commit()
134
135 @staticmethod
136 def attempt(challenge, request):
137 """
138 This method is used to check whether a given input is right or wrong. It does not make any changes and should
139 return a boolean for correctness and a string to be shown to the user. It is also in charge of parsing the
140 user's input from the request itself.
141
142 :param challenge: The Challenge object from the database
143 :param request: The request the user submitted
144 :return: (boolean, string)
145 """
146 data = request.form or request.get_json()
147 submission = data['submission'].strip()
148 flags = Flags.query.filter_by(challenge_id=challenge.id).all()
149 for flag in flags:
150 if get_flag_class(flag.type).compare(flag, submission):
151 return True, 'Correct'
152 return False, 'Incorrect'
153
154 @staticmethod
155 def solve(user, team, challenge, request):
156 """
157 This method is used to insert Solves into the database in order to mark a challenge as solved.
158
159 :param team: The Team object from the database
160 :param chal: The Challenge object from the database
161 :param request: The request the user submitted
162 :return:
163 """
164 chal = DynamicChallenge.query.filter_by(id=challenge.id).first()
165 data = request.form or request.get_json()
166 submission = data['submission'].strip()
167
168 solve_count = Solves.query\
169 .join(Teams, Solves.team_id == Teams.id)\
170 .filter(Solves.challenge_id == chal.id, Teams.banned == False)\
171 .count()
172
173 # It is important that this calculation takes into account floats.
174 # Hence this file uses from __future__ import division
175 value = (
176 (
177 (chal.minimum - chal.initial) / (chal.decay**2)
178 ) * (solve_count**2)
179 ) + chal.initial
180
181 value = math.ceil(value)
182
183 if value < chal.minimum:
184 value = chal.minimum
185
186 chal.value = value
187
188 solve = Solves(
189 user_id=user.id,
190 team_id=team.id if team else None,
191 challenge_id=challenge.id,
192 ip=get_ip(req=request),
193 provided=submission
194 )
195 db.session.add(solve)
196 db.session.commit()
197 db.session.close()
198
199 @staticmethod
200 def fail(user, team, challenge, request):
201 """
202 This method is used to insert Fails into the database in order to mark an answer incorrect.
203
204 :param team: The Team object from the database
205 :param challenge: The Challenge object from the database
206 :param request: The request the user submitted
207 :return:
208 """
209 data = request.form or request.get_json()
210 submission = data['submission'].strip()
211 wrong = Fails(
212 user_id=user.id,
213 team_id=team.id if team else None,
214 challenge_id=challenge.id,
215 ip=get_ip(request),
216 provided=submission
217 )
218 db.session.add(wrong)
219 db.session.commit()
220 db.session.close()
221
222
223 class DynamicChallenge(Challenges):
224 __mapper_args__ = {'polymorphic_identity': 'dynamic'}
225 id = db.Column(None, db.ForeignKey('challenges.id'), primary_key=True)
226 initial = db.Column(db.Integer)
227 minimum = db.Column(db.Integer)
228 decay = db.Column(db.Integer)
229
230 def __init__(self, *args, **kwargs):
231 super(DynamicChallenge, self).__init__(**kwargs)
232 self.initial = kwargs['value']
233
234
235 def load(app):
236 # upgrade()
237 app.db.create_all()
238 CHALLENGE_CLASSES['dynamic'] = DynamicValueChallenge
239 register_plugin_assets_directory(app, base_path='/plugins/dynamic_challenges/assets/')
240
[end of CTFd/plugins/dynamic_challenges/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/CTFd/plugins/dynamic_challenges/__init__.py b/CTFd/plugins/dynamic_challenges/__init__.py
--- a/CTFd/plugins/dynamic_challenges/__init__.py
+++ b/CTFd/plugins/dynamic_challenges/__init__.py
@@ -7,6 +7,7 @@
from CTFd.utils.migrations import upgrade
from CTFd.utils.user import get_ip
from CTFd.utils.uploads import upload_file, delete_file
+from CTFd.utils.modes import get_model
from flask import Blueprint
import math
@@ -92,9 +93,11 @@
for attr, value in data.items():
setattr(challenge, attr, value)
+ Model = get_model()
+
solve_count = Solves.query \
- .join(Teams, Solves.team_id == Teams.id) \
- .filter(Solves.challenge_id == challenge.id, Teams.banned == False) \
+ .join(Model, Solves.account_id == Model.id) \
+ .filter(Solves.challenge_id == challenge.id, Model.hidden == False, Model.banned == False) \
.count()
# It is important that this calculation takes into account floats.
@@ -165,9 +168,11 @@
data = request.form or request.get_json()
submission = data['submission'].strip()
- solve_count = Solves.query\
- .join(Teams, Solves.team_id == Teams.id)\
- .filter(Solves.challenge_id == chal.id, Teams.banned == False)\
+ Model = get_model()
+
+ solve_count = Solves.query \
+ .join(Model, Solves.account_id == Model.id) \
+ .filter(Solves.challenge_id == challenge.id, Model.hidden == False, Model.banned == False) \
.count()
# It is important that this calculation takes into account floats.
| {"golden_diff": "diff --git a/CTFd/plugins/dynamic_challenges/__init__.py b/CTFd/plugins/dynamic_challenges/__init__.py\n--- a/CTFd/plugins/dynamic_challenges/__init__.py\n+++ b/CTFd/plugins/dynamic_challenges/__init__.py\n@@ -7,6 +7,7 @@\n from CTFd.utils.migrations import upgrade\n from CTFd.utils.user import get_ip\n from CTFd.utils.uploads import upload_file, delete_file\n+from CTFd.utils.modes import get_model\n from flask import Blueprint\n import math\n \n@@ -92,9 +93,11 @@\n for attr, value in data.items():\n setattr(challenge, attr, value)\n \n+ Model = get_model()\n+\n solve_count = Solves.query \\\n- .join(Teams, Solves.team_id == Teams.id) \\\n- .filter(Solves.challenge_id == challenge.id, Teams.banned == False) \\\n+ .join(Model, Solves.account_id == Model.id) \\\n+ .filter(Solves.challenge_id == challenge.id, Model.hidden == False, Model.banned == False) \\\n .count()\n \n # It is important that this calculation takes into account floats.\n@@ -165,9 +168,11 @@\n data = request.form or request.get_json()\n submission = data['submission'].strip()\n \n- solve_count = Solves.query\\\n- .join(Teams, Solves.team_id == Teams.id)\\\n- .filter(Solves.challenge_id == chal.id, Teams.banned == False)\\\n+ Model = get_model()\n+\n+ solve_count = Solves.query \\\n+ .join(Model, Solves.account_id == Model.id) \\\n+ .filter(Solves.challenge_id == challenge.id, Model.hidden == False, Model.banned == False) \\\n .count()\n \n # It is important that this calculation takes into account floats.\n", "issue": "dynamic challenges not work when using user mode\n\r\n**Environment**:\r\n\r\n - CTFd Version/Commit:`2bd310b5d99f4305f8b61076f9af3b7e3c91ba8f`\r\n - Operating System: `ubuntu 16.04`\r\n - Web Browser and Version: `Chrome`\r\n\r\nwhen using user mode , `solve_count` always get 0 \r\n\r\nhttps://github.com/CTFd/CTFd/blob/master/CTFd/plugins/dynamic_challenges/__init__.py#L95\r\n```\r\n solve_count = Solves.query \\\r\n .join(Teams, Solves.team_id == Teams.id) \\\r\n .filter(Solves.challenge_id == challenge.id, Teams.banned == False) \\\r\n .count()\r\n```\r\n\n", "before_files": [{"content": "from __future__ import division # Use floating point for math calculations\nfrom CTFd.plugins.challenges import BaseChallenge, CHALLENGE_CLASSES\nfrom CTFd.plugins import register_plugin_assets_directory\nfrom CTFd.plugins.flags import get_flag_class\nfrom CTFd.models import db, Solves, Fails, Flags, Challenges, ChallengeFiles, Tags, Teams, Hints\nfrom CTFd import utils\nfrom CTFd.utils.migrations import upgrade\nfrom CTFd.utils.user import get_ip\nfrom CTFd.utils.uploads import upload_file, delete_file\nfrom flask import Blueprint\nimport math\n\n\nclass DynamicValueChallenge(BaseChallenge):\n id = \"dynamic\" # Unique identifier used to register challenges\n name = \"dynamic\" # Name of a challenge type\n templates = { # Handlebars templates used for each aspect of challenge editing & viewing\n 'create': '/plugins/dynamic_challenges/assets/create.html',\n 'update': '/plugins/dynamic_challenges/assets/update.html',\n 'view': '/plugins/dynamic_challenges/assets/view.html',\n }\n scripts = { # Scripts that are loaded when a template is loaded\n 'create': '/plugins/dynamic_challenges/assets/create.js',\n 'update': '/plugins/dynamic_challenges/assets/update.js',\n 'view': '/plugins/dynamic_challenges/assets/view.js',\n }\n # Route at which files are accessible. 
This must be registered using register_plugin_assets_directory()\n route = '/plugins/dynamic_challenges/assets/'\n # Blueprint used to access the static_folder directory.\n blueprint = Blueprint('dynamic_challenges', __name__, template_folder='templates', static_folder='assets')\n\n @staticmethod\n def create(request):\n \"\"\"\n This method is used to process the challenge creation request.\n\n :param request:\n :return:\n \"\"\"\n data = request.form or request.get_json()\n challenge = DynamicChallenge(**data)\n\n db.session.add(challenge)\n db.session.commit()\n\n return challenge\n\n @staticmethod\n def read(challenge):\n \"\"\"\n This method is in used to access the data of a challenge in a format processable by the front end.\n\n :param challenge:\n :return: Challenge object, data dictionary to be returned to the user\n \"\"\"\n challenge = DynamicChallenge.query.filter_by(id=challenge.id).first()\n data = {\n 'id': challenge.id,\n 'name': challenge.name,\n 'value': challenge.value,\n 'initial': challenge.initial,\n 'decay': challenge.decay,\n 'minimum': challenge.minimum,\n 'description': challenge.description,\n 'category': challenge.category,\n 'state': challenge.state,\n 'max_attempts': challenge.max_attempts,\n 'type': challenge.type,\n 'type_data': {\n 'id': DynamicValueChallenge.id,\n 'name': DynamicValueChallenge.name,\n 'templates': DynamicValueChallenge.templates,\n 'scripts': DynamicValueChallenge.scripts,\n }\n }\n return data\n\n @staticmethod\n def update(challenge, request):\n \"\"\"\n This method is used to update the information associated with a challenge. This should be kept strictly to the\n Challenges table and any child tables.\n\n :param challenge:\n :param request:\n :return:\n \"\"\"\n data = request.form or request.get_json()\n data['initial'] = float(data.get('initial', 0))\n data['minimum'] = float(data.get('minimum', 0))\n data['decay'] = float(data.get('decay', 0))\n for attr, value in data.items():\n setattr(challenge, attr, value)\n\n solve_count = Solves.query \\\n .join(Teams, Solves.team_id == Teams.id) \\\n .filter(Solves.challenge_id == challenge.id, Teams.banned == False) \\\n .count()\n\n # It is important that this calculation takes into account floats.\n # Hence this file uses from __future__ import division\n value = (((challenge.minimum - challenge.initial) / (challenge.decay ** 2)) * (solve_count ** 2)) + challenge.initial\n\n value = math.ceil(value)\n\n if value < challenge.minimum:\n value = challenge.minimum\n\n challenge.value = value\n\n db.session.commit()\n return challenge\n\n @staticmethod\n def delete(challenge):\n \"\"\"\n This method is used to delete the resources used by a challenge.\n\n :param challenge:\n :return:\n \"\"\"\n Fails.query.filter_by(challenge_id=challenge.id).delete()\n Solves.query.filter_by(challenge_id=challenge.id).delete()\n Flags.query.filter_by(challenge_id=challenge.id).delete()\n files = ChallengeFiles.query.filter_by(challenge_id=challenge.id).all()\n for f in files:\n delete_file(f.id)\n ChallengeFiles.query.filter_by(challenge_id=challenge.id).delete()\n Tags.query.filter_by(challenge_id=challenge.id).delete()\n Hints.query.filter_by(challenge_id=challenge.id).delete()\n DynamicChallenge.query.filter_by(id=challenge.id).delete()\n Challenges.query.filter_by(id=challenge.id).delete()\n db.session.commit()\n\n @staticmethod\n def attempt(challenge, request):\n \"\"\"\n This method is used to check whether a given input is right or wrong. 
It does not make any changes and should\n return a boolean for correctness and a string to be shown to the user. It is also in charge of parsing the\n user's input from the request itself.\n\n :param challenge: The Challenge object from the database\n :param request: The request the user submitted\n :return: (boolean, string)\n \"\"\"\n data = request.form or request.get_json()\n submission = data['submission'].strip()\n flags = Flags.query.filter_by(challenge_id=challenge.id).all()\n for flag in flags:\n if get_flag_class(flag.type).compare(flag, submission):\n return True, 'Correct'\n return False, 'Incorrect'\n\n @staticmethod\n def solve(user, team, challenge, request):\n \"\"\"\n This method is used to insert Solves into the database in order to mark a challenge as solved.\n\n :param team: The Team object from the database\n :param chal: The Challenge object from the database\n :param request: The request the user submitted\n :return:\n \"\"\"\n chal = DynamicChallenge.query.filter_by(id=challenge.id).first()\n data = request.form or request.get_json()\n submission = data['submission'].strip()\n\n solve_count = Solves.query\\\n .join(Teams, Solves.team_id == Teams.id)\\\n .filter(Solves.challenge_id == chal.id, Teams.banned == False)\\\n .count()\n\n # It is important that this calculation takes into account floats.\n # Hence this file uses from __future__ import division\n value = (\n (\n (chal.minimum - chal.initial) / (chal.decay**2)\n ) * (solve_count**2)\n ) + chal.initial\n\n value = math.ceil(value)\n\n if value < chal.minimum:\n value = chal.minimum\n\n chal.value = value\n\n solve = Solves(\n user_id=user.id,\n team_id=team.id if team else None,\n challenge_id=challenge.id,\n ip=get_ip(req=request),\n provided=submission\n )\n db.session.add(solve)\n db.session.commit()\n db.session.close()\n\n @staticmethod\n def fail(user, team, challenge, request):\n \"\"\"\n This method is used to insert Fails into the database in order to mark an answer incorrect.\n\n :param team: The Team object from the database\n :param challenge: The Challenge object from the database\n :param request: The request the user submitted\n :return:\n \"\"\"\n data = request.form or request.get_json()\n submission = data['submission'].strip()\n wrong = Fails(\n user_id=user.id,\n team_id=team.id if team else None,\n challenge_id=challenge.id,\n ip=get_ip(request),\n provided=submission\n )\n db.session.add(wrong)\n db.session.commit()\n db.session.close()\n\n\nclass DynamicChallenge(Challenges):\n __mapper_args__ = {'polymorphic_identity': 'dynamic'}\n id = db.Column(None, db.ForeignKey('challenges.id'), primary_key=True)\n initial = db.Column(db.Integer)\n minimum = db.Column(db.Integer)\n decay = db.Column(db.Integer)\n\n def __init__(self, *args, **kwargs):\n super(DynamicChallenge, self).__init__(**kwargs)\n self.initial = kwargs['value']\n\n\ndef load(app):\n # upgrade()\n app.db.create_all()\n CHALLENGE_CLASSES['dynamic'] = DynamicValueChallenge\n register_plugin_assets_directory(app, base_path='/plugins/dynamic_challenges/assets/')\n", "path": "CTFd/plugins/dynamic_challenges/__init__.py"}]} | 3,251 | 422 |
gh_patches_debug_112 | rasdani/github-patches | git_diff | InstaPy__InstaPy-4046 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Instapy-chromedriver not supporting latest Chrome browser version
The InstaPy chromedriver only supports Chrome up to version 71, and since the update the whole program quits with an error telling the user to ensure chromedriver is installed at .../insta-py/chromedriver_linux64.
</issue>
<code>
[start of instapy/__init__.py]
1 # flake8: noqa
2
3 from .instapy import InstaPy
4 from .util import smart_run
5 from .settings import Settings
6 from .file_manager import set_workspace
7 from .file_manager import get_workspace
8
9
10 # __variables__ with double-quoted values will be available in setup.py
11 __version__ = "0.2.1"
12
13
[end of instapy/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/instapy/__init__.py b/instapy/__init__.py
--- a/instapy/__init__.py
+++ b/instapy/__init__.py
@@ -8,5 +8,5 @@
# __variables__ with double-quoted values will be available in setup.py
-__version__ = "0.2.1"
+__version__ = "0.2.2"
| {"golden_diff": "diff --git a/instapy/__init__.py b/instapy/__init__.py\n--- a/instapy/__init__.py\n+++ b/instapy/__init__.py\n@@ -8,5 +8,5 @@\n \n \n # __variables__ with double-quoted values will be available in setup.py\n-__version__ = \"0.2.1\"\n+__version__ = \"0.2.2\"\n", "issue": "Instapy-chromedriver not supporting latest Chrome browser version\nThe Instapy-chrome driver only supports Chrome upto versions 71 and since the update, the whole program quits with the error of ensure chromedriver is installed at .../insta-py/chromedriver_linux64..\n", "before_files": [{"content": "# flake8: noqa\n\nfrom .instapy import InstaPy\nfrom .util import smart_run\nfrom .settings import Settings\nfrom .file_manager import set_workspace\nfrom .file_manager import get_workspace\n\n\n# __variables__ with double-quoted values will be available in setup.py\n__version__ = \"0.2.1\"\n\n", "path": "instapy/__init__.py"}]} | 689 | 91 |
gh_patches_debug_20853 | rasdani/github-patches | git_diff | adap__flower-2768 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
If the gRPC server is running behind a load balancer, clients may not be able to connect
### Describe the bug
Flower is using the `peer()` method from `grpc.ServicerContext` in order to identify unique flower clients. However, in some situations (like when running the gRPC server behind a load balancer or proxy) different clients can have the same peer identifier (i.e. the same `IP:PORT`), as HTTP/2 connections are multiplexed.
### Steps/Code to Reproduce
This can be tested with a simple [Traefik](https://doc.traefik.io/traefik/) setup as follows (https://doc.traefik.io/traefik/user-guides/grpc/):
`traefik.yaml`:
```
entryPoints:
web:
address: :80
providers:
file:
filename: grpc.yaml
api: {}
```
`grpc.yaml`:
```
http:
routers:
to-grpc:
entryPoints:
- "web"
service: "srv-grpc"
rule: "HostRegexp(`.*`)"
services:
srv-grpc:
loadBalancer:
servers:
- url: h2c://127.0.0.1:5000
```
The Traefik server can be executed as follows:
```
./traefik --configFile=traefik.yaml
```
Then we can launch a Flower server with the following code:
```
import flwr as fl
def acc_wavg(metrics):
n = sum([i for i, _ in metrics])
acc = sum([i * metric["accuracy"] / n for i, metric in metrics])
return {"accuracy": acc}
strategy = fl.server.strategy.FedAvg(
min_available_clients=2,
evaluate_metrics_aggregation_fn=acc_wavg
)
fl.server.start_server(
server_address="0.0.0.0:5000",
config=fl.server.ServerConfig(num_rounds=3),
strategy=strategy,
)
```
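(For reference: the clients connect with `fl.client.start_numpy_client`, pointed at the Traefik entry point rather than at the gRPC server directly. The sketch below is illustrative only; the address and the minimal `NumPyClient` subclass are placeholders, not the actual client code from this setup.)
```
import flwr as fl


class FlowerClient(fl.client.NumPyClient):
    # Placeholder: a real client would implement get_parameters/fit/evaluate.
    def get_parameters(self, config):
        return []


fl.client.start_numpy_client(
    server_address="192.0.2.10:80",  # placeholder for the load balancer address
    client=FlowerClient(),
)
```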
### Expected Results
When we connect two clients to the load balancer IP on port 80, the first one connects and keeps waiting:
```
INFO flwr 2023-06-28 10:03:14,848 | grpc.py:50 | Opened insecure gRPC connection (no certificates were passed)
DEBUG flwr 2023-06-28 10:03:14,849 | connection.py:39 | ChannelConnectivity.IDLE
DEBUG flwr 2023-06-28 10:03:14,849 | connection.py:39 | ChannelConnectivity.CONNECTING
DEBUG flwr 2023-06-28 10:03:14,875 | connection.py:39 | ChannelConnectivity.READY
```
However, the second one is not connected:
```
DEBUG flwr 2023-06-28 09:37:32,554 | connection.py:39 | ChannelConnectivity.IDLE
DEBUG flwr 2023-06-28 09:37:32,554 | connection.py:39 | ChannelConnectivity.CONNECTING
DEBUG flwr 2023-06-28 09:37:32,578 | connection.py:39 | ChannelConnectivity.READY
DEBUG flwr 2023-06-28 09:37:32,784 | connection.py:113 | gRPC channel closed
Traceback (most recent call last):
File "/home/alvaro/w/prj/fl/fl.py", line 68, in <module>
fl.client.start_numpy_client(
File "/home/alvaro/w/prj/fl/VENV/lib/python3.11/site-packages/flwr/client/app.py", line 252, in start_numpy_client
start_client(
File "/home/alvaro/w/prj/fl/VENV/lib/python3.11/site-packages/flwr/client/app.py", line 174, in start_client
server_message = receive()
^^^^^^^^^
File "/home/alvaro/w/prj/fl/VENV/lib/python3.11/site-packages/flwr/client/grpc_client/connection.py", line 105, in <lambda>
receive: Callable[[], ServerMessage] = lambda: next(server_message_iterator)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/alvaro/w/prj/fl/VENV/lib/python3.11/site-packages/grpc/_channel.py", line 475, in __next__
return self._next()
^^^^^^^^^^^^
File "/home/alvaro/w/prj/fl/VENV/lib/python3.11/site-packages/grpc/_channel.py", line 879, in _next
raise StopIteration()
StopIteration
```
This is because the second client connection is discarded [here](https://github.com/adap/flower/blob/main/src/py/flwr/server/client_manager.py#L149), as it appears to come from the same gRPC peer (i.e. the load balancer) because of this code [here](https://github.com/adap/flower/blob/main/src/py/flwr/server/fleet/grpc_bidi/flower_service_servicer.py#L91). Given the above, I assume the `ip:port` tuple is not unique per gRPC client and therefore cannot be used to identify a unique Flower client (as a matter of fact, the `ip:port` is that of the load balancer, not that of the actual client).
As a test and quick hack we have moved to using UUIDs to identify the clients [here](https://github.com/AI4EOSC/flower/commit/b215d9f3cce1ad8806e296db4fe105a8b7f5c6c9). To my understanding this is safe, as the peer identifier is not used elsewhere, but I do not know whether something else is planned for it.
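The change itself is tiny; roughly the idea of the linked commit is to stop keying clients on the transport-level peer string and generate a per-stream identifier instead (the helper name below is made up for illustration):
```
import uuid


def unique_client_id() -> str:
    # Unique per Join() stream, even when context.peer() collapses to the
    # proxy's ip:port for every client behind the load balancer.
    return uuid.uuid4().hex
```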
### Actual Results
Two or more clients should be able to connect when running behind a load balancer.
</issue>
<code>
[start of src/py/flwr/server/fleet/grpc_bidi/flower_service_servicer.py]
1 # Copyright 2020 Flower Labs GmbH. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15 """Servicer for FlowerService.
16
17 Relevant knowledge for reading this modules code:
18 - https://github.com/grpc/grpc/blob/master/doc/statuscodes.md
19 """
20
21
22 from typing import Callable, Iterator
23
24 import grpc
25 from iterators import TimeoutIterator
26
27 from flwr.proto import transport_pb2_grpc # pylint: disable=E0611
28 from flwr.proto.transport_pb2 import ( # pylint: disable=E0611
29 ClientMessage,
30 ServerMessage,
31 )
32 from flwr.server.client_manager import ClientManager
33 from flwr.server.fleet.grpc_bidi.grpc_bridge import GrpcBridge, InsWrapper, ResWrapper
34 from flwr.server.fleet.grpc_bidi.grpc_client_proxy import GrpcClientProxy
35
36
37 def default_bridge_factory() -> GrpcBridge:
38 """Return GrpcBridge instance."""
39 return GrpcBridge()
40
41
42 def default_grpc_client_proxy_factory(cid: str, bridge: GrpcBridge) -> GrpcClientProxy:
43 """Return GrpcClientProxy instance."""
44 return GrpcClientProxy(cid=cid, bridge=bridge)
45
46
47 def register_client_proxy(
48 client_manager: ClientManager,
49 client_proxy: GrpcClientProxy,
50 context: grpc.ServicerContext,
51 ) -> bool:
52 """Try registering GrpcClientProxy with ClientManager."""
53 is_success = client_manager.register(client_proxy)
54 if is_success:
55
56 def rpc_termination_callback() -> None:
57 client_proxy.bridge.close()
58 client_manager.unregister(client_proxy)
59
60 context.add_callback(rpc_termination_callback)
61 return is_success
62
63
64 class FlowerServiceServicer(transport_pb2_grpc.FlowerServiceServicer):
65 """FlowerServiceServicer for bi-directional gRPC message stream."""
66
67 def __init__(
68 self,
69 client_manager: ClientManager,
70 grpc_bridge_factory: Callable[[], GrpcBridge] = default_bridge_factory,
71 grpc_client_proxy_factory: Callable[
72 [str, GrpcBridge], GrpcClientProxy
73 ] = default_grpc_client_proxy_factory,
74 ) -> None:
75 self.client_manager: ClientManager = client_manager
76 self.grpc_bridge_factory = grpc_bridge_factory
77 self.client_proxy_factory = grpc_client_proxy_factory
78
79 def Join( # pylint: disable=invalid-name
80 self,
81 request_iterator: Iterator[ClientMessage],
82 context: grpc.ServicerContext,
83 ) -> Iterator[ServerMessage]:
84 """Facilitate bi-directional streaming of messages between server and client.
85
86 Invoked by each gRPC client which participates in the network.
87
88 Protocol:
89 - The first message is sent from the server to the client
90 - Both `ServerMessage` and `ClientMessage` are message "wrappers"
91 wrapping the actual message
92 - The `Join` method is (pretty much) unaware of the protocol
93 """
94 peer: str = context.peer()
95 bridge = self.grpc_bridge_factory()
96 client_proxy = self.client_proxy_factory(peer, bridge)
97 is_success = register_client_proxy(self.client_manager, client_proxy, context)
98
99 if is_success:
100 # Get iterators
101 client_message_iterator = TimeoutIterator(
102 iterator=request_iterator, reset_on_next=True
103 )
104 ins_wrapper_iterator = bridge.ins_wrapper_iterator()
105
106 # All messages will be pushed to client bridge directly
107 while True:
108 try:
109 # Get ins_wrapper from bridge and yield server_message
110 ins_wrapper: InsWrapper = next(ins_wrapper_iterator)
111 yield ins_wrapper.server_message
112
113 # Set current timeout, might be None
114 if ins_wrapper.timeout is not None:
115 client_message_iterator.set_timeout(ins_wrapper.timeout)
116
117 # Wait for client message
118 client_message = next(client_message_iterator)
119
120 if client_message is client_message_iterator.get_sentinel():
121 # Important: calling `context.abort` in gRPC always
122 # raises an exception so that all code after the call to
123 # `context.abort` will not run. If subsequent code should
124 # be executed, the `rpc_termination_callback` can be used
125 # (as shown in the `register_client` function).
126 details = f"Timeout of {ins_wrapper.timeout}sec was exceeded."
127 context.abort(
128 code=grpc.StatusCode.DEADLINE_EXCEEDED,
129 details=details,
130 )
131 # This return statement is only for the linter so it understands
132 # that client_message in subsequent lines is not None
133 # It does not understand that `context.abort` will terminate
134 # this execution context by raising an exception.
135 return
136
137 bridge.set_res_wrapper(
138 res_wrapper=ResWrapper(client_message=client_message)
139 )
140 except StopIteration:
141 break
142
[end of src/py/flwr/server/fleet/grpc_bidi/flower_service_servicer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/py/flwr/server/fleet/grpc_bidi/flower_service_servicer.py b/src/py/flwr/server/fleet/grpc_bidi/flower_service_servicer.py
--- a/src/py/flwr/server/fleet/grpc_bidi/flower_service_servicer.py
+++ b/src/py/flwr/server/fleet/grpc_bidi/flower_service_servicer.py
@@ -18,7 +18,7 @@
- https://github.com/grpc/grpc/blob/master/doc/statuscodes.md
"""
-
+import uuid
from typing import Callable, Iterator
import grpc
@@ -88,7 +88,11 @@
wrapping the actual message
- The `Join` method is (pretty much) unaware of the protocol
"""
- peer: str = context.peer()
+ # NOTE(aloga): this is a hack. when running flower behind a proxy, the peer can
+ # be the same for different clients (i.e. ip:port) so we use a uuid that is
+ # unique.
+ # peer: str = context.peer()
+ peer = uuid.uuid4().hex
bridge = self.grpc_bridge_factory()
client_proxy = self.client_proxy_factory(peer, bridge)
is_success = register_client_proxy(self.client_manager, client_proxy, context)
| {"golden_diff": "diff --git a/src/py/flwr/server/fleet/grpc_bidi/flower_service_servicer.py b/src/py/flwr/server/fleet/grpc_bidi/flower_service_servicer.py\n--- a/src/py/flwr/server/fleet/grpc_bidi/flower_service_servicer.py\n+++ b/src/py/flwr/server/fleet/grpc_bidi/flower_service_servicer.py\n@@ -18,7 +18,7 @@\n - https://github.com/grpc/grpc/blob/master/doc/statuscodes.md\n \"\"\"\n \n-\n+import uuid\n from typing import Callable, Iterator\n \n import grpc\n@@ -88,7 +88,11 @@\n wrapping the actual message\n - The `Join` method is (pretty much) unaware of the protocol\n \"\"\"\n- peer: str = context.peer()\n+ # NOTE(aloga): this is a hack. when running flower behind a proxy, the peer can\n+ # be the same for different clients (i.e. ip:port) so we use a uuid that is\n+ # unique.\n+ # peer: str = context.peer()\n+ peer = uuid.uuid4().hex\n bridge = self.grpc_bridge_factory()\n client_proxy = self.client_proxy_factory(peer, bridge)\n is_success = register_client_proxy(self.client_manager, client_proxy, context)\n", "issue": "If gRPC server is running behing a load balancer, clients may not be able to connect\n### Describe the bug\r\n\r\nFlower is using the `peer()` method from `grpc.ServicerContext` in order to identify unique flower clients. However, in some situations (like when running the gRPC server behind a load balancer or proxy) different clients can have the same peer identifier (i.e. the same `IP:PORT`), as HTTP/2 connections are multiplexed.\r\n\r\n### Steps/Code to Reproduce\r\n\r\nThis can be tested with a simple [Traefik](https://doc.traefik.io/traefik/) setup as follows (https://doc.traefik.io/traefik/user-guides/grpc/):\r\n\r\n`traefik.yaml`:\r\n```\r\nentryPoints:\r\n web:\r\n address: :80\r\n\r\nproviders:\r\n file:\r\n filename: grpc.yaml\r\n\r\napi: {}\r\n```\r\n\r\n`grpc.yaml`:\r\n```\r\nhttp:\r\n routers:\r\n to-grpc:\r\n entryPoints: \r\n - \"web\"\r\n service: \"srv-grpc\"\r\n rule: \"HostRegexp(`.*`)\"\r\n\r\n services:\r\n srv-grpc:\r\n loadBalancer:\r\n servers:\r\n - url: h2c://127.0.0.1:5000\r\n```\r\n\r\nThe Traefik server can be executed as follows:\r\n```\r\n ./traefik --configFile=traefik.yaml\r\n```\r\n\r\nThen we can launch then a Flower sever with the following code:\r\n```\r\nimport flwr as fl\r\n\r\ndef acc_wavg(metrics):\r\n n = sum([i for i, _ in metrics])\r\n acc = sum([i * metric[\"accuracy\"] / n for i, metric in metrics])\r\n return {\"accuracy\": acc}\r\n \r\nstrategy = fl.server.strategy.FedAvg(\r\n min_available_clients=2,\r\n evaluate_metrics_aggregation_fn=acc_wavg\r\n)\r\n \r\nfl.server.start_server(\r\n server_address=\"0.0.0.0:5000\",\r\n config=fl.server.ServerConfig(num_rounds=3),\r\n strategy=strategy,\r\n)\r\n```\r\n\r\n### Expected Results\r\n\r\nWhen we connect two clients to the loadbalancer IP, on port 80, the first one connects and keeps waiting:\r\n\r\n```\r\nINFO flwr 2023-06-28 10:03:14,848 | grpc.py:50 | Opened insecure gRPC connection (no certificates were passed)\r\nDEBUG flwr 2023-06-28 10:03:14,849 | connection.py:39 | ChannelConnectivity.IDLE\r\nDEBUG flwr 2023-06-28 10:03:14,849 | connection.py:39 | ChannelConnectivity.CONNECTING\r\nDEBUG flwr 2023-06-28 10:03:14,875 | connection.py:39 | ChannelConnectivity.READY\r\n```\r\n\r\nHowever, the second one is not connected:\r\n```\r\nDEBUG flwr 2023-06-28 09:37:32,554 | connection.py:39 | ChannelConnectivity.IDLE\r\nDEBUG flwr 2023-06-28 09:37:32,554 | connection.py:39 | ChannelConnectivity.CONNECTING\r\nDEBUG flwr 2023-06-28 09:37:32,578 | connection.py:39 | 
ChannelConnectivity.READY\r\nDEBUG flwr 2023-06-28 09:37:32,784 | connection.py:113 | gRPC channel closed\r\nTraceback (most recent call last):\r\n File \"/home/alvaro/w/prj/fl/fl.py\", line 68, in <module>\r\n fl.client.start_numpy_client(\r\n File \"/home/alvaro/w/prj/fl/VENV/lib/python3.11/site-packages/flwr/client/app.py\", line 252, in start_numpy_client\r\n start_client(\r\n File \"/home/alvaro/w/prj/fl/VENV/lib/python3.11/site-packages/flwr/client/app.py\", line 174, in start_client\r\n server_message = receive()\r\n ^^^^^^^^^\r\n File \"/home/alvaro/w/prj/fl/VENV/lib/python3.11/site-packages/flwr/client/grpc_client/connection.py\", line 105, in <lambda>\r\n receive: Callable[[], ServerMessage] = lambda: next(server_message_iterator)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/home/alvaro/w/prj/fl/VENV/lib/python3.11/site-packages/grpc/_channel.py\", line 475, in __next__\r\n return self._next()\r\n ^^^^^^^^^^^^\r\n File \"/home/alvaro/w/prj/fl/VENV/lib/python3.11/site-packages/grpc/_channel.py\", line 879, in _next\r\n raise StopIteration()\r\nStopIteration\r\n```\r\n\r\nThis is because the second client connection is discarded [here]https://github.com/adap/flower/blob/main/src/py/flwr/server/client_manager.py#L149) as it is coming from the same gRPC peer (i.e. the load balancer) due to this code [here](https://github.com/adap/flower/blob/main/src/py/flwr/server/fleet/grpc_bidi/flower_service_servicer.py#L91), as due to the above I assume that the tuple `ip:port` is not unique for gRPC clients, therefore it cannot be used to identify a unique flower client (as a matter of fact, the `ip:port` are those of the load balancer, not those of the actual client).\r\n\r\nAs a test and quick hack we have moved to use uuids to identify the clients [here](https://github.com/AI4EOSC/flower/commit/b215d9f3cce1ad8806e296db4fe105a8b7f5c6c9). To my understanding, it is safe to do that way, as the peer identifier is not used elsewhere, but I do not know if it is planned to do something else with them.\r\n\r\n### Actual Results\r\n\r\nTwo and more clients should be able to connect when running behind a load balancer.\n", "before_files": [{"content": "# Copyright 2020 Flower Labs GmbH. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Servicer for FlowerService.\n\nRelevant knowledge for reading this modules code:\n- https://github.com/grpc/grpc/blob/master/doc/statuscodes.md\n\"\"\"\n\n\nfrom typing import Callable, Iterator\n\nimport grpc\nfrom iterators import TimeoutIterator\n\nfrom flwr.proto import transport_pb2_grpc # pylint: disable=E0611\nfrom flwr.proto.transport_pb2 import ( # pylint: disable=E0611\n ClientMessage,\n ServerMessage,\n)\nfrom flwr.server.client_manager import ClientManager\nfrom flwr.server.fleet.grpc_bidi.grpc_bridge import GrpcBridge, InsWrapper, ResWrapper\nfrom flwr.server.fleet.grpc_bidi.grpc_client_proxy import GrpcClientProxy\n\n\ndef default_bridge_factory() -> GrpcBridge:\n \"\"\"Return GrpcBridge instance.\"\"\"\n return GrpcBridge()\n\n\ndef default_grpc_client_proxy_factory(cid: str, bridge: GrpcBridge) -> GrpcClientProxy:\n \"\"\"Return GrpcClientProxy instance.\"\"\"\n return GrpcClientProxy(cid=cid, bridge=bridge)\n\n\ndef register_client_proxy(\n client_manager: ClientManager,\n client_proxy: GrpcClientProxy,\n context: grpc.ServicerContext,\n) -> bool:\n \"\"\"Try registering GrpcClientProxy with ClientManager.\"\"\"\n is_success = client_manager.register(client_proxy)\n if is_success:\n\n def rpc_termination_callback() -> None:\n client_proxy.bridge.close()\n client_manager.unregister(client_proxy)\n\n context.add_callback(rpc_termination_callback)\n return is_success\n\n\nclass FlowerServiceServicer(transport_pb2_grpc.FlowerServiceServicer):\n \"\"\"FlowerServiceServicer for bi-directional gRPC message stream.\"\"\"\n\n def __init__(\n self,\n client_manager: ClientManager,\n grpc_bridge_factory: Callable[[], GrpcBridge] = default_bridge_factory,\n grpc_client_proxy_factory: Callable[\n [str, GrpcBridge], GrpcClientProxy\n ] = default_grpc_client_proxy_factory,\n ) -> None:\n self.client_manager: ClientManager = client_manager\n self.grpc_bridge_factory = grpc_bridge_factory\n self.client_proxy_factory = grpc_client_proxy_factory\n\n def Join( # pylint: disable=invalid-name\n self,\n request_iterator: Iterator[ClientMessage],\n context: grpc.ServicerContext,\n ) -> Iterator[ServerMessage]:\n \"\"\"Facilitate bi-directional streaming of messages between server and client.\n\n Invoked by each gRPC client which participates in the network.\n\n Protocol:\n - The first message is sent from the server to the client\n - Both `ServerMessage` and `ClientMessage` are message \"wrappers\"\n wrapping the actual message\n - The `Join` method is (pretty much) unaware of the protocol\n \"\"\"\n peer: str = context.peer()\n bridge = self.grpc_bridge_factory()\n client_proxy = self.client_proxy_factory(peer, bridge)\n is_success = register_client_proxy(self.client_manager, client_proxy, context)\n\n if is_success:\n # Get iterators\n client_message_iterator = TimeoutIterator(\n iterator=request_iterator, reset_on_next=True\n )\n 
ins_wrapper_iterator = bridge.ins_wrapper_iterator()\n\n # All messages will be pushed to client bridge directly\n while True:\n try:\n # Get ins_wrapper from bridge and yield server_message\n ins_wrapper: InsWrapper = next(ins_wrapper_iterator)\n yield ins_wrapper.server_message\n\n # Set current timeout, might be None\n if ins_wrapper.timeout is not None:\n client_message_iterator.set_timeout(ins_wrapper.timeout)\n\n # Wait for client message\n client_message = next(client_message_iterator)\n\n if client_message is client_message_iterator.get_sentinel():\n # Important: calling `context.abort` in gRPC always\n # raises an exception so that all code after the call to\n # `context.abort` will not run. If subsequent code should\n # be executed, the `rpc_termination_callback` can be used\n # (as shown in the `register_client` function).\n details = f\"Timeout of {ins_wrapper.timeout}sec was exceeded.\"\n context.abort(\n code=grpc.StatusCode.DEADLINE_EXCEEDED,\n details=details,\n )\n # This return statement is only for the linter so it understands\n # that client_message in subsequent lines is not None\n # It does not understand that `context.abort` will terminate\n # this execution context by raising an exception.\n return\n\n bridge.set_res_wrapper(\n res_wrapper=ResWrapper(client_message=client_message)\n )\n except StopIteration:\n break\n", "path": "src/py/flwr/server/fleet/grpc_bidi/flower_service_servicer.py"}]} | 3,394 | 275 |
gh_patches_debug_19887 | rasdani/github-patches | git_diff | aws__aws-cli-1526 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
aws s3 sync --delete behaves differently if source is <LocalPath> rather than <S3Path>
This issue was raised on: https://stackoverflow.com/questions/32304326/amazon-s3-sync-deleting-excluded-files
Basically, doing `aws s3 sync . s3://bucket2 --delete` behaves differently to `aws s3 sync s3://bucket1 s3://bucket2 --delete` even if the two source directories are identical.
It would appear that the logic for `--delete` behaves differently between Local and S3. An example is well-documented in the stackoverflow issue.
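One way to express the intended behaviour is to derive each filter root from the type of that particular path (a sketch only; it reuses the `_get_s3_root`/`_get_local_root` helpers from `awscli/customizations/s3/filters.py` shown below, and the wrapper name is made up):
```
def _filter_roots(parameters):
    """Pick each root from the type of its own path, instead of assuming that
    an S3 source implies a local destination (and vice versa)."""
    dir_op = parameters['dir_op']

    def root_for(path):
        if path.startswith('s3://'):
            return _get_s3_root(path, dir_op)
        return _get_local_root(path, dir_op)

    return root_for(parameters['src']), root_for(parameters['dest'])
```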
</issue>
<code>
[start of awscli/customizations/s3/filters.py]
1 # Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13 import logging
14 import fnmatch
15 import os
16
17 from awscli.customizations.s3.utils import split_s3_bucket_key
18
19
20 LOG = logging.getLogger(__name__)
21
22
23 def create_filter(parameters):
24 """Given the CLI parameters dict, create a Filter object."""
25 # We need to evaluate all the filters based on the source
26 # directory.
27 if parameters['filters']:
28 cli_filters = parameters['filters']
29 real_filters = []
30 for filter_type, filter_pattern in cli_filters:
31 real_filters.append((filter_type.lstrip('-'),
32 filter_pattern))
33 source_location = parameters['src']
34 if source_location.startswith('s3://'):
35 # This gives us (bucket, keyname) and we want
36 # the bucket to be the root dir.
37 src_rootdir = _get_s3_root(source_location,
38 parameters['dir_op'])
39 dst_rootdir = _get_local_root(parameters['dest'],
40 parameters['dir_op'])
41 else:
42 src_rootdir = _get_local_root(parameters['src'], parameters['dir_op'])
43 dst_rootdir = _get_s3_root(parameters['dest'],
44 parameters['dir_op'])
45
46 return Filter(real_filters, src_rootdir, dst_rootdir)
47 else:
48 return Filter({}, None, None)
49
50
51 def _get_s3_root(source_location, dir_op):
52 # Obtain the bucket and the key.
53 bucket, key = split_s3_bucket_key(source_location)
54 if not dir_op and not key.endswith('/'):
55 # If we are not performing an operation on a directory and the key
56 # is of the form: ``prefix/key``. We only want ``prefix`` included in
57 # the the s3 root and not ``key``.
58 key = '/'.join(key.split('/')[:-1])
59 # Rejoin the bucket and key back together.
60 s3_path = '/'.join([bucket, key])
61 return s3_path
62
63
64 def _get_local_root(source_location, dir_op):
65 if dir_op:
66 rootdir = os.path.abspath(source_location)
67 else:
68 rootdir = os.path.abspath(os.path.dirname(source_location))
69 return rootdir
70
71
72 class Filter(object):
73 """
74 This is a universal exclude/include filter.
75 """
76 def __init__(self, patterns, rootdir, dst_rootdir):
77 """
78 :var patterns: A list of patterns. A pattern consits of a list
79 whose first member is a string 'exclude' or 'include'.
80 The second member is the actual rule.
81 :var rootdir: The root directory where the patterns are evaluated.
82 This will generally be the directory of the source location.
83 :var dst_rootdir: The destination root directory where the patterns are
84 evaluated. This is only useful when the --delete option is
85 also specified.
86
87 """
88 self._original_patterns = patterns
89 self.patterns = self._full_path_patterns(patterns, rootdir)
90 self.dst_patterns = self._full_path_patterns(patterns, dst_rootdir)
91
92 def _full_path_patterns(self, original_patterns, rootdir):
93 # We need to transform the patterns into patterns that have
94 # the root dir prefixed, so things like ``--exclude "*"``
95 # will actually be ['exclude', '/path/to/root/*']
96 full_patterns = []
97 for pattern in original_patterns:
98 full_patterns.append(
99 (pattern[0], os.path.join(rootdir, pattern[1])))
100 return full_patterns
101
102 def call(self, file_infos):
103 """
104 This function iterates over through the yielded file_info objects. It
105 determines the type of the file and applies pattern matching to
106 determine if the rule applies. While iterating though the patterns the
107 file is assigned a boolean flag to determine if a file should be
108 yielded on past the filer. Anything identified by the exclude filter
109 has its flag set to false. Anything identified by the include filter
110 has its flag set to True. All files begin with the flag set to true.
111 Rules listed at the end will overwrite flags thrown by rules listed
112 before it.
113 """
114 for file_info in file_infos:
115 file_path = file_info.src
116 file_status = (file_info, True)
117 for pattern, dst_pattern in zip(self.patterns, self.dst_patterns):
118 current_file_status = self._match_pattern(pattern, file_info)
119 if current_file_status is not None:
120 file_status = current_file_status
121 dst_current_file_status = self._match_pattern(dst_pattern, file_info)
122 if dst_current_file_status is not None:
123 file_status = dst_current_file_status
124 LOG.debug("=%s final filtered status, should_include: %s",
125 file_path, file_status[1])
126 if file_status[1]:
127 yield file_info
128
129 def _match_pattern(self, pattern, file_info):
130 file_status = None
131 file_path = file_info.src
132 pattern_type = pattern[0]
133 if file_info.src_type == 'local':
134 path_pattern = pattern[1].replace('/', os.sep)
135 else:
136 path_pattern = pattern[1].replace(os.sep, '/')
137 is_match = fnmatch.fnmatch(file_path, path_pattern)
138 if is_match and pattern_type == 'include':
139 file_status = (file_info, True)
140 LOG.debug("%s matched include filter: %s",
141 file_path, path_pattern)
142 elif is_match and pattern_type == 'exclude':
143 file_status = (file_info, False)
144 LOG.debug("%s matched exclude filter: %s",
145 file_path, path_pattern)
146 else:
147 LOG.debug("%s did not match %s filter: %s",
148 file_path, pattern_type[2:], path_pattern)
149 return file_status
150
[end of awscli/customizations/s3/filters.py]
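To see the asymmetry concretely, the snippet below (illustrative only; the parameter dict is hand-built here, whereas the real CLI constructs it internally) runs an S3-to-S3 parameter set through `create_filter()` as defined above. The destination root then comes out of `_get_local_root()`, i.e. a local absolute path, so destination-side pattern matching, which `--delete` relies on, can never line up with real S3 keys.
```
from awscli.customizations.s3.filters import create_filter  # assumes awscli is importable

params = {
    'filters': [['--exclude', '*.log']],
    'src': 's3://bucket1/prefix/',
    'dest': 's3://bucket2/prefix/',
    'dir_op': True,
}
f = create_filter(params)
# f.patterns is rooted at 'bucket1/prefix/...' (S3 style), while f.dst_patterns
# is rooted at os.path.abspath('s3://bucket2/prefix/'), something like
# '/current/working/dir/s3:/bucket2/prefix/*.log' on Linux.
print(f.dst_patterns)
```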
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/awscli/customizations/s3/filters.py b/awscli/customizations/s3/filters.py
--- a/awscli/customizations/s3/filters.py
+++ b/awscli/customizations/s3/filters.py
@@ -36,12 +36,16 @@
# the bucket to be the root dir.
src_rootdir = _get_s3_root(source_location,
parameters['dir_op'])
- dst_rootdir = _get_local_root(parameters['dest'],
- parameters['dir_op'])
else:
src_rootdir = _get_local_root(parameters['src'], parameters['dir_op'])
+
+ destination_location = parameters['dest']
+ if destination_location.startswith('s3://'):
dst_rootdir = _get_s3_root(parameters['dest'],
parameters['dir_op'])
+ else:
+ dst_rootdir = _get_local_root(parameters['dest'],
+ parameters['dir_op'])
return Filter(real_filters, src_rootdir, dst_rootdir)
else:
| {"golden_diff": "diff --git a/awscli/customizations/s3/filters.py b/awscli/customizations/s3/filters.py\n--- a/awscli/customizations/s3/filters.py\n+++ b/awscli/customizations/s3/filters.py\n@@ -36,12 +36,16 @@\n # the bucket to be the root dir.\n src_rootdir = _get_s3_root(source_location,\n parameters['dir_op'])\n- dst_rootdir = _get_local_root(parameters['dest'],\n- parameters['dir_op'])\n else:\n src_rootdir = _get_local_root(parameters['src'], parameters['dir_op'])\n+\n+ destination_location = parameters['dest']\n+ if destination_location.startswith('s3://'):\n dst_rootdir = _get_s3_root(parameters['dest'],\n parameters['dir_op'])\n+ else:\n+ dst_rootdir = _get_local_root(parameters['dest'],\n+ parameters['dir_op'])\n \n return Filter(real_filters, src_rootdir, dst_rootdir)\n else:\n", "issue": "aws s3 sync --delete behaves differently if source is <LocalPath> rather than <S3Path>\nThis issue was raised on: https://stackoverflow.com/questions/32304326/amazon-s3-sync-deleting-excluded-files\n\nBasically, doing `aws s3 sync . s3://bucket2 --delete` behaves differently to `aws s3 sync s3://bucket1 s3://bucket2 --delete` even if the two source directories are identical.\n\nIt would appear that the logic for `--delete` behaves differently between Local and S3. An example is well-documented in the stackoverflow issue.\n\n", "before_files": [{"content": "# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\nimport logging\nimport fnmatch\nimport os\n\nfrom awscli.customizations.s3.utils import split_s3_bucket_key\n\n\nLOG = logging.getLogger(__name__)\n\n\ndef create_filter(parameters):\n \"\"\"Given the CLI parameters dict, create a Filter object.\"\"\"\n # We need to evaluate all the filters based on the source\n # directory.\n if parameters['filters']:\n cli_filters = parameters['filters']\n real_filters = []\n for filter_type, filter_pattern in cli_filters:\n real_filters.append((filter_type.lstrip('-'),\n filter_pattern))\n source_location = parameters['src']\n if source_location.startswith('s3://'):\n # This gives us (bucket, keyname) and we want\n # the bucket to be the root dir.\n src_rootdir = _get_s3_root(source_location,\n parameters['dir_op'])\n dst_rootdir = _get_local_root(parameters['dest'],\n parameters['dir_op'])\n else:\n src_rootdir = _get_local_root(parameters['src'], parameters['dir_op'])\n dst_rootdir = _get_s3_root(parameters['dest'],\n parameters['dir_op'])\n\n return Filter(real_filters, src_rootdir, dst_rootdir)\n else:\n return Filter({}, None, None)\n\n\ndef _get_s3_root(source_location, dir_op):\n # Obtain the bucket and the key.\n bucket, key = split_s3_bucket_key(source_location)\n if not dir_op and not key.endswith('/'):\n # If we are not performing an operation on a directory and the key\n # is of the form: ``prefix/key``. 
We only want ``prefix`` included in\n # the the s3 root and not ``key``.\n key = '/'.join(key.split('/')[:-1])\n # Rejoin the bucket and key back together.\n s3_path = '/'.join([bucket, key])\n return s3_path\n\n\ndef _get_local_root(source_location, dir_op):\n if dir_op:\n rootdir = os.path.abspath(source_location)\n else:\n rootdir = os.path.abspath(os.path.dirname(source_location))\n return rootdir\n\n\nclass Filter(object):\n \"\"\"\n This is a universal exclude/include filter.\n \"\"\"\n def __init__(self, patterns, rootdir, dst_rootdir):\n \"\"\"\n :var patterns: A list of patterns. A pattern consits of a list\n whose first member is a string 'exclude' or 'include'.\n The second member is the actual rule.\n :var rootdir: The root directory where the patterns are evaluated.\n This will generally be the directory of the source location.\n :var dst_rootdir: The destination root directory where the patterns are\n evaluated. This is only useful when the --delete option is\n also specified.\n\n \"\"\"\n self._original_patterns = patterns\n self.patterns = self._full_path_patterns(patterns, rootdir)\n self.dst_patterns = self._full_path_patterns(patterns, dst_rootdir)\n\n def _full_path_patterns(self, original_patterns, rootdir):\n # We need to transform the patterns into patterns that have\n # the root dir prefixed, so things like ``--exclude \"*\"``\n # will actually be ['exclude', '/path/to/root/*']\n full_patterns = []\n for pattern in original_patterns:\n full_patterns.append(\n (pattern[0], os.path.join(rootdir, pattern[1])))\n return full_patterns\n\n def call(self, file_infos):\n \"\"\"\n This function iterates over through the yielded file_info objects. It\n determines the type of the file and applies pattern matching to\n determine if the rule applies. While iterating though the patterns the\n file is assigned a boolean flag to determine if a file should be\n yielded on past the filer. Anything identified by the exclude filter\n has its flag set to false. Anything identified by the include filter\n has its flag set to True. 
All files begin with the flag set to true.\n Rules listed at the end will overwrite flags thrown by rules listed\n before it.\n \"\"\"\n for file_info in file_infos:\n file_path = file_info.src\n file_status = (file_info, True)\n for pattern, dst_pattern in zip(self.patterns, self.dst_patterns):\n current_file_status = self._match_pattern(pattern, file_info)\n if current_file_status is not None:\n file_status = current_file_status\n dst_current_file_status = self._match_pattern(dst_pattern, file_info)\n if dst_current_file_status is not None:\n file_status = dst_current_file_status\n LOG.debug(\"=%s final filtered status, should_include: %s\",\n file_path, file_status[1])\n if file_status[1]:\n yield file_info\n\n def _match_pattern(self, pattern, file_info):\n file_status = None\n file_path = file_info.src\n pattern_type = pattern[0]\n if file_info.src_type == 'local':\n path_pattern = pattern[1].replace('/', os.sep)\n else:\n path_pattern = pattern[1].replace(os.sep, '/')\n is_match = fnmatch.fnmatch(file_path, path_pattern)\n if is_match and pattern_type == 'include':\n file_status = (file_info, True)\n LOG.debug(\"%s matched include filter: %s\",\n file_path, path_pattern)\n elif is_match and pattern_type == 'exclude':\n file_status = (file_info, False)\n LOG.debug(\"%s matched exclude filter: %s\",\n file_path, path_pattern)\n else:\n LOG.debug(\"%s did not match %s filter: %s\",\n file_path, pattern_type[2:], path_pattern)\n return file_status\n", "path": "awscli/customizations/s3/filters.py"}]} | 2,375 | 217 |
gh_patches_debug_24791 | rasdani/github-patches | git_diff | svthalia__concrexit-1302 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
TypeError: '<' not supported between instances of 'datetime.date' and 'NoneType'
Sentry Issue: [CONCREXIT-3T](https://sentry.io/organizations/thalia/issues/1937285207/?referrer=github_integration)
```
TypeError: '<' not supported between instances of 'datetime.date' and 'NoneType'
(5 additional frame(s) were not displayed)
...
File "django/views/generic/base.py", line 98, in dispatch
return handler(request, *args, **kwargs)
File "django/views/generic/detail.py", line 107, in get
context = self.get_context_data(object=self.object)
File "members/views.py", line 181, in get_context_data
societies = member_societies(member)
File "members/services.py", line 91, in member_societies
societies = _member_group_memberships(
File "members/services.py", line 45, in _member_group_memberships
if data[name]["latest"] < membership.until:
```
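The failing line compares `data[name]["latest"]` (a `date`) with `membership.until`, which is `None` for an ongoing membership; a standalone reproduction (no Django needed):
```
from datetime import date

try:
    date(2020, 1, 1) < None
except TypeError as exc:
    print(exc)  # '<' not supported between instances of 'datetime.date' and 'NoneType'
```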
</issue>
<code>
[start of website/members/services.py]
1 """Services defined in the members package"""
2 from datetime import date
3 from typing import Callable, List, Dict, Any
4
5 from django.conf import settings
6 from django.db.models import Q, Count
7 from django.utils import timezone
8 from django.utils.translation import gettext
9
10 from members import emails
11 from members.models import Membership, Member
12 from utils.snippets import datetime_to_lectureyear
13
14
15 def _member_group_memberships(
16 member: Member, condition: Callable[[Membership], bool]
17 ) -> Dict[str, Any]:
18 """
19 Determines the group membership of a user based on a condition
20 :return: Object with group memberships
21 """
22 memberships = member.membergroupmembership_set.all()
23 data = {}
24
25 for membership in memberships:
26 if not condition(membership):
27 continue
28 period = {
29 "since": membership.since,
30 "until": membership.until,
31 "chair": membership.chair,
32 }
33
34 if hasattr(membership.group, "board"):
35 period["role"] = membership.role
36
37 if membership.until is None and hasattr(membership.group, "board"):
38 period["until"] = membership.group.board.until
39
40 name = membership.group.name
41 if data.get(name):
42 data[name]["periods"].append(period)
43 if data[name]["earliest"] > membership.since:
44 data[name]["earliest"] = membership.since
45 if data[name]["latest"] < membership.until:
46 data[name]["latest"] = membership.until
47 data[name]["periods"].sort(key=lambda x: x["since"])
48 else:
49 data[name] = {
50 "pk": membership.group.pk,
51 "active": membership.group.active,
52 "name": name,
53 "periods": [period],
54 "url": settings.BASE_URL + membership.group.get_absolute_url(),
55 "earliest": membership.since,
56 "latest": membership.until,
57 }
58 return data
59
60
61 def member_achievements(member) -> List:
62 """
63 Derives a list of achievements of a member
64 Committee and board memberships + mentorships
65 """
66 achievements = _member_group_memberships(
67 member,
68 lambda membership: (
69 hasattr(membership.group, "board") or hasattr(membership.group, "committee")
70 ),
71 )
72
73 mentor_years = member.mentorship_set.all()
74 for mentor_year in mentor_years:
75 name = "Mentor in {}".format(mentor_year.year)
76 # Ensure mentorships appear last but are sorted
77 earliest = date.today()
78 earliest = earliest.replace(year=earliest.year + mentor_year.year)
79 if not achievements.get(name):
80 achievements[name] = {
81 "name": name,
82 "earliest": earliest,
83 }
84 return sorted(achievements.values(), key=lambda x: x["earliest"])
85
86
87 def member_societies(member) -> List:
88 """
89 Derives a list of societies a member was part of
90 """
91 societies = _member_group_memberships(
92 member, lambda membership: (hasattr(membership.group, "society"))
93 )
94 return sorted(societies.values(), key=lambda x: x["earliest"])
95
96
97 def gen_stats_member_type() -> Dict[str, int]:
98 """
99 Generate a dictionary where every key is a member type with
100 the value being the number of current members of that type
101 """
102
103 data = {}
104 for key, display in Membership.MEMBERSHIP_TYPES:
105 data[str(display)] = (
106 Membership.objects.filter(since__lte=date.today())
107 .filter(Q(until__isnull=True) | Q(until__gt=date.today()))
108 .filter(type=key)
109 .count()
110 )
111 return data
112
113
114 def gen_stats_year() -> Dict[str, Dict[str, int]]:
115 """
116 Generate list with 6 entries, where each entry represents the total amount
117 of Thalia members in a year. The sixth element contains all the multi-year
118 students.
119 """
120 stats_year = {}
121 current_year = datetime_to_lectureyear(date.today())
122
123 for i in range(5):
124 new = {}
125 for key, _ in Membership.MEMBERSHIP_TYPES:
126 new[key] = (
127 Membership.objects.filter(user__profile__starting_year=current_year - i)
128 .filter(since__lte=date.today())
129 .filter(Q(until__isnull=True) | Q(until__gt=date.today()))
130 .filter(type=key)
131 .count()
132 )
133 stats_year[str(current_year - i)] = new
134
135 # Add multi year members
136 new = {}
137 for key, _ in Membership.MEMBERSHIP_TYPES:
138 new[key] = (
139 Membership.objects.filter(user__profile__starting_year__lt=current_year - 4)
140 .filter(since__lte=date.today())
141 .filter(Q(until__isnull=True) | Q(until__gt=date.today()))
142 .filter(type=key)
143 .count()
144 )
145 stats_year[str(gettext("Older"))] = new
146
147 return stats_year
148
149
150 def verify_email_change(change_request) -> None:
151 """
152 Mark the email change request as verified
153
154 :param change_request: the email change request
155 """
156 change_request.verified = True
157 change_request.save()
158
159 process_email_change(change_request)
160
161
162 def confirm_email_change(change_request) -> None:
163 """
164 Mark the email change request as verified
165
166 :param change_request: the email change request
167 """
168 change_request.confirmed = True
169 change_request.save()
170
171 process_email_change(change_request)
172
173
174 def process_email_change(change_request) -> None:
175 """
176 Change the user's email address if the request was completed and
177 send the completion email
178
179 :param change_request: the email change request
180 """
181 if not change_request.completed:
182 return
183
184 member = change_request.member
185 member.email = change_request.email
186 member.save()
187
188 emails.send_email_change_completion_message(change_request)
189
190
191 def execute_data_minimisation(dry_run=False, members=None) -> List[Member]:
192 """
193 Clean the profiles of members/users of whom the last membership ended
194 at least 31 days ago
195
196 :param dry_run: does not really remove data if True
197 :param members: queryset of members to process, optional
198 :return: list of processed members
199 """
200 if not members:
201 members = Member.objects
202 members = (
203 members.annotate(membership_count=Count("membership"))
204 .exclude(
205 (
206 Q(membership__until__isnull=True)
207 | Q(membership__until__gt=timezone.now().date())
208 )
209 & Q(membership_count__gt=0)
210 )
211 .distinct()
212 .prefetch_related("membership_set", "profile")
213 )
214 deletion_period = timezone.now().date() - timezone.timedelta(days=31)
215 processed_members = []
216 for member in members:
217 if (
218 member.latest_membership is None
219 or member.latest_membership.until <= deletion_period
220 ):
221 processed_members.append(member)
222 profile = member.profile
223 profile.student_number = None
224 profile.phone_number = None
225 profile.address_street = None
226 profile.address_street2 = None
227 profile.address_postal_code = None
228 profile.address_city = None
229 profile.address_country = None
230 profile.birthday = None
231 profile.emergency_contact_phone_number = None
232 profile.emergency_contact = None
233 member.bank_accounts.all().delete()
234 if not dry_run:
235 profile.save()
236
237 return processed_members
238
[end of website/members/services.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/website/members/services.py b/website/members/services.py
--- a/website/members/services.py
+++ b/website/members/services.py
@@ -40,10 +40,13 @@
name = membership.group.name
if data.get(name):
data[name]["periods"].append(period)
- if data[name]["earliest"] > membership.since:
- data[name]["earliest"] = membership.since
- if data[name]["latest"] < membership.until:
- data[name]["latest"] = membership.until
+ if data[name]["earliest"] > period["since"]:
+ data[name]["earliest"] = period["since"]
+ if period["until"] is None or (
+ data[name]["latest"] is not None
+ and data[name]["latest"] < period["until"]
+ ):
+ data[name]["latest"] = period["until"]
data[name]["periods"].sort(key=lambda x: x["since"])
else:
data[name] = {
@@ -52,8 +55,8 @@
"name": name,
"periods": [period],
"url": settings.BASE_URL + membership.group.get_absolute_url(),
- "earliest": membership.since,
- "latest": membership.until,
+ "earliest": period["since"],
+ "latest": period["until"],
}
return data
| {"golden_diff": "diff --git a/website/members/services.py b/website/members/services.py\n--- a/website/members/services.py\n+++ b/website/members/services.py\n@@ -40,10 +40,13 @@\n name = membership.group.name\n if data.get(name):\n data[name][\"periods\"].append(period)\n- if data[name][\"earliest\"] > membership.since:\n- data[name][\"earliest\"] = membership.since\n- if data[name][\"latest\"] < membership.until:\n- data[name][\"latest\"] = membership.until\n+ if data[name][\"earliest\"] > period[\"since\"]:\n+ data[name][\"earliest\"] = period[\"since\"]\n+ if period[\"until\"] is None or (\n+ data[name][\"latest\"] is not None\n+ and data[name][\"latest\"] < period[\"until\"]\n+ ):\n+ data[name][\"latest\"] = period[\"until\"]\n data[name][\"periods\"].sort(key=lambda x: x[\"since\"])\n else:\n data[name] = {\n@@ -52,8 +55,8 @@\n \"name\": name,\n \"periods\": [period],\n \"url\": settings.BASE_URL + membership.group.get_absolute_url(),\n- \"earliest\": membership.since,\n- \"latest\": membership.until,\n+ \"earliest\": period[\"since\"],\n+ \"latest\": period[\"until\"],\n }\n return data\n", "issue": "TypeError: '<' not supported between instances of 'datetime.date' and 'NoneType'\nSentry Issue: [CONCREXIT-3T](https://sentry.io/organizations/thalia/issues/1937285207/?referrer=github_integration)\n\n```\nTypeError: '<' not supported between instances of 'datetime.date' and 'NoneType'\n(5 additional frame(s) were not displayed)\n...\n File \"django/views/generic/base.py\", line 98, in dispatch\n return handler(request, *args, **kwargs)\n File \"django/views/generic/detail.py\", line 107, in get\n context = self.get_context_data(object=self.object)\n File \"members/views.py\", line 181, in get_context_data\n societies = member_societies(member)\n File \"members/services.py\", line 91, in member_societies\n societies = _member_group_memberships(\n File \"members/services.py\", line 45, in _member_group_memberships\n if data[name][\"latest\"] < membership.until:\n```\n", "before_files": [{"content": "\"\"\"Services defined in the members package\"\"\"\nfrom datetime import date\nfrom typing import Callable, List, Dict, Any\n\nfrom django.conf import settings\nfrom django.db.models import Q, Count\nfrom django.utils import timezone\nfrom django.utils.translation import gettext\n\nfrom members import emails\nfrom members.models import Membership, Member\nfrom utils.snippets import datetime_to_lectureyear\n\n\ndef _member_group_memberships(\n member: Member, condition: Callable[[Membership], bool]\n) -> Dict[str, Any]:\n \"\"\"\n Determines the group membership of a user based on a condition\n :return: Object with group memberships\n \"\"\"\n memberships = member.membergroupmembership_set.all()\n data = {}\n\n for membership in memberships:\n if not condition(membership):\n continue\n period = {\n \"since\": membership.since,\n \"until\": membership.until,\n \"chair\": membership.chair,\n }\n\n if hasattr(membership.group, \"board\"):\n period[\"role\"] = membership.role\n\n if membership.until is None and hasattr(membership.group, \"board\"):\n period[\"until\"] = membership.group.board.until\n\n name = membership.group.name\n if data.get(name):\n data[name][\"periods\"].append(period)\n if data[name][\"earliest\"] > membership.since:\n data[name][\"earliest\"] = membership.since\n if data[name][\"latest\"] < membership.until:\n data[name][\"latest\"] = membership.until\n data[name][\"periods\"].sort(key=lambda x: x[\"since\"])\n else:\n data[name] = {\n \"pk\": membership.group.pk,\n 
\"active\": membership.group.active,\n \"name\": name,\n \"periods\": [period],\n \"url\": settings.BASE_URL + membership.group.get_absolute_url(),\n \"earliest\": membership.since,\n \"latest\": membership.until,\n }\n return data\n\n\ndef member_achievements(member) -> List:\n \"\"\"\n Derives a list of achievements of a member\n Committee and board memberships + mentorships\n \"\"\"\n achievements = _member_group_memberships(\n member,\n lambda membership: (\n hasattr(membership.group, \"board\") or hasattr(membership.group, \"committee\")\n ),\n )\n\n mentor_years = member.mentorship_set.all()\n for mentor_year in mentor_years:\n name = \"Mentor in {}\".format(mentor_year.year)\n # Ensure mentorships appear last but are sorted\n earliest = date.today()\n earliest = earliest.replace(year=earliest.year + mentor_year.year)\n if not achievements.get(name):\n achievements[name] = {\n \"name\": name,\n \"earliest\": earliest,\n }\n return sorted(achievements.values(), key=lambda x: x[\"earliest\"])\n\n\ndef member_societies(member) -> List:\n \"\"\"\n Derives a list of societies a member was part of\n \"\"\"\n societies = _member_group_memberships(\n member, lambda membership: (hasattr(membership.group, \"society\"))\n )\n return sorted(societies.values(), key=lambda x: x[\"earliest\"])\n\n\ndef gen_stats_member_type() -> Dict[str, int]:\n \"\"\"\n Generate a dictionary where every key is a member type with\n the value being the number of current members of that type\n \"\"\"\n\n data = {}\n for key, display in Membership.MEMBERSHIP_TYPES:\n data[str(display)] = (\n Membership.objects.filter(since__lte=date.today())\n .filter(Q(until__isnull=True) | Q(until__gt=date.today()))\n .filter(type=key)\n .count()\n )\n return data\n\n\ndef gen_stats_year() -> Dict[str, Dict[str, int]]:\n \"\"\"\n Generate list with 6 entries, where each entry represents the total amount\n of Thalia members in a year. 
The sixth element contains all the multi-year\n students.\n \"\"\"\n stats_year = {}\n current_year = datetime_to_lectureyear(date.today())\n\n for i in range(5):\n new = {}\n for key, _ in Membership.MEMBERSHIP_TYPES:\n new[key] = (\n Membership.objects.filter(user__profile__starting_year=current_year - i)\n .filter(since__lte=date.today())\n .filter(Q(until__isnull=True) | Q(until__gt=date.today()))\n .filter(type=key)\n .count()\n )\n stats_year[str(current_year - i)] = new\n\n # Add multi year members\n new = {}\n for key, _ in Membership.MEMBERSHIP_TYPES:\n new[key] = (\n Membership.objects.filter(user__profile__starting_year__lt=current_year - 4)\n .filter(since__lte=date.today())\n .filter(Q(until__isnull=True) | Q(until__gt=date.today()))\n .filter(type=key)\n .count()\n )\n stats_year[str(gettext(\"Older\"))] = new\n\n return stats_year\n\n\ndef verify_email_change(change_request) -> None:\n \"\"\"\n Mark the email change request as verified\n\n :param change_request: the email change request\n \"\"\"\n change_request.verified = True\n change_request.save()\n\n process_email_change(change_request)\n\n\ndef confirm_email_change(change_request) -> None:\n \"\"\"\n Mark the email change request as verified\n\n :param change_request: the email change request\n \"\"\"\n change_request.confirmed = True\n change_request.save()\n\n process_email_change(change_request)\n\n\ndef process_email_change(change_request) -> None:\n \"\"\"\n Change the user's email address if the request was completed and\n send the completion email\n\n :param change_request: the email change request\n \"\"\"\n if not change_request.completed:\n return\n\n member = change_request.member\n member.email = change_request.email\n member.save()\n\n emails.send_email_change_completion_message(change_request)\n\n\ndef execute_data_minimisation(dry_run=False, members=None) -> List[Member]:\n \"\"\"\n Clean the profiles of members/users of whom the last membership ended\n at least 31 days ago\n\n :param dry_run: does not really remove data if True\n :param members: queryset of members to process, optional\n :return: list of processed members\n \"\"\"\n if not members:\n members = Member.objects\n members = (\n members.annotate(membership_count=Count(\"membership\"))\n .exclude(\n (\n Q(membership__until__isnull=True)\n | Q(membership__until__gt=timezone.now().date())\n )\n & Q(membership_count__gt=0)\n )\n .distinct()\n .prefetch_related(\"membership_set\", \"profile\")\n )\n deletion_period = timezone.now().date() - timezone.timedelta(days=31)\n processed_members = []\n for member in members:\n if (\n member.latest_membership is None\n or member.latest_membership.until <= deletion_period\n ):\n processed_members.append(member)\n profile = member.profile\n profile.student_number = None\n profile.phone_number = None\n profile.address_street = None\n profile.address_street2 = None\n profile.address_postal_code = None\n profile.address_city = None\n profile.address_country = None\n profile.birthday = None\n profile.emergency_contact_phone_number = None\n profile.emergency_contact = None\n member.bank_accounts.all().delete()\n if not dry_run:\n profile.save()\n\n return processed_members\n", "path": "website/members/services.py"}]} | 2,993 | 308 |
gh_patches_debug_10004 | rasdani/github-patches | git_diff | apluslms__a-plus-616 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add max_submissions in the course points API endpoint
This API endpoint `/api/v2/courses/COURSEID/points/USERID/` for course points shows the user's points in exercises. It also includes some exercise info that is also provided by the exercise endpoint (`/api/v2/exercises/ID/`). The points API already contains `max_points` and `points_to_pass`, so why not also include `max_submissions` (maximum number of allowed submissions)?
Relevant code:
https://github.com/apluslms/a-plus/blob/d34f94b7c9a8e4d22cd28d08968b36787bb344c4/exercise/api/views.py#L420
https://github.com/apluslms/a-plus/blob/d34f94b7c9a8e4d22cd28d08968b36787bb344c4/exercise/api/custom_serializers.py#L76
</issue>
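In concrete terms the request is one extra key per exercise entry in the points payload, alongside the existing `max_points` and `points_to_pass`. The snippet below only illustrates the intended response shape; the surrounding keys and values are invented for the example and are not the actual A+ schema.

```python
# Hypothetical entry for one exercise in the points payload (illustrative
# keys only, not the exact A+ response format).
exercise_entry = {
    "url": "https://example.org/api/v2/exercises/123/",
    "max_points": 100,
    "points_to_pass": 50,
    "points": 80,
}

# The issue asks for this one additional key to be exposed as well:
exercise_entry["max_submissions"] = 10
print(exercise_entry)
```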
<code>
[start of exercise/api/serializers.py]
1 from rest_framework import serializers
2 from rest_framework.reverse import reverse
3
4 from lib.api.fields import NestedHyperlinkedIdentityField
5 from lib.api.serializers import AplusModelSerializer, HtmlViewField
6 from userprofile.api.serializers import UserBriefSerializer
7 from ..models import Submission, SubmittedFile, BaseExercise
8
9
10 __all__ = [
11 'ExerciseBriefSerializer',
12 'SubmissionBriefSerializer',
13 'SubmittedFileBriefSerializer',
14 'SubmitterStatsBriefSerializer',
15 ]
16
17
18 class ExerciseBriefSerializer(AplusModelSerializer):
19 url = NestedHyperlinkedIdentityField(
20 view_name='api:exercise-detail',
21 lookup_map='exercise.api.views.ExerciseViewSet',
22 )
23 display_name = serializers.CharField(source='__str__')
24
25 class Meta(AplusModelSerializer.Meta):
26 model = BaseExercise
27 fields = (
28 'url',
29 'html_url',
30 'display_name',
31 )
32
33
34 class SubmissionBriefSerializer(AplusModelSerializer):
35 #display_name = serializers.CharField(source='__str__')
36
37 class Meta(AplusModelSerializer.Meta):
38 model = Submission
39 fields = (
40 'submission_time',
41 )
42 extra_kwargs = {
43 'url': {
44 'view_name': 'api:submission-detail',
45 'lookup_map': 'exercise.api.views.SubmissionViewSet',
46 }
47 }
48
49
50 class SubmittedFileBriefSerializer(AplusModelSerializer):
51 #url = HtmlViewField()
52 url = NestedHyperlinkedIdentityField(
53 view_name='api:submission-files-detail',
54 lookup_map='exercise.api.views.SubmissionFileViewSet',
55 )
56
57 class Meta(AplusModelSerializer.Meta):
58 model = SubmittedFile
59 fields = (
60 'url',
61 'filename',
62 'param_name',
63 )
64
65
66 class SubmitterStatsBriefSerializer(UserBriefSerializer):
67 stats = serializers.SerializerMethodField()
68
69 def get_stats(self, profile):
70 return reverse(
71 'api:exercise-submitter_stats-detail',
72 kwargs={
73 'exercise_id': self.context['view'].exercise.id,
74 'user_id': profile.user.id,
75 },
76 request=self.context['request']
77 )
78
79 class Meta(UserBriefSerializer.Meta):
80 fields = UserBriefSerializer.Meta.fields + (
81 'stats',
82 )
83
[end of exercise/api/serializers.py]
[start of exercise/api/full_serializers.py]
1 from rest_framework import serializers
2
3 from lib.api.fields import NestedHyperlinkedIdentityField
4 from lib.api.serializers import (
5 AlwaysListSerializer,
6 CompositeListSerializer,
7 AplusSerializerMeta,
8 AplusModelSerializerBase,
9 )
10 from course.api.serializers import CourseBriefSerializer
11 from userprofile.api.serializers import UserBriefSerializer, UserListField
12
13 from ..models import Submission
14 from .serializers import (
15 ExerciseBriefSerializer,
16 SubmissionBriefSerializer,
17 SubmittedFileBriefSerializer,
18 )
19
20
21 __all__ = [
22 'ExerciseSerializer',
23 'ExerciseGraderSerializer',
24 'SubmissionSerializer',
25 'SubmissionGraderSerializer',
26 ]
27
28
29 class ExerciseSerializer(ExerciseBriefSerializer):
30 course = CourseBriefSerializer(source='course_instance')
31 post_url = serializers.SerializerMethodField()
32 exercise_info = serializers.JSONField()
33 submissions = NestedHyperlinkedIdentityField(
34 view_name='api:exercise-submissions-list',
35 lookup_map='exercise.api.views.ExerciseViewSet',
36 )
37 my_submissions = NestedHyperlinkedIdentityField(
38 view_name='api:exercise-submissions-detail',
39 lookup_map={
40 'exercise_id': 'id',
41 'user_id': lambda o=None: 'me',
42 },
43 )
44 my_stats = NestedHyperlinkedIdentityField(
45 view_name='api:exercise-submitter_stats-detail',
46 lookup_map={
47 'exercise_id': 'id',
48 'user_id': lambda o=None: 'me',
49 },
50 )
51
52 def get_post_url(self, obj):
53 # FIXME: obj should implement .get_post_url() and that should be used here
54 if obj.is_submittable:
55 request = self.context['request']
56 url = obj.get_url("exercise")
57 return request.build_absolute_uri(url)
58 return None
59
60 class Meta(ExerciseBriefSerializer.Meta):
61 fields = (
62 'name',
63 'course',
64 'is_submittable',
65 'post_url',
66 'max_points',
67 'max_submissions',
68 'exercise_info',
69 'templates',
70 'submissions',
71 'my_submissions',
72 'my_stats',
73 )
74
75
76 class ExerciseGraderSerializer(AplusModelSerializerBase):
77 url = NestedHyperlinkedIdentityField(
78 view_name='api:exercise-grader',
79 lookup_map='exercise.api.views.ExerciseViewSet',
80 )
81 exercise = ExerciseBriefSerializer(source='*')
82
83 class Meta(AplusSerializerMeta):
84 model = Submission
85 fields = (
86 'url',
87 'exercise',
88 )
89
90
91 class SubmitterLinks(AlwaysListSerializer, UserBriefSerializer):
92 pass
93
94
95 class SubmittedFileLinks(AlwaysListSerializer, SubmittedFileBriefSerializer):
96 pass
97
98
99 class SubmissionSerializer(SubmissionBriefSerializer):
100 exercise = ExerciseBriefSerializer()
101 submitters = SubmitterLinks()
102 submission_data = serializers.JSONField()
103 files = SubmittedFileLinks()
104 grader = UserBriefSerializer()
105 grading_data = serializers.JSONField()
106
107 class Meta(SubmissionBriefSerializer.Meta):
108 fields = (
109 'html_url',
110 'exercise',
111 'submitters',
112 'submission_data',
113 'files',
114 'status',
115 'grade',
116 'late_penalty_applied',
117 'grading_time',
118 'grader',
119 'feedback',
120 'assistant_feedback',
121 'grading_data',
122 )
123
124
125 class SubmissionInGraderSerializer(SubmissionBriefSerializer):
126 class Meta(SubmissionBriefSerializer.Meta):
127 fields = (
128 'html_url',
129 )
130
131
132 class SubmissionGraderSerializer(AplusModelSerializerBase):
133 url = NestedHyperlinkedIdentityField(
134 view_name='api:submission-grader',
135 lookup_map='exercise.api.views.SubmissionViewSet',
136 )
137 submission = SubmissionInGraderSerializer(source='*')
138 exercise = ExerciseBriefSerializer()
139
140 class Meta(AplusSerializerMeta):
141 model = Submission
142 fields = (
143 'url',
144 'submission',
145 'exercise',
146 'grading_data',
147 'is_graded',
148 )
149
[end of exercise/api/full_serializers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/exercise/api/full_serializers.py b/exercise/api/full_serializers.py
--- a/exercise/api/full_serializers.py
+++ b/exercise/api/full_serializers.py
@@ -63,8 +63,6 @@
'course',
'is_submittable',
'post_url',
- 'max_points',
- 'max_submissions',
'exercise_info',
'templates',
'submissions',
diff --git a/exercise/api/serializers.py b/exercise/api/serializers.py
--- a/exercise/api/serializers.py
+++ b/exercise/api/serializers.py
@@ -28,6 +28,8 @@
'url',
'html_url',
'display_name',
+ 'max_points',
+ 'max_submissions',
)
| {"golden_diff": "diff --git a/exercise/api/full_serializers.py b/exercise/api/full_serializers.py\n--- a/exercise/api/full_serializers.py\n+++ b/exercise/api/full_serializers.py\n@@ -63,8 +63,6 @@\n 'course',\n 'is_submittable',\n 'post_url',\n- 'max_points',\n- 'max_submissions',\n 'exercise_info',\n 'templates',\n 'submissions',\ndiff --git a/exercise/api/serializers.py b/exercise/api/serializers.py\n--- a/exercise/api/serializers.py\n+++ b/exercise/api/serializers.py\n@@ -28,6 +28,8 @@\n 'url',\n 'html_url',\n 'display_name',\n+ 'max_points',\n+ 'max_submissions',\n )\n", "issue": "Add max_submissions in the course points API endpoint\nThis API endpoint `/api/v2/courses/COURSEID/points/USERID/` for course points shows the user's points in exercises. It also includes some exercise info that is also provided by the exercise endpoint (`/api/v2/exercises/ID/`). The points API already contains `max_points` and `points_to_pass`, so why not also include `max_submissions` (maximum number of allowed submissions)?\r\n\r\nRelevant code:\r\nhttps://github.com/apluslms/a-plus/blob/d34f94b7c9a8e4d22cd28d08968b36787bb344c4/exercise/api/views.py#L420\r\nhttps://github.com/apluslms/a-plus/blob/d34f94b7c9a8e4d22cd28d08968b36787bb344c4/exercise/api/custom_serializers.py#L76\n", "before_files": [{"content": "from rest_framework import serializers\nfrom rest_framework.reverse import reverse\n\nfrom lib.api.fields import NestedHyperlinkedIdentityField\nfrom lib.api.serializers import AplusModelSerializer, HtmlViewField\nfrom userprofile.api.serializers import UserBriefSerializer\nfrom ..models import Submission, SubmittedFile, BaseExercise\n\n\n__all__ = [\n 'ExerciseBriefSerializer',\n 'SubmissionBriefSerializer',\n 'SubmittedFileBriefSerializer',\n 'SubmitterStatsBriefSerializer',\n]\n\n\nclass ExerciseBriefSerializer(AplusModelSerializer):\n url = NestedHyperlinkedIdentityField(\n view_name='api:exercise-detail',\n lookup_map='exercise.api.views.ExerciseViewSet',\n )\n display_name = serializers.CharField(source='__str__')\n\n class Meta(AplusModelSerializer.Meta):\n model = BaseExercise\n fields = (\n 'url',\n 'html_url',\n 'display_name',\n )\n\n\nclass SubmissionBriefSerializer(AplusModelSerializer):\n #display_name = serializers.CharField(source='__str__')\n\n class Meta(AplusModelSerializer.Meta):\n model = Submission\n fields = (\n 'submission_time',\n )\n extra_kwargs = {\n 'url': {\n 'view_name': 'api:submission-detail',\n 'lookup_map': 'exercise.api.views.SubmissionViewSet',\n }\n }\n\n\nclass SubmittedFileBriefSerializer(AplusModelSerializer):\n #url = HtmlViewField()\n url = NestedHyperlinkedIdentityField(\n view_name='api:submission-files-detail',\n lookup_map='exercise.api.views.SubmissionFileViewSet',\n )\n\n class Meta(AplusModelSerializer.Meta):\n model = SubmittedFile\n fields = (\n 'url',\n 'filename',\n 'param_name',\n )\n\n\nclass SubmitterStatsBriefSerializer(UserBriefSerializer):\n stats = serializers.SerializerMethodField()\n\n def get_stats(self, profile):\n return reverse(\n 'api:exercise-submitter_stats-detail',\n kwargs={\n 'exercise_id': self.context['view'].exercise.id,\n 'user_id': profile.user.id,\n },\n request=self.context['request']\n )\n\n class Meta(UserBriefSerializer.Meta):\n fields = UserBriefSerializer.Meta.fields + (\n 'stats',\n )\n", "path": "exercise/api/serializers.py"}, {"content": "from rest_framework import serializers\n\nfrom lib.api.fields import NestedHyperlinkedIdentityField\nfrom lib.api.serializers import (\n AlwaysListSerializer,\n 
CompositeListSerializer,\n AplusSerializerMeta,\n AplusModelSerializerBase,\n)\nfrom course.api.serializers import CourseBriefSerializer\nfrom userprofile.api.serializers import UserBriefSerializer, UserListField\n\nfrom ..models import Submission\nfrom .serializers import (\n ExerciseBriefSerializer,\n SubmissionBriefSerializer,\n SubmittedFileBriefSerializer,\n)\n\n\n__all__ = [\n 'ExerciseSerializer',\n 'ExerciseGraderSerializer',\n 'SubmissionSerializer',\n 'SubmissionGraderSerializer',\n]\n\n\nclass ExerciseSerializer(ExerciseBriefSerializer):\n course = CourseBriefSerializer(source='course_instance')\n post_url = serializers.SerializerMethodField()\n exercise_info = serializers.JSONField()\n submissions = NestedHyperlinkedIdentityField(\n view_name='api:exercise-submissions-list',\n lookup_map='exercise.api.views.ExerciseViewSet',\n )\n my_submissions = NestedHyperlinkedIdentityField(\n view_name='api:exercise-submissions-detail',\n lookup_map={\n 'exercise_id': 'id',\n 'user_id': lambda o=None: 'me',\n },\n )\n my_stats = NestedHyperlinkedIdentityField(\n view_name='api:exercise-submitter_stats-detail',\n lookup_map={\n 'exercise_id': 'id',\n 'user_id': lambda o=None: 'me',\n },\n )\n\n def get_post_url(self, obj):\n # FIXME: obj should implement .get_post_url() and that should be used here\n if obj.is_submittable:\n request = self.context['request']\n url = obj.get_url(\"exercise\")\n return request.build_absolute_uri(url)\n return None\n\n class Meta(ExerciseBriefSerializer.Meta):\n fields = (\n 'name',\n 'course',\n 'is_submittable',\n 'post_url',\n 'max_points',\n 'max_submissions',\n 'exercise_info',\n 'templates',\n 'submissions',\n 'my_submissions',\n 'my_stats',\n )\n\n\nclass ExerciseGraderSerializer(AplusModelSerializerBase):\n url = NestedHyperlinkedIdentityField(\n view_name='api:exercise-grader',\n lookup_map='exercise.api.views.ExerciseViewSet',\n )\n exercise = ExerciseBriefSerializer(source='*')\n\n class Meta(AplusSerializerMeta):\n model = Submission\n fields = (\n 'url',\n 'exercise',\n )\n\n\nclass SubmitterLinks(AlwaysListSerializer, UserBriefSerializer):\n pass\n\n\nclass SubmittedFileLinks(AlwaysListSerializer, SubmittedFileBriefSerializer):\n pass\n\n\nclass SubmissionSerializer(SubmissionBriefSerializer):\n exercise = ExerciseBriefSerializer()\n submitters = SubmitterLinks()\n submission_data = serializers.JSONField()\n files = SubmittedFileLinks()\n grader = UserBriefSerializer()\n grading_data = serializers.JSONField()\n\n class Meta(SubmissionBriefSerializer.Meta):\n fields = (\n 'html_url',\n 'exercise',\n 'submitters',\n 'submission_data',\n 'files',\n 'status',\n 'grade',\n 'late_penalty_applied',\n 'grading_time',\n 'grader',\n 'feedback',\n 'assistant_feedback',\n 'grading_data',\n )\n\n\nclass SubmissionInGraderSerializer(SubmissionBriefSerializer):\n class Meta(SubmissionBriefSerializer.Meta):\n fields = (\n 'html_url',\n )\n\n\nclass SubmissionGraderSerializer(AplusModelSerializerBase):\n url = NestedHyperlinkedIdentityField(\n view_name='api:submission-grader',\n lookup_map='exercise.api.views.SubmissionViewSet',\n )\n submission = SubmissionInGraderSerializer(source='*')\n exercise = ExerciseBriefSerializer()\n\n class Meta(AplusSerializerMeta):\n model = Submission\n fields = (\n 'url',\n 'submission',\n 'exercise',\n 'grading_data',\n 'is_graded',\n )\n", "path": "exercise/api/full_serializers.py"}]} | 2,583 | 176 |
gh_patches_debug_25769 | rasdani/github-patches | git_diff | encode__starlette-1401 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
templating: jinja2: pass kwargs for environment
I think it would be good to pass something like `env_kwargs` via https://github.com/blueyed/starlette/blob/24c135de71ac56a73f7f797258115941579155bf/starlette/templating.py#L51-L53.
While you can change the env afterwards, it would allow Jinja2 to validate e.g. `enable_async`, and call `load_extensions` etc.
</issue>
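Independently of how Starlette wires this up, the underlying Jinja2 API already supports the idea: `jinja2.Environment` accepts keyword arguments such as `enable_async` and `extensions` and rejects unknown ones at construction time. The helper below is only a generic sketch of forwarding `**kwargs` through to the environment; the function name and defaults are illustrative, not Starlette code.

```python
import jinja2


def build_env(directory: str, **env_options) -> jinja2.Environment:
    # Provide sensible defaults but let callers override or extend them;
    # anything Jinja2 does not understand fails fast with a TypeError.
    env_options.setdefault("loader", jinja2.FileSystemLoader(directory))
    env_options.setdefault("autoescape", True)
    return jinja2.Environment(**env_options)


# Example: enabling async template rendering at construction time.
env = build_env("templates", enable_async=True)
```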
<code>
[start of starlette/templating.py]
1 import typing
2 from os import PathLike
3
4 from starlette.background import BackgroundTask
5 from starlette.responses import Response
6 from starlette.types import Receive, Scope, Send
7
8 try:
9 import jinja2
10
11 # @contextfunction renamed to @pass_context in Jinja 3.0, to be removed in 3.1
12 if hasattr(jinja2, "pass_context"):
13 pass_context = jinja2.pass_context
14 else: # pragma: nocover
15 pass_context = jinja2.contextfunction
16 except ImportError: # pragma: nocover
17 jinja2 = None # type: ignore
18
19
20 class _TemplateResponse(Response):
21 media_type = "text/html"
22
23 def __init__(
24 self,
25 template: typing.Any,
26 context: dict,
27 status_code: int = 200,
28 headers: dict = None,
29 media_type: str = None,
30 background: BackgroundTask = None,
31 ):
32 self.template = template
33 self.context = context
34 content = template.render(context)
35 super().__init__(content, status_code, headers, media_type, background)
36
37 async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
38 request = self.context.get("request", {})
39 extensions = request.get("extensions", {})
40 if "http.response.template" in extensions:
41 await send(
42 {
43 "type": "http.response.template",
44 "template": self.template,
45 "context": self.context,
46 }
47 )
48 await super().__call__(scope, receive, send)
49
50
51 class Jinja2Templates:
52 """
53 templates = Jinja2Templates("templates")
54
55 return templates.TemplateResponse("index.html", {"request": request})
56 """
57
58 def __init__(self, directory: typing.Union[str, PathLike]) -> None:
59 assert jinja2 is not None, "jinja2 must be installed to use Jinja2Templates"
60 self.env = self._create_env(directory)
61
62 def _create_env(
63 self, directory: typing.Union[str, PathLike]
64 ) -> "jinja2.Environment":
65 @pass_context
66 def url_for(context: dict, name: str, **path_params: typing.Any) -> str:
67 request = context["request"]
68 return request.url_for(name, **path_params)
69
70 loader = jinja2.FileSystemLoader(directory)
71 env = jinja2.Environment(loader=loader, autoescape=True)
72 env.globals["url_for"] = url_for
73 return env
74
75 def get_template(self, name: str) -> "jinja2.Template":
76 return self.env.get_template(name)
77
78 def TemplateResponse(
79 self,
80 name: str,
81 context: dict,
82 status_code: int = 200,
83 headers: dict = None,
84 media_type: str = None,
85 background: BackgroundTask = None,
86 ) -> _TemplateResponse:
87 if "request" not in context:
88 raise ValueError('context must include a "request" key')
89 template = self.get_template(name)
90 return _TemplateResponse(
91 template,
92 context,
93 status_code=status_code,
94 headers=headers,
95 media_type=media_type,
96 background=background,
97 )
98
[end of starlette/templating.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/starlette/templating.py b/starlette/templating.py
--- a/starlette/templating.py
+++ b/starlette/templating.py
@@ -55,12 +55,14 @@
return templates.TemplateResponse("index.html", {"request": request})
"""
- def __init__(self, directory: typing.Union[str, PathLike]) -> None:
+ def __init__(
+ self, directory: typing.Union[str, PathLike], **env_options: typing.Any
+ ) -> None:
assert jinja2 is not None, "jinja2 must be installed to use Jinja2Templates"
- self.env = self._create_env(directory)
+ self.env = self._create_env(directory, **env_options)
def _create_env(
- self, directory: typing.Union[str, PathLike]
+ self, directory: typing.Union[str, PathLike], **env_options: typing.Any
) -> "jinja2.Environment":
@pass_context
def url_for(context: dict, name: str, **path_params: typing.Any) -> str:
@@ -68,7 +70,10 @@
return request.url_for(name, **path_params)
loader = jinja2.FileSystemLoader(directory)
- env = jinja2.Environment(loader=loader, autoescape=True)
+ env_options.setdefault("loader", loader)
+ env_options.setdefault("autoescape", True)
+
+ env = jinja2.Environment(**env_options)
env.globals["url_for"] = url_for
return env
| {"golden_diff": "diff --git a/starlette/templating.py b/starlette/templating.py\n--- a/starlette/templating.py\n+++ b/starlette/templating.py\n@@ -55,12 +55,14 @@\n return templates.TemplateResponse(\"index.html\", {\"request\": request})\n \"\"\"\n \n- def __init__(self, directory: typing.Union[str, PathLike]) -> None:\n+ def __init__(\n+ self, directory: typing.Union[str, PathLike], **env_options: typing.Any\n+ ) -> None:\n assert jinja2 is not None, \"jinja2 must be installed to use Jinja2Templates\"\n- self.env = self._create_env(directory)\n+ self.env = self._create_env(directory, **env_options)\n \n def _create_env(\n- self, directory: typing.Union[str, PathLike]\n+ self, directory: typing.Union[str, PathLike], **env_options: typing.Any\n ) -> \"jinja2.Environment\":\n @pass_context\n def url_for(context: dict, name: str, **path_params: typing.Any) -> str:\n@@ -68,7 +70,10 @@\n return request.url_for(name, **path_params)\n \n loader = jinja2.FileSystemLoader(directory)\n- env = jinja2.Environment(loader=loader, autoescape=True)\n+ env_options.setdefault(\"loader\", loader)\n+ env_options.setdefault(\"autoescape\", True)\n+\n+ env = jinja2.Environment(**env_options)\n env.globals[\"url_for\"] = url_for\n return env\n", "issue": "templateing: jinja2: pass kwargs for environment\nI think it would be good to pass something like `env_kwargs` via https://github.com/blueyed/starlette/blob/24c135de71ac56a73f7f797258115941579155bf/starlette/templating.py#L51-L53.\r\n\r\nWhile you can change the env afterwards, it would allow Jinja2 to validate e.g. `enable_async`, and call `load_extensions` etc.\n", "before_files": [{"content": "import typing\nfrom os import PathLike\n\nfrom starlette.background import BackgroundTask\nfrom starlette.responses import Response\nfrom starlette.types import Receive, Scope, Send\n\ntry:\n import jinja2\n\n # @contextfunction renamed to @pass_context in Jinja 3.0, to be removed in 3.1\n if hasattr(jinja2, \"pass_context\"):\n pass_context = jinja2.pass_context\n else: # pragma: nocover\n pass_context = jinja2.contextfunction\nexcept ImportError: # pragma: nocover\n jinja2 = None # type: ignore\n\n\nclass _TemplateResponse(Response):\n media_type = \"text/html\"\n\n def __init__(\n self,\n template: typing.Any,\n context: dict,\n status_code: int = 200,\n headers: dict = None,\n media_type: str = None,\n background: BackgroundTask = None,\n ):\n self.template = template\n self.context = context\n content = template.render(context)\n super().__init__(content, status_code, headers, media_type, background)\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n request = self.context.get(\"request\", {})\n extensions = request.get(\"extensions\", {})\n if \"http.response.template\" in extensions:\n await send(\n {\n \"type\": \"http.response.template\",\n \"template\": self.template,\n \"context\": self.context,\n }\n )\n await super().__call__(scope, receive, send)\n\n\nclass Jinja2Templates:\n \"\"\"\n templates = Jinja2Templates(\"templates\")\n\n return templates.TemplateResponse(\"index.html\", {\"request\": request})\n \"\"\"\n\n def __init__(self, directory: typing.Union[str, PathLike]) -> None:\n assert jinja2 is not None, \"jinja2 must be installed to use Jinja2Templates\"\n self.env = self._create_env(directory)\n\n def _create_env(\n self, directory: typing.Union[str, PathLike]\n ) -> \"jinja2.Environment\":\n @pass_context\n def url_for(context: dict, name: str, **path_params: typing.Any) -> str:\n request = 
context[\"request\"]\n return request.url_for(name, **path_params)\n\n loader = jinja2.FileSystemLoader(directory)\n env = jinja2.Environment(loader=loader, autoescape=True)\n env.globals[\"url_for\"] = url_for\n return env\n\n def get_template(self, name: str) -> \"jinja2.Template\":\n return self.env.get_template(name)\n\n def TemplateResponse(\n self,\n name: str,\n context: dict,\n status_code: int = 200,\n headers: dict = None,\n media_type: str = None,\n background: BackgroundTask = None,\n ) -> _TemplateResponse:\n if \"request\" not in context:\n raise ValueError('context must include a \"request\" key')\n template = self.get_template(name)\n return _TemplateResponse(\n template,\n context,\n status_code=status_code,\n headers=headers,\n media_type=media_type,\n background=background,\n )\n", "path": "starlette/templating.py"}]} | 1,545 | 349 |
gh_patches_debug_11341 | rasdani/github-patches | git_diff | LMFDB__lmfdb-5629 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Dirichlet character server error
The page https://www.lmfdb.org/Character/Dirichlet/68800/66667 is throwing a server error
</issue>
<code>
[start of lmfdb/characters/TinyConrey.py]
1 from sage.all import (gcd, Mod, Integer, Integers, Rational, pari,
2 DirichletGroup, CyclotomicField, euler_phi, lcm)
3 from sage.misc.cachefunc import cached_method
4 from sage.modular.dirichlet import DirichletCharacter
5 from lmfdb.logger import make_logger
6 logger = make_logger("TinyConrey")
7
8 def symbol_numerator(cond, parity):
9 # Reference: Sect. 9.3, Montgomery, Hugh L; Vaughan, Robert C. (2007).
10 # Multiplicative number theory. I. Classical theory. Cambridge Studies in
11 # Advanced Mathematics 97
12 #
13 # Let F = Q(\sqrt(d)) with d a non zero squarefree integer then a real
14 # Dirichlet character \chi(n) can be represented as a Kronecker symbol
15 # (m / n) where { m = d if # d = 1 mod 4 else m = 4d if d = 2,3 (mod) 4 }
16 # and m is the discriminant of F. The conductor of \chi is |m|.
17 #
18 # symbol_numerator returns the appropriate Kronecker symbol depending on
19 # the conductor of \chi.
20 m = cond
21 if cond % 2 == 1:
22 if cond % 4 == 3:
23 m = -cond
24 elif cond % 8 == 4:
25 # Fixed cond % 16 == 4 and cond % 16 == 12 were switched in the
26 # previous version of the code.
27 #
28 # Let d be a non zero squarefree integer. If d = 2,3 (mod) 4 and if
29 # cond = 4d = 4 ( 4n + 2) or 4 (4n + 3) = 16 n + 8 or 16n + 12 then we
30 # set m = cond. On the other hand if d = 1 (mod) 4 and cond = 4d = 4
31 # (4n +1) = 16n + 4 then we set m = -cond.
32 if cond % 16 == 4:
33 m = -cond
34 elif cond % 16 == 8:
35 if parity == 1:
36 m = -cond
37 else:
38 return None
39 return m
40
41
42 def kronecker_symbol(m):
43 if m:
44 return r'\(\displaystyle\left(\frac{%s}{\bullet}\right)\)' % (m)
45 else:
46 return None
47
48 ###############################################################################
49 # Conrey character with no call to Jonathan's code
50 # in order to handle big moduli
51
52
53 def get_sage_genvalues(modulus, order, genvalues, zeta_order):
54 """
55 Helper method for computing correct genvalues when constructing
56 the sage character
57 """
58 phi_mod = euler_phi(modulus)
59 exponent_factor = phi_mod / order
60 genvalues_exponent = (x * exponent_factor for x in genvalues)
61 return [x * zeta_order / phi_mod for x in genvalues_exponent]
62
63
64 class PariConreyGroup():
65
66 def __init__(self, modulus):
67 self.modulus = int(modulus)
68 self.G = pari(f"znstar({modulus},1)")
69
70 def gens(self):
71 return Integers(self.modulus).unit_gens()
72
73 def invariants(self):
74 return pari(f"{self.G}.cyc")
75
76 @cached_method
77 def first_chars(self, limit=31):
78 if self.modulus == 1:
79 return [1]
80 r = []
81 for i,c in enumerate(Integers(self.modulus).list_of_elements_of_multiplicative_group()):
82 r.append(c)
83 if i > limit:
84 self.rowtruncate = True
85 break
86 return r
87
88 @cached_method
89 def first_chars_with_orbit(self, limit=31):
90 """ would be nice to compute those directly
91 instead of querying each to db
92 """
93 pass
94
95
96 class ConreyCharacter():
97 """
98 minimal implementation of character from its Conrey index
99 use Pari/GP functions when available
100 """
101
102 def __init__(self, modulus, number):
103 assert gcd(modulus, number)==1
104 self.modulus = Integer(modulus)
105 self.number = Integer(number)
106 self.conrey = Mod(number,modulus)
107 self.G = pari("znstar({},1)".format(modulus))
108 self.G_gens = Integers(self.modulus).unit_gens() # use sage generators
109 self.chi_pari = self.G.znconreylog(self.number)
110 self.chi_0 = None
111 self.indlabel = None
112
113 @property
114 def texname(self):
115 from lmfdb.characters.web_character import WebDirichlet
116 return WebDirichlet.char2tex(self.modulus, self.number)
117
118 @cached_method
119 def modfactor(self):
120 return self.modulus.factor()
121
122 @cached_method
123 def conductor(self):
124 B = pari(f"znconreyconductor({self.G},{self.chi_pari},&chi0)")
125 if B.type() == 't_INT':
126 # means chi is primitive
127 self.chi_0 = self.chi_pari
128 self.indlabel = self.number
129 return int(B)
130 else:
131 self.chi_0 = pari("chi0")
132 G_0 = pari(f"znstar({B},1)")
133 self.indlabel = int(G_0.znconreyexp(self.chi_0))
134 return int(B[0])
135
136 @cached_method
137 def is_primitive(self):
138 return self.conductor() == self.modulus
139
140 @cached_method
141 def parity(self):
142 return self.G.zncharisodd(self.chi_pari)
143
144 def is_odd(self):
145 return self.parity() == 1
146
147 def is_even(self):
148 return self.parity() == 0
149
150 @property
151 def order(self):
152 return self.conrey.multiplicative_order()
153
154 @property
155 def genvalues(self):
156 # This assumes that the generators are ordered in the way
157 # that Sage returns
158 return [self.conreyangle(k) * self.order for k in self.G_gens]
159
160 @property
161 def values_gens(self):
162 # This may be considered the full version of genvalues;
163 # that is, it returns both the generators as well as the values
164 # at those generators
165 return [[k, self.conreyangle(k) * self.order] for k in self.G_gens]
166
167 @cached_method
168 def kronecker_symbol(self):
169 c = self.conductor()
170 p = self.parity()
171 return kronecker_symbol(symbol_numerator(c, p))
172
173 def conreyangle(self,x):
174 return Rational(self.G.chareval(self.chi_pari,x))
175
176 def gauss_sum_numerical(self, a):
177 # There seems to be a bug in pari when a is a multiple of the modulus,
178 # so we deal with that separately
179 if self.modulus.divides(a):
180 if self.conductor() == 1:
181 return euler_phi(self.modulus)
182 else:
183 return Integer(0)
184 else:
185 return self.G.znchargauss(self.chi_pari,a)
186
187 def sage_zeta_order(self, order):
188 return 1 if self.modulus <= 2 else lcm(2,order)
189
190 def sage_character(self, order=None, genvalues=None):
191
192 if order is None:
193 order = self.order
194
195 if genvalues is None:
196 genvalues = self.genvalues
197
198 H = DirichletGroup(self.modulus, base_ring=CyclotomicField(self.sage_zeta_order(order)))
199 M = H._module
200 order_corrected_genvalues = get_sage_genvalues(self.modulus, order, genvalues, self.sage_zeta_order(order))
201 return DirichletCharacter(H,M(order_corrected_genvalues))
202
203 @cached_method
204 def galois_orbit(self, limit=31):
205 """
206 orbit under Galois of the value field,
207 can be used to find first conjugate or list of first conjugates
208 """
209 logger.debug(f"## galois_orbit({limit})")
210 order = self.order
211 if order == 1:
212 return [1]
213 elif order < limit or order * order < limit * self.modulus:
214 logger.debug(f"compute all conjugate characters and return first {limit}")
215 return self.galois_orbit_all(limit)
216 elif limit == 1 or self.modulus < 30 * order:
217 logger.debug(f"compute {limit} first conjugate characters")
218 return self.galois_orbit_search(limit)
219 else:
220 logger.debug(f"galois orbit of size {order} too expansive, give up")
221 return []
222
223 def galois_orbit_all(self, limit=31):
224 # construct all Galois orbit, assume not too large
225 order = self.order
226 chik = self.conrey
227 output = []
228 for k in range(1,order):
229 if gcd(k,order) == 1:
230 output.append(Integer(chik))
231 chik *= self.conrey
232 output.sort()
233 return output[:limit]
234
235 def galois_orbit_search(self, limit=31):
236 # fishing strategy, assume orbit relatively dense
237 order = self.order
238 num = self.number
239 mod = self.modulus
240 kmin = 1
241 width = kmax = min(mod,limit * 50)
242 while True:
243 cmd = f"a=Mod({num},{mod});my(valid(k)=my(l=znlog(k,a,{order}));l&&gcd(l,{order})==1);[ k | k <- [{kmin}..{kmax}], gcd(k,{mod})==1 && valid(k) ]"
244 ans = [Integer(m) for m in pari(cmd)[:limit]]
245 if ans:
246 return ans
247 kmin += width
248 kmax += width
249
250 @property
251 def min_conrey_conj(self):
252 return self.galois_orbit(1)[0]
253
254 @cached_method
255 def kernel_field_poly(self):
256 pol = self.G.galoissubcyclo(self.G.charker(self.chi_pari))
257 if self.order <= 12:
258 pol = pol.polredabs()
259 return pol
260
[end of lmfdb/characters/TinyConrey.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lmfdb/characters/TinyConrey.py b/lmfdb/characters/TinyConrey.py
--- a/lmfdb/characters/TinyConrey.py
+++ b/lmfdb/characters/TinyConrey.py
@@ -213,7 +213,7 @@
elif order < limit or order * order < limit * self.modulus:
logger.debug(f"compute all conjugate characters and return first {limit}")
return self.galois_orbit_all(limit)
- elif limit == 1 or self.modulus < 30 * order:
+ elif limit == 1 or self.modulus <= 1000000:
logger.debug(f"compute {limit} first conjugate characters")
return self.galois_orbit_search(limit)
else:
| {"golden_diff": "diff --git a/lmfdb/characters/TinyConrey.py b/lmfdb/characters/TinyConrey.py\n--- a/lmfdb/characters/TinyConrey.py\n+++ b/lmfdb/characters/TinyConrey.py\n@@ -213,7 +213,7 @@\n elif order < limit or order * order < limit * self.modulus:\n logger.debug(f\"compute all conjugate characters and return first {limit}\")\n return self.galois_orbit_all(limit)\n- elif limit == 1 or self.modulus < 30 * order:\n+ elif limit == 1 or self.modulus <= 1000000:\n logger.debug(f\"compute {limit} first conjugate characters\")\n return self.galois_orbit_search(limit)\n else:\n", "issue": "Dirichlet character server error\nThe page https://www.lmfdb.org/Character/Dirichlet/68800/66667 is throwing a server error\n", "before_files": [{"content": "from sage.all import (gcd, Mod, Integer, Integers, Rational, pari,\n DirichletGroup, CyclotomicField, euler_phi, lcm)\nfrom sage.misc.cachefunc import cached_method\nfrom sage.modular.dirichlet import DirichletCharacter\nfrom lmfdb.logger import make_logger\nlogger = make_logger(\"TinyConrey\")\n\ndef symbol_numerator(cond, parity):\n # Reference: Sect. 9.3, Montgomery, Hugh L; Vaughan, Robert C. (2007).\n # Multiplicative number theory. I. Classical theory. Cambridge Studies in\n # Advanced Mathematics 97\n #\n # Let F = Q(\\sqrt(d)) with d a non zero squarefree integer then a real\n # Dirichlet character \\chi(n) can be represented as a Kronecker symbol\n # (m / n) where { m = d if # d = 1 mod 4 else m = 4d if d = 2,3 (mod) 4 }\n # and m is the discriminant of F. The conductor of \\chi is |m|.\n #\n # symbol_numerator returns the appropriate Kronecker symbol depending on\n # the conductor of \\chi.\n m = cond\n if cond % 2 == 1:\n if cond % 4 == 3:\n m = -cond\n elif cond % 8 == 4:\n # Fixed cond % 16 == 4 and cond % 16 == 12 were switched in the\n # previous version of the code.\n #\n # Let d be a non zero squarefree integer. If d = 2,3 (mod) 4 and if\n # cond = 4d = 4 ( 4n + 2) or 4 (4n + 3) = 16 n + 8 or 16n + 12 then we\n # set m = cond. 
On the other hand if d = 1 (mod) 4 and cond = 4d = 4\n # (4n +1) = 16n + 4 then we set m = -cond.\n if cond % 16 == 4:\n m = -cond\n elif cond % 16 == 8:\n if parity == 1:\n m = -cond\n else:\n return None\n return m\n\n\ndef kronecker_symbol(m):\n if m:\n return r'\\(\\displaystyle\\left(\\frac{%s}{\\bullet}\\right)\\)' % (m)\n else:\n return None\n\n###############################################################################\n# Conrey character with no call to Jonathan's code\n# in order to handle big moduli\n\n\ndef get_sage_genvalues(modulus, order, genvalues, zeta_order):\n \"\"\"\n Helper method for computing correct genvalues when constructing\n the sage character\n \"\"\"\n phi_mod = euler_phi(modulus)\n exponent_factor = phi_mod / order\n genvalues_exponent = (x * exponent_factor for x in genvalues)\n return [x * zeta_order / phi_mod for x in genvalues_exponent]\n\n\nclass PariConreyGroup():\n\n def __init__(self, modulus):\n self.modulus = int(modulus)\n self.G = pari(f\"znstar({modulus},1)\")\n\n def gens(self):\n return Integers(self.modulus).unit_gens()\n\n def invariants(self):\n return pari(f\"{self.G}.cyc\")\n\n @cached_method\n def first_chars(self, limit=31):\n if self.modulus == 1:\n return [1]\n r = []\n for i,c in enumerate(Integers(self.modulus).list_of_elements_of_multiplicative_group()):\n r.append(c)\n if i > limit:\n self.rowtruncate = True\n break\n return r\n\n @cached_method\n def first_chars_with_orbit(self, limit=31):\n \"\"\" would be nice to compute those directly\n instead of querying each to db\n \"\"\"\n pass\n\n\nclass ConreyCharacter():\n \"\"\"\n minimal implementation of character from its Conrey index\n use Pari/GP functions when available\n \"\"\"\n\n def __init__(self, modulus, number):\n assert gcd(modulus, number)==1\n self.modulus = Integer(modulus)\n self.number = Integer(number)\n self.conrey = Mod(number,modulus)\n self.G = pari(\"znstar({},1)\".format(modulus))\n self.G_gens = Integers(self.modulus).unit_gens() # use sage generators\n self.chi_pari = self.G.znconreylog(self.number)\n self.chi_0 = None\n self.indlabel = None\n\n @property\n def texname(self):\n from lmfdb.characters.web_character import WebDirichlet\n return WebDirichlet.char2tex(self.modulus, self.number)\n\n @cached_method\n def modfactor(self):\n return self.modulus.factor()\n\n @cached_method\n def conductor(self):\n B = pari(f\"znconreyconductor({self.G},{self.chi_pari},&chi0)\")\n if B.type() == 't_INT':\n # means chi is primitive\n self.chi_0 = self.chi_pari\n self.indlabel = self.number\n return int(B)\n else:\n self.chi_0 = pari(\"chi0\")\n G_0 = pari(f\"znstar({B},1)\")\n self.indlabel = int(G_0.znconreyexp(self.chi_0))\n return int(B[0])\n\n @cached_method\n def is_primitive(self):\n return self.conductor() == self.modulus\n\n @cached_method\n def parity(self):\n return self.G.zncharisodd(self.chi_pari)\n\n def is_odd(self):\n return self.parity() == 1\n\n def is_even(self):\n return self.parity() == 0\n\n @property\n def order(self):\n return self.conrey.multiplicative_order()\n\n @property\n def genvalues(self):\n # This assumes that the generators are ordered in the way\n # that Sage returns\n return [self.conreyangle(k) * self.order for k in self.G_gens]\n\n @property\n def values_gens(self):\n # This may be considered the full version of genvalues;\n # that is, it returns both the generators as well as the values\n # at those generators\n return [[k, self.conreyangle(k) * self.order] for k in self.G_gens]\n\n @cached_method\n def kronecker_symbol(self):\n 
c = self.conductor()\n p = self.parity()\n return kronecker_symbol(symbol_numerator(c, p))\n\n def conreyangle(self,x):\n return Rational(self.G.chareval(self.chi_pari,x))\n\n def gauss_sum_numerical(self, a):\n # There seems to be a bug in pari when a is a multiple of the modulus,\n # so we deal with that separately\n if self.modulus.divides(a):\n if self.conductor() == 1:\n return euler_phi(self.modulus)\n else:\n return Integer(0)\n else:\n return self.G.znchargauss(self.chi_pari,a)\n\n def sage_zeta_order(self, order):\n return 1 if self.modulus <= 2 else lcm(2,order)\n\n def sage_character(self, order=None, genvalues=None):\n\n if order is None:\n order = self.order\n\n if genvalues is None:\n genvalues = self.genvalues\n\n H = DirichletGroup(self.modulus, base_ring=CyclotomicField(self.sage_zeta_order(order)))\n M = H._module\n order_corrected_genvalues = get_sage_genvalues(self.modulus, order, genvalues, self.sage_zeta_order(order))\n return DirichletCharacter(H,M(order_corrected_genvalues))\n\n @cached_method\n def galois_orbit(self, limit=31):\n \"\"\"\n orbit under Galois of the value field,\n can be used to find first conjugate or list of first conjugates\n \"\"\"\n logger.debug(f\"## galois_orbit({limit})\")\n order = self.order\n if order == 1:\n return [1]\n elif order < limit or order * order < limit * self.modulus:\n logger.debug(f\"compute all conjugate characters and return first {limit}\")\n return self.galois_orbit_all(limit)\n elif limit == 1 or self.modulus < 30 * order:\n logger.debug(f\"compute {limit} first conjugate characters\")\n return self.galois_orbit_search(limit)\n else:\n logger.debug(f\"galois orbit of size {order} too expansive, give up\")\n return []\n\n def galois_orbit_all(self, limit=31):\n # construct all Galois orbit, assume not too large\n order = self.order\n chik = self.conrey\n output = []\n for k in range(1,order):\n if gcd(k,order) == 1:\n output.append(Integer(chik))\n chik *= self.conrey\n output.sort()\n return output[:limit]\n\n def galois_orbit_search(self, limit=31):\n # fishing strategy, assume orbit relatively dense\n order = self.order\n num = self.number\n mod = self.modulus\n kmin = 1\n width = kmax = min(mod,limit * 50)\n while True:\n cmd = f\"a=Mod({num},{mod});my(valid(k)=my(l=znlog(k,a,{order}));l&&gcd(l,{order})==1);[ k | k <- [{kmin}..{kmax}], gcd(k,{mod})==1 && valid(k) ]\"\n ans = [Integer(m) for m in pari(cmd)[:limit]]\n if ans:\n return ans\n kmin += width\n kmax += width\n\n @property\n def min_conrey_conj(self):\n return self.galois_orbit(1)[0]\n\n @cached_method\n def kernel_field_poly(self):\n pol = self.G.galoissubcyclo(self.G.charker(self.chi_pari))\n if self.order <= 12:\n pol = pol.polredabs()\n return pol\n", "path": "lmfdb/characters/TinyConrey.py"}]} | 3,563 | 176 |
gh_patches_debug_7791 | rasdani/github-patches | git_diff | RedHatInsights__insights-core-2477 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Imprprovement for bond parser
Currently, the bond parser is not supporting to check if the primary slave is configured with "primary_reselect=failure".
Parser PR: https://github.com/RedHatInsights/insights-core/pull/2477
</issue>
<code>
[start of insights/parsers/bond.py]
1 """
2 Bond - file ``/proc/net/bonding``
3 =================================
4
5 Provides plugins access to the network bonding information gathered from
6 all the files starteing with "bond." located in the
7 ``/proc/net/bonding`` directory.
8
9 Typical content of ``bond.*`` file is::
10
11 Ethernet Channel Bonding Driver: v3.2.4 (January 28, 2008)
12
13 Bonding Mode: IEEE 802.3ad Dynamic link aggregation
14 Transmit Hash Policy: layer2 (0)
15 MII Status: up
16 MII Polling Interval (ms): 500
17 Up Delay (ms): 0
18 Down Delay (ms): 0
19
20 802.3ad info
21 LACP rate: slow
22 Active Aggregator Info:
23 Aggregator ID: 3
24 Number of ports: 1
25 Actor Key: 17
26 Partner Key: 1
27 Partner Mac Address: 00:00:00:00:00:00
28
29 Slave Interface: eth1
30 MII Status: up
31 Link Failure Count: 0
32 Permanent HW addr: 00:16:35:5e:42:fc
33 Aggregator ID: 3
34
35 Slave Interface: eth2
36 MII Status: up
37 Link Failure Count: 0
38 Permanent HW addr: 00:16:35:5e:02:7e
39 Aggregator ID: 2
40
41 Data is modeled as an array of ``Bond`` objects (``bond`` being a
42 pattern file specification gathering data from files located in
43 ``/proc/net/bonding``.
44
45 Examples:
46 >>> type(bond_info)
47 <class 'insights.parsers.bond.Bond'>
48 >>> bond_info.bond_mode
49 '4'
50 >>> bond_info.partner_mac_address
51 '00:00:00:00:00:00'
52 >>> bond_info.slave_interface
53 ['eth1', 'eth2']
54 >>> bond_info.aggregator_id
55 ['3', '3', '2']
56 >>> bond_info.xmit_hash_policy
57 'layer2'
58 >>> bond_info.active_slave
59 >>> bond_info.slave_duplex
60 ['full', 'full']
61 >>> bond_info.slave_speed
62 ['1000 Mbps', '1000 Mbps']
63 """
64
65 from insights import Parser, parser, get_active_lines
66 from insights.specs import Specs
67 from insights.parsers import ParseException
68
69
70 """dict: bonding mode parameter string linked to bond type index."""
71 BOND_PREFIX_MAP = {
72 'load balancing (round-robin)': '0',
73 'fault-tolerance (active-backup)': '1',
74 'fault-tolerance (active-backup) (fail_over_mac active)': '1',
75 'load balancing (xor)': '2',
76 'fault-tolerance (broadcast)': '3',
77 'IEEE 802.3ad Dynamic link aggregation': '4',
78 'transmit load balancing': '5',
79 'adaptive load balancing': '6'
80 }
81
82
83 @parser(Specs.bond)
84 class Bond(Parser):
85 """
86 Models the ``/proc/net/bonding`` file.
87
88 Currently used information from ``/proc/net/bonding`` includes
89 the "bond mode" and "partner mac address".
90 """
91
92 def parse_content(self, content):
93 self._bond_mode = None
94 self._partner_mac_address = None
95 self._active_slave = None
96 self.xmit_hash_policy = None
97 self._arp_polling_interval = None
98 self._arp_ip_target = None
99 self._slave_interface = []
100 self._aggregator_id = []
101 self._mii_status = []
102 self._slave_link_failure_count = []
103 self._slave_speed = []
104 self._slave_duplex = []
105 self._primary_slave = None
106
107 for line in get_active_lines(content):
108 if line.startswith("Bonding Mode: "):
109 raw_mode = line.split(":", 1)[1].strip()
110 self._bond_mode = raw_mode
111 if raw_mode in BOND_PREFIX_MAP:
112 self._bond_mode = BOND_PREFIX_MAP[raw_mode]
113 else:
114 raise ParseException("Unrecognised bonding mode '{b}'".format(b=raw_mode))
115 elif line.startswith("Partner Mac Address: "):
116 self._partner_mac_address = line.split(":", 1)[1].strip()
117 elif line.startswith("Slave Interface: "):
118 self._slave_interface.append(line.split(":", 1)[1].strip())
119 elif line.strip().startswith("Aggregator ID: "):
120 self._aggregator_id.append(line.strip().split(':', 1)[1].strip())
121 elif line.strip().startswith("Transmit Hash Policy"):
122 # No need of values in bracket:
123 # Integer notification (0), (1), (2) of layer2, layer3+4, layer2+3 resp
124 self.xmit_hash_policy = line.split(":", 1)[1].split()[0]
125 elif line.strip().startswith("Currently Active Slave"):
126 self._active_slave = line.split(":", 1)[1].split()[0]
127 elif line.strip().startswith("MII Status: "):
128 self._mii_status.append(line.strip().split(':', 1)[1].strip())
129 elif line.strip().startswith("Link Failure Count: "):
130 self._slave_link_failure_count.append(line.strip().split(':', 1)[1].strip())
131 elif line.strip().startswith("Speed: "):
132 self._slave_speed.append(line.strip().split(':', 1)[1].strip())
133 elif line.strip().startswith("Duplex: "):
134 self._slave_duplex.append(line.strip().split(':', 1)[1].strip())
135 elif line.strip().startswith("ARP Polling Interval (ms):"):
136 self._arp_polling_interval = line.strip().split(':', 1)[1].strip()
137 elif line.strip().startswith("ARP IP target/s (n.n.n.n form):"):
138 self._arp_ip_target = line.strip().split(':', 1)[1].strip()
139 elif line.strip().startswith("Primary Slave"):
140 self._primary_slave = line.split(":", 1)[1].split()[0]
141
142 @property
143 def bond_mode(self):
144 """Returns the bond mode number as a string, or if there is no
145 known mapping to a number, the raw "Bonding Mode" value.
146 ``None`` is returned if no "Bonding Mode" key is found.
147 """
148 return self._bond_mode
149
150 @property
151 def partner_mac_address(self):
152 """Returns the value of the "Partner Mac Address" in the bond
153 file if the key/value exists. If the key is not in the bond
154 file, ``None`` is returned.
155 """
156 return self._partner_mac_address
157
158 @property
159 def slave_interface(self):
160 """Returns all the slave interfaces of in the bond file wrapped
161 a list if the key/value exists. If the key is not in the
162 bond file, ``[]`` is returned.
163 """
164 return self._slave_interface
165
166 @property
167 def aggregator_id(self):
168 """Returns all the aggregator id of in the bond file wrapped
169 a list if the key/value exists. If the key is not in the
170 bond file, ``[]`` is returned.
171 """
172 return self._aggregator_id
173
174 @property
175 def active_slave(self):
176 """Returns the active slave of the "Currently Active Slave" in the bond
177 file if key/value exists. If the key is not in the bond file, ``None``
178 is returned.
179 """
180 return self._active_slave
181
182 @property
183 def mii_status(self):
184 """Returns the master and all the slaves "MII Status" value in the bond file wrapped
185 a list if the key/value exists. If the key is not in the
186 bond file, ``[]`` is returned.
187 """
188 return self._mii_status
189
190 @property
191 def slave_link_failure_count(self):
192 """Returns all the slaves "Link Failure Count" value in the bond file wrapped
193 a list if the key/value exists. If the key is not in the
194 bond file, ``[]`` is returned.
195 """
196 return self._slave_link_failure_count
197
198 @property
199 def slave_speed(self):
200 """Returns all the slaves "Speed" value in the bond file wrapped
201 a list if the key/value exists. If the key is not in the
202 bond file, ``[]`` is returned.
203 """
204 return self._slave_speed
205
206 @property
207 def slave_duplex(self):
208 """Returns all the slave "Duplex" value in the bond file wrapped
209 a list if the key/value exists. If the key is not in the
210 bond file, ``[]`` is returned.
211 """
212 return self._slave_duplex
213
214 @property
215 def arp_polling_interval(self):
216 """Returns the arp polling interval as a string. ``None`` is returned
217 if no "ARP Polling Interval (ms)" key is found.
218 """
219 return self._arp_polling_interval
220
221 @property
222 def arp_ip_target(self):
223 """Returns the arp ip target as a string. ``None`` is returned
224 if no "ARP IP target/s (n.n.n.n form)" key is found.
225 """
226 return self._arp_ip_target
227
228 @property
229 def primary_slave(self):
230 """Returns the "Primary Slave" in the bond file if key/value exists.
231 If the key is not in the bond file, ``None`` is returned.
232 """
233 return self._primary_slave
234
[end of insights/parsers/bond.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/insights/parsers/bond.py b/insights/parsers/bond.py
--- a/insights/parsers/bond.py
+++ b/insights/parsers/bond.py
@@ -137,7 +137,7 @@
elif line.strip().startswith("ARP IP target/s (n.n.n.n form):"):
self._arp_ip_target = line.strip().split(':', 1)[1].strip()
elif line.strip().startswith("Primary Slave"):
- self._primary_slave = line.split(":", 1)[1].split()[0]
+ self._primary_slave = line.split(":", 1)[1].strip()
@property
def bond_mode(self):
| {"golden_diff": "diff --git a/insights/parsers/bond.py b/insights/parsers/bond.py\n--- a/insights/parsers/bond.py\n+++ b/insights/parsers/bond.py\n@@ -137,7 +137,7 @@\n elif line.strip().startswith(\"ARP IP target/s (n.n.n.n form):\"):\n self._arp_ip_target = line.strip().split(':', 1)[1].strip()\n elif line.strip().startswith(\"Primary Slave\"):\n- self._primary_slave = line.split(\":\", 1)[1].split()[0]\n+ self._primary_slave = line.split(\":\", 1)[1].strip()\n \n @property\n def bond_mode(self):\n", "issue": "Imprprovement for bond parser\nCurrently, the bond parser is not supporting to check if the primary slave is configured with \"primary_reselect=failure\".\r\n\r\nParser PR: https://github.com/RedHatInsights/insights-core/pull/2477\r\n\n", "before_files": [{"content": "\"\"\"\nBond - file ``/proc/net/bonding``\n=================================\n\nProvides plugins access to the network bonding information gathered from\nall the files starteing with \"bond.\" located in the\n``/proc/net/bonding`` directory.\n\nTypical content of ``bond.*`` file is::\n\n Ethernet Channel Bonding Driver: v3.2.4 (January 28, 2008)\n\n Bonding Mode: IEEE 802.3ad Dynamic link aggregation\n Transmit Hash Policy: layer2 (0)\n MII Status: up\n MII Polling Interval (ms): 500\n Up Delay (ms): 0\n Down Delay (ms): 0\n\n 802.3ad info\n LACP rate: slow\n Active Aggregator Info:\n Aggregator ID: 3\n Number of ports: 1\n Actor Key: 17\n Partner Key: 1\n Partner Mac Address: 00:00:00:00:00:00\n\n Slave Interface: eth1\n MII Status: up\n Link Failure Count: 0\n Permanent HW addr: 00:16:35:5e:42:fc\n Aggregator ID: 3\n\n Slave Interface: eth2\n MII Status: up\n Link Failure Count: 0\n Permanent HW addr: 00:16:35:5e:02:7e\n Aggregator ID: 2\n\nData is modeled as an array of ``Bond`` objects (``bond`` being a\npattern file specification gathering data from files located in\n``/proc/net/bonding``.\n\nExamples:\n >>> type(bond_info)\n <class 'insights.parsers.bond.Bond'>\n >>> bond_info.bond_mode\n '4'\n >>> bond_info.partner_mac_address\n '00:00:00:00:00:00'\n >>> bond_info.slave_interface\n ['eth1', 'eth2']\n >>> bond_info.aggregator_id\n ['3', '3', '2']\n >>> bond_info.xmit_hash_policy\n 'layer2'\n >>> bond_info.active_slave\n >>> bond_info.slave_duplex\n ['full', 'full']\n >>> bond_info.slave_speed\n ['1000 Mbps', '1000 Mbps']\n\"\"\"\n\nfrom insights import Parser, parser, get_active_lines\nfrom insights.specs import Specs\nfrom insights.parsers import ParseException\n\n\n\"\"\"dict: bonding mode parameter string linked to bond type index.\"\"\"\nBOND_PREFIX_MAP = {\n 'load balancing (round-robin)': '0',\n 'fault-tolerance (active-backup)': '1',\n 'fault-tolerance (active-backup) (fail_over_mac active)': '1',\n 'load balancing (xor)': '2',\n 'fault-tolerance (broadcast)': '3',\n 'IEEE 802.3ad Dynamic link aggregation': '4',\n 'transmit load balancing': '5',\n 'adaptive load balancing': '6'\n}\n\n\n@parser(Specs.bond)\nclass Bond(Parser):\n \"\"\"\n Models the ``/proc/net/bonding`` file.\n\n Currently used information from ``/proc/net/bonding`` includes\n the \"bond mode\" and \"partner mac address\".\n \"\"\"\n\n def parse_content(self, content):\n self._bond_mode = None\n self._partner_mac_address = None\n self._active_slave = None\n self.xmit_hash_policy = None\n self._arp_polling_interval = None\n self._arp_ip_target = None\n self._slave_interface = []\n self._aggregator_id = []\n self._mii_status = []\n self._slave_link_failure_count = []\n self._slave_speed = []\n self._slave_duplex = []\n self._primary_slave 
= None\n\n for line in get_active_lines(content):\n if line.startswith(\"Bonding Mode: \"):\n raw_mode = line.split(\":\", 1)[1].strip()\n self._bond_mode = raw_mode\n if raw_mode in BOND_PREFIX_MAP:\n self._bond_mode = BOND_PREFIX_MAP[raw_mode]\n else:\n raise ParseException(\"Unrecognised bonding mode '{b}'\".format(b=raw_mode))\n elif line.startswith(\"Partner Mac Address: \"):\n self._partner_mac_address = line.split(\":\", 1)[1].strip()\n elif line.startswith(\"Slave Interface: \"):\n self._slave_interface.append(line.split(\":\", 1)[1].strip())\n elif line.strip().startswith(\"Aggregator ID: \"):\n self._aggregator_id.append(line.strip().split(':', 1)[1].strip())\n elif line.strip().startswith(\"Transmit Hash Policy\"):\n # No need of values in bracket:\n # Integer notification (0), (1), (2) of layer2, layer3+4, layer2+3 resp\n self.xmit_hash_policy = line.split(\":\", 1)[1].split()[0]\n elif line.strip().startswith(\"Currently Active Slave\"):\n self._active_slave = line.split(\":\", 1)[1].split()[0]\n elif line.strip().startswith(\"MII Status: \"):\n self._mii_status.append(line.strip().split(':', 1)[1].strip())\n elif line.strip().startswith(\"Link Failure Count: \"):\n self._slave_link_failure_count.append(line.strip().split(':', 1)[1].strip())\n elif line.strip().startswith(\"Speed: \"):\n self._slave_speed.append(line.strip().split(':', 1)[1].strip())\n elif line.strip().startswith(\"Duplex: \"):\n self._slave_duplex.append(line.strip().split(':', 1)[1].strip())\n elif line.strip().startswith(\"ARP Polling Interval (ms):\"):\n self._arp_polling_interval = line.strip().split(':', 1)[1].strip()\n elif line.strip().startswith(\"ARP IP target/s (n.n.n.n form):\"):\n self._arp_ip_target = line.strip().split(':', 1)[1].strip()\n elif line.strip().startswith(\"Primary Slave\"):\n self._primary_slave = line.split(\":\", 1)[1].split()[0]\n\n @property\n def bond_mode(self):\n \"\"\"Returns the bond mode number as a string, or if there is no\n known mapping to a number, the raw \"Bonding Mode\" value.\n ``None`` is returned if no \"Bonding Mode\" key is found.\n \"\"\"\n return self._bond_mode\n\n @property\n def partner_mac_address(self):\n \"\"\"Returns the value of the \"Partner Mac Address\" in the bond\n file if the key/value exists. If the key is not in the bond\n file, ``None`` is returned.\n \"\"\"\n return self._partner_mac_address\n\n @property\n def slave_interface(self):\n \"\"\"Returns all the slave interfaces of in the bond file wrapped\n a list if the key/value exists. If the key is not in the\n bond file, ``[]`` is returned.\n \"\"\"\n return self._slave_interface\n\n @property\n def aggregator_id(self):\n \"\"\"Returns all the aggregator id of in the bond file wrapped\n a list if the key/value exists. If the key is not in the\n bond file, ``[]`` is returned.\n \"\"\"\n return self._aggregator_id\n\n @property\n def active_slave(self):\n \"\"\"Returns the active slave of the \"Currently Active Slave\" in the bond\n file if key/value exists. If the key is not in the bond file, ``None``\n is returned.\n \"\"\"\n return self._active_slave\n\n @property\n def mii_status(self):\n \"\"\"Returns the master and all the slaves \"MII Status\" value in the bond file wrapped\n a list if the key/value exists. If the key is not in the\n bond file, ``[]`` is returned.\n \"\"\"\n return self._mii_status\n\n @property\n def slave_link_failure_count(self):\n \"\"\"Returns all the slaves \"Link Failure Count\" value in the bond file wrapped\n a list if the key/value exists. 
If the key is not in the\n bond file, ``[]`` is returned.\n \"\"\"\n return self._slave_link_failure_count\n\n @property\n def slave_speed(self):\n \"\"\"Returns all the slaves \"Speed\" value in the bond file wrapped\n a list if the key/value exists. If the key is not in the\n bond file, ``[]`` is returned.\n \"\"\"\n return self._slave_speed\n\n @property\n def slave_duplex(self):\n \"\"\"Returns all the slave \"Duplex\" value in the bond file wrapped\n a list if the key/value exists. If the key is not in the\n bond file, ``[]`` is returned.\n \"\"\"\n return self._slave_duplex\n\n @property\n def arp_polling_interval(self):\n \"\"\"Returns the arp polling interval as a string. ``None`` is returned\n if no \"ARP Polling Interval (ms)\" key is found.\n \"\"\"\n return self._arp_polling_interval\n\n @property\n def arp_ip_target(self):\n \"\"\"Returns the arp ip target as a string. ``None`` is returned\n if no \"ARP IP target/s (n.n.n.n form)\" key is found.\n \"\"\"\n return self._arp_ip_target\n\n @property\n def primary_slave(self):\n \"\"\"Returns the \"Primary Slave\" in the bond file if key/value exists.\n If the key is not in the bond file, ``None`` is returned.\n \"\"\"\n return self._primary_slave\n", "path": "insights/parsers/bond.py"}]} | 3,316 | 155 |
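For context on the insights-core row above: a minimal sketch of why its one-line patch swaps `.split()[0]` for `.strip()` when parsing the "Primary Slave" line. The sample line below is hypothetical and only illustrates that `.strip()` preserves the `primary_reselect` detail mentioned in the issue, which `.split()[0]` drops:

```python
# Hypothetical /proc/net/bonding "Primary Slave" line, for illustration only.
line = "Primary Slave: em3 (primary_reselect failure)"

# Behaviour before the patch: keep only the first token after the colon.
old_value = line.split(":", 1)[1].split()[0]   # -> 'em3'

# Behaviour after the patch: keep the full remainder, including primary_reselect.
new_value = line.split(":", 1)[1].strip()      # -> 'em3 (primary_reselect failure)'

print(old_value, "|", new_value)
```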
gh_patches_debug_34396 | rasdani/github-patches | git_diff | Project-MONAI__MONAI-1341 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CheckpointSaver: incorrect labelling of final model
**Describe the bug**
When using a `CheckpointSaver` among the validation handlers, the argument `save_final=True` results in every epoch's checkpoint being stored under `net_final_iteration=1.pt` when `final_filename=None`. I interpret the docs (`default to checkpoint_final_iteration=N.pt`) to mean that the file will be renamed after each completed epoch.
**To Reproduce**
add `save_final=True` to the `CheckpointHandler` in the Lung Segmentation Tutorial
**Expected behavior**
`net_final_iteration=1.pt` -> `net_final_iteration=2.pt` -> ... -> `net_final_iteration=MAX_EPOCHS.pt`
**Screenshots**
The logging function also prints incorrect iteration numbers:
```
INFO:ignite.engine.engine.SupervisedEvaluator:Train completed, saved final checkpoint: net_final_iteration=1.pt
...
INFO:ignite.engine.engine.SupervisedEvaluator:Train completed, saved final checkpoint: net_final_iteration=1.pt
...
INFO:ignite.engine.engine.SupervisedEvaluator:Train completed, saved final checkpoint: net_final_iteration=1.pt
```
**Environment**
Ensuring you use the relevant python executable, please paste the output of:
```
MONAI version: 0.3.0+95.g535561e
Python version: 3.8.6 | packaged by conda-forge | (default, Oct 7 2020, 18:42:56) [Clang 10.0.1 ]
OS version: Darwin (19.6.0)
Numpy version: 1.19.2
Pytorch version: 1.7.0
MONAI flags: HAS_EXT = False, USE_COMPILED = False
Optional dependencies:
Pytorch Ignite version: 0.4.2
Nibabel version: 3.2.0
scikit-image version: NOT INSTALLED or UNKNOWN VERSION.
Pillow version: 8.0.1
Tensorboard version: 2.4.0
gdown version: NOT INSTALLED or UNKNOWN VERSION.
TorchVision version: 0.8.1
ITK version: NOT INSTALLED or UNKNOWN VERSION.
tqdm version: 4.53.0
lmdb version: NOT INSTALLED or UNKNOWN VERSION.
For details about installing the optional dependencies, please visit:
https://docs.monai.io/en/latest/installation.html#installing-the-recommended-dependencies
```
**Additional context**
Using `SupervisedEvaluator` and `SupervisedTrainer`.
</issue>
<code>
[start of monai/handlers/checkpoint_saver.py]
1 # Copyright 2020 MONAI Consortium
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at
5 # http://www.apache.org/licenses/LICENSE-2.0
6 # Unless required by applicable law or agreed to in writing, software
7 # distributed under the License is distributed on an "AS IS" BASIS,
8 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9 # See the License for the specific language governing permissions and
10 # limitations under the License.
11
12 import logging
13 from typing import TYPE_CHECKING, Dict, Optional
14
15 from monai.utils import exact_version, optional_import
16
17 Events, _ = optional_import("ignite.engine", "0.4.2", exact_version, "Events")
18 Checkpoint, _ = optional_import("ignite.handlers", "0.4.2", exact_version, "Checkpoint")
19 BaseSaveHandler, _ = optional_import("ignite.handlers.checkpoint", "0.4.2", exact_version, "BaseSaveHandler")
20
21 if TYPE_CHECKING:
22 from ignite.engine import Engine
23 from ignite.handlers import DiskSaver
24 else:
25 Engine, _ = optional_import("ignite.engine", "0.4.2", exact_version, "Engine")
26 DiskSaver, _ = optional_import("ignite.handlers", "0.4.2", exact_version, "DiskSaver")
27
28
29 class CheckpointSaver:
30 """
31 CheckpointSaver acts as an Ignite handler to save checkpoint data into files.
32 It supports to save according to metrics result, epoch number, iteration number
33 and last model or exception.
34
35 Args:
36 save_dir: the target directory to save the checkpoints.
37 save_dict: source objects that save to the checkpoint. examples::
38
39 {'network': net, 'optimizer': optimizer, 'lr_scheduler': lr_scheduler}
40
41 name: identifier of logging.logger to use, if None, defaulting to ``engine.logger``.
42 file_prefix: prefix for the filenames to which objects will be saved.
43 save_final: whether to save checkpoint or session at final iteration or exception.
44 If checkpoints are to be saved when an exception is raised, put this handler before
45 `StatsHandler` in the handler list, because the logic with Ignite can only trigger
46 the first attached handler for `EXCEPTION_RAISED` event.
47 final_filename: set a fixed filename to save the final model if `save_final=True`.
48 If None, default to `checkpoint_final_iteration=N.pt`.
49 save_key_metric: whether to save checkpoint or session when the value of key_metric is
50 higher than all the previous values during training.keep 4 decimal places of metric,
51 checkpoint name is: {file_prefix}_key_metric=0.XXXX.pth.
52 key_metric_name: the name of key_metric in ignite metrics dictionary.
53 If None, use `engine.state.key_metric` instead.
54 key_metric_n_saved: save top N checkpoints or sessions, sorted by the value of key
55 metric in descending order.
56 key_metric_filename: set a fixed filename to set the best metric model, if not None,
57 `key_metric_n_saved` should be 1 and only keep the best metric model.
58 epoch_level: save checkpoint during training for every N epochs or every N iterations.
59 `True` is epoch level, `False` is iteration level.
60 save_interval: save checkpoint every N epochs, default is 0 to save no checkpoint.
61 n_saved: save latest N checkpoints of epoch level or iteration level, 'None' is to save all.
62
63 Note:
64 CheckpointHandler can be used during training, validation or evaluation.
65 example of saved files:
66
67 - checkpoint_iteration=400.pt
68 - checkpoint_iteration=800.pt
69 - checkpoint_epoch=1.pt
70 - checkpoint_final_iteration=1000.pt
71 - checkpoint_key_metric=0.9387.pt
72
73 """
74
75 def __init__(
76 self,
77 save_dir: str,
78 save_dict: Dict,
79 name: Optional[str] = None,
80 file_prefix: str = "",
81 save_final: bool = False,
82 final_filename: Optional[str] = None,
83 save_key_metric: bool = False,
84 key_metric_name: Optional[str] = None,
85 key_metric_n_saved: int = 1,
86 key_metric_filename: Optional[str] = None,
87 epoch_level: bool = True,
88 save_interval: int = 0,
89 n_saved: Optional[int] = None,
90 ) -> None:
91 assert save_dir is not None, "must provide directory to save the checkpoints."
92 self.save_dir = save_dir
93 assert save_dict is not None and len(save_dict) > 0, "must provide source objects to save."
94 self.save_dict = save_dict
95 self.logger = logging.getLogger(name)
96 self.epoch_level = epoch_level
97 self.save_interval = save_interval
98 self._final_checkpoint = self._key_metric_checkpoint = self._interval_checkpoint = None
99 self._name = name
100
101 class _DiskSaver(DiskSaver):
102 """
103 Enhance the DiskSaver to support fixed filename.
104
105 """
106
107 def __init__(self, dirname: str, filename: Optional[str] = None):
108 super().__init__(dirname=dirname, require_empty=False)
109 self.filename = filename
110
111 def __call__(self, checkpoint: Dict, filename: str, metadata: Optional[Dict] = None) -> None:
112 if self.filename is not None:
113 filename = self.filename
114 super().__call__(checkpoint=checkpoint, filename=filename, metadata=metadata)
115
116 def remove(self, filename: str) -> None:
117 if self.filename is not None:
118 filename = self.filename
119 super().remove(filename=filename)
120
121 if save_final:
122
123 def _final_func(engine: Engine):
124 return engine.state.iteration
125
126 self._final_checkpoint = Checkpoint(
127 to_save=self.save_dict,
128 save_handler=_DiskSaver(dirname=self.save_dir, filename=final_filename),
129 filename_prefix=file_prefix,
130 score_function=_final_func,
131 score_name="final_iteration",
132 )
133
134 if save_key_metric:
135
136 def _score_func(engine: Engine):
137 if isinstance(key_metric_name, str):
138 metric_name = key_metric_name
139 elif hasattr(engine.state, "key_metric_name") and isinstance(engine.state.key_metric_name, str):
140 metric_name = engine.state.key_metric_name
141 else:
142 raise ValueError(
143 f"Incompatible values: save_key_metric=True and key_metric_name={key_metric_name}."
144 )
145 return round(engine.state.metrics[metric_name], 4)
146
147 if key_metric_filename is not None and key_metric_n_saved > 1:
148 raise ValueError("if using fixed filename to save the best metric model, we should only save 1 model.")
149
150 self._key_metric_checkpoint = Checkpoint(
151 to_save=self.save_dict,
152 save_handler=_DiskSaver(dirname=self.save_dir, filename=key_metric_filename),
153 filename_prefix=file_prefix,
154 score_function=_score_func,
155 score_name="key_metric",
156 n_saved=key_metric_n_saved,
157 )
158
159 if save_interval > 0:
160
161 def _interval_func(engine: Engine):
162 return engine.state.epoch if self.epoch_level else engine.state.iteration
163
164 self._interval_checkpoint = Checkpoint(
165 to_save=self.save_dict,
166 save_handler=_DiskSaver(dirname=self.save_dir),
167 filename_prefix=file_prefix,
168 score_function=_interval_func,
169 score_name="epoch" if self.epoch_level else "iteration",
170 n_saved=n_saved,
171 )
172
173 def attach(self, engine: Engine) -> None:
174 """
175 Args:
176 engine: Ignite Engine, it can be a trainer, validator or evaluator.
177 """
178 if self._name is None:
179 self.logger = engine.logger
180 if self._final_checkpoint is not None:
181 engine.add_event_handler(Events.COMPLETED, self.completed)
182 engine.add_event_handler(Events.EXCEPTION_RAISED, self.exception_raised)
183 if self._key_metric_checkpoint is not None:
184 engine.add_event_handler(Events.EPOCH_COMPLETED, self.metrics_completed)
185 if self._interval_checkpoint is not None:
186 if self.epoch_level:
187 engine.add_event_handler(Events.EPOCH_COMPLETED(every=self.save_interval), self.interval_completed)
188 else:
189 engine.add_event_handler(Events.ITERATION_COMPLETED(every=self.save_interval), self.interval_completed)
190
191 def completed(self, engine: Engine) -> None:
192 """Callback for train or validation/evaluation completed Event.
193 Save final checkpoint if configure save_final is True.
194
195 Args:
196 engine: Ignite Engine, it can be a trainer, validator or evaluator.
197 """
198 assert callable(self._final_checkpoint), "Error: _final_checkpoint function not specified."
199 self._final_checkpoint(engine)
200 assert self.logger is not None
201 assert hasattr(self.logger, "info"), "Error, provided logger has not info attribute."
202 self.logger.info(f"Train completed, saved final checkpoint: {self._final_checkpoint.last_checkpoint}")
203
204 def exception_raised(self, engine: Engine, e: Exception) -> None:
205 """Callback for train or validation/evaluation exception raised Event.
206 Save current data as final checkpoint if configure save_final is True. This callback may be skipped
207 because the logic with Ignite can only trigger the first attached handler for `EXCEPTION_RAISED` event.
208
209 Args:
210 engine: Ignite Engine, it can be a trainer, validator or evaluator.
211 e: the exception caught in Ignite during engine.run().
212 """
213 assert callable(self._final_checkpoint), "Error: _final_checkpoint function not specified."
214 self._final_checkpoint(engine)
215 assert self.logger is not None
216 assert hasattr(self.logger, "info"), "Error, provided logger has not info attribute."
217 self.logger.info(f"Exception_raised, saved exception checkpoint: {self._final_checkpoint.last_checkpoint}")
218 raise e
219
220 def metrics_completed(self, engine: Engine) -> None:
221 """Callback to compare metrics and save models in train or validation when epoch completed.
222
223 Args:
224 engine: Ignite Engine, it can be a trainer, validator or evaluator.
225 """
226 assert callable(self._key_metric_checkpoint), "Error: _key_metric_checkpoint function not specified."
227 self._key_metric_checkpoint(engine)
228
229 def interval_completed(self, engine: Engine) -> None:
230 """Callback for train epoch/iteration completed Event.
231 Save checkpoint if configure save_interval = N
232
233 Args:
234 engine: Ignite Engine, it can be a trainer, validator or evaluator.
235 """
236 assert callable(self._interval_checkpoint), "Error: _interval_checkpoint function not specified."
237 self._interval_checkpoint(engine)
238 assert self.logger is not None
239 assert hasattr(self.logger, "info"), "Error, provided logger has not info attribute."
240 if self.epoch_level:
241 self.logger.info(f"Saved checkpoint at epoch: {engine.state.epoch}")
242 else:
243 self.logger.info(f"Saved checkpoint at iteration: {engine.state.iteration}")
244
[end of monai/handlers/checkpoint_saver.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/monai/handlers/checkpoint_saver.py b/monai/handlers/checkpoint_saver.py
--- a/monai/handlers/checkpoint_saver.py
+++ b/monai/handlers/checkpoint_saver.py
@@ -188,6 +188,13 @@
else:
engine.add_event_handler(Events.ITERATION_COMPLETED(every=self.save_interval), self.interval_completed)
+ def _delete_previous_final_ckpt(self):
+ saved = self._final_checkpoint._saved
+ if len(saved) > 0:
+ item = saved.pop(0)
+ self._final_checkpoint.save_handler.remove(item.filename)
+ self.logger.info(f"Deleted previous saved final checkpoint: {item.filename}")
+
def completed(self, engine: Engine) -> None:
"""Callback for train or validation/evaluation completed Event.
Save final checkpoint if configure save_final is True.
@@ -196,6 +203,8 @@
engine: Ignite Engine, it can be a trainer, validator or evaluator.
"""
assert callable(self._final_checkpoint), "Error: _final_checkpoint function not specified."
+ # delete previous saved final checkpoint if existing
+ self._delete_previous_final_ckpt()
self._final_checkpoint(engine)
assert self.logger is not None
assert hasattr(self.logger, "info"), "Error, provided logger has not info attribute."
@@ -211,6 +220,8 @@
e: the exception caught in Ignite during engine.run().
"""
assert callable(self._final_checkpoint), "Error: _final_checkpoint function not specified."
+ # delete previous saved final checkpoint if existing
+ self._delete_previous_final_ckpt()
self._final_checkpoint(engine)
assert self.logger is not None
assert hasattr(self.logger, "info"), "Error, provided logger has not info attribute."
| {"golden_diff": "diff --git a/monai/handlers/checkpoint_saver.py b/monai/handlers/checkpoint_saver.py\n--- a/monai/handlers/checkpoint_saver.py\n+++ b/monai/handlers/checkpoint_saver.py\n@@ -188,6 +188,13 @@\n else:\n engine.add_event_handler(Events.ITERATION_COMPLETED(every=self.save_interval), self.interval_completed)\n \n+ def _delete_previous_final_ckpt(self):\n+ saved = self._final_checkpoint._saved\n+ if len(saved) > 0:\n+ item = saved.pop(0)\n+ self._final_checkpoint.save_handler.remove(item.filename)\n+ self.logger.info(f\"Deleted previous saved final checkpoint: {item.filename}\")\n+\n def completed(self, engine: Engine) -> None:\n \"\"\"Callback for train or validation/evaluation completed Event.\n Save final checkpoint if configure save_final is True.\n@@ -196,6 +203,8 @@\n engine: Ignite Engine, it can be a trainer, validator or evaluator.\n \"\"\"\n assert callable(self._final_checkpoint), \"Error: _final_checkpoint function not specified.\"\n+ # delete previous saved final checkpoint if existing\n+ self._delete_previous_final_ckpt()\n self._final_checkpoint(engine)\n assert self.logger is not None\n assert hasattr(self.logger, \"info\"), \"Error, provided logger has not info attribute.\"\n@@ -211,6 +220,8 @@\n e: the exception caught in Ignite during engine.run().\n \"\"\"\n assert callable(self._final_checkpoint), \"Error: _final_checkpoint function not specified.\"\n+ # delete previous saved final checkpoint if existing\n+ self._delete_previous_final_ckpt()\n self._final_checkpoint(engine)\n assert self.logger is not None\n assert hasattr(self.logger, \"info\"), \"Error, provided logger has not info attribute.\"\n", "issue": "CheckpointSaver: incorrect labelling of final model\n**Describe the bug**\r\nWhen using a `CheckpointSaver` among the validation handlers, the argument `save_final=True` results in every epoch being stored under `net_final_iteration=1.pt` when `final_filename=None`. I interpret the docs (`default to checkpoint_final_iteration=N.pt`) that the file will be renamed after each completed epoch.\r\n\r\n**To Reproduce**\r\nadd `save_final=True` to the `CheckpointHandler` in the Lung Segmentation Tutorial\r\n\r\n**Expected behavior**\r\n`net_final_iteration=1.pt` -> `net_final_iteration=2.pt` -> ... 
-> `net_final_iteration=MAX_EPOCHS.pt`\r\n\r\n**Screenshots**\r\nThe logging function also prints incorrect iteration numbers:\r\n\r\n```\r\nINFO:ignite.engine.engine.SupervisedEvaluator:Train completed, saved final checkpoint: net_final_iteration=1.pt\r\n...\r\nINFO:ignite.engine.engine.SupervisedEvaluator:Train completed, saved final checkpoint: net_final_iteration=1.pt\r\n...\r\nINFO:ignite.engine.engine.SupervisedEvaluator:Train completed, saved final checkpoint: net_final_iteration=1.pt\r\n```\r\n\r\n**Environment**\r\n\r\nEnsuring you use the relevant python executable, please paste the output of:\r\n\r\n```\r\nMONAI version: 0.3.0+95.g535561e\r\nPython version: 3.8.6 | packaged by conda-forge | (default, Oct 7 2020, 18:42:56) [Clang 10.0.1 ]\r\nOS version: Darwin (19.6.0)\r\nNumpy version: 1.19.2\r\nPytorch version: 1.7.0\r\nMONAI flags: HAS_EXT = False, USE_COMPILED = False\r\n\r\nOptional dependencies:\r\nPytorch Ignite version: 0.4.2\r\nNibabel version: 3.2.0\r\nscikit-image version: NOT INSTALLED or UNKNOWN VERSION.\r\nPillow version: 8.0.1\r\nTensorboard version: 2.4.0\r\ngdown version: NOT INSTALLED or UNKNOWN VERSION.\r\nTorchVision version: 0.8.1\r\nITK version: NOT INSTALLED or UNKNOWN VERSION.\r\ntqdm version: 4.53.0\r\nlmdb version: NOT INSTALLED or UNKNOWN VERSION.\r\n\r\nFor details about installing the optional dependencies, please visit:\r\n https://docs.monai.io/en/latest/installation.html#installing-the-recommended-dependencies\r\n```\r\n\r\n**Additional context**\r\nUsing `SupervisedEvaluator` and `SupervisedTrainer`.\n", "before_files": [{"content": "# Copyright 2020 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nfrom typing import TYPE_CHECKING, Dict, Optional\n\nfrom monai.utils import exact_version, optional_import\n\nEvents, _ = optional_import(\"ignite.engine\", \"0.4.2\", exact_version, \"Events\")\nCheckpoint, _ = optional_import(\"ignite.handlers\", \"0.4.2\", exact_version, \"Checkpoint\")\nBaseSaveHandler, _ = optional_import(\"ignite.handlers.checkpoint\", \"0.4.2\", exact_version, \"BaseSaveHandler\")\n\nif TYPE_CHECKING:\n from ignite.engine import Engine\n from ignite.handlers import DiskSaver\nelse:\n Engine, _ = optional_import(\"ignite.engine\", \"0.4.2\", exact_version, \"Engine\")\n DiskSaver, _ = optional_import(\"ignite.handlers\", \"0.4.2\", exact_version, \"DiskSaver\")\n\n\nclass CheckpointSaver:\n \"\"\"\n CheckpointSaver acts as an Ignite handler to save checkpoint data into files.\n It supports to save according to metrics result, epoch number, iteration number\n and last model or exception.\n\n Args:\n save_dir: the target directory to save the checkpoints.\n save_dict: source objects that save to the checkpoint. 
examples::\n\n {'network': net, 'optimizer': optimizer, 'lr_scheduler': lr_scheduler}\n\n name: identifier of logging.logger to use, if None, defaulting to ``engine.logger``.\n file_prefix: prefix for the filenames to which objects will be saved.\n save_final: whether to save checkpoint or session at final iteration or exception.\n If checkpoints are to be saved when an exception is raised, put this handler before\n `StatsHandler` in the handler list, because the logic with Ignite can only trigger\n the first attached handler for `EXCEPTION_RAISED` event.\n final_filename: set a fixed filename to save the final model if `save_final=True`.\n If None, default to `checkpoint_final_iteration=N.pt`.\n save_key_metric: whether to save checkpoint or session when the value of key_metric is\n higher than all the previous values during training.keep 4 decimal places of metric,\n checkpoint name is: {file_prefix}_key_metric=0.XXXX.pth.\n key_metric_name: the name of key_metric in ignite metrics dictionary.\n If None, use `engine.state.key_metric` instead.\n key_metric_n_saved: save top N checkpoints or sessions, sorted by the value of key\n metric in descending order.\n key_metric_filename: set a fixed filename to set the best metric model, if not None,\n `key_metric_n_saved` should be 1 and only keep the best metric model.\n epoch_level: save checkpoint during training for every N epochs or every N iterations.\n `True` is epoch level, `False` is iteration level.\n save_interval: save checkpoint every N epochs, default is 0 to save no checkpoint.\n n_saved: save latest N checkpoints of epoch level or iteration level, 'None' is to save all.\n\n Note:\n CheckpointHandler can be used during training, validation or evaluation.\n example of saved files:\n\n - checkpoint_iteration=400.pt\n - checkpoint_iteration=800.pt\n - checkpoint_epoch=1.pt\n - checkpoint_final_iteration=1000.pt\n - checkpoint_key_metric=0.9387.pt\n\n \"\"\"\n\n def __init__(\n self,\n save_dir: str,\n save_dict: Dict,\n name: Optional[str] = None,\n file_prefix: str = \"\",\n save_final: bool = False,\n final_filename: Optional[str] = None,\n save_key_metric: bool = False,\n key_metric_name: Optional[str] = None,\n key_metric_n_saved: int = 1,\n key_metric_filename: Optional[str] = None,\n epoch_level: bool = True,\n save_interval: int = 0,\n n_saved: Optional[int] = None,\n ) -> None:\n assert save_dir is not None, \"must provide directory to save the checkpoints.\"\n self.save_dir = save_dir\n assert save_dict is not None and len(save_dict) > 0, \"must provide source objects to save.\"\n self.save_dict = save_dict\n self.logger = logging.getLogger(name)\n self.epoch_level = epoch_level\n self.save_interval = save_interval\n self._final_checkpoint = self._key_metric_checkpoint = self._interval_checkpoint = None\n self._name = name\n\n class _DiskSaver(DiskSaver):\n \"\"\"\n Enhance the DiskSaver to support fixed filename.\n\n \"\"\"\n\n def __init__(self, dirname: str, filename: Optional[str] = None):\n super().__init__(dirname=dirname, require_empty=False)\n self.filename = filename\n\n def __call__(self, checkpoint: Dict, filename: str, metadata: Optional[Dict] = None) -> None:\n if self.filename is not None:\n filename = self.filename\n super().__call__(checkpoint=checkpoint, filename=filename, metadata=metadata)\n\n def remove(self, filename: str) -> None:\n if self.filename is not None:\n filename = self.filename\n super().remove(filename=filename)\n\n if save_final:\n\n def _final_func(engine: Engine):\n return 
engine.state.iteration\n\n self._final_checkpoint = Checkpoint(\n to_save=self.save_dict,\n save_handler=_DiskSaver(dirname=self.save_dir, filename=final_filename),\n filename_prefix=file_prefix,\n score_function=_final_func,\n score_name=\"final_iteration\",\n )\n\n if save_key_metric:\n\n def _score_func(engine: Engine):\n if isinstance(key_metric_name, str):\n metric_name = key_metric_name\n elif hasattr(engine.state, \"key_metric_name\") and isinstance(engine.state.key_metric_name, str):\n metric_name = engine.state.key_metric_name\n else:\n raise ValueError(\n f\"Incompatible values: save_key_metric=True and key_metric_name={key_metric_name}.\"\n )\n return round(engine.state.metrics[metric_name], 4)\n\n if key_metric_filename is not None and key_metric_n_saved > 1:\n raise ValueError(\"if using fixed filename to save the best metric model, we should only save 1 model.\")\n\n self._key_metric_checkpoint = Checkpoint(\n to_save=self.save_dict,\n save_handler=_DiskSaver(dirname=self.save_dir, filename=key_metric_filename),\n filename_prefix=file_prefix,\n score_function=_score_func,\n score_name=\"key_metric\",\n n_saved=key_metric_n_saved,\n )\n\n if save_interval > 0:\n\n def _interval_func(engine: Engine):\n return engine.state.epoch if self.epoch_level else engine.state.iteration\n\n self._interval_checkpoint = Checkpoint(\n to_save=self.save_dict,\n save_handler=_DiskSaver(dirname=self.save_dir),\n filename_prefix=file_prefix,\n score_function=_interval_func,\n score_name=\"epoch\" if self.epoch_level else \"iteration\",\n n_saved=n_saved,\n )\n\n def attach(self, engine: Engine) -> None:\n \"\"\"\n Args:\n engine: Ignite Engine, it can be a trainer, validator or evaluator.\n \"\"\"\n if self._name is None:\n self.logger = engine.logger\n if self._final_checkpoint is not None:\n engine.add_event_handler(Events.COMPLETED, self.completed)\n engine.add_event_handler(Events.EXCEPTION_RAISED, self.exception_raised)\n if self._key_metric_checkpoint is not None:\n engine.add_event_handler(Events.EPOCH_COMPLETED, self.metrics_completed)\n if self._interval_checkpoint is not None:\n if self.epoch_level:\n engine.add_event_handler(Events.EPOCH_COMPLETED(every=self.save_interval), self.interval_completed)\n else:\n engine.add_event_handler(Events.ITERATION_COMPLETED(every=self.save_interval), self.interval_completed)\n\n def completed(self, engine: Engine) -> None:\n \"\"\"Callback for train or validation/evaluation completed Event.\n Save final checkpoint if configure save_final is True.\n\n Args:\n engine: Ignite Engine, it can be a trainer, validator or evaluator.\n \"\"\"\n assert callable(self._final_checkpoint), \"Error: _final_checkpoint function not specified.\"\n self._final_checkpoint(engine)\n assert self.logger is not None\n assert hasattr(self.logger, \"info\"), \"Error, provided logger has not info attribute.\"\n self.logger.info(f\"Train completed, saved final checkpoint: {self._final_checkpoint.last_checkpoint}\")\n\n def exception_raised(self, engine: Engine, e: Exception) -> None:\n \"\"\"Callback for train or validation/evaluation exception raised Event.\n Save current data as final checkpoint if configure save_final is True. 
This callback may be skipped\n because the logic with Ignite can only trigger the first attached handler for `EXCEPTION_RAISED` event.\n\n Args:\n engine: Ignite Engine, it can be a trainer, validator or evaluator.\n e: the exception caught in Ignite during engine.run().\n \"\"\"\n assert callable(self._final_checkpoint), \"Error: _final_checkpoint function not specified.\"\n self._final_checkpoint(engine)\n assert self.logger is not None\n assert hasattr(self.logger, \"info\"), \"Error, provided logger has not info attribute.\"\n self.logger.info(f\"Exception_raised, saved exception checkpoint: {self._final_checkpoint.last_checkpoint}\")\n raise e\n\n def metrics_completed(self, engine: Engine) -> None:\n \"\"\"Callback to compare metrics and save models in train or validation when epoch completed.\n\n Args:\n engine: Ignite Engine, it can be a trainer, validator or evaluator.\n \"\"\"\n assert callable(self._key_metric_checkpoint), \"Error: _key_metric_checkpoint function not specified.\"\n self._key_metric_checkpoint(engine)\n\n def interval_completed(self, engine: Engine) -> None:\n \"\"\"Callback for train epoch/iteration completed Event.\n Save checkpoint if configure save_interval = N\n\n Args:\n engine: Ignite Engine, it can be a trainer, validator or evaluator.\n \"\"\"\n assert callable(self._interval_checkpoint), \"Error: _interval_checkpoint function not specified.\"\n self._interval_checkpoint(engine)\n assert self.logger is not None\n assert hasattr(self.logger, \"info\"), \"Error, provided logger has not info attribute.\"\n if self.epoch_level:\n self.logger.info(f\"Saved checkpoint at epoch: {engine.state.epoch}\")\n else:\n self.logger.info(f\"Saved checkpoint at iteration: {engine.state.iteration}\")\n", "path": "monai/handlers/checkpoint_saver.py"}]} | 4,084 | 410 |
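For context on the MONAI row above: a minimal sketch of the behaviour its golden diff adds. Before a new `*_final_iteration=N.pt` file is written, the previously saved final checkpoint is removed, so `net_final_iteration=1.pt` gives way to `net_final_iteration=2.pt` and so on. The helper below is a simplified stand-in that assumes, as the diff does, that Ignite's `Checkpoint` tracks saved items in an internal `_saved` list whose entries expose a `filename` attribute:

```python
def delete_previous_final_ckpt(checkpoint) -> None:
    """Remove the previously saved final checkpoint tracked by an ignite Checkpoint.

    `checkpoint` is assumed to behave like the ignite.handlers.Checkpoint used in
    the diff: an internal `_saved` list of items with a `filename` attribute, plus
    a `save_handler` offering `remove(filename)`.
    """
    saved = checkpoint._saved
    if len(saved) > 0:
        item = saved.pop(0)
        checkpoint.save_handler.remove(item.filename)
        print(f"Deleted previous saved final checkpoint: {item.filename}")
```

In the patch this logic runs right before `self._final_checkpoint(engine)` in both `completed` and `exception_raised`, which is what keeps only the latest final-iteration file on disk.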